diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go
index b02970751..7ddfd5c7b 100644
--- a/apis/flowcollector/v1beta1/flowcollector_types.go
+++ b/apis/flowcollector/v1beta1/flowcollector_types.go
@@ -193,70 +193,86 @@ type EBPFMetrics struct {
DisableAlerts []EBPFAgentAlert `json:"disableAlerts"`
}
-// `EBPFFlowFilter` defines the desired eBPF agent configuration regarding flow filtering
-type EBPFFlowFilter struct {
- // Set `enable` to `true` to enable eBPF flow filtering feature.
- Enable *bool `json:"enable,omitempty"`
-
- // CIDR defines the IP CIDR to filter flows by.
- // Example: 10.10.10.0/24 or 100:100:100:100::/64
+// `EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding a flow filtering rule.
+type EBPFFlowFilterRule struct {
+ // `cidr` defines the IP CIDR to filter flows by.
+ // Examples: `10.10.10.0/24` or `100:100:100:100::/64`
CIDR string `json:"cidr,omitempty"`
- // Action defines the action to perform on the flows that match the filter.
+ // `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.
// +kubebuilder:validation:Enum:="Accept";"Reject"
Action string `json:"action,omitempty"`
- // Protocol defines the protocol to filter flows by.
+ // `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.
// +kubebuilder:validation:Enum:="TCP";"UDP";"ICMP";"ICMPv6";"SCTP"
// +optional
Protocol string `json:"protocol,omitempty"`
- // Direction defines the direction to filter flows by.
+ // `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.
// +kubebuilder:validation:Enum:="Ingress";"Egress"
// +optional
Direction string `json:"direction,omitempty"`
- // `tcpFlags` defines the TCP flags to filter flows by.
+ // `tcpFlags` optionally defines TCP flags to filter flows by.
+ // In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
// +kubebuilder:validation:Enum:="SYN";"SYN-ACK";"ACK";"FIN";"RST";"URG";"ECE";"CWR";"FIN-ACK";"RST-ACK"
// +optional
TCPFlags string `json:"tcpFlags,omitempty"`
- // SourcePorts defines the source ports to filter flows by.
- // To filter a single port, set a single port as an integer value. For example, sourcePorts: 80.
- // To filter a range of ports, use a "start-end" range in string format. For example, sourcePorts: "80-100".
+ // `sourcePorts` optionally defines the source ports to filter flows by.
+ // To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+ // To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
// To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
// +optional
SourcePorts intstr.IntOrString `json:"sourcePorts,omitempty"`
- // DestPorts defines the destination ports to filter flows by.
- // To filter a single port, set a single port as an integer value. For example, destPorts: 80.
- // To filter a range of ports, use a "start-end" range in string format. For example, destPorts: "80-100".
+ // `destPorts` optionally defines the destination ports to filter flows by.
+ // To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+ // To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
// To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
// +optional
DestPorts intstr.IntOrString `json:"destPorts,omitempty"`
- // Ports defines the ports to filter flows by. it can be user for either source or destination ports.
- // To filter a single port, set a single port as an integer value. For example, ports: 80.
- // To filter a range of ports, use a "start-end" range in string format. For example, ports: "80-100".
+ // `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+ // To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+ // To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
// To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
Ports intstr.IntOrString `json:"ports,omitempty"`
- // PeerIP defines the IP address to filter flows by.
- // Example: 10.10.10.10
+ // `peerIP` optionally defines the remote IP address to filter flows by.
+ // Example: `10.10.10.10`.
// +optional
PeerIP string `json:"peerIP,omitempty"`
- // ICMPType defines the ICMP type to filter flows by.
+ // `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.
// +optional
- ICMPType *int `json:"icmpType,omitempty"`
+ ICMPCode *int `json:"icmpCode,omitempty"`
- // ICMPCode defines the ICMP code to filter flows by.
+ // `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.
// +optional
- ICMPCode *int `json:"icmpCode,omitempty"`
+ ICMPType *int `json:"icmpType,omitempty"`
- // `pktDrops`, to filter flows with packet drops
+ // `pktDrops` optionally filters only flows containing packet drops.
// +optional
PktDrops *bool `json:"pktDrops,omitempty"`
+
+	// `sampling` defines the sampling rate for the matched flows.
+ // +optional
+ Sampling *uint32 `json:"sampling,omitempty"`
+}
+
+// `EBPFFlowFilter` defines the desired eBPF agent configuration regarding flow filtering.
+type EBPFFlowFilter struct {
+ // Set `enable` to `true` to enable the eBPF flow filtering feature.
+ Enable *bool `json:"enable,omitempty"`
+
+	// [deprecated (*)] This setting is deprecated: use `rules` instead.
+ EBPFFlowFilterRule `json:",inline"`
+
+	// `rules` defines a list of eBPF agent flow filtering rules.
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:validation:MaxItems:=16
+ FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"`
}
// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information
diff --git a/apis/flowcollector/v1beta1/zz_generated.conversion.go b/apis/flowcollector/v1beta1/zz_generated.conversion.go
index 7f681eac3..784d3a19e 100644
--- a/apis/flowcollector/v1beta1/zz_generated.conversion.go
+++ b/apis/flowcollector/v1beta1/zz_generated.conversion.go
@@ -88,6 +88,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
+ if err := s.AddGeneratedConversionFunc((*EBPFFlowFilterRule)(nil), (*v1beta2.EBPFFlowFilterRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(a.(*EBPFFlowFilterRule), b.(*v1beta2.EBPFFlowFilterRule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EBPFFlowFilterRule)(nil), (*EBPFFlowFilterRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(a.(*v1beta2.EBPFFlowFilterRule), b.(*EBPFFlowFilterRule), scope)
+ }); err != nil {
+ return err
+ }
if err := s.AddGeneratedConversionFunc((*EBPFMetrics)(nil), (*v1beta2.EBPFMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_EBPFMetrics_To_v1beta2_EBPFMetrics(a.(*EBPFMetrics), b.(*v1beta2.EBPFMetrics), scope)
}); err != nil {
@@ -517,6 +527,33 @@ func Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(
func autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFilter, out *v1beta2.EBPFFlowFilter, s conversion.Scope) error {
out.Enable = (*bool)(unsafe.Pointer(in.Enable))
+ if err := Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil {
+ return err
+ }
+ out.FlowFilterRules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules))
+ return nil
+}
+
+// Convert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter is an autogenerated conversion function.
+func Convert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFilter, out *v1beta2.EBPFFlowFilter, s conversion.Scope) error {
+ return autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in, out, s)
+}
+
+func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EBPFFlowFilter, out *EBPFFlowFilter, s conversion.Scope) error {
+ out.Enable = (*bool)(unsafe.Pointer(in.Enable))
+ if err := Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil {
+ return err
+ }
+ out.FlowFilterRules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules))
+ return nil
+}
+
+// Convert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter is an autogenerated conversion function.
+func Convert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EBPFFlowFilter, out *EBPFFlowFilter, s conversion.Scope) error {
+ return autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in, out, s)
+}
+
+func autoConvert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(in *EBPFFlowFilterRule, out *v1beta2.EBPFFlowFilterRule, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Action = in.Action
out.Protocol = in.Protocol
@@ -526,19 +563,19 @@ func autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFi
out.DestPorts = in.DestPorts
out.Ports = in.Ports
out.PeerIP = in.PeerIP
- out.ICMPType = (*int)(unsafe.Pointer(in.ICMPType))
out.ICMPCode = (*int)(unsafe.Pointer(in.ICMPCode))
+ out.ICMPType = (*int)(unsafe.Pointer(in.ICMPType))
out.PktDrops = (*bool)(unsafe.Pointer(in.PktDrops))
+ out.Sampling = (*uint32)(unsafe.Pointer(in.Sampling))
return nil
}
-// Convert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter is an autogenerated conversion function.
-func Convert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFilter, out *v1beta2.EBPFFlowFilter, s conversion.Scope) error {
- return autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in, out, s)
+// Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule is an autogenerated conversion function.
+func Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(in *EBPFFlowFilterRule, out *v1beta2.EBPFFlowFilterRule, s conversion.Scope) error {
+ return autoConvert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(in, out, s)
}
-func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EBPFFlowFilter, out *EBPFFlowFilter, s conversion.Scope) error {
- out.Enable = (*bool)(unsafe.Pointer(in.Enable))
+func autoConvert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(in *v1beta2.EBPFFlowFilterRule, out *EBPFFlowFilterRule, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Action = in.Action
out.Protocol = in.Protocol
@@ -551,12 +588,13 @@ func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EB
out.ICMPCode = (*int)(unsafe.Pointer(in.ICMPCode))
out.ICMPType = (*int)(unsafe.Pointer(in.ICMPType))
out.PktDrops = (*bool)(unsafe.Pointer(in.PktDrops))
+ out.Sampling = (*uint32)(unsafe.Pointer(in.Sampling))
return nil
}
-// Convert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter is an autogenerated conversion function.
-func Convert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EBPFFlowFilter, out *EBPFFlowFilter, s conversion.Scope) error {
- return autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in, out, s)
+// Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule is an autogenerated conversion function.
+func Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(in *v1beta2.EBPFFlowFilterRule, out *EBPFFlowFilterRule, s conversion.Scope) error {
+ return autoConvert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(in, out, s)
}
func autoConvert_v1beta1_EBPFMetrics_To_v1beta2_EBPFMetrics(in *EBPFMetrics, out *v1beta2.EBPFMetrics, s conversion.Scope) error {
diff --git a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
index b89109301..e79aca3d5 100644
--- a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
+++ b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
@@ -130,16 +130,39 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) {
*out = new(bool)
**out = **in
}
+ in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule)
+ if in.FlowFilterRules != nil {
+ in, out := &in.FlowFilterRules, &out.FlowFilterRules
+ *out = make([]EBPFFlowFilterRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilter.
+func (in *EBPFFlowFilter) DeepCopy() *EBPFFlowFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(EBPFFlowFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EBPFFlowFilterRule) DeepCopyInto(out *EBPFFlowFilterRule) {
+ *out = *in
out.SourcePorts = in.SourcePorts
out.DestPorts = in.DestPorts
out.Ports = in.Ports
- if in.ICMPType != nil {
- in, out := &in.ICMPType, &out.ICMPType
+ if in.ICMPCode != nil {
+ in, out := &in.ICMPCode, &out.ICMPCode
*out = new(int)
**out = **in
}
- if in.ICMPCode != nil {
- in, out := &in.ICMPCode, &out.ICMPCode
+ if in.ICMPType != nil {
+ in, out := &in.ICMPType, &out.ICMPType
*out = new(int)
**out = **in
}
@@ -148,14 +171,19 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) {
*out = new(bool)
**out = **in
}
+ if in.Sampling != nil {
+ in, out := &in.Sampling, &out.Sampling
+ *out = new(uint32)
+ **out = **in
+ }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilter.
-func (in *EBPFFlowFilter) DeepCopy() *EBPFFlowFilter {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilterRule.
+func (in *EBPFFlowFilterRule) DeepCopy() *EBPFFlowFilterRule {
if in == nil {
return nil
}
- out := new(EBPFFlowFilter)
+ out := new(EBPFFlowFilterRule)
in.DeepCopyInto(out)
return out
}
diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go
index 32f100a17..02057da06 100644
--- a/apis/flowcollector/v1beta2/flowcollector_types.go
+++ b/apis/flowcollector/v1beta2/flowcollector_types.go
@@ -216,11 +216,8 @@ type EBPFMetrics struct {
DisableAlerts []EBPFAgentAlert `json:"disableAlerts"`
}
-// `EBPFFlowFilter` defines the desired eBPF agent configuration regarding flow filtering.
-type EBPFFlowFilter struct {
- // Set `enable` to `true` to enable the eBPF flow filtering feature.
- Enable *bool `json:"enable,omitempty"`
-
+// `EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding a flow filtering rule.
+type EBPFFlowFilterRule struct {
// `cidr` defines the IP CIDR to filter flows by.
// Examples: `10.10.10.0/24` or `100:100:100:100::/64`
CIDR string `json:"cidr,omitempty"`
@@ -281,6 +278,24 @@ type EBPFFlowFilter struct {
// `pktDrops` optionally filters only flows containing packet drops.
// +optional
PktDrops *bool `json:"pktDrops,omitempty"`
+
+	// `sampling` defines the sampling rate for the matched flows.
+ // +optional
+ Sampling *uint32 `json:"sampling,omitempty"`
+}
+
+// `EBPFFlowFilter` defines the desired eBPF agent configuration regarding flow filtering.
+type EBPFFlowFilter struct {
+ // Set `enable` to `true` to enable the eBPF flow filtering feature.
+ Enable *bool `json:"enable,omitempty"`
+
+	// [deprecated (*)] This setting is deprecated: use `rules` instead.
+ EBPFFlowFilterRule `json:",inline"`
+
+	// `rules` defines a list of eBPF agent flow filtering rules.
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:validation:MaxItems:=16
+ FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"`
}
// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information
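
Note: to make the reworked API shape concrete, here is a small illustrative Go sketch (not part of the patch) that builds the new filter with per-rule sampling; it uses only types and helpers that appear elsewhere in this change, and the values are examples only.

package main

import (
	"fmt"

	flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"
)

func main() {
	// The deprecated inline fields are left empty; filtering is expressed as a list of rules.
	filter := flowslatest.EBPFFlowFilter{
		Enable: ptr.To(true),
		FlowFilterRules: []flowslatest.EBPFFlowFilterRule{
			{
				Action:   "Accept",
				CIDR:     "10.128.0.1/24",
				Protocol: "TCP",
				Ports:    intstr.FromInt(6443),
				Sampling: ptr.To(uint32(10)),
			},
			{
				Action:   "Accept",
				CIDR:     "10.129.0.1/24",
				Protocol: "UDP",
				Ports:    intstr.FromInt(53),
				Sampling: ptr.To(uint32(20)),
			},
		},
	}
	fmt.Printf("%d flow filtering rule(s) configured\n", len(filter.FlowFilterRules))
}
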
diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
index fb79a02aa..a2b6f301f 100644
--- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
+++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "net"
"slices"
"strconv"
"strings"
@@ -106,32 +107,91 @@ func (r *FlowCollector) validateAgent(_ context.Context, fc *FlowCollector) (adm
}
var errs []error
if fc.Spec.Agent.EBPF.FlowFilter != nil && fc.Spec.Agent.EBPF.FlowFilter.Enable != nil && *fc.Spec.Agent.EBPF.FlowFilter.Enable {
- hasPorts := fc.Spec.Agent.EBPF.FlowFilter.Ports.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.Ports.StrVal != ""
- if hasPorts {
- if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.Ports); err != nil {
- errs = append(errs, err)
- }
+ for i := range fc.Spec.Agent.EBPF.FlowFilter.FlowFilterRules {
+ errs = append(errs, validateFilter(&fc.Spec.Agent.EBPF.FlowFilter.FlowFilterRules[i])...)
}
- hasSrcPorts := fc.Spec.Agent.EBPF.FlowFilter.SourcePorts.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.SourcePorts.StrVal != ""
- if hasSrcPorts {
- if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.SourcePorts); err != nil {
- errs = append(errs, err)
- }
+ errs = append(errs, validateFilter(fc.Spec.Agent.EBPF.FlowFilter)...)
+ }
+
+ return warnings, errs
+}
+
+type filter interface {
+ getCIDR() string
+ getPorts() intstr.IntOrString
+ getSrcPorts() intstr.IntOrString
+ getDstPorts() intstr.IntOrString
+}
+
+func (f *EBPFFlowFilter) getCIDR() string {
+ return f.CIDR
+}
+
+func (f *EBPFFlowFilter) getPorts() intstr.IntOrString {
+ return f.Ports
+}
+
+func (f *EBPFFlowFilter) getSrcPorts() intstr.IntOrString {
+ return f.SourcePorts
+}
+
+func (f *EBPFFlowFilter) getDstPorts() intstr.IntOrString {
+ return f.DestPorts
+}
+
+func (f *EBPFFlowFilterRule) getCIDR() string {
+ return f.CIDR
+}
+
+func (f *EBPFFlowFilterRule) getPorts() intstr.IntOrString {
+ return f.Ports
+}
+
+func (f *EBPFFlowFilterRule) getSrcPorts() intstr.IntOrString {
+ return f.SourcePorts
+}
+
+func (f *EBPFFlowFilterRule) getDstPorts() intstr.IntOrString {
+ return f.DestPorts
+}
+
+func validateFilter[T filter](f T) []error {
+ var errs []error
+
+ cidr := f.getCIDR()
+ if cidr != "" {
+ if _, _, err := net.ParseCIDR(cidr); err != nil {
+ errs = append(errs, err)
}
- hasDstPorts := fc.Spec.Agent.EBPF.FlowFilter.DestPorts.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.DestPorts.StrVal != ""
- if hasDstPorts {
- if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.DestPorts); err != nil {
- errs = append(errs, err)
- }
+ }
+ ports := f.getPorts()
+ hasPorts := ports.IntVal > 0 || ports.StrVal != ""
+ if hasPorts {
+ if err := validateFilterPortConfig(ports); err != nil {
+ errs = append(errs, err)
}
- if hasPorts && hasSrcPorts {
- errs = append(errs, errors.New("cannot configure agent filter with ports and sourcePorts, they are mutually exclusive"))
+ }
+ srcPorts := f.getSrcPorts()
+ hasSrcPorts := srcPorts.IntVal > 0 || srcPorts.StrVal != ""
+ if hasSrcPorts {
+ if err := validateFilterPortConfig(srcPorts); err != nil {
+ errs = append(errs, err)
}
- if hasPorts && hasDstPorts {
- errs = append(errs, errors.New("cannot configure agent filter with ports and destPorts, they are mutually exclusive"))
+ }
+ dstPorts := f.getDstPorts()
+ hasDstPorts := dstPorts.IntVal > 0 || dstPorts.StrVal != ""
+ if hasDstPorts {
+ if err := validateFilterPortConfig(dstPorts); err != nil {
+ errs = append(errs, err)
}
}
- return warnings, errs
+ if hasPorts && hasSrcPorts {
+ errs = append(errs, errors.New("cannot configure agent filter with ports and sourcePorts, they are mutually exclusive"))
+ }
+ if hasPorts && hasDstPorts {
+ errs = append(errs, errors.New("cannot configure agent filter with ports and destPorts, they are mutually exclusive"))
+ }
+ return errs
}
func validateFilterPortConfig(value intstr.IntOrString) error {
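
The new `cidr` check above delegates to the standard library: net.ParseCIDR accepts an address with a prefix length and rejects a bare IP, which is exactly what the "invalid CIDR" test case added below exercises. A tiny standalone sketch of that behavior (illustration only, not part of the patch):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Accepted: an address with a prefix length.
	if _, _, err := net.ParseCIDR("10.128.0.1/24"); err == nil {
		fmt.Println("10.128.0.1/24 is a valid CIDR")
	}
	// Rejected: a bare IP without a prefix length; the error reads
	// "invalid CIDR address: 1.1.1.1", matching the substring expected by the test.
	if _, _, err := net.ParseCIDR("1.1.1.1"); err != nil {
		fmt.Println(err)
	}
}
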
diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
index 055df760b..e4314f618 100644
--- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
+++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
@@ -43,10 +43,14 @@ func TestValidateAgent(t *testing.T) {
Privileged: true,
Sampling: ptr.To(int32(100)),
FlowFilter: &EBPFFlowFilter{
- Enable: ptr.To(true),
- Action: "Accept",
- CIDR: "0.0.0.0/0",
- Direction: "Egress",
+ Enable: ptr.To(true),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ Action: "Accept",
+ CIDR: "0.0.0.0/0",
+ Direction: "Egress",
+ },
+ },
},
},
},
@@ -118,11 +122,15 @@ func TestValidateAgent(t *testing.T) {
Type: AgentEBPF,
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
- Enable: ptr.To(true),
- Action: "Accept",
- CIDR: "0.0.0.0/0",
- Ports: intstr.FromInt(80),
- SourcePorts: intstr.FromInt(443),
+ Enable: ptr.To(true),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ Action: "Accept",
+ CIDR: "0.0.0.0/0",
+ Ports: intstr.FromInt(80),
+ SourcePorts: intstr.FromInt(443),
+ },
+ },
},
},
},
@@ -142,7 +150,11 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- Ports: intstr.FromString("abcd"),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ Ports: intstr.FromString("abcd"),
+ },
+ },
},
},
},
@@ -162,7 +174,11 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- Ports: intstr.FromString("80-255"),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ Ports: intstr.FromString("80-255"),
+ },
+ },
},
},
},
@@ -181,7 +197,11 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- Ports: intstr.FromString("255-80"),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ Ports: intstr.FromString("255-80"),
+ },
+ },
},
},
},
@@ -201,7 +221,11 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- Ports: intstr.FromString("80-?"),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ Ports: intstr.FromString("80-?"),
+ },
+ },
},
},
},
@@ -221,7 +245,11 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- Ports: intstr.FromString("255,80"),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ Ports: intstr.FromString("255,80"),
+ },
+ },
},
},
},
@@ -240,7 +268,11 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- Ports: intstr.FromString("80,100,250"),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ Ports: intstr.FromString("80,100,250"),
+ },
+ },
},
},
},
@@ -248,6 +280,30 @@ func TestValidateAgent(t *testing.T) {
},
expectedError: "expected two integers",
},
+ {
+ name: "FlowFilter expect invalid CIDR",
+ fc: &FlowCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster",
+ },
+ Spec: FlowCollectorSpec{
+ Agent: FlowCollectorAgent{
+ Type: AgentEBPF,
+ EBPF: FlowCollectorEBPF{
+ FlowFilter: &EBPFFlowFilter{
+ Enable: ptr.To(true),
+ FlowFilterRules: []EBPFFlowFilterRule{
+ {
+ CIDR: "1.1.1.1",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedError: "invalid CIDR",
+ },
}
CurrentClusterInfo = &cluster.Info{}
diff --git a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
index fd6634a9b..6461b82b2 100644
--- a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
+++ b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
@@ -289,6 +289,29 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) {
*out = new(bool)
**out = **in
}
+ in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule)
+ if in.FlowFilterRules != nil {
+ in, out := &in.FlowFilterRules, &out.FlowFilterRules
+ *out = make([]EBPFFlowFilterRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilter.
+func (in *EBPFFlowFilter) DeepCopy() *EBPFFlowFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(EBPFFlowFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EBPFFlowFilterRule) DeepCopyInto(out *EBPFFlowFilterRule) {
+ *out = *in
out.SourcePorts = in.SourcePorts
out.DestPorts = in.DestPorts
out.Ports = in.Ports
@@ -307,14 +330,19 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) {
*out = new(bool)
**out = **in
}
+ if in.Sampling != nil {
+ in, out := &in.Sampling, &out.Sampling
+ *out = new(uint32)
+ **out = **in
+ }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilter.
-func (in *EBPFFlowFilter) DeepCopy() *EBPFFlowFilter {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilterRule.
+func (in *EBPFFlowFilterRule) DeepCopy() *EBPFFlowFilterRule {
if in == nil {
return nil
}
- out := new(EBPFFlowFilter)
+ out := new(EBPFFlowFilterRule)
in.DeepCopyInto(out)
return out
}
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index d883e8fe2..7f6791517 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -160,68 +160,72 @@ spec:
regarding flow filtering'
properties:
action:
- description: Action defines the action to perform on the
- flows that match the filter.
+ description: '`action` defines the action to perform on
+ the flows that match the filter. The available options
+ are `Accept`, which is the default, and `Reject`.'
enum:
- Accept
- Reject
type: string
cidr:
description: |-
- CIDR defines the IP CIDR to filter flows by.
- Example: 10.10.10.0/24 or 100:100:100:100::/64
+ `cidr` defines the IP CIDR to filter flows by.
+ Examples: `10.10.10.0/24` or `100:100:100:100::/64`
type: string
destPorts:
anyOf:
- type: integer
- type: string
description: |-
- DestPorts defines the destination ports to filter flows by.
- To filter a single port, set a single port as an integer value. For example, destPorts: 80.
- To filter a range of ports, use a "start-end" range in string format. For example, destPorts: "80-100".
+ `destPorts` optionally defines the destination ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
x-kubernetes-int-or-string: true
direction:
- description: Direction defines the direction to filter
- flows by.
+ description: '`direction` optionally defines a direction
+ to filter flows by. The available options are `Ingress`
+ and `Egress`.'
enum:
- Ingress
- Egress
type: string
enable:
- description: Set `enable` to `true` to enable eBPF flow
- filtering feature.
+ description: Set `enable` to `true` to enable the eBPF
+ flow filtering feature.
type: boolean
icmpCode:
- description: ICMPCode defines the ICMP code to filter
- flows by.
+ description: '`icmpCode`, for Internet Control Message
+ Protocol (ICMP) traffic, optionally defines the ICMP
+ code to filter flows by.'
type: integer
icmpType:
- description: ICMPType defines the ICMP type to filter
- flows by.
+ description: '`icmpType`, for ICMP traffic, optionally
+ defines the ICMP type to filter flows by.'
type: integer
peerIP:
description: |-
- PeerIP defines the IP address to filter flows by.
- Example: 10.10.10.10
+ `peerIP` optionally defines the remote IP address to filter flows by.
+ Example: `10.10.10.10`.
type: string
pktDrops:
- description: '`pktDrops`, to filter flows with packet
- drops'
+ description: '`pktDrops` optionally filters only flows
+ containing packet drops.'
type: boolean
ports:
anyOf:
- type: integer
- type: string
description: |-
- Ports defines the ports to filter flows by. it can be user for either source or destination ports.
- To filter a single port, set a single port as an integer value. For example, ports: 80.
- To filter a range of ports, use a "start-end" range in string format. For example, ports: "80-100".
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+ To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
x-kubernetes-int-or-string: true
protocol:
- description: Protocol defines the protocol to filter flows
- by.
+ description: '`protocol` optionally defines a protocol
+ to filter flows by. The available options are `TCP`,
+ `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.'
enum:
- TCP
- UDP
@@ -229,19 +233,139 @@ spec:
- ICMPv6
- SCTP
type: string
+ rules:
+              description: '`rules` defines a list of eBPF agent flow
+                filtering rules.'
+ items:
+ description: '`EBPFFlowFilterRule` defines the desired
+                  eBPF agent configuration regarding a flow filtering
+ rule.'
+ properties:
+ action:
+ description: '`action` defines the action to perform
+ on the flows that match the filter. The available
+ options are `Accept`, which is the default, and
+ `Reject`.'
+ enum:
+ - Accept
+ - Reject
+ type: string
+ cidr:
+ description: |-
+ `cidr` defines the IP CIDR to filter flows by.
+ Examples: `10.10.10.0/24` or `100:100:100:100::/64`
+ type: string
+ destPorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `destPorts` optionally defines the destination ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ direction:
+ description: '`direction` optionally defines a direction
+ to filter flows by. The available options are
+ `Ingress` and `Egress`.'
+ enum:
+ - Ingress
+ - Egress
+ type: string
+ icmpCode:
+ description: '`icmpCode`, for Internet Control Message
+ Protocol (ICMP) traffic, optionally defines the
+ ICMP code to filter flows by.'
+ type: integer
+ icmpType:
+ description: '`icmpType`, for ICMP traffic, optionally
+ defines the ICMP type to filter flows by.'
+ type: integer
+ peerIP:
+ description: |-
+ `peerIP` optionally defines the remote IP address to filter flows by.
+ Example: `10.10.10.10`.
+ type: string
+ pktDrops:
+ description: '`pktDrops` optionally filters only
+ flows containing packet drops.'
+ type: boolean
+ ports:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+ To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ protocol:
+ description: '`protocol` optionally defines a protocol
+ to filter flows by. The available options are
+ `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.'
+ enum:
+ - TCP
+ - UDP
+ - ICMP
+ - ICMPv6
+ - SCTP
+ type: string
+ sampling:
+                    description: '`sampling` defines the sampling rate for
+                      the matched flows.'
+ format: int32
+ type: integer
+ sourcePorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `sourcePorts` optionally defines the source ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ tcpFlags:
+ description: |-
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+ In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+ enum:
+ - SYN
+ - SYN-ACK
+ - ACK
+ - FIN
+ - RST
+ - URG
+ - ECE
+ - CWR
+ - FIN-ACK
+ - RST-ACK
+ type: string
+ type: object
+ maxItems: 16
+ minItems: 1
+ type: array
+ sampling:
+              description: '`sampling` defines the sampling rate for the
+                matched flows.'
+ format: int32
+ type: integer
sourcePorts:
anyOf:
- type: integer
- type: string
description: |-
- SourcePorts defines the source ports to filter flows by.
- To filter a single port, set a single port as an integer value. For example, sourcePorts: 80.
- To filter a range of ports, use a "start-end" range in string format. For example, sourcePorts: "80-100".
+ `sourcePorts` optionally defines the source ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
x-kubernetes-int-or-string: true
tcpFlags:
- description: '`tcpFlags` defines the TCP flags to filter
- flows by.'
+ description: |-
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+ In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
enum:
- SYN
- SYN-ACK
@@ -3921,6 +4045,125 @@ spec:
- ICMPv6
- SCTP
type: string
+ rules:
+                description: '`rules` defines a list of eBPF agent flow
+                  filtering rules.'
+ items:
+ description: '`EBPFFlowFilterRule` defines the desired
+                    eBPF agent configuration regarding a flow filtering
+ rule.'
+ properties:
+ action:
+ description: '`action` defines the action to perform
+ on the flows that match the filter. The available
+ options are `Accept`, which is the default, and
+ `Reject`.'
+ enum:
+ - Accept
+ - Reject
+ type: string
+ cidr:
+ description: |-
+ `cidr` defines the IP CIDR to filter flows by.
+ Examples: `10.10.10.0/24` or `100:100:100:100::/64`
+ type: string
+ destPorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `destPorts` optionally defines the destination ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ direction:
+ description: '`direction` optionally defines a direction
+ to filter flows by. The available options are
+ `Ingress` and `Egress`.'
+ enum:
+ - Ingress
+ - Egress
+ type: string
+ icmpCode:
+ description: '`icmpCode`, for Internet Control Message
+ Protocol (ICMP) traffic, optionally defines the
+ ICMP code to filter flows by.'
+ type: integer
+ icmpType:
+ description: '`icmpType`, for ICMP traffic, optionally
+ defines the ICMP type to filter flows by.'
+ type: integer
+ peerIP:
+ description: |-
+ `peerIP` optionally defines the remote IP address to filter flows by.
+ Example: `10.10.10.10`.
+ type: string
+ pktDrops:
+ description: '`pktDrops` optionally filters only
+ flows containing packet drops.'
+ type: boolean
+ ports:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+ To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ protocol:
+ description: '`protocol` optionally defines a protocol
+ to filter flows by. The available options are
+ `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.'
+ enum:
+ - TCP
+ - UDP
+ - ICMP
+ - ICMPv6
+ - SCTP
+ type: string
+ sampling:
+                      description: '`sampling` defines the sampling rate for
+                        the matched flows.'
+ format: int32
+ type: integer
+ sourcePorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `sourcePorts` optionally defines the source ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ tcpFlags:
+ description: |-
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+ In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+ enum:
+ - SYN
+ - SYN-ACK
+ - ACK
+ - FIN
+ - RST
+ - URG
+ - ECE
+ - CWR
+ - FIN-ACK
+ - RST-ACK
+ type: string
+ type: object
+ maxItems: 16
+ minItems: 1
+ type: array
+ sampling:
+                description: '`sampling` defines the sampling rate for the
+                  matched flows.'
+ format: int32
+ type: integer
sourcePorts:
anyOf:
- type: integer
diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
index 45e839976..6e348b6f8 100644
--- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
@@ -144,61 +144,61 @@ spec:
description: '`flowFilter` defines the eBPF agent configuration regarding flow filtering'
properties:
action:
- description: Action defines the action to perform on the flows that match the filter.
+ description: '`action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.'
enum:
- Accept
- Reject
type: string
cidr:
description: |-
- CIDR defines the IP CIDR to filter flows by.
- Example: 10.10.10.0/24 or 100:100:100:100::/64
+ `cidr` defines the IP CIDR to filter flows by.
+ Examples: `10.10.10.0/24` or `100:100:100:100::/64`
type: string
destPorts:
anyOf:
- type: integer
- type: string
description: |-
- DestPorts defines the destination ports to filter flows by.
- To filter a single port, set a single port as an integer value. For example, destPorts: 80.
- To filter a range of ports, use a "start-end" range in string format. For example, destPorts: "80-100".
+ `destPorts` optionally defines the destination ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
x-kubernetes-int-or-string: true
direction:
- description: Direction defines the direction to filter flows by.
+ description: '`direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.'
enum:
- Ingress
- Egress
type: string
enable:
- description: Set `enable` to `true` to enable eBPF flow filtering feature.
+ description: Set `enable` to `true` to enable the eBPF flow filtering feature.
type: boolean
icmpCode:
- description: ICMPCode defines the ICMP code to filter flows by.
+ description: '`icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.'
type: integer
icmpType:
- description: ICMPType defines the ICMP type to filter flows by.
+ description: '`icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.'
type: integer
peerIP:
description: |-
- PeerIP defines the IP address to filter flows by.
- Example: 10.10.10.10
+ `peerIP` optionally defines the remote IP address to filter flows by.
+ Example: `10.10.10.10`.
type: string
pktDrops:
- description: '`pktDrops`, to filter flows with packet drops'
+ description: '`pktDrops` optionally filters only flows containing packet drops.'
type: boolean
ports:
anyOf:
- type: integer
- type: string
description: |-
- Ports defines the ports to filter flows by. it can be user for either source or destination ports.
- To filter a single port, set a single port as an integer value. For example, ports: 80.
- To filter a range of ports, use a "start-end" range in string format. For example, ports: "80-100".
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+ To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
x-kubernetes-int-or-string: true
protocol:
- description: Protocol defines the protocol to filter flows by.
+ description: '`protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.'
enum:
- TCP
- UDP
@@ -206,18 +206,123 @@ spec:
- ICMPv6
- SCTP
type: string
+ rules:
+                description: '`rules` defines a list of eBPF agent flow filtering rules.'
+ items:
+                  description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding a flow filtering rule.'
+ properties:
+ action:
+ description: '`action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.'
+ enum:
+ - Accept
+ - Reject
+ type: string
+ cidr:
+ description: |-
+ `cidr` defines the IP CIDR to filter flows by.
+ Examples: `10.10.10.0/24` or `100:100:100:100::/64`
+ type: string
+ destPorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `destPorts` optionally defines the destination ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ direction:
+ description: '`direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.'
+ enum:
+ - Ingress
+ - Egress
+ type: string
+ icmpCode:
+ description: '`icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.'
+ type: integer
+ icmpType:
+ description: '`icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.'
+ type: integer
+ peerIP:
+ description: |-
+ `peerIP` optionally defines the remote IP address to filter flows by.
+ Example: `10.10.10.10`.
+ type: string
+ pktDrops:
+ description: '`pktDrops` optionally filters only flows containing packet drops.'
+ type: boolean
+ ports:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+ To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ protocol:
+ description: '`protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.'
+ enum:
+ - TCP
+ - UDP
+ - ICMP
+ - ICMPv6
+ - SCTP
+ type: string
+ sampling:
+                      description: '`sampling` defines the sampling rate for the matched flows.'
+ format: int32
+ type: integer
+ sourcePorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `sourcePorts` optionally defines the source ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ tcpFlags:
+ description: |-
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+ In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+ enum:
+ - SYN
+ - SYN-ACK
+ - ACK
+ - FIN
+ - RST
+ - URG
+ - ECE
+ - CWR
+ - FIN-ACK
+ - RST-ACK
+ type: string
+ type: object
+ maxItems: 16
+ minItems: 1
+ type: array
+ sampling:
+                description: '`sampling` defines the sampling rate for the matched flows.'
+ format: int32
+ type: integer
sourcePorts:
anyOf:
- type: integer
- type: string
description: |-
- SourcePorts defines the source ports to filter flows by.
- To filter a single port, set a single port as an integer value. For example, sourcePorts: 80.
- To filter a range of ports, use a "start-end" range in string format. For example, sourcePorts: "80-100".
+ `sourcePorts` optionally defines the source ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
x-kubernetes-int-or-string: true
tcpFlags:
- description: '`tcpFlags` defines the TCP flags to filter flows by.'
+ description: |-
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+ In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
enum:
- SYN
- SYN-ACK
@@ -3610,6 +3715,109 @@ spec:
- ICMPv6
- SCTP
type: string
+ rules:
+                description: '`rules` defines a list of eBPF agent flow filtering rules.'
+ items:
+                  description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding a flow filtering rule.'
+ properties:
+ action:
+ description: '`action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.'
+ enum:
+ - Accept
+ - Reject
+ type: string
+ cidr:
+ description: |-
+ `cidr` defines the IP CIDR to filter flows by.
+ Examples: `10.10.10.0/24` or `100:100:100:100::/64`
+ type: string
+ destPorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `destPorts` optionally defines the destination ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ direction:
+ description: '`direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.'
+ enum:
+ - Ingress
+ - Egress
+ type: string
+ icmpCode:
+ description: '`icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.'
+ type: integer
+ icmpType:
+ description: '`icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.'
+ type: integer
+ peerIP:
+ description: |-
+ `peerIP` optionally defines the remote IP address to filter flows by.
+ Example: `10.10.10.10`.
+ type: string
+ pktDrops:
+ description: '`pktDrops` optionally filters only flows containing packet drops.'
+ type: boolean
+ ports:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+ To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ protocol:
+ description: '`protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.'
+ enum:
+ - TCP
+ - UDP
+ - ICMP
+ - ICMPv6
+ - SCTP
+ type: string
+ sampling:
+                      description: '`sampling` defines the sampling rate for the matched flows.'
+ format: int32
+ type: integer
+ sourcePorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ `sourcePorts` optionally defines the source ports to filter flows by.
+ To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+ To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ x-kubernetes-int-or-string: true
+ tcpFlags:
+ description: |-
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+ In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+ enum:
+ - SYN
+ - SYN-ACK
+ - ACK
+ - FIN
+ - RST
+ - URG
+ - ECE
+ - CWR
+ - FIN-ACK
+ - RST-ACK
+ type: string
+ type: object
+ maxItems: 16
+ minItems: 1
+ type: array
+ sampling:
+                description: '`sampling` defines the sampling rate for the matched flows.'
+ format: int32
+ type: integer
sourcePorts:
anyOf:
- type: integer
diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml
index 07358df69..31df2805f 100644
--- a/config/samples/flows_v1beta2_flowcollector.yaml
+++ b/config/samples/flows_v1beta2_flowcollector.yaml
@@ -29,9 +29,25 @@ spec:
excludeInterfaces: ["lo"]
kafkaBatchSize: 1048576
#flowFilter:
+ # rules:
+ # - action: Accept
+ # cidr: 10.128.0.1/24
+ # ports: 6443
+ # protocol: TCP
+ # sampling: 10
+ # - action: Accept
+ # cidr: 10.129.0.1/24
+ # ports: 53
+ # protocol: UDP
+ # sampling: 20
+ # - action: Accept
+ # cidr: 172.30.0.0/16
+ # protocol: TCP
+ # sampling: 30
+ # sourcePorts: 443
# tcpFlags: "SYN"
# action: Accept
- # cidr: 0.0.0.0/0
+ # cidr: 2.2.2.2/24
# protocol: TCP
# sourcePorts: 53
# enable: true
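
For context on how such sample rules reach the agent: the controller change below maps each rule to the agent's ebpfconfig.FlowFilter struct and serializes the list to JSON, with the single FLOW_FILTER_RULES environment variable replacing the former per-field FILTER_* variables. A rough sketch of that mapping for the first sample rule above; the serialized key names depend on the agent package's struct tags, which are not shown in this diff, and attaching the JSON to the environment variable is inferred from the envFilterRules constant below:

package main

import (
	"encoding/json"
	"fmt"

	ebpfconfig "github.com/netobserv/netobserv-ebpf-agent/pkg/agent"
)

func main() {
	// Mirrors what mapFlowFilterRuleToFilter (below) produces for the first sample rule.
	filters := []ebpfconfig.FlowFilter{{
		FilterIPCIDR:   "10.128.0.1/24",
		FilterAction:   "Accept",
		FilterProtocol: "TCP",
		FilterPort:     6443,
		FilterSample:   10,
	}}
	jsonData, err := json.Marshal(filters)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	// Assumption: the operator exposes this JSON to the agent through FLOW_FILTER_RULES.
	fmt.Println(string(jsonData))
}
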
diff --git a/controllers/consoleplugin/config/static-frontend-config.yaml b/controllers/consoleplugin/config/static-frontend-config.yaml
index 71ad3231c..feed43ab1 100644
--- a/controllers/consoleplugin/config/static-frontend-config.yaml
+++ b/controllers/consoleplugin/config/static-frontend-config.yaml
@@ -346,6 +346,11 @@ columns:
calculated: '[SrcK8S_HostIP,DstK8S_HostIP]'
default: false
width: 10
+ - id: Sampling
+ name: Sampling
+ field: Sampling
+ default: false
+ width: 10
- id: K8S_HostName
name: Node Name
calculated: '[SrcK8S_HostName,DstK8S_HostName]'
@@ -1239,6 +1244,9 @@ fields:
- name: Dscp
type: number
description: Differentiated Services Code Point (DSCP) value
+ - name: Sampling
+ type: number
+ description: Sampling rate used for this flow
- name: IcmpType
type: number
description: ICMP type
diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go
index a6b511593..7c3ad17d9 100644
--- a/controllers/ebpf/agent_controller.go
+++ b/controllers/ebpf/agent_controller.go
@@ -2,10 +2,13 @@ package ebpf
import (
"context"
+ "encoding/json"
"fmt"
"strconv"
"strings"
+ "github.com/go-logr/logr"
+ ebpfconfig "github.com/netobserv/netobserv-ebpf-agent/pkg/agent"
flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
"github.com/netobserv/network-observability-operator/controllers/constants"
"github.com/netobserv/network-observability-operator/controllers/ebpf/internal/permissions"
@@ -14,7 +17,6 @@ import (
"github.com/netobserv/network-observability-operator/pkg/volumes"
"github.com/netobserv/network-observability-operator/pkg/watchers"
- "github.com/go-logr/logr"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -64,24 +66,7 @@ const (
envMetricsTLSCertPath = "METRICS_TLS_CERT_PATH"
envMetricsTLSKeyPath = "METRICS_TLS_KEY_PATH"
envEnableFlowFilter = "ENABLE_FLOW_FILTER"
- envFilterIPCIDR = "FILTER_IP_CIDR"
- envFilterAction = "FILTER_ACTION"
- envFilterDirection = "FILTER_DIRECTION"
- envFilterProtocol = "FILTER_PROTOCOL"
- envFilterSourcePort = "FILTER_SOURCE_PORT"
- envFilterDestPort = "FILTER_DESTINATION_PORT"
- envFilterPort = "FILTER_PORT"
- envFilterSourcePortRange = "FILTER_SOURCE_PORT_RANGE"
- envFilterDestPortRange = "FILTER_DESTINATION_PORT_RANGE"
- envFilterPortRange = "FILTER_PORT_RANGE"
- envFilterSourcePorts = "FILTER_SOURCE_PORTS"
- envFilterDestPorts = "FILTER_DESTINATION_PORTS"
- envFilterPorts = "FILTER_PORTS"
- envFilterICMPType = "FILTER_ICMP_TYPE"
- envFilterICMPCode = "FILTER_ICMP_CODE"
- envFilterPeerIPAddress = "FILTER_PEER_IP"
- envFilterTCPFlags = "FILTER_TCP_FLAGS"
- envFilterPktDrops = "FILTER_DROPS"
+ envFilterRules = "FLOW_FILTER_RULES"
envEnablePacketTranslation = "ENABLE_PKT_TRANSLATION"
envEnableEbpfMgr = "EBPF_PROGRAM_MANAGER_MODE"
envListSeparator = ","
@@ -517,109 +502,126 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC
if helper.IsEBFPFlowFilterEnabled(&coll.Spec.Agent.EBPF) {
config = append(config, corev1.EnvVar{Name: envEnableFlowFilter, Value: "true"})
-
- config = append(config, c.configureFlowFilter(coll.Spec.Agent.EBPF.FlowFilter, config)...)
+ if len(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules) != 0 {
+ if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules); filterRules != nil {
+ config = append(config, filterRules...)
+ }
+ } else {
+ if filter := c.configureFlowFilter(coll.Spec.Agent.EBPF.FlowFilter); filter != nil {
+ config = append(config, filter...)
+ }
+ }
}
return config, nil
}
-// nolint:cyclop
-func (c *AgentController) configureFlowFilter(filter *flowslatest.EBPFFlowFilter, config []corev1.EnvVar) []corev1.EnvVar {
- if filter.CIDR != "" {
- config = append(config, corev1.EnvVar{Name: envFilterIPCIDR,
- Value: filter.CIDR,
- })
+func mapFlowFilterRuleToFilter(rule *flowslatest.EBPFFlowFilterRule) ebpfconfig.FlowFilter {
+ f := ebpfconfig.FlowFilter{
+ FilterIPCIDR: rule.CIDR,
+ FilterAction: rule.Action,
+ FilterDirection: rule.Direction,
+ FilterProtocol: rule.Protocol,
}
- if filter.Action != "" {
- config = append(config, corev1.EnvVar{Name: envFilterAction,
- Value: filter.Action,
- })
+
+ if rule.ICMPType != nil && *rule.ICMPType != 0 {
+ f.FilterICMPType = *rule.ICMPType
}
- if filter.Direction != "" {
- config = append(config, corev1.EnvVar{Name: envFilterDirection,
- Value: filter.Direction,
- })
+ if rule.ICMPCode != nil && *rule.ICMPCode != 0 {
+ f.FilterICMPCode = *rule.ICMPCode
}
- if filter.Protocol != "" {
- config = append(config, corev1.EnvVar{Name: envFilterProtocol,
- Value: filter.Protocol,
- })
+
+ processPorts(rule.SourcePorts, &f.FilterSourcePort, &f.FilterSourcePorts, &f.FilterSourcePortRange)
+ processPorts(rule.DestPorts, &f.FilterDestinationPort, &f.FilterDestinationPorts, &f.FilterDestinationPortRange)
+ processPorts(rule.Ports, &f.FilterPort, &f.FilterPorts, &f.FilterPortRange)
+
+ if rule.PeerIP != "" {
+ f.FilterPeerIP = rule.PeerIP
+ }
+ if rule.TCPFlags != "" {
+ f.FilterTCPFlags = rule.TCPFlags
}
+ if rule.PktDrops != nil && *rule.PktDrops {
+ f.FilterDrops = *rule.PktDrops
+ }
+ if rule.Sampling != nil && *rule.Sampling != 0 {
+ f.FilterSample = *rule.Sampling
+ }
+
+ return f
+}
+
+func mapFlowFilterToFilter(filter *flowslatest.EBPFFlowFilter) ebpfconfig.FlowFilter {
+ f := ebpfconfig.FlowFilter{
+ FilterIPCIDR: filter.CIDR,
+ FilterAction: filter.Action,
+ FilterDirection: filter.Direction,
+ FilterProtocol: filter.Protocol,
+ }
+
if filter.ICMPType != nil && *filter.ICMPType != 0 {
- config = append(config, corev1.EnvVar{Name: envFilterICMPType,
- Value: strconv.Itoa(*filter.ICMPType),
- })
+ f.FilterICMPType = *filter.ICMPType
}
if filter.ICMPCode != nil && *filter.ICMPCode != 0 {
- config = append(config, corev1.EnvVar{Name: envFilterICMPCode,
- Value: strconv.Itoa(*filter.ICMPCode)})
+ f.FilterICMPCode = *filter.ICMPCode
}
- if filter.SourcePorts.Type == intstr.String {
- if strings.Contains(filter.SourcePorts.String(), "-") {
- config = append(config, corev1.EnvVar{Name: envFilterSourcePortRange,
- Value: filter.SourcePorts.String(),
- })
- }
- if strings.Contains(filter.SourcePorts.String(), ",") {
- config = append(config, corev1.EnvVar{Name: envFilterSourcePorts,
- Value: filter.SourcePorts.String(),
- })
- }
+
+ processPorts(filter.SourcePorts, &f.FilterSourcePort, &f.FilterSourcePorts, &f.FilterSourcePortRange)
+ processPorts(filter.DestPorts, &f.FilterDestinationPort, &f.FilterDestinationPorts, &f.FilterDestinationPortRange)
+ processPorts(filter.Ports, &f.FilterPort, &f.FilterPorts, &f.FilterPortRange)
+
+ if filter.PeerIP != "" {
+ f.FilterPeerIP = filter.PeerIP
}
- if filter.SourcePorts.Type == intstr.Int {
- config = append(config, corev1.EnvVar{Name: envFilterSourcePort,
- Value: strconv.Itoa(filter.SourcePorts.IntValue()),
- })
+ if filter.TCPFlags != "" {
+ f.FilterTCPFlags = filter.TCPFlags
}
- if filter.DestPorts.Type == intstr.String {
- if strings.Contains(filter.DestPorts.String(), "-") {
- config = append(config, corev1.EnvVar{Name: envFilterDestPortRange,
- Value: filter.DestPorts.String(),
- })
- }
- if strings.Contains(filter.DestPorts.String(), ",") {
- config = append(config, corev1.EnvVar{Name: envFilterDestPorts,
- Value: filter.DestPorts.String(),
- })
- }
+ if filter.PktDrops != nil && *filter.PktDrops {
+ f.FilterDrops = *filter.PktDrops
}
- if filter.DestPorts.Type == intstr.Int {
- config = append(config, corev1.EnvVar{Name: envFilterDestPort,
- Value: strconv.Itoa(filter.DestPorts.IntValue()),
- })
+ if filter.Sampling != nil && *filter.Sampling != 0 {
+ f.FilterSample = *filter.Sampling
}
- if filter.Ports.Type == intstr.String {
- if strings.Contains(filter.Ports.String(), "-") {
- config = append(config, corev1.EnvVar{Name: envFilterPortRange,
- Value: filter.Ports.String(),
- })
+
+ return f
+}
+
+// processPorts routes an IntOrString port specification to the matching filter field:
+// an integer value sets the single port, a "start-end" string sets the port range,
+// and a "port1,port2" string sets the port list.
+func processPorts(ports intstr.IntOrString, single *int32, list *string, rangeField *string) {
+ if ports.Type == intstr.String {
+ portStr := ports.String()
+ if strings.Contains(portStr, "-") {
+ *rangeField = portStr
}
- if strings.Contains(filter.Ports.String(), ",") {
- config = append(config, corev1.EnvVar{Name: envFilterPorts,
- Value: filter.Ports.String(),
- })
+ if strings.Contains(portStr, ",") {
+ *list = portStr
}
+ } else if ports.Type == intstr.Int {
+ *single = int32(ports.IntValue())
}
- if filter.Ports.Type == intstr.Int {
- config = append(config, corev1.EnvVar{Name: envFilterPort,
- Value: strconv.Itoa(filter.Ports.IntValue()),
- })
- }
- if filter.PeerIP != "" {
- config = append(config, corev1.EnvVar{Name: envFilterPeerIPAddress,
- Value: filter.PeerIP})
+}
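For clarity, here is a self-contained sketch of the three port encodings that `processPorts` distinguishes; the helper is copied verbatim from the patch so the snippet runs on its own:

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// processPorts is copied from the patch so the example is self-contained.
func processPorts(ports intstr.IntOrString, single *int32, list *string, rangeField *string) {
	if ports.Type == intstr.String {
		portStr := ports.String()
		if strings.Contains(portStr, "-") {
			*rangeField = portStr
		}
		if strings.Contains(portStr, ",") {
			*list = portStr
		}
	} else if ports.Type == intstr.Int {
		*single = int32(ports.IntValue())
	}
}

func main() {
	inputs := []intstr.IntOrString{
		intstr.FromInt(80),          // single port   -> single
		intstr.FromString("80-100"), // "start-end"   -> rangeField
		intstr.FromString("80,100"), // "port1,port2" -> list
	}
	for _, in := range inputs {
		var single int32
		var list, portRange string
		processPorts(in, &single, &list, &portRange)
		fmt.Printf("%-8s -> single=%d list=%q range=%q\n", in.String(), single, list, portRange)
	}
}
```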
+
+// configureFlowFiltersRules serializes the CRD filter rules into a single JSON-encoded
+// environment variable for the agent; if marshalling fails, no variable is emitted.
+func (c *AgentController) configureFlowFiltersRules(rules []flowslatest.EBPFFlowFilterRule) []corev1.EnvVar {
+ filters := make([]ebpfconfig.FlowFilter, 0)
+ for i := range rules {
+ filters = append(filters, mapFlowFilterRuleToFilter(&rules[i]))
}
- if filter.TCPFlags != "" {
- config = append(config, corev1.EnvVar{Name: envFilterTCPFlags,
- Value: filter.TCPFlags,
- })
+
+ jsonData, err := json.Marshal(filters)
+ if err != nil {
+ return nil
}
- if filter.PktDrops != nil && *filter.PktDrops {
- config = append(config, corev1.EnvVar{Name: envFilterPktDrops, Value: "true"})
+ return []corev1.EnvVar{{Name: envFilterRules, Value: string(jsonData)}}
+}
+
+// configureFlowFilter wraps the pre-existing single-filter spec into a one-element rules
+// list so that it reaches the agent through the same JSON-encoded variable.
+func (c *AgentController) configureFlowFilter(filter *flowslatest.EBPFFlowFilter) []corev1.EnvVar {
+ f := mapFlowFilterToFilter(filter)
+ jsonData, err := json.Marshal([]ebpfconfig.FlowFilter{f})
+ if err != nil {
+ return nil
}
- return config
+
+ return []corev1.EnvVar{{Name: envFilterRules, Value: string(jsonData)}}
}
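A hedged, test-style sketch of the expected output shape: both helpers should emit exactly one FLOW_FILTER_RULES variable whose value decodes to a JSON array with one element per rule. Only identifiers that appear in this patch are used; the test is assumed to live in the same package as the controller, with the usual `testing` and `encoding/json` imports.

```go
func TestFlowFilterEnvVarShape(t *testing.T) {
	c := &AgentController{}

	rules := []flowslatest.EBPFFlowFilterRule{
		{CIDR: "10.10.10.0/24", Action: "Accept", Protocol: "TCP"},
		{CIDR: "100:100:100:100::/64", Action: "Reject"},
	}
	vars := c.configureFlowFiltersRules(rules)
	if len(vars) != 1 || vars[0].Name != envFilterRules {
		t.Fatalf("expected a single %s variable, got %v", envFilterRules, vars)
	}

	// The value should round-trip back into the agent-side filter slice.
	var decoded []ebpfconfig.FlowFilter
	if err := json.Unmarshal([]byte(vars[0].Value), &decoded); err != nil {
		t.Fatalf("value is not a JSON array of filters: %v", err)
	}
	if len(decoded) != len(rules) {
		t.Fatalf("expected %d serialized rules, got %d", len(rules), len(decoded))
	}
}
```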
func (c *AgentController) securityContext(coll *flowslatest.FlowCollector) *corev1.SecurityContext {
diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md
index ab9d68e6a..99f6db0f9 100644
--- a/docs/FlowCollector.md
+++ b/docs/FlowCollector.md
@@ -437,7 +437,7 @@ in edge debug or support scenarios.
action
enum
- Action defines the action to perform on the flows that match the filter.
+ `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.
Enum: Accept, Reject
@@ -446,17 +446,17 @@ in edge debug or support scenarios.
cidr
string
- CIDR defines the IP CIDR to filter flows by.
-Example: 10.10.10.0/24 or 100:100:100:100::/64
+ `cidr` defines the IP CIDR to filter flows by.
+Examples: `10.10.10.0/24` or `100:100:100:100::/64`
false
destPorts
int or string
- DestPorts defines the destination ports to filter flows by.
-To filter a single port, set a single port as an integer value. For example, destPorts: 80.
-To filter a range of ports, use a "start-end" range in string format. For example, destPorts: "80-100".
+ `destPorts` optionally defines the destination ports to filter flows by.
+To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
false
@@ -464,7 +464,7 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports:
direction
enum
- Direction defines the direction to filter flows by.
+ `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.
Enum: Ingress, Egress
@@ -473,45 +473,45 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports:
enable
boolean
- Set `enable` to `true` to enable eBPF flow filtering feature.
+ Set `enable` to `true` to enable the eBPF flow filtering feature.
false
icmpCode
integer
- ICMPCode defines the ICMP code to filter flows by.
+ `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.
false
icmpType
integer
- ICMPType defines the ICMP type to filter flows by.
+ `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.
false
peerIP
string
- PeerIP defines the IP address to filter flows by.
-Example: 10.10.10.10
+ `peerIP` optionally defines the remote IP address to filter flows by.
+Example: `10.10.10.10`.
false
pktDrops
boolean
- `pktDrops`, to filter flows with packet drops
+ `pktDrops` optionally filters only flows containing packet drops.
false
ports
int or string
- Ports defines the ports to filter flows by. it can be user for either source or destination ports.
-To filter a single port, set a single port as an integer value. For example, ports: 80.
-To filter a range of ports, use a "start-end" range in string format. For example, ports: "80-100".
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
false
@@ -519,18 +519,167 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports:
protocol
enum
- Protocol defines the protocol to filter flows by.
+ `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.
+ `flowFilterRules` defines a list of eBPF agent flow filtering rules.
+
+
false
+
+
sampling
+
integer
+
+ `sampling` optionally defines the sampling rate for the matched flows.
+
+ Format: int32
+
+
false
+
+
sourcePorts
+
int or string
+
+ `sourcePorts` optionally defines the source ports to filter flows by.
+To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
+To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
+
false
+
+
tcpFlags
+
enum
+
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+
+ Enum: SYN, SYN-ACK, ACK, FIN, RST, URG, ECE, CWR, FIN-ACK, RST-ACK
+
+ `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.
+
+ Enum: Accept, Reject
+
+
false
+
+
cidr
+
string
+
+ `cidr` defines the IP CIDR to filter flows by.
+Examples: `10.10.10.0/24` or `100:100:100:100::/64`
+
+
false
+
+
destPorts
+
int or string
+
+ `destPorts` optionally defines the destination ports to filter flows by.
+To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
+To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
+
false
+
+
direction
+
enum
+
+ `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.
+
+ Enum: Ingress, Egress
+
+
false
+
+
icmpCode
+
integer
+
+ `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.
+
+
false
+
+
icmpType
+
integer
+
+ `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.
+
+
false
+
+
peerIP
+
string
+
+ `peerIP` optionally defines the remote IP address to filter flows by.
+Example: `10.10.10.10`.
+
+
false
+
+
pktDrops
+
boolean
+
+ `pktDrops` optionally filters only flows containing packet drops.
+
+
false
+
+
ports
+
int or string
+
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
+To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
+
false
+
+
protocol
+
enum
+
+ `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.
+
+ Enum: TCP, UDP, ICMP, ICMPv6, SCTP
+
+
false
+
+
sampling
+
integer
+
+ `sampling` optionally defines the sampling rate for the matched flows.
+
+ Format: int32
+
+
false
sourcePorts
int or string
- SourcePorts defines the source ports to filter flows by.
-To filter a single port, set a single port as an integer value. For example, sourcePorts: 80.
-To filter a range of ports, use a "start-end" range in string format. For example, sourcePorts: "80-100".
+ `sourcePorts` optionally defines the source ports to filter flows by.
+To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
false
@@ -538,7 +687,8 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports:
tcpFlags
enum
- `tcpFlags` defines the TCP flags to filter flows by.
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+ `flowFilterRules` defines a list of eBPF agent flow filtering rules.
+
+
false
+
+
sampling
+
integer
+
+ `sampling` optionally defines the sampling rate for the matched flows.
+
+ Format: int32
+
+
false
+
+
sourcePorts
+
int or string
+
+ `sourcePorts` optionally defines the source ports to filter flows by.
+To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`.
+To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
+
false
+
+
tcpFlags
+
enum
+
+ `tcpFlags` optionally defines TCP flags to filter flows by.
+In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+
+ Enum: SYN, SYN-ACK, ACK, FIN, RST, URG, ECE, CWR, FIN-ACK, RST-ACK
+
+ `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.
+
+ Enum: Accept, Reject
+
+
false
+
+
cidr
+
string
+
+ `cidr` defines the IP CIDR to filter flows by.
+Examples: `10.10.10.0/24` or `100:100:100:100::/64`
+
+
false
+
+
destPorts
+
int or string
+
+ `destPorts` optionally defines the destination ports to filter flows by.
+To filter a single port, set a single port as an integer value. For example, `destPorts: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
+To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
+
false
+
+
direction
+
enum
+
+ `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.
+
+ Enum: Ingress, Egress
+
+
false
+
+
icmpCode
+
integer
+
+ `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.
+
+
false
+
+
icmpType
+
integer
+
+ `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.
+
+
false
+
+
peerIP
+
string
+
+ `peerIP` optionally defines the remote IP address to filter flows by.
+Example: `10.10.10.10`.
+
+
false
+
+
pktDrops
+
boolean
+
+ `pktDrops` optionally filters only flows containing packet drops.
+
+
false
+
+
ports
+
int or string
+
+ `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
+To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
+
false
+
+
protocol
+
enum
+
+ `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.
+
+ Enum: TCP, UDP, ICMP, ICMPv6, SCTP
+
+
false
+
+
sampling
+
integer
+
+ `sampling` optionally defines the sampling rate for the matched flows.
+
+ Format: int32
+
+
false
sourcePorts
int or string
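To tie the documented fields together, here is a sketch of populating the new rules list programmatically (Go rather than YAML, for consistency with the rest of the patch). Only field names that appear in this patch and in the tables above are used; `coll` is assumed to be an existing `*flowslatest.FlowCollector`, and `intstr` is the usual `k8s.io/apimachinery/pkg/util/intstr` package.

```go
// Enable filtering and define two rules: keep TCP traffic towards the web ports of a
// subnet, and reject egress flows matching a catch-all IPv4 CIDR.
enable := true
coll.Spec.Agent.EBPF.FlowFilter = &flowslatest.EBPFFlowFilter{
	Enable: &enable,
	FlowFilterRules: []flowslatest.EBPFFlowFilterRule{
		{CIDR: "10.10.10.0/24", Action: "Accept", Protocol: "TCP", DestPorts: intstr.FromString("80,443")},
		{CIDR: "0.0.0.0/0", Action: "Reject", Direction: "Egress"},
	},
}
```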
diff --git a/go.mod b/go.mod
index 1ff9ac009..1f3c92335 100644
--- a/go.mod
+++ b/go.mod
@@ -10,6 +10,7 @@ require (
github.com/go-logr/logr v1.4.2
github.com/google/go-cmp v0.6.0
github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20241126155124-18e017f67001
+ github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20241008130234-a20397fb8f88
github.com/onsi/ginkgo/v2 v2.22.0
github.com/onsi/gomega v1.35.1
github.com/openshift/api v0.0.0-20240722135205-ae4f370f361f
@@ -31,44 +32,111 @@ require (
)
require (
+ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect
+ github.com/agoda-com/opentelemetry-logs-go v0.5.0 // indirect
+ github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cenkalti/hub v1.0.1 // indirect
+ github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/cilium/ebpf v0.16.0 // indirect
+ github.com/containernetworking/cni v1.1.2 // indirect
+ github.com/containernetworking/plugins v1.2.0 // indirect
+ github.com/coreos/go-iptables v0.6.0 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+ github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424 // indirect
+ github.com/go-ini/ini v1.67.0 // indirect
+ github.com/go-kit/kit v0.13.0 // indirect
+ github.com/go-kit/log v0.2.1 // indirect
+ github.com/go-logfmt/logfmt v0.5.1 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
+ github.com/golang/snappy v0.0.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/uuid v1.6.0 // indirect
+ github.com/gopacket/gopacket v1.2.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
+ github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb // indirect
github.com/imdario/mergo v0.3.16 // indirect
+ github.com/ip2location/ip2location-go/v9 v9.7.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.8 // indirect
+ github.com/libp2p/go-reuseport v0.3.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 // indirect
+ github.com/minio/md5-simd v1.1.2 // indirect
+ github.com/minio/minio-go/v7 v7.0.77 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
+ github.com/netobserv/gopipes v0.3.0 // indirect
+ github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 // indirect
+ github.com/netsampler/goflow2 v1.3.7 // indirect
+ github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 // indirect
+ github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20241126140656-c95491e46334 // indirect
+ github.com/pierrec/lz4/v4 v4.1.17 // indirect
+ github.com/pion/dtls/v2 v2.2.4 // indirect
+ github.com/pion/logging v0.2.2 // indirect
+ github.com/pion/transport/v2 v2.0.0 // indirect
+ github.com/pion/udp v0.1.4 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_golang v1.20.3 // indirect
+ github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 // indirect
+ github.com/rs/xid v1.6.0 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 // indirect
+ github.com/segmentio/kafka-go v0.4.47 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
+ github.com/urfave/cli/v2 v2.27.2 // indirect
+ github.com/vishvananda/netlink v1.3.0 // indirect
+ github.com/vishvananda/netns v0.0.4 // indirect
+ github.com/vmware/go-ipfix v0.9.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
+ github.com/xdg-go/pbkdf2 v1.0.0 // indirect
+ github.com/xdg-go/scram v1.1.2 // indirect
+ github.com/xdg-go/stringprep v1.0.4 // indirect
+ github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
+ go.opentelemetry.io/otel v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.29.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect
+ go.opentelemetry.io/otel/trace v1.31.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+ go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/crypto v0.28.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.22.0 // indirect
@@ -78,14 +146,23 @@ require (
golang.org/x/time v0.6.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
+ google.golang.org/grpc v1.67.1 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+ gopkg.in/gcfg.v1 v1.2.3 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+ gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 // indirect
+ lukechampine.com/uint128 v1.2.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
replace github.com/prometheus/common v0.55.0 => github.com/netobserv/prometheus-common v0.55.0-netobserv
+
+replace github.com/netobserv/netobserv-ebpf-agent => github.com/msherif1234/netobserv-ebpf-agent v0.0.0-20241213142900-5c07db3a6c0b
diff --git a/go.sum b/go.sum
index c97b326d3..abc0aa6ef 100644
--- a/go.sum
+++ b/go.sum
@@ -1,189 +1,1331 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/agoda-com/opentelemetry-logs-go v0.5.0 h1:9L6hRUiOX/Laoazk3u2hTdcMDOpkQXi2kMg5YBYA/I4=
+github.com/agoda-com/opentelemetry-logs-go v0.5.0/go.mod h1:35B5ypjX5pkVCPJR01i6owJSYWe8cnbWLpEyHgAGD/E=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexflint/go-filemutex v1.2.0 h1:1v0TJPDtlhgpW4nJ+GvxCLSlUDC3+gW0CQQvlmfDR/s=
+github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bpfman/bpfman-operator v0.5.5-0.20241023163832-0bf84bbd3927 h1:2odrvZ4MI/tfUkJYAz/d35EiDISbEHsv3360CaF7Rco=
github.com/bpfman/bpfman-operator v0.5.5-0.20241023163832-0bf84bbd3927/go.mod h1:BREOhrpjbTdN3f/3bRx9/YSJsVwz79eUGafR2njaN4Q=
+github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA=
+github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg=
+github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs=
+github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ=
+github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
+github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
+github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU=
+github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
+github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
+github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/gaissmai/cidrtree v0.1.4 h1:/aYnv1LIwjtSDHNr1eNN99WJeh6vLrB+Sgr1tRMhHDc=
+github.com/gaissmai/cidrtree v0.1.4/go.mod h1:nrjEeeMZmvoJpLcSvZ3qIVFxw/+9GHKi7wDHHmHKGRI=
+github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424 h1:Vh7rylVZRZCj6W41lRlP17xPk4Nq260H4Xo/DDYmEZk=
+github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
+github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
+github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
+github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
+github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
+github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
+github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
+github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
+github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
+github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
+github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
+github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopacket/gopacket v1.2.0 h1:eXbzFad7f73P1n2EJHQlsKuvIMJjVXK5tXoSca78I3A=
+github.com/gopacket/gopacket v1.2.0/go.mod h1:BrAKEy5EOGQ76LSqh7DMAr7z0NNPdczWm2GxCG7+I8M=
+github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb h1:tsEKRC3PU9rMw18w/uAptoijhgG4EvlA5kfJPtwrMDk=
+github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb/go.mod h1:NtmN9h8vrTveVQRLHcX2HQ5wIPBDCsZ351TGbZWgg38=
+github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
+github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
+github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
+github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
+github.com/ip2location/ip2location-go/v9 v9.7.0 h1:ipwl67HOWcrw+6GOChkEXcreRQR37NabqBd2ayYa4Q0=
+github.com/ip2location/ip2location-go/v9 v9.7.0/go.mod h1:MPLnsKxwQlvd2lBNcQCsLoyzJLDBFizuO67wXXdzoyI=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
+github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
+github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
+github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4=
+github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U=
+github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g=
+github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI=
+github.com/k8snetworkplumbingwg/multi-networkpolicy v0.0.0-20200914073308-0f33b9190170 h1:rtPle+U5e7Fia0j44gm+p5QMgOIXXB3A8GtFeCCh8Kk=
+github.com/k8snetworkplumbingwg/multi-networkpolicy v0.0.0-20200914073308-0f33b9190170/go.mod h1:CF9uYILB8GY25A/6Hhi1AWKc29qbyLu8r7Gs+uINGZE=
+github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4=
+github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0/go.mod h1:wxt2YWRVItDtaQmVSmaN5ubE2L1c9CiNoHQwSJnM8Ko=
+github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc h1:v6+jUd70AayPbIRgTYUNpnBLG5cBPTY0+10y80CZeMk=
+github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc/go.mod h1:jyWzGe6ZtYiPq6ih6aXCOy6mZ49Y9mNyBOLBBXnli+k=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw=
+github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f h1:mjj2aCHv9orQy7Y0OGs03wZNtuQHfNgCKY44eOIloe0=
+github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f/go.mod h1:Ec37gLe3vH+cnOp7x3qfd+0sz0pnP3CyIXKmQJ2ZOXU=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875 h1:ql8x//rJsHMjS+qqEag8n3i4azw1QneKh5PieH9UEbY=
+github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875/go.mod h1:kfOoFJuHWp76v1RgZCb9/gVUc7XdY877S2uVYbNliGc=
+github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
+github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og=
+github.com/mdlayher/ndp v1.0.1 h1:+yAD79/BWyFlvAoeG5ncPS0ItlHP/eVbH7bQ6/+LVA4=
+github.com/mdlayher/ndp v1.0.1/go.mod h1:rf3wKaWhAYJEXFKpgF8kQ2AxypxVbfNcZbqoAo6fVzk=
+github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
+github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
+github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8=
+github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU=
+github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E=
+github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
+github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
+github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
+github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
+github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/msherif1234/netobserv-ebpf-agent v0.0.0-20241213142900-5c07db3a6c0b h1:E8vhCqGnKNO++9HFup4rXqpl9I1uyTJYpaKetV1elAA=
+github.com/msherif1234/netobserv-ebpf-agent v0.0.0-20241213142900-5c07db3a6c0b/go.mod h1:20e1OPAs7h3k9PvNZWS9D6BnXEtkTk2LlfzD66uhvxY=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20241126155124-18e017f67001 h1:Hb5RxMfFafng2+QbnkiEWTS4TUUz7zcpUS8RiauzWXw=
github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20241126155124-18e017f67001/go.mod h1:wnCpWttAFkLSSxOcfCkd9zA5pwV/1OcxS5tAfAxNWEc=
+github.com/netobserv/gopipes v0.3.0 h1:IYmPnnAVCdSK7VmHmpFhrVBOEm45qpgbZmJz1sSW+60=
+github.com/netobserv/gopipes v0.3.0/go.mod h1:N7/Gz05EOF0CQQSKWsv3eof22Cj2PB08Pbttw98YFYU=
+github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 h1:RmnoJe/ci5q+QdM7upFdxiU+D8F3L3qTd5wXCwwHefw=
+github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500/go.mod h1:LHXpc5tjKvsfZn0pwLKrvlgEhZcCaw3Di9mUEZGAI4E=
github.com/netobserv/prometheus-common v0.55.0-netobserv h1:Fapr74g0S3gRh/kTTyv9Ytm4DJJfFuUTEToiU/np9eg=
github.com/netobserv/prometheus-common v0.55.0-netobserv/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/netsampler/goflow2 v1.3.7 h1:XZaTy8kkMnGXpJ9hS3KbO1McyrFTpVNhVFEx9rNhMmc=
+github.com/netsampler/goflow2 v1.3.7/go.mod h1:4UZsVGVAs//iMCptUHn3WNScztJeUhZH7kDW2+/vDdQ=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/openshift/api v0.0.0-20240722135205-ae4f370f361f h1:B+uJ4LmjO+qwMTZP2YhlpMziMPD4MD1++WdCAV2y+GI=
github.com/openshift/api v0.0.0-20240722135205-ae4f370f361f/go.mod h1:OOh6Qopf21pSzqNVCB5gomomBXb8o5sGKZxG2KNpaXM=
+github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a h1:4FVrw8hz0Wb3izbf6JfOEK+pJTYpEvteRR73mCh2g/A=
+github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a/go.mod h1:arApQobmOjZqtxw44TwnQdUCH+t9DgZ8geYPFqksHws=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 h1:OoDvzyaK7F/ZANIIFOgb4Haj7mye3Hle0fYZZNdidSs=
+github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20/go.mod h1:dJbxEaalQl83nn904K32FaMjlH/qOObZ0bj4ejQ78AI=
+github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20241126140656-c95491e46334 h1:DRWKIJpIDHgp9JWOHOwDywnfBnJOyHolGpg3OioY+dI=
+github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20241126140656-c95491e46334/go.mod h1:xn0ACVOiv+fi6wJN5y0XeYRbDrAHLgDLCEdageHCObI=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
+github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pion/dtls/v2 v2.2.4 h1:YSfYwDQgrxMYXLBc/m7PFY5BVtWlNm/DN4qoU2CbcWg=
+github.com/pion/dtls/v2 v2.2.4/go.mod h1:WGKfxqhrddne4Kg3p11FUMJrynkOY4lb25zHNO49wuw=
+github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/transport/v2 v2.0.0 h1:bsMYyqHCbkvHwj+eNCFBuxtlKndKfyGI2vaQmM3fIE4=
+github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc=
+github.com/pion/udp v0.1.4 h1:OowsTmu1Od3sD6i3fQUJxJn2fEvJO6L1TidgadtbTI8=
+github.com/pion/udp v0.1.4/go.mod h1:G8LDo56HsFwC24LIcnT4YIDU5qcB6NepqqjP0keL2us=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1 h1:QU2cs0xxKYvF1JfibP/8vs+pFy6OvIpqNR2lYC4jYNU=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM=
-github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4=
-github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 h1:V/4Cj2GytqdqK7OMEz6c4LNjey3SNyfw3pg5jPKtJvQ=
+github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 h1:q815fjV3G+4JvXNo2VwT2m+/msMU0sUkCK68CgHV9Y8=
+github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91/go.mod h1:qIWCTaK0xQlXNlNlIVoZjKMZFopqfMZcg4JcRqGoYc0=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
+github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
+github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
+github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
+github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/vmware/go-ipfix v0.9.0 h1:4/N5eFliqULEaCUQV0lafOpN/1bItPE9OTAPGhrIXus=
+github.com/vmware/go-ipfix v0.9.0/go.mod h1:MYEdL6Uel2ufOZyVCKvIAaw9hwnewK8aPr7rnwRbxMY=
+github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
+github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
+github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 h1:k6fQVDQexDE+3jG2SfCQjnHS7OamcP73YMoxEVq5B6k=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0/go.mod h1:t4BrYLHU450Zo9fnydWlIuswB1bm7rM8havDpWOJeDo=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 h1:xvhQxJ/C9+RTnAj5DpTg7LSM1vbbMTiXt7e9hsfqHNw=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0/go.mod h1:Fcvs2Bz1jkDM+Wf5/ozBGmi3tQ/c9zPKLnsipnfhGAo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
+go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
+go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
+go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -192,46 +1334,205 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
+gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo=
+google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY=
+gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=
+gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
+gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0=
k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM=
+k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-aggregator v0.31.2 h1:Uw1zUP2D/4wiSjKWVVzSOcCGLuW/+IdRwjjC0FJooYU=
k8s.io/kube-aggregator v0.31.2/go.mod h1:41/VIXH+/Qcg9ERNAY6bRF/WQR6xL1wFgYagdHac1X4=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 h1:1Wof1cGQgA5pqgo8MxKPtf+qN6Sh/0JzznmeGPm1HnE=
k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8/go.mod h1:Os6V6dZwLNii3vxFpxcNaTmH8LJJBkOTg1N0tOA0fvA=
+k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/network-policy-api v0.1.5 h1:xyS7VAaM9EfyB428oFk7WjWaCK6B129i+ILUF4C8l6E=
+sigs.k8s.io/network-policy-api v0.1.5/go.mod h1:D7Nkr43VLNd7iYryemnj8qf0N/WjBzTZDxYA+g4u1/Y=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/vendor/github.com/Knetic/govaluate/.gitignore b/vendor/github.com/Knetic/govaluate/.gitignore
new file mode 100644
index 000000000..da210fb31
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/.gitignore
@@ -0,0 +1,28 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+coverage.out
+
+manual_test.go
+*.out
+*.err
diff --git a/vendor/github.com/Knetic/govaluate/.travis.yml b/vendor/github.com/Knetic/govaluate/.travis.yml
new file mode 100644
index 000000000..35ae404ab
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+
+script: ./test.sh
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
diff --git a/vendor/github.com/Knetic/govaluate/CONTRIBUTORS b/vendor/github.com/Knetic/govaluate/CONTRIBUTORS
new file mode 100644
index 000000000..c1a7fe42d
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/CONTRIBUTORS
@@ -0,0 +1,15 @@
+This library was authored by George Lester, and contains contributions from:
+
+vjeantet (regex support)
+iasci (ternary operator)
+oxtoacart (parameter structures, deferred parameter retrieval)
+wmiller848 (bitwise operators)
+prashantv (optimization of bools)
+dpaolella (exposure of variables used in an expression)
+benpaxton (fix for missing type checks during literal elide process)
+abrander (panic-finding testing tool, float32 conversions)
+xfennec (fix for dates being parsed in the current Location)
+bgaifullin (lifting restriction on complex/struct types)
+gautambt (hexadecimal literals)
+felixonmars (fix multiple typos in test names)
+sambonfire (automatic type conversion for accessor function calls)
\ No newline at end of file
diff --git a/vendor/github.com/Knetic/govaluate/EvaluableExpression.go b/vendor/github.com/Knetic/govaluate/EvaluableExpression.go
new file mode 100644
index 000000000..a5fe50d47
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/EvaluableExpression.go
@@ -0,0 +1,276 @@
+package govaluate
+
+import (
+ "errors"
+ "fmt"
+)
+
+const isoDateFormat string = "2006-01-02T15:04:05.999999999Z0700"
+const shortCircuitHolder int = -1
+
+var DUMMY_PARAMETERS = MapParameters(map[string]interface{}{})
+
+/*
+ EvaluableExpression represents a set of ExpressionTokens which, taken together,
+ are an expression that can be evaluated down into a single value.
+*/
+type EvaluableExpression struct {
+
+ /*
+ Represents the query format used to output dates. Typically only used when creating SQL or Mongo queries from an expression.
+ Defaults to the complete ISO8601 format, including nanoseconds.
+ */
+ QueryDateFormat string
+
+ /*
+ Whether or not to safely check types when evaluating.
+ If true, this library will return error messages when invalid types are used.
+ If false, the library will panic when operators encounter types they can't use.
+
+ This is exclusively for users who need to squeeze every ounce of speed out of the library as they can,
+ and you should only set this to false if you know exactly what you're doing.
+ */
+ ChecksTypes bool
+
+ tokens []ExpressionToken
+ evaluationStages *evaluationStage
+ inputExpression string
+}
+
+/*
+ Parses a new EvaluableExpression from the given [expression] string.
+ Returns an error if the given expression has invalid syntax.
+*/
+func NewEvaluableExpression(expression string) (*EvaluableExpression, error) {
+
+ functions := make(map[string]ExpressionFunction)
+ return NewEvaluableExpressionWithFunctions(expression, functions)
+}
+
+/*
+ Similar to [NewEvaluableExpression], except that instead of a string, an already-tokenized expression is given.
+ This is useful in cases where you may be generating an expression automatically, or using some other parser (e.g., to parse from a query language)
+*/
+func NewEvaluableExpressionFromTokens(tokens []ExpressionToken) (*EvaluableExpression, error) {
+
+ var ret *EvaluableExpression
+ var err error
+
+ ret = new(EvaluableExpression)
+ ret.QueryDateFormat = isoDateFormat
+
+ err = checkBalance(tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ err = checkExpressionSyntax(tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ ret.tokens, err = optimizeTokens(tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ ret.evaluationStages, err = planStages(ret.tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ ret.ChecksTypes = true
+ return ret, nil
+}
+
+/*
+ Similar to [NewEvaluableExpression], except enables the use of user-defined functions.
+ Functions passed into this will be available to the expression.
+*/
+func NewEvaluableExpressionWithFunctions(expression string, functions map[string]ExpressionFunction) (*EvaluableExpression, error) {
+
+ var ret *EvaluableExpression
+ var err error
+
+ ret = new(EvaluableExpression)
+ ret.QueryDateFormat = isoDateFormat
+ ret.inputExpression = expression
+
+ ret.tokens, err = parseTokens(expression, functions)
+ if err != nil {
+ return nil, err
+ }
+
+ err = checkBalance(ret.tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ err = checkExpressionSyntax(ret.tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ ret.tokens, err = optimizeTokens(ret.tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ ret.evaluationStages, err = planStages(ret.tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ ret.ChecksTypes = true
+ return ret, nil
+}
+
+/*
+	Same as `Eval`, but automatically wraps a map of parameters into a `govaluate.Parameters` structure.
+*/
+func (this EvaluableExpression) Evaluate(parameters map[string]interface{}) (interface{}, error) {
+
+ if parameters == nil {
+ return this.Eval(nil)
+ }
+
+ return this.Eval(MapParameters(parameters))
+}
+
+/*
+ Runs the entire expression using the given [parameters].
+ e.g., If the expression contains a reference to the variable "foo", it will be taken from `parameters.Get("foo")`.
+
+ This function returns errors if the combination of expression and parameters cannot be run,
+ such as if a variable in the expression is not present in [parameters].
+
+ In all non-error circumstances, this returns the single value result of the expression and parameters given.
+ e.g., if the expression is "1 + 1", this will return 2.0.
+ e.g., if the expression is "foo + 1" and parameters contains "foo" = 2, this will return 3.0
+*/
+func (this EvaluableExpression) Eval(parameters Parameters) (interface{}, error) {
+
+ if this.evaluationStages == nil {
+ return nil, nil
+ }
+
+ if parameters != nil {
+ parameters = &sanitizedParameters{parameters}
+ } else {
+ parameters = DUMMY_PARAMETERS
+ }
+
+ return this.evaluateStage(this.evaluationStages, parameters)
+}
+
+func (this EvaluableExpression) evaluateStage(stage *evaluationStage, parameters Parameters) (interface{}, error) {
+
+ var left, right interface{}
+ var err error
+
+ if stage.leftStage != nil {
+ left, err = this.evaluateStage(stage.leftStage, parameters)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if stage.isShortCircuitable() {
+ switch stage.symbol {
+ case AND:
+ if left == false {
+ return false, nil
+ }
+ case OR:
+ if left == true {
+ return true, nil
+ }
+ case COALESCE:
+ if left != nil {
+ return left, nil
+ }
+
+ case TERNARY_TRUE:
+ if left == false {
+ right = shortCircuitHolder
+ }
+ case TERNARY_FALSE:
+ if left != nil {
+ right = shortCircuitHolder
+ }
+ }
+ }
+
+ if right != shortCircuitHolder && stage.rightStage != nil {
+ right, err = this.evaluateStage(stage.rightStage, parameters)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if this.ChecksTypes {
+ if stage.typeCheck == nil {
+
+ err = typeCheck(stage.leftTypeCheck, left, stage.symbol, stage.typeErrorFormat)
+ if err != nil {
+ return nil, err
+ }
+
+ err = typeCheck(stage.rightTypeCheck, right, stage.symbol, stage.typeErrorFormat)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // special case where the type check needs to know both sides to determine if the operator can handle it
+ if !stage.typeCheck(left, right) {
+ errorMsg := fmt.Sprintf(stage.typeErrorFormat, left, stage.symbol.String())
+ return nil, errors.New(errorMsg)
+ }
+ }
+ }
+
+ return stage.operator(left, right, parameters)
+}
+
+func typeCheck(check stageTypeCheck, value interface{}, symbol OperatorSymbol, format string) error {
+
+ if check == nil {
+ return nil
+ }
+
+ if check(value) {
+ return nil
+ }
+
+ errorMsg := fmt.Sprintf(format, value, symbol.String())
+ return errors.New(errorMsg)
+}
+
+/*
+ Returns an array representing the ExpressionTokens that make up this expression.
+*/
+func (this EvaluableExpression) Tokens() []ExpressionToken {
+
+ return this.tokens
+}
+
+/*
+ Returns the original expression used to create this EvaluableExpression.
+*/
+func (this EvaluableExpression) String() string {
+
+ return this.inputExpression
+}
+
+/*
+ Returns an array representing the variables contained in this EvaluableExpression.
+*/
+func (this EvaluableExpression) Vars() []string {
+ var varlist []string
+ for _, val := range this.Tokens() {
+ if val.Kind == VARIABLE {
+ varlist = append(varlist, val.Value.(string))
+ }
+ }
+ return varlist
+}
diff --git a/vendor/github.com/Knetic/govaluate/EvaluableExpression_sql.go b/vendor/github.com/Knetic/govaluate/EvaluableExpression_sql.go
new file mode 100644
index 000000000..7e0ad1c88
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/EvaluableExpression_sql.go
@@ -0,0 +1,167 @@
+package govaluate
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+/*
+ Returns a string representing this expression as if it were written in SQL.
+ This function assumes that all parameters exist within the same table, and that the table essentially represents
+ a serialized object of some sort (e.g., hibernate).
+ If your data model is more normalized, you may need to consider iterating through each actual token given by `Tokens()`
+ to create your query.
+
+ Boolean values are considered to be "1" for true, "0" for false.
+
+ Times are formatted according to this.QueryDateFormat.
+*/
+func (this EvaluableExpression) ToSQLQuery() (string, error) {
+
+ var stream *tokenStream
+ var transactions *expressionOutputStream
+ var transaction string
+ var err error
+
+ stream = newTokenStream(this.tokens)
+ transactions = new(expressionOutputStream)
+
+ for stream.hasNext() {
+
+ transaction, err = this.findNextSQLString(stream, transactions)
+ if err != nil {
+ return "", err
+ }
+
+ transactions.add(transaction)
+ }
+
+ return transactions.createString(" "), nil
+}
+
+func (this EvaluableExpression) findNextSQLString(stream *tokenStream, transactions *expressionOutputStream) (string, error) {
+
+ var token ExpressionToken
+ var ret string
+
+ token = stream.next()
+
+ switch token.Kind {
+
+ case STRING:
+ ret = fmt.Sprintf("'%v'", token.Value)
+ case PATTERN:
+ ret = fmt.Sprintf("'%s'", token.Value.(*regexp.Regexp).String())
+ case TIME:
+ ret = fmt.Sprintf("'%s'", token.Value.(time.Time).Format(this.QueryDateFormat))
+
+ case LOGICALOP:
+ switch logicalSymbols[token.Value.(string)] {
+
+ case AND:
+ ret = "AND"
+ case OR:
+ ret = "OR"
+ }
+
+ case BOOLEAN:
+ if token.Value.(bool) {
+ ret = "1"
+ } else {
+ ret = "0"
+ }
+
+ case VARIABLE:
+ ret = fmt.Sprintf("[%s]", token.Value.(string))
+
+ case NUMERIC:
+ ret = fmt.Sprintf("%g", token.Value.(float64))
+
+ case COMPARATOR:
+ switch comparatorSymbols[token.Value.(string)] {
+
+ case EQ:
+ ret = "="
+ case NEQ:
+ ret = "<>"
+ case REQ:
+ ret = "RLIKE"
+ case NREQ:
+ ret = "NOT RLIKE"
+ default:
+ ret = fmt.Sprintf("%s", token.Value.(string))
+ }
+
+ case TERNARY:
+
+ switch ternarySymbols[token.Value.(string)] {
+
+ case COALESCE:
+
+ left := transactions.rollback()
+ right, err := this.findNextSQLString(stream, transactions)
+ if err != nil {
+ return "", err
+ }
+
+ ret = fmt.Sprintf("COALESCE(%v, %v)", left, right)
+ case TERNARY_TRUE:
+ fallthrough
+ case TERNARY_FALSE:
+ return "", errors.New("Ternary operators are unsupported in SQL output")
+ }
+ case PREFIX:
+ switch prefixSymbols[token.Value.(string)] {
+
+ case INVERT:
+ ret = fmt.Sprintf("NOT")
+ default:
+
+ right, err := this.findNextSQLString(stream, transactions)
+ if err != nil {
+ return "", err
+ }
+
+ ret = fmt.Sprintf("%s%s", token.Value.(string), right)
+ }
+ case MODIFIER:
+
+ switch modifierSymbols[token.Value.(string)] {
+
+ case EXPONENT:
+
+ left := transactions.rollback()
+ right, err := this.findNextSQLString(stream, transactions)
+ if err != nil {
+ return "", err
+ }
+
+ ret = fmt.Sprintf("POW(%s, %s)", left, right)
+ case MODULUS:
+
+ left := transactions.rollback()
+ right, err := this.findNextSQLString(stream, transactions)
+ if err != nil {
+ return "", err
+ }
+
+ ret = fmt.Sprintf("MOD(%s, %s)", left, right)
+ default:
+ ret = fmt.Sprintf("%s", token.Value.(string))
+ }
+ case CLAUSE:
+ ret = "("
+ case CLAUSE_CLOSE:
+ ret = ")"
+ case SEPARATOR:
+ ret = ","
+
+ default:
+ errorMsg := fmt.Sprintf("Unrecognized query token '%s' of kind '%s'", token.Value, token.Kind)
+ return "", errors.New(errorMsg)
+ }
+
+ return ret, nil
+}
diff --git a/vendor/github.com/Knetic/govaluate/ExpressionToken.go b/vendor/github.com/Knetic/govaluate/ExpressionToken.go
new file mode 100644
index 000000000..f849f3813
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/ExpressionToken.go
@@ -0,0 +1,9 @@
+package govaluate
+
+/*
+ Represents a single parsed token.
+*/
+type ExpressionToken struct {
+ Kind TokenKind
+ Value interface{}
+}
diff --git a/vendor/github.com/Knetic/govaluate/LICENSE b/vendor/github.com/Knetic/govaluate/LICENSE
new file mode 100644
index 000000000..24b9b4591
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2016 George Lester
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/Knetic/govaluate/MANUAL.md b/vendor/github.com/Knetic/govaluate/MANUAL.md
new file mode 100644
index 000000000..e06582851
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/MANUAL.md
@@ -0,0 +1,176 @@
+govaluate
+====
+
+This library contains quite a lot of functionality; this document is meant to be formal documentation of its operators and features.
+Some of this documentation may duplicate what's in README.md, but should never conflict.
+
+# Types
+
+This library only officially deals with four types: `float64`, `bool`, `string`, and arrays.
+
+All numeric literals, with or without a radix, will be converted to `float64` for evaluation. For instance, in practice there is no difference between the literals "1.0" and "1"; they both end up as `float64`. This matters to users because if you intend to return numeric values from your expressions, then the returned value will be `float64`, not any other numeric type.
+
+Any string _literal_ (not parameter) which is interpretable as a date will be converted to a `float64` representation of that date's unix time. Any `time.Time` parameters will not be operable with these date literals; such parameters will need to use the `time.Time.Unix()` method to get a numeric representation.
+
+Arrays are untyped, and can be mixed-type. Internally they're all just `interface{}`. Only two operators can interact with arrays, `IN` and `,`. All other operators will refuse to operate on arrays.
+
+# Operators
+
+## Modifiers
+
+### Addition, concatenation `+`
+
+If either left or right sides of the `+` operator are a `string`, then this operator will perform string concatenation and return that result. If neither are string, then both must be numeric, and this will return a numeric result.
+
+Any other case is invalid.
+
+### Arithmetic `-` `*` `/` `**` `%`
+
+`**` refers to "take to the power of". For instance, `3 ** 4` == 81.
+
+* _Left side_: numeric
+* _Right side_: numeric
+* _Returns_: numeric
+
+### Bitwise shifts, masks `>>` `<<` `|` `&` `^`
+
+All of these operators convert their `float64` left and right sides to `int64`, perform their operation, and then convert back.
+Given how this library assumes numbers are represented (as `float64`), it is unlikely that this behavior will change, even though it may cause havoc with extremely large or small numbers.
+
+* _Left side_: numeric
+* _Right side_: numeric
+* _Returns_: numeric
+
+### Negation `-`
+
+Prefix only. This can never have a left-hand value.
+
+* _Right side_: numeric
+* _Returns_: numeric
+
+### Inversion `!`
+
+Prefix only. This can never have a left-hand value.
+
+* _Right side_: bool
+* _Returns_: bool
+
+### Bitwise NOT `~`
+
+Prefix only. This can never have a left-hand value.
+
+* _Right side_: numeric
+* _Returns_: numeric
+
+## Logical Operators
+
+For all logical operators, this library will short-circuit the operation if the left-hand side is sufficient to determine what to do. For instance, `true || expensiveOperation()` will not actually call `expensiveOperation()`, since it knows the left-hand side is `true`.
+
+### Logical AND/OR `&&` `||`
+
+* _Left side_: bool
+* _Right side_: bool
+* _Returns_: bool
+
+### Ternary true `?`
+
+Checks if the left side is `true`. If so, returns the right side. If the left side is `false`, returns `nil`.
+In practice, this is commonly used with the other ternary operator.
+
+* _Left side_: bool
+* _Right side_: Any type.
+* _Returns_: Right side or `nil`
+
+### Ternary false `:`
+
+Checks if the left side is `nil`. If so, returns the right side. If the left side is non-nil, returns the left side.
+In practice, this is commonly used with the other ternary operator.
+
+* _Left side_: Any type.
+* _Right side_: Any type.
+* _Returns_: Right side or `nil`
+
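+As a minimal sketch (not part of the upstream manual; it reuses the `NewEvaluableExpression` and `Evaluate` calls shown in README.md, and the `foo` parameter is illustrative), the two ternary operators are usually chained to form a conditional:
+
+```go
+	expression, _ := govaluate.NewEvaluableExpression("foo > 10 ? 'high' : 'low'")
+
+	result, _ := expression.Evaluate(map[string]interface{}{"foo": 42})
+	// result is now "high"; with foo at 10 or below it would be "low".
+```
+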
+### Null coalescence `??`
+
+Similar to the C# operator. If the left value is non-nil, it returns that. If not, then the right-value is returned.
+
+* _Left side_: Any type.
+* _Right side_: Any type.
+* _Returns_: No specific type - whichever is passed to it.
+
+## Comparators
+
+### Numeric/lexicographic comparators `>` `<` `>=` `<=`
+
+If both sides are numeric, this returns the usual greater/lesser behavior that would be expected.
+If both sides are string, this returns the lexicographic comparison of the strings. This uses Go's standard lexicographic compare.
+
+* _Accepts_: Left and right side must either be both string, or both numeric.
+* _Returns_: bool
+
+### Regex comparators `=~` `!~`
+
+These use Go's standard `regexp` flavor of regex. The left side is expected to be the candidate string, and the right side the pattern. `=~` returns whether or not the candidate string matches the regex pattern given on the right. `!~` is the inverted version of the same logic.
+
+* _Left side_: string
+* _Right side_: string
+* _Returns_: bool
+
+## Arrays
+
+### Separator `,`
+
+The separator, always paired with parentheses, creates arrays. It must always have both a left and right-hand value, so for instance `(, 0)` and `(0,)` are invalid uses of it.
+
+Again, this should always be used with parentheses, like `(1, 2, 3, 4)`.
+
+### Membership `IN`
+
+The only operator with a text name, this operator checks the right-hand side array to see if it contains a value that is equal to the left-side value.
+Equality is determined by the use of the `==` operator, and this library doesn't check types between the values. Any two values that can still be checked for equality with `==` once cast to `interface{}` will behave as expected.
+
+Note that you can use a parameter for the array, but it must be an `[]interface{}`.
+
+* _Left side_: Any type.
+* _Right side_: array
+* _Returns_: bool
+
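+A minimal sketch of membership checking (illustrative only; the `method` parameter and its values are made up, and the lowercase `in` spelling matches the comparator table in OperatorSymbol.go):
+
+```go
+	expression, _ := govaluate.NewEvaluableExpression("method in ('GET', 'HEAD')")
+
+	parameters := map[string]interface{}{"method": "POST"}
+
+	result, _ := expression.Evaluate(parameters)
+	// result is now "false", the bool value; with method set to "GET" it would be "true".
+```
+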
+# Parameters
+
+Parameters must be passed in every time the expression is evaluated. Parameters can be of any type, but will not cause errors unless actually used in an erroneous way. There is no difference in behavior for any of the above operators for parameters - they are type checked when used.
+
+All `int` and `float` values of any width will be converted to `float64` before use.
+
+At no point is the parameter structure, or any value thereof, modified by this library.
+
+## Alternates to maps
+
+The default form of parameters as a map may not serve your use case. You may have parameters in some other structure, you may want to change the no-parameter-found behavior, or maybe even just have some debugging print statements invoked when a parameter is accessed.
+
+To do this, define a type that implements the `govaluate.Parameters` interface. When you want to evaluate, instead call `EvaluableExpression.Eval` and pass your parameter structure.
+
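+As a minimal sketch (illustrative, not part of the upstream manual): the `Get(name string) (interface{}, error)` method below mirrors how this library calls `parameters.Get`, while the `envParams` type, its environment-variable lookup, and the error text are made up for the example. It relies on the standard `os` and `fmt` packages.
+
+```go
+// envParams resolves expression variables from environment variables.
+// It satisfies the Parameters interface expected by EvaluableExpression.Eval.
+type envParams struct{}
+
+func (envParams) Get(name string) (interface{}, error) {
+	if value, ok := os.LookupEnv(name); ok {
+		return value, nil
+	}
+	return nil, fmt.Errorf("no parameter '%s' found", name)
+}
+```
+
+It can then be passed directly to evaluation, for example `expression.Eval(envParams{})`, instead of passing a map to `Evaluate`.
+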
+# Functions
+
+During expression parsing (_not_ evaluation), a map of functions can be given to `govaluate.NewEvaluableExpressionWithFunctions` (the lengthiest and finest of function names). The resultant expression will be able to invoke those functions during evaluation. Once parsed, an expression cannot have functions added or removed - a new expression will need to be created if you want to change the functions, or behavior of said functions.
+
+Functions always take the form `()`, including parens. Functions can have an empty list of parameters, like `()`, but still must have parens.
+
+If the expression contains something that looks like it ought to be a function (such as `foo()`), but no such function was given to it, it will error on parsing.
+
+Functions must be of type `map[string]govaluate.ExpressionFunction`. `ExpressionFunction`, for brevity, has the following signature:
+
+`func(args ...interface{}) (interface{}, error)`
+
+Where `args` is whatever is passed to the function when called. If a non-nil error is returned from a function during evaluation, the evaluation stops and ultimately returns that error to the caller of `Evaluate()` or `Eval()`.
+
+## Built-in functions
+
+There aren't any builtin functions. The author is opposed to maintaining a standard library of functions to be used.
+
+Every use case of this library is different, and even in simple use cases (such as parameters, see above) different users need different behavior, naming, or even functionality. The author prefers that users make their own decisions about what functions they need, and how they operate.
+
+# Equality
+
+The `==` and `!=` operators involve a moderately complex workflow. They use [`reflect.DeepEqual`](https://golang.org/pkg/reflect/#DeepEqual). This is for complicated reasons, but there are some types in Go that cannot be compared with the native `==` operator. Arrays, in particular, cannot be compared - Go will panic if you try. One might assume this could be handled with the type checking system in `govaluate`, but unfortunately without reflection there is no way to know if a variable is a slice/array. Worse, structs can be incomparable if they _contain incomparable types_.
+
+It's all very complicated. Fortunately, Go includes the `reflect.DeepEqual` function to handle all the edge cases. Currently, `govaluate` uses that for all equality/inequality.
diff --git a/vendor/github.com/Knetic/govaluate/OperatorSymbol.go b/vendor/github.com/Knetic/govaluate/OperatorSymbol.go
new file mode 100644
index 000000000..4b810658b
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/OperatorSymbol.go
@@ -0,0 +1,309 @@
+package govaluate
+
+/*
+ Represents the valid symbols for operators.
+
+*/
+type OperatorSymbol int
+
+const (
+ VALUE OperatorSymbol = iota
+ LITERAL
+ NOOP
+ EQ
+ NEQ
+ GT
+ LT
+ GTE
+ LTE
+ REQ
+ NREQ
+ IN
+
+ AND
+ OR
+
+ PLUS
+ MINUS
+ BITWISE_AND
+ BITWISE_OR
+ BITWISE_XOR
+ BITWISE_LSHIFT
+ BITWISE_RSHIFT
+ MULTIPLY
+ DIVIDE
+ MODULUS
+ EXPONENT
+
+ NEGATE
+ INVERT
+ BITWISE_NOT
+
+ TERNARY_TRUE
+ TERNARY_FALSE
+ COALESCE
+
+ FUNCTIONAL
+ ACCESS
+ SEPARATE
+)
+
+type operatorPrecedence int
+
+const (
+ noopPrecedence operatorPrecedence = iota
+ valuePrecedence
+ functionalPrecedence
+ prefixPrecedence
+ exponentialPrecedence
+ additivePrecedence
+ bitwisePrecedence
+ bitwiseShiftPrecedence
+ multiplicativePrecedence
+ comparatorPrecedence
+ ternaryPrecedence
+ logicalAndPrecedence
+ logicalOrPrecedence
+ separatePrecedence
+)
+
+func findOperatorPrecedenceForSymbol(symbol OperatorSymbol) operatorPrecedence {
+
+ switch symbol {
+ case NOOP:
+ return noopPrecedence
+ case VALUE:
+ return valuePrecedence
+ case EQ:
+ fallthrough
+ case NEQ:
+ fallthrough
+ case GT:
+ fallthrough
+ case LT:
+ fallthrough
+ case GTE:
+ fallthrough
+ case LTE:
+ fallthrough
+ case REQ:
+ fallthrough
+ case NREQ:
+ fallthrough
+ case IN:
+ return comparatorPrecedence
+ case AND:
+ return logicalAndPrecedence
+ case OR:
+ return logicalOrPrecedence
+ case BITWISE_AND:
+ fallthrough
+ case BITWISE_OR:
+ fallthrough
+ case BITWISE_XOR:
+ return bitwisePrecedence
+ case BITWISE_LSHIFT:
+ fallthrough
+ case BITWISE_RSHIFT:
+ return bitwiseShiftPrecedence
+ case PLUS:
+ fallthrough
+ case MINUS:
+ return additivePrecedence
+ case MULTIPLY:
+ fallthrough
+ case DIVIDE:
+ fallthrough
+ case MODULUS:
+ return multiplicativePrecedence
+ case EXPONENT:
+ return exponentialPrecedence
+ case BITWISE_NOT:
+ fallthrough
+ case NEGATE:
+ fallthrough
+ case INVERT:
+ return prefixPrecedence
+ case COALESCE:
+ fallthrough
+ case TERNARY_TRUE:
+ fallthrough
+ case TERNARY_FALSE:
+ return ternaryPrecedence
+ case ACCESS:
+ fallthrough
+ case FUNCTIONAL:
+ return functionalPrecedence
+ case SEPARATE:
+ return separatePrecedence
+ }
+
+ return valuePrecedence
+}
+
+/*
+ Map of all valid comparators, and their string equivalents.
+ Used during parsing of expressions to determine if a symbol is, in fact, a comparator.
+ Also used during evaluation to determine exactly which comparator is being used.
+*/
+var comparatorSymbols = map[string]OperatorSymbol{
+ "==": EQ,
+ "!=": NEQ,
+ ">": GT,
+ ">=": GTE,
+ "<": LT,
+ "<=": LTE,
+ "=~": REQ,
+ "!~": NREQ,
+ "in": IN,
+}
+
+var logicalSymbols = map[string]OperatorSymbol{
+ "&&": AND,
+ "||": OR,
+}
+
+var bitwiseSymbols = map[string]OperatorSymbol{
+ "^": BITWISE_XOR,
+ "&": BITWISE_AND,
+ "|": BITWISE_OR,
+}
+
+var bitwiseShiftSymbols = map[string]OperatorSymbol{
+ ">>": BITWISE_RSHIFT,
+ "<<": BITWISE_LSHIFT,
+}
+
+var additiveSymbols = map[string]OperatorSymbol{
+ "+": PLUS,
+ "-": MINUS,
+}
+
+var multiplicativeSymbols = map[string]OperatorSymbol{
+ "*": MULTIPLY,
+ "/": DIVIDE,
+ "%": MODULUS,
+}
+
+var exponentialSymbolsS = map[string]OperatorSymbol{
+ "**": EXPONENT,
+}
+
+var prefixSymbols = map[string]OperatorSymbol{
+ "-": NEGATE,
+ "!": INVERT,
+ "~": BITWISE_NOT,
+}
+
+var ternarySymbols = map[string]OperatorSymbol{
+ "?": TERNARY_TRUE,
+ ":": TERNARY_FALSE,
+ "??": COALESCE,
+}
+
+// this is defined separately from additiveSymbols et al because it's needed for parsing, not stage planning.
+var modifierSymbols = map[string]OperatorSymbol{
+ "+": PLUS,
+ "-": MINUS,
+ "*": MULTIPLY,
+ "/": DIVIDE,
+ "%": MODULUS,
+ "**": EXPONENT,
+ "&": BITWISE_AND,
+ "|": BITWISE_OR,
+ "^": BITWISE_XOR,
+ ">>": BITWISE_RSHIFT,
+ "<<": BITWISE_LSHIFT,
+}
+
+var separatorSymbols = map[string]OperatorSymbol{
+ ",": SEPARATE,
+}
+
+/*
+ Returns true if this operator is contained by the given array of candidate symbols.
+ False otherwise.
+*/
+func (this OperatorSymbol) IsModifierType(candidate []OperatorSymbol) bool {
+
+ for _, symbolType := range candidate {
+ if this == symbolType {
+ return true
+ }
+ }
+
+ return false
+}
+
+/*
+ Generally used when formatting type check errors.
+ We could store the stringified symbol somewhere else and not require a duplicated codeblock to translate
+ OperatorSymbol to string, but that would require more memory, and another field somewhere.
+ Adding operators is rare enough that we just stringify it here instead.
+*/
+func (this OperatorSymbol) String() string {
+
+ switch this {
+ case NOOP:
+ return "NOOP"
+ case VALUE:
+ return "VALUE"
+ case EQ:
+ return "="
+ case NEQ:
+ return "!="
+ case GT:
+ return ">"
+ case LT:
+ return "<"
+ case GTE:
+ return ">="
+ case LTE:
+ return "<="
+ case REQ:
+ return "=~"
+ case NREQ:
+ return "!~"
+ case AND:
+ return "&&"
+ case OR:
+ return "||"
+ case IN:
+ return "in"
+ case BITWISE_AND:
+ return "&"
+ case BITWISE_OR:
+ return "|"
+ case BITWISE_XOR:
+ return "^"
+ case BITWISE_LSHIFT:
+ return "<<"
+ case BITWISE_RSHIFT:
+ return ">>"
+ case PLUS:
+ return "+"
+ case MINUS:
+ return "-"
+ case MULTIPLY:
+ return "*"
+ case DIVIDE:
+ return "/"
+ case MODULUS:
+ return "%"
+ case EXPONENT:
+ return "**"
+ case NEGATE:
+ return "-"
+ case INVERT:
+ return "!"
+ case BITWISE_NOT:
+ return "~"
+ case TERNARY_TRUE:
+ return "?"
+ case TERNARY_FALSE:
+ return ":"
+ case COALESCE:
+ return "??"
+ }
+ return ""
+}
diff --git a/vendor/github.com/Knetic/govaluate/README.md b/vendor/github.com/Knetic/govaluate/README.md
new file mode 100644
index 000000000..2e5716d4f
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/README.md
@@ -0,0 +1,233 @@
+govaluate
+====
+
+[![Build Status](https://travis-ci.org/Knetic/govaluate.svg?branch=master)](https://travis-ci.org/Knetic/govaluate)
+[![Godoc](https://img.shields.io/badge/godoc-reference-5272B4.svg)](https://godoc.org/github.com/Knetic/govaluate)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Knetic/govaluate)](https://goreportcard.com/report/github.com/Knetic/govaluate)
+[![Gocover](https://gocover.io/_badge/github.com/Knetic/govaluate)](https://gocover.io/github.com/Knetic/govaluate)
+
+Provides support for evaluating arbitrary C-like arithmetic/string expressions.
+
+Why can't you just write these expressions in code?
+--
+
+Sometimes, you can't know ahead-of-time what an expression will look like, or you want those expressions to be configurable.
+Perhaps you've got a set of data running through your application, and you want to allow your users to specify some validations to run on it before committing it to a database. Or maybe you've written a monitoring framework which is capable of gathering a bunch of metrics, then evaluating a few expressions to see if any metrics should be alerted upon, but the conditions for alerting are different for each monitor.
+
+A lot of people wind up writing their own half-baked style of evaluation language that fits their needs, but isn't complete. Or they wind up baking the expression into the actual executable, even if they know it's subject to change. These strategies may work, but they take time to implement, time for users to learn, and induce technical debt as requirements change. This library is meant to cover all the normal C-like expressions, so that you don't have to reinvent one of the oldest wheels on a computer.
+
+How do I use it?
+--
+
+You create a new EvaluableExpression, then call "Evaluate" on it.
+
+```go
+ expression, err := govaluate.NewEvaluableExpression("10 > 0");
+ result, err := expression.Evaluate(nil);
+ // result is now set to "true", the bool value.
+```
+
+Cool, but how about with parameters?
+
+```go
+ expression, err := govaluate.NewEvaluableExpression("foo > 0");
+
+ parameters := make(map[string]interface{}, 8)
+ parameters["foo"] = -1;
+
+ result, err := expression.Evaluate(parameters);
+ // result is now set to "false", the bool value.
+```
+
+That's cool, but we can almost certainly have done all that in code. What about a complex use case that involves some math?
+
+```go
+ expression, err := govaluate.NewEvaluableExpression("(requests_made * requests_succeeded / 100) >= 90");
+
+ parameters := make(map[string]interface{}, 8)
+ parameters["requests_made"] = 100;
+ parameters["requests_succeeded"] = 80;
+
+ result, err := expression.Evaluate(parameters);
+ // result is now set to "false", the bool value.
+```
+
+Or maybe you want to check the status of an alive check ("smoketest") page, which will be a string?
+
+```go
+ expression, err := govaluate.NewEvaluableExpression("http_response_body == 'service is ok'");
+
+ parameters := make(map[string]interface{}, 8)
+ parameters["http_response_body"] = "service is ok";
+
+ result, err := expression.Evaluate(parameters);
+ // result is now set to "true", the bool value.
+```
+
+These examples have all returned boolean values, but it's equally possible to return numeric ones.
+
+```go
+ expression, err := govaluate.NewEvaluableExpression("(mem_used / total_mem) * 100");
+
+ parameters := make(map[string]interface{}, 8)
+ parameters["total_mem"] = 1024;
+ parameters["mem_used"] = 512;
+
+ result, err := expression.Evaluate(parameters);
+ // result is now set to "50.0", the float64 value.
+```
+
+You can also do date parsing, though the formats are somewhat limited. Stick to RFC3339, ISO8601, unix date, or ruby date formats. If you're having trouble getting a date string to parse, check the list of formats actually used: [parsing.go:248](https://github.com/Knetic/govaluate/blob/0580e9b47a69125afa0e4ebd1cf93c49eb5a43ec/parsing.go#L258).
+
+```go
+ expression, err := govaluate.NewEvaluableExpression("'2014-01-02' > '2014-01-01 23:59:59'");
+ result, err := expression.Evaluate(nil);
+
+ // result is now set to true
+```
+
+Expressions are parsed once, and can be re-used multiple times. Parsing is the compute-intensive phase of the process, so if you intend to use the same expression with different parameters, just parse it once. Like so:
+
+```go
+ expression, err := govaluate.NewEvaluableExpression("response_time <= 100");
+ parameters := make(map[string]interface{}, 8)
+
+ for {
+ parameters["response_time"] = pingSomething();
+ result, err := expression.Evaluate(parameters)
+ }
+```
+
+The normal C-standard order of operators is respected. When writing an expression, be sure that you either order the operators correctly, or use parentheses to clarify which portions of an expression should be run first.
+
+Escaping characters
+--
+
+Sometimes you'll have parameters that have spaces, slashes, pluses, ampersands or some other character
+that this library interprets as something special. For example, the following expression will not
+act as one might expect:
+
+ "response-time < 100"
+
+As written, the library will parse it as "[response] minus [time] is less than 100". In reality,
+"response-time" is meant to be one variable that just happens to have a dash in it.
+
+There are two ways to work around this. First, you can escape the entire parameter name:
+
+ "[response-time] < 100"
+
+Or you can use backslashes to escape only the minus sign.
+
+ "response\\-time < 100"
+
+Backslashes can be used anywhere in an expression to escape the very next character. Square bracketed parameter names can be used instead of plain parameter names at any time.
+
+Functions
+--
+
+You may have cases where you want to call a function on a parameter during execution of the expression. Perhaps you want to aggregate some set of data, but don't know the exact aggregation you want to use until you're writing the expression itself. Or maybe you have a mathematical operation you want to perform, for which there is no operator; like `log` or `tan` or `sqrt`. For cases like this, you can provide a map of functions to `NewEvaluableExpressionWithFunctions`, which will then be able to use them during execution. For instance;
+
+```go
+ functions := map[string]govaluate.ExpressionFunction {
+ "strlen": func(args ...interface{}) (interface{}, error) {
+ length := len(args[0].(string))
+ return (float64)(length), nil
+ },
+ }
+
+ expString := "strlen('someReallyLongInputString') <= 16"
+ expression, _ := govaluate.NewEvaluableExpressionWithFunctions(expString, functions)
+
+ result, _ := expression.Evaluate(nil)
+ // result is now "false", the boolean value
+```
+
+Functions can accept any number of arguments, nested function calls are handled correctly, and arguments can be of any type (even if none of this library's operators support evaluation of that type). For instance, each of these usages of functions in an expression is valid (assuming that the appropriate functions and parameters are given):
+
+```go
+"sqrt(x1 ** y1, x2 ** y2)"
+"max(someValue, abs(anotherValue), 10 * lastValue)"
+```
+
+Functions cannot be passed as parameters; they must be known at the time the expression is parsed, and are unchangeable after parsing.
+
+Accessors
+--
+
+If you have structs in your parameters, you can access their fields and methods in the usual way. For instance, given a struct that has a method "Echo", present in the parameters as `foo`, the following is valid:
+
+ "foo.Echo('hello world')"
+
+Fields are accessed in a similar way. Assuming `foo` has a field called "Length":
+
+ "foo.Length > 9000"
+
+Accessors can be nested to any depth, like the following:
+
+ "foo.Bar.Baz.SomeFunction()"
+
+However, accessing values in `map`s is not _currently_ supported, so the following will not work:
+
+ "foo.SomeMap['key']"
+
+This may be convenient, but note that using accessors involves a _lot_ of reflection. This makes the expression about four times slower than just using a parameter (consult the benchmarks for more precise measurements on your system).
+If at all reasonable, the author recommends extracting the values you care about into a parameter map beforehand, or defining a struct that implements the `Parameters` interface, and which grabs fields as required. If there are functions you want to use, it's better to pass them as expression functions (see the above section). These approaches use no reflection, and are designed to be fast and clean.
+
+What operators and types does this support?
+--
+
+* Modifiers: `+` `-` `/` `*` `&` `|` `^` `**` `%` `>>` `<<`
+* Comparators: `>` `>=` `<` `<=` `==` `!=` `=~` `!~`
+* Logical ops: `||` `&&`
+* Numeric constants, as 64-bit floating point (`12345.678`)
+* String constants (single quotes: `'foobar'`)
+* Date constants (single quotes, using any permutation of RFC3339, ISO8601, ruby date, or unix date; date parsing is automatically tried with any string constant)
+* Boolean constants: `true` `false`
+* Parentheses to control order of evaluation `(` `)`
+* Arrays (anything separated by `,` within parentheses: `(1, 2, 'foo')`)
+* Prefixes: `!` `-` `~`
+* Ternary conditional: `?` `:`
+* Null coalescence: `??`
+
+See [MANUAL.md](https://github.com/Knetic/govaluate/blob/master/MANUAL.md) for exacting details on what types each operator supports.
+
+Types
+--
+
+Some operators don't make sense when used with some types. For instance, what does it mean to get the modulo of a string? What happens if you check to see if two numbers are logically AND'ed together?
+
+Everyone has a different intuition about the answers to these questions. To prevent confusion, this library will _refuse to operate_ upon types for which there is not an unambiguous meaning for the operation. See [MANUAL.md](https://github.com/Knetic/govaluate/blob/master/MANUAL.md) for details about what operators are valid for which types.
+
+Benchmarks
+--
+
+If you're concerned about the overhead of this library, a good range of benchmarks are built into this repo. You can run them with `go test -bench=.`. The library is built with an eye towards being quick, but has not been aggressively profiled and optimized. For most applications, though, it is completely fine.
+
+For a very rough idea of performance, here are the results output from a benchmark run on a 3rd-gen Macbook Pro (Linux Mint 17.1).
+
+```
+BenchmarkSingleParse-12 1000000 1382 ns/op
+BenchmarkSimpleParse-12 200000 10771 ns/op
+BenchmarkFullParse-12 30000 49383 ns/op
+BenchmarkEvaluationSingle-12 50000000 30.1 ns/op
+BenchmarkEvaluationNumericLiteral-12 10000000 119 ns/op
+BenchmarkEvaluationLiteralModifiers-12 10000000 236 ns/op
+BenchmarkEvaluationParameters-12 5000000 260 ns/op
+BenchmarkEvaluationParametersModifiers-12 3000000 547 ns/op
+BenchmarkComplexExpression-12 2000000 963 ns/op
+BenchmarkRegexExpression-12 100000 20357 ns/op
+BenchmarkConstantRegexExpression-12 1000000 1392 ns/op
+ok
+```
+
+API Breaks
+--
+
+While this library has very few cases which will ever result in an API break, it can happen (and [has](https://github.com/Knetic/govaluate/releases/tag/v2.0.0)). If you are using this in production, vendor the commit you've tested against, or use gopkg.in to redirect your import (e.g., `import "gopkg.in/Knetic/govaluate.v2"`). The master branch _may_ at some point contain API-breaking changes (though these are infrequent), and the author will have no way to communicate these to downstreams, other than creating a new major release.
+
+Releases will explicitly state when an API break happens, and if they do not specify an API break it should be safe to upgrade.
+
+License
+--
+
+This project is licensed under the MIT general use license. You're free to integrate, fork, and play with this code as you see fit without consulting the author, as long as you provide proper credit to the author in your works.
diff --git a/vendor/github.com/Knetic/govaluate/TokenKind.go b/vendor/github.com/Knetic/govaluate/TokenKind.go
new file mode 100644
index 000000000..7c9516d2d
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/TokenKind.go
@@ -0,0 +1,75 @@
+package govaluate
+
+/*
+ Represents all valid types of tokens that a token can be.
+*/
+type TokenKind int
+
+const (
+ UNKNOWN TokenKind = iota
+
+ PREFIX
+ NUMERIC
+ BOOLEAN
+ STRING
+ PATTERN
+ TIME
+ VARIABLE
+ FUNCTION
+ SEPARATOR
+ ACCESSOR
+
+ COMPARATOR
+ LOGICALOP
+ MODIFIER
+
+ CLAUSE
+ CLAUSE_CLOSE
+
+ TERNARY
+)
+
+/*
+	String returns a string that describes the given TokenKind.
+ e.g., when passed the NUMERIC TokenKind, this returns the string "NUMERIC".
+*/
+func (kind TokenKind) String() string {
+
+ switch kind {
+
+ case PREFIX:
+ return "PREFIX"
+ case NUMERIC:
+ return "NUMERIC"
+ case BOOLEAN:
+ return "BOOLEAN"
+ case STRING:
+ return "STRING"
+ case PATTERN:
+ return "PATTERN"
+ case TIME:
+ return "TIME"
+ case VARIABLE:
+ return "VARIABLE"
+ case FUNCTION:
+ return "FUNCTION"
+ case SEPARATOR:
+ return "SEPARATOR"
+ case COMPARATOR:
+ return "COMPARATOR"
+ case LOGICALOP:
+ return "LOGICALOP"
+ case MODIFIER:
+ return "MODIFIER"
+ case CLAUSE:
+ return "CLAUSE"
+ case CLAUSE_CLOSE:
+ return "CLAUSE_CLOSE"
+ case TERNARY:
+ return "TERNARY"
+ case ACCESSOR:
+ return "ACCESSOR"
+ }
+
+ return "UNKNOWN"
+}
diff --git a/vendor/github.com/Knetic/govaluate/evaluationStage.go b/vendor/github.com/Knetic/govaluate/evaluationStage.go
new file mode 100644
index 000000000..11ea58724
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/evaluationStage.go
@@ -0,0 +1,516 @@
+package govaluate
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "regexp"
+ "strings"
+)
+
+const (
+ logicalErrorFormat string = "Value '%v' cannot be used with the logical operator '%v', it is not a bool"
+ modifierErrorFormat string = "Value '%v' cannot be used with the modifier '%v', it is not a number"
+ comparatorErrorFormat string = "Value '%v' cannot be used with the comparator '%v', it is not a number"
+ ternaryErrorFormat string = "Value '%v' cannot be used with the ternary operator '%v', it is not a bool"
+ prefixErrorFormat string = "Value '%v' cannot be used with the prefix '%v'"
+)
+
+type evaluationOperator func(left interface{}, right interface{}, parameters Parameters) (interface{}, error)
+type stageTypeCheck func(value interface{}) bool
+type stageCombinedTypeCheck func(left interface{}, right interface{}) bool
+
+type evaluationStage struct {
+ symbol OperatorSymbol
+
+ leftStage, rightStage *evaluationStage
+
+ // the operation that will be used to evaluate this stage (such as adding [left] to [right] and return the result)
+ operator evaluationOperator
+
+ // ensures that both left and right values are appropriate for this stage. Returns an error if they aren't operable.
+ leftTypeCheck stageTypeCheck
+ rightTypeCheck stageTypeCheck
+
+ // if specified, will override whatever is used in "leftTypeCheck" and "rightTypeCheck".
+	// primarily used for specific operators that don't care which side a given type is on, but still require one side to be of a given type
+ // (like string concat)
+ typeCheck stageCombinedTypeCheck
+
+ // regardless of which type check is used, this string format will be used as the error message for type errors
+ typeErrorFormat string
+}
+
+var (
+ _true = interface{}(true)
+ _false = interface{}(false)
+)
+
+func (this *evaluationStage) swapWith(other *evaluationStage) {
+
+ temp := *other
+ other.setToNonStage(*this)
+ this.setToNonStage(temp)
+}
+
+func (this *evaluationStage) setToNonStage(other evaluationStage) {
+
+ this.symbol = other.symbol
+ this.operator = other.operator
+ this.leftTypeCheck = other.leftTypeCheck
+ this.rightTypeCheck = other.rightTypeCheck
+ this.typeCheck = other.typeCheck
+ this.typeErrorFormat = other.typeErrorFormat
+}
+
+func (this *evaluationStage) isShortCircuitable() bool {
+
+ switch this.symbol {
+ case AND:
+ fallthrough
+ case OR:
+ fallthrough
+ case TERNARY_TRUE:
+ fallthrough
+ case TERNARY_FALSE:
+ fallthrough
+ case COALESCE:
+ return true
+ }
+
+ return false
+}
+
+func noopStageRight(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return right, nil
+}
+
+func addStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+
+ // string concat if either are strings
+ if isString(left) || isString(right) {
+ return fmt.Sprintf("%v%v", left, right), nil
+ }
+
+ return left.(float64) + right.(float64), nil
+}
+func subtractStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return left.(float64) - right.(float64), nil
+}
+func multiplyStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return left.(float64) * right.(float64), nil
+}
+func divideStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return left.(float64) / right.(float64), nil
+}
+func exponentStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return math.Pow(left.(float64), right.(float64)), nil
+}
+func modulusStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return math.Mod(left.(float64), right.(float64)), nil
+}
+func gteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ if isString(left) && isString(right) {
+ return boolIface(left.(string) >= right.(string)), nil
+ }
+ return boolIface(left.(float64) >= right.(float64)), nil
+}
+func gtStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ if isString(left) && isString(right) {
+ return boolIface(left.(string) > right.(string)), nil
+ }
+ return boolIface(left.(float64) > right.(float64)), nil
+}
+func lteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ if isString(left) && isString(right) {
+ return boolIface(left.(string) <= right.(string)), nil
+ }
+ return boolIface(left.(float64) <= right.(float64)), nil
+}
+func ltStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ if isString(left) && isString(right) {
+ return boolIface(left.(string) < right.(string)), nil
+ }
+ return boolIface(left.(float64) < right.(float64)), nil
+}
+func equalStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return boolIface(reflect.DeepEqual(left, right)), nil
+}
+func notEqualStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return boolIface(!reflect.DeepEqual(left, right)), nil
+}
+func andStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return boolIface(left.(bool) && right.(bool)), nil
+}
+func orStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return boolIface(left.(bool) || right.(bool)), nil
+}
+func negateStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return -right.(float64), nil
+}
+func invertStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return boolIface(!right.(bool)), nil
+}
+func bitwiseNotStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return float64(^int64(right.(float64))), nil
+}
+func ternaryIfStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ if left.(bool) {
+ return right, nil
+ }
+ return nil, nil
+}
+func ternaryElseStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ if left != nil {
+ return left, nil
+ }
+ return right, nil
+}
+
+func regexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+
+ var pattern *regexp.Regexp
+ var err error
+
+ switch right.(type) {
+ case string:
+ pattern, err = regexp.Compile(right.(string))
+ if err != nil {
+ return nil, errors.New(fmt.Sprintf("Unable to compile regexp pattern '%v': %v", right, err))
+ }
+ case *regexp.Regexp:
+ pattern = right.(*regexp.Regexp)
+ }
+
+ return pattern.Match([]byte(left.(string))), nil
+}
+
+func notRegexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+
+ ret, err := regexStage(left, right, parameters)
+ if err != nil {
+ return nil, err
+ }
+
+ return !(ret.(bool)), nil
+}
+
+func bitwiseOrStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return float64(int64(left.(float64)) | int64(right.(float64))), nil
+}
+func bitwiseAndStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return float64(int64(left.(float64)) & int64(right.(float64))), nil
+}
+func bitwiseXORStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return float64(int64(left.(float64)) ^ int64(right.(float64))), nil
+}
+func leftShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return float64(uint64(left.(float64)) << uint64(right.(float64))), nil
+}
+func rightShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return float64(uint64(left.(float64)) >> uint64(right.(float64))), nil
+}
+
+func makeParameterStage(parameterName string) evaluationOperator {
+
+ return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ value, err := parameters.Get(parameterName)
+ if err != nil {
+ return nil, err
+ }
+
+ return value, nil
+ }
+}
+
+func makeLiteralStage(literal interface{}) evaluationOperator {
+ return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+ return literal, nil
+ }
+}
+
+func makeFunctionStage(function ExpressionFunction) evaluationOperator {
+
+ return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+
+ if right == nil {
+ return function()
+ }
+
+ switch right.(type) {
+ case []interface{}:
+ return function(right.([]interface{})...)
+ default:
+ return function(right)
+ }
+ }
+}
+
+func typeConvertParam(p reflect.Value, t reflect.Type) (ret reflect.Value, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ errorMsg := fmt.Sprintf("Argument type conversion failed: failed to convert '%s' to '%s'", p.Kind().String(), t.Kind().String())
+ err = errors.New(errorMsg)
+ ret = p
+ }
+ }()
+
+ return p.Convert(t), nil
+}
+
+func typeConvertParams(method reflect.Value, params []reflect.Value) ([]reflect.Value, error) {
+
+ methodType := method.Type()
+ numIn := methodType.NumIn()
+ numParams := len(params)
+
+ if numIn != numParams {
+ if numIn > numParams {
+ return nil, fmt.Errorf("Too few arguments to parameter call: got %d arguments, expected %d", len(params), numIn)
+ }
+ return nil, fmt.Errorf("Too many arguments to parameter call: got %d arguments, expected %d", len(params), numIn)
+ }
+
+ for i := 0; i < numIn; i++ {
+ t := methodType.In(i)
+ p := params[i]
+ pt := p.Type()
+
+ if t.Kind() != pt.Kind() {
+ np, err := typeConvertParam(p, t)
+ if err != nil {
+ return nil, err
+ }
+ params[i] = np
+ }
+ }
+
+ return params, nil
+}
+
+func makeAccessorStage(pair []string) evaluationOperator {
+
+ reconstructed := strings.Join(pair, ".")
+
+ return func(left interface{}, right interface{}, parameters Parameters) (ret interface{}, err error) {
+
+ var params []reflect.Value
+
+ value, err := parameters.Get(pair[0])
+ if err != nil {
+ return nil, err
+ }
+
+ // while this library generally tries to handle panic-inducing cases on its own,
+ // accessors are a sticky case which have a lot of possible ways to fail.
+ // therefore every call to an accessor sets up a defer that tries to recover from panics, converting them to errors.
+ defer func() {
+ if r := recover(); r != nil {
+ errorMsg := fmt.Sprintf("Failed to access '%s': %v", reconstructed, r.(string))
+ err = errors.New(errorMsg)
+ ret = nil
+ }
+ }()
+
+ for i := 1; i < len(pair); i++ {
+
+ coreValue := reflect.ValueOf(value)
+
+ var corePtrVal reflect.Value
+
+ // if this is a pointer, resolve it.
+ if coreValue.Kind() == reflect.Ptr {
+ corePtrVal = coreValue
+ coreValue = coreValue.Elem()
+ }
+
+ if coreValue.Kind() != reflect.Struct {
+ return nil, errors.New("Unable to access '" + pair[i] + "', '" + pair[i-1] + "' is not a struct")
+ }
+
+ field := coreValue.FieldByName(pair[i])
+ if field != (reflect.Value{}) {
+ value = field.Interface()
+ continue
+ }
+
+ method := coreValue.MethodByName(pair[i])
+ if method == (reflect.Value{}) {
+ if corePtrVal.IsValid() {
+ method = corePtrVal.MethodByName(pair[i])
+ }
+ if method == (reflect.Value{}) {
+ return nil, errors.New("No method or field '" + pair[i] + "' present on parameter '" + pair[i-1] + "'")
+ }
+ }
+
+ switch right.(type) {
+ case []interface{}:
+
+ givenParams := right.([]interface{})
+ params = make([]reflect.Value, len(givenParams))
+ for idx := range givenParams {
+ params[idx] = reflect.ValueOf(givenParams[idx])
+ }
+
+ default:
+
+ if right == nil {
+ params = []reflect.Value{}
+ break
+ }
+
+ params = []reflect.Value{reflect.ValueOf(right.(interface{}))}
+ }
+
+ params, err = typeConvertParams(method, params)
+
+ if err != nil {
+ return nil, errors.New("Method call failed - '" + pair[0] + "." + pair[1] + "': " + err.Error())
+ }
+
+ returned := method.Call(params)
+ retLength := len(returned)
+
+ if retLength == 0 {
+ return nil, errors.New("Method call '" + pair[i-1] + "." + pair[i] + "' did not return any values.")
+ }
+
+ if retLength == 1 {
+
+ value = returned[0].Interface()
+ continue
+ }
+
+ if retLength == 2 {
+
+ errIface := returned[1].Interface()
+ err, validType := errIface.(error)
+
+ if validType && errIface != nil {
+ return returned[0].Interface(), err
+ }
+
+ value = returned[0].Interface()
+ continue
+ }
+
+ return nil, errors.New("Method call '" + pair[0] + "." + pair[1] + "' did not return either one value, or a value and an error. Cannot interpret meaning.")
+ }
+
+ value = castToFloat64(value)
+ return value, nil
+ }
+}
+
+func separatorStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+
+ var ret []interface{}
+
+ switch left.(type) {
+ case []interface{}:
+ ret = append(left.([]interface{}), right)
+ default:
+ ret = []interface{}{left, right}
+ }
+
+ return ret, nil
+}
+
+func inStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) {
+
+ for _, value := range right.([]interface{}) {
+ if left == value {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+//
+
+func isString(value interface{}) bool {
+
+ switch value.(type) {
+ case string:
+ return true
+ }
+ return false
+}
+
+func isRegexOrString(value interface{}) bool {
+
+ switch value.(type) {
+ case string:
+ return true
+ case *regexp.Regexp:
+ return true
+ }
+ return false
+}
+
+func isBool(value interface{}) bool {
+ switch value.(type) {
+ case bool:
+ return true
+ }
+ return false
+}
+
+func isFloat64(value interface{}) bool {
+ switch value.(type) {
+ case float64:
+ return true
+ }
+ return false
+}
+
+/*
+ Addition usually means between numbers, but can also mean string concat.
+ String concat needs one (or both) of the sides to be a string.
+*/
+func additionTypeCheck(left interface{}, right interface{}) bool {
+
+ if isFloat64(left) && isFloat64(right) {
+ return true
+ }
+ if !isString(left) && !isString(right) {
+ return false
+ }
+ return true
+}
+
+/*
+ Comparison can either be between numbers, or lexicographic between two strings,
+ but never between the two.
+*/
+func comparatorTypeCheck(left interface{}, right interface{}) bool {
+
+ if isFloat64(left) && isFloat64(right) {
+ return true
+ }
+ if isString(left) && isString(right) {
+ return true
+ }
+ return false
+}
+
+func isArray(value interface{}) bool {
+ switch value.(type) {
+ case []interface{}:
+ return true
+ }
+ return false
+}
+
+/*
+ Converting a boolean to an interface{} requires an allocation.
+ We can use interned bools to avoid this cost.
+*/
+func boolIface(b bool) interface{} {
+ if b {
+ return _true
+ }
+ return _false
+}
diff --git a/vendor/github.com/Knetic/govaluate/expressionFunctions.go b/vendor/github.com/Knetic/govaluate/expressionFunctions.go
new file mode 100644
index 000000000..ac6592b3f
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/expressionFunctions.go
@@ -0,0 +1,8 @@
+package govaluate
+
+/*
+ Represents a function that can be called from within an expression.
+ This method must return an error if, for any reason, it is unable to produce exactly one unambiguous result.
+ An error returned will halt execution of the expression.
+*/
+type ExpressionFunction func(arguments ...interface{}) (interface{}, error)
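
For orientation, the sketch below shows how a custom ExpressionFunction is typically registered and then called from an expression. It assumes only the library's public constructors (`NewEvaluableExpressionWithFunctions` and `Evaluate`); the `strlen` helper, the expression text, and the parameter name are illustrative, not part of this change.

package main

import (
	"fmt"

	"github.com/Knetic/govaluate"
)

func main() {
	// Register a custom function under the name "strlen".
	functions := map[string]govaluate.ExpressionFunction{
		"strlen": func(args ...interface{}) (interface{}, error) {
			// Numeric results are conventionally returned as float64.
			return float64(len(args[0].(string))), nil
		},
	}

	// The function is then callable by name inside the expression.
	expr, err := govaluate.NewEvaluableExpressionWithFunctions("strlen(name) > 5", functions)
	if err != nil {
		panic(err)
	}

	result, err := expr.Evaluate(map[string]interface{}{"name": "govaluate"})
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // true
}
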
diff --git a/vendor/github.com/Knetic/govaluate/expressionOutputStream.go b/vendor/github.com/Knetic/govaluate/expressionOutputStream.go
new file mode 100644
index 000000000..88a841639
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/expressionOutputStream.go
@@ -0,0 +1,46 @@
+package govaluate
+
+import (
+ "bytes"
+)
+
+/*
+ Holds a series of "transactions" which represent each token as it is output by an outputter (such as ToSQLQuery()).
+ Some outputs (such as SQL) require a function call or non-c-like syntax to represent an expression.
+ To accomplish this, this struct keeps track of each translated token as it is output, and can return and rollback those transactions.
+*/
+type expressionOutputStream struct {
+ transactions []string
+}
+
+func (this *expressionOutputStream) add(transaction string) {
+ this.transactions = append(this.transactions, transaction)
+}
+
+func (this *expressionOutputStream) rollback() string {
+
+ index := len(this.transactions) - 1
+ ret := this.transactions[index]
+
+ this.transactions = this.transactions[:index]
+ return ret
+}
+
+func (this *expressionOutputStream) createString(delimiter string) string {
+
+ var retBuffer bytes.Buffer
+ var transaction string
+
+ penultimate := len(this.transactions) - 1
+
+ for i := 0; i < penultimate; i++ {
+
+ transaction = this.transactions[i]
+
+ retBuffer.WriteString(transaction)
+ retBuffer.WriteString(delimiter)
+ }
+ retBuffer.WriteString(this.transactions[penultimate])
+
+ return retBuffer.String()
+}
diff --git a/vendor/github.com/Knetic/govaluate/lexerState.go b/vendor/github.com/Knetic/govaluate/lexerState.go
new file mode 100644
index 000000000..6726e909e
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/lexerState.go
@@ -0,0 +1,373 @@
+package govaluate
+
+import (
+ "errors"
+ "fmt"
+)
+
+type lexerState struct {
+ isEOF bool
+ isNullable bool
+ kind TokenKind
+ validNextKinds []TokenKind
+}
+
+// lexer states.
+// Constant for all purposes except compiler.
+var validLexerStates = []lexerState{
+
+ lexerState{
+ kind: UNKNOWN,
+ isEOF: false,
+ isNullable: true,
+ validNextKinds: []TokenKind{
+
+ PREFIX,
+ NUMERIC,
+ BOOLEAN,
+ VARIABLE,
+ PATTERN,
+ FUNCTION,
+ ACCESSOR,
+ STRING,
+ TIME,
+ CLAUSE,
+ },
+ },
+
+ lexerState{
+
+ kind: CLAUSE,
+ isEOF: false,
+ isNullable: true,
+ validNextKinds: []TokenKind{
+
+ PREFIX,
+ NUMERIC,
+ BOOLEAN,
+ VARIABLE,
+ PATTERN,
+ FUNCTION,
+ ACCESSOR,
+ STRING,
+ TIME,
+ CLAUSE,
+ CLAUSE_CLOSE,
+ },
+ },
+
+ lexerState{
+
+ kind: CLAUSE_CLOSE,
+ isEOF: true,
+ isNullable: true,
+ validNextKinds: []TokenKind{
+
+ COMPARATOR,
+ MODIFIER,
+ NUMERIC,
+ BOOLEAN,
+ VARIABLE,
+ STRING,
+ PATTERN,
+ TIME,
+ CLAUSE,
+ CLAUSE_CLOSE,
+ LOGICALOP,
+ TERNARY,
+ SEPARATOR,
+ },
+ },
+
+ lexerState{
+
+ kind: NUMERIC,
+ isEOF: true,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ MODIFIER,
+ COMPARATOR,
+ LOGICALOP,
+ CLAUSE_CLOSE,
+ TERNARY,
+ SEPARATOR,
+ },
+ },
+ lexerState{
+
+ kind: BOOLEAN,
+ isEOF: true,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ MODIFIER,
+ COMPARATOR,
+ LOGICALOP,
+ CLAUSE_CLOSE,
+ TERNARY,
+ SEPARATOR,
+ },
+ },
+ lexerState{
+
+ kind: STRING,
+ isEOF: true,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ MODIFIER,
+ COMPARATOR,
+ LOGICALOP,
+ CLAUSE_CLOSE,
+ TERNARY,
+ SEPARATOR,
+ },
+ },
+ lexerState{
+
+ kind: TIME,
+ isEOF: true,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ MODIFIER,
+ COMPARATOR,
+ LOGICALOP,
+ CLAUSE_CLOSE,
+ SEPARATOR,
+ },
+ },
+ lexerState{
+
+ kind: PATTERN,
+ isEOF: true,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ MODIFIER,
+ COMPARATOR,
+ LOGICALOP,
+ CLAUSE_CLOSE,
+ SEPARATOR,
+ },
+ },
+ lexerState{
+
+ kind: VARIABLE,
+ isEOF: true,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ MODIFIER,
+ COMPARATOR,
+ LOGICALOP,
+ CLAUSE_CLOSE,
+ TERNARY,
+ SEPARATOR,
+ },
+ },
+ lexerState{
+
+ kind: MODIFIER,
+ isEOF: false,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ PREFIX,
+ NUMERIC,
+ VARIABLE,
+ FUNCTION,
+ ACCESSOR,
+ STRING,
+ BOOLEAN,
+ CLAUSE,
+ CLAUSE_CLOSE,
+ },
+ },
+ lexerState{
+
+ kind: COMPARATOR,
+ isEOF: false,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ PREFIX,
+ NUMERIC,
+ BOOLEAN,
+ VARIABLE,
+ FUNCTION,
+ ACCESSOR,
+ STRING,
+ TIME,
+ CLAUSE,
+ CLAUSE_CLOSE,
+ PATTERN,
+ },
+ },
+ lexerState{
+
+ kind: LOGICALOP,
+ isEOF: false,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ PREFIX,
+ NUMERIC,
+ BOOLEAN,
+ VARIABLE,
+ FUNCTION,
+ ACCESSOR,
+ STRING,
+ TIME,
+ CLAUSE,
+ CLAUSE_CLOSE,
+ },
+ },
+ lexerState{
+
+ kind: PREFIX,
+ isEOF: false,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ NUMERIC,
+ BOOLEAN,
+ VARIABLE,
+ FUNCTION,
+ ACCESSOR,
+ CLAUSE,
+ CLAUSE_CLOSE,
+ },
+ },
+
+ lexerState{
+
+ kind: TERNARY,
+ isEOF: false,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+
+ PREFIX,
+ NUMERIC,
+ BOOLEAN,
+ STRING,
+ TIME,
+ VARIABLE,
+ FUNCTION,
+ ACCESSOR,
+ CLAUSE,
+ SEPARATOR,
+ },
+ },
+ lexerState{
+
+ kind: FUNCTION,
+ isEOF: false,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+ CLAUSE,
+ },
+ },
+ lexerState{
+
+ kind: ACCESSOR,
+ isEOF: true,
+ isNullable: false,
+ validNextKinds: []TokenKind{
+ CLAUSE,
+ MODIFIER,
+ COMPARATOR,
+ LOGICALOP,
+ CLAUSE_CLOSE,
+ TERNARY,
+ SEPARATOR,
+ },
+ },
+ lexerState{
+
+ kind: SEPARATOR,
+ isEOF: false,
+ isNullable: true,
+ validNextKinds: []TokenKind{
+
+ PREFIX,
+ NUMERIC,
+ BOOLEAN,
+ STRING,
+ TIME,
+ VARIABLE,
+ FUNCTION,
+ ACCESSOR,
+ CLAUSE,
+ },
+ },
+}
+
+func (this lexerState) canTransitionTo(kind TokenKind) bool {
+
+ for _, validKind := range this.validNextKinds {
+
+ if validKind == kind {
+ return true
+ }
+ }
+
+ return false
+}
+
+func checkExpressionSyntax(tokens []ExpressionToken) error {
+
+ var state lexerState
+ var lastToken ExpressionToken
+ var err error
+
+ state = validLexerStates[0]
+
+ for _, token := range tokens {
+
+ if !state.canTransitionTo(token.Kind) {
+
+ // call out a specific error for tokens looking like they want to be functions.
+ if lastToken.Kind == VARIABLE && token.Kind == CLAUSE {
+ return errors.New("Undefined function " + lastToken.Value.(string))
+ }
+
+ firstStateName := fmt.Sprintf("%s [%v]", state.kind.String(), lastToken.Value)
+ nextStateName := fmt.Sprintf("%s [%v]", token.Kind.String(), token.Value)
+
+ return errors.New("Cannot transition token types from " + firstStateName + " to " + nextStateName)
+ }
+
+ state, err = getLexerStateForToken(token.Kind)
+ if err != nil {
+ return err
+ }
+
+ if !state.isNullable && token.Value == nil {
+
+ errorMsg := fmt.Sprintf("Token kind '%v' cannot have a nil value", token.Kind.String())
+ return errors.New(errorMsg)
+ }
+
+ lastToken = token
+ }
+
+ if !state.isEOF {
+ return errors.New("Unexpected end of expression")
+ }
+ return nil
+}
+
+func getLexerStateForToken(kind TokenKind) (lexerState, error) {
+
+ for _, possibleState := range validLexerStates {
+
+ if possibleState.kind == kind {
+ return possibleState, nil
+ }
+ }
+
+ errorMsg := fmt.Sprintf("No lexer state found for token kind '%v'\n", kind.String())
+ return validLexerStates[0], errors.New(errorMsg)
+}
diff --git a/vendor/github.com/Knetic/govaluate/lexerStream.go b/vendor/github.com/Knetic/govaluate/lexerStream.go
new file mode 100644
index 000000000..b72e6bdb1
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/lexerStream.go
@@ -0,0 +1,39 @@
+package govaluate
+
+type lexerStream struct {
+ source []rune
+ position int
+ length int
+}
+
+func newLexerStream(source string) *lexerStream {
+
+ var ret *lexerStream
+ var runes []rune
+
+ for _, character := range source {
+ runes = append(runes, character)
+ }
+
+ ret = new(lexerStream)
+ ret.source = runes
+ ret.length = len(runes)
+ return ret
+}
+
+func (this *lexerStream) readCharacter() rune {
+
+ var character rune
+
+ character = this.source[this.position]
+ this.position += 1
+ return character
+}
+
+func (this *lexerStream) rewind(amount int) {
+ this.position -= amount
+}
+
+func (this lexerStream) canRead() bool {
+ return this.position < this.length
+}
diff --git a/vendor/github.com/Knetic/govaluate/parameters.go b/vendor/github.com/Knetic/govaluate/parameters.go
new file mode 100644
index 000000000..6c5b9ecb5
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/parameters.go
@@ -0,0 +1,32 @@
+package govaluate
+
+import (
+ "errors"
+)
+
+/*
+ Parameters is a collection of named parameters that can be used by an EvaluableExpression to retrieve parameters
+ when an expression tries to use them.
+*/
+type Parameters interface {
+
+ /*
+ Get gets the parameter of the given name, or an error if the parameter is unavailable.
+ Failure to find the given parameter should be indicated by returning an error.
+ */
+ Get(name string) (interface{}, error)
+}
+
+type MapParameters map[string]interface{}
+
+func (p MapParameters) Get(name string) (interface{}, error) {
+
+ value, found := p[name]
+
+ if !found {
+ errorMessage := "No parameter '" + name + "' found."
+ return nil, errors.New(errorMessage)
+ }
+
+ return value, nil
+}
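
A rough sketch of how this interface is consumed, assuming the exported `Eval` method on `EvaluableExpression`: any one-method implementation can resolve parameter names however it likes, with `MapParameters` as the ready-made map adapter. The `envParameters` type and the expression below are purely illustrative.

package main

import (
	"fmt"

	"github.com/Knetic/govaluate"
)

// envParameters resolves names from a map and treats unknown names as zero,
// instead of returning an error the way MapParameters does.
type envParameters map[string]interface{}

func (e envParameters) Get(name string) (interface{}, error) {
	if value, found := e[name]; found {
		return value, nil
	}
	return 0, nil
}

func main() {
	expr, err := govaluate.NewEvaluableExpression("requests / limit > 0.8")
	if err != nil {
		panic(err)
	}

	// Eval accepts anything implementing the Parameters interface.
	result, err := expr.Eval(envParameters{"requests": 90, "limit": 100})
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // true
}
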
diff --git a/vendor/github.com/Knetic/govaluate/parsing.go b/vendor/github.com/Knetic/govaluate/parsing.go
new file mode 100644
index 000000000..40c7ed2c4
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/parsing.go
@@ -0,0 +1,526 @@
+package govaluate
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+)
+
+func parseTokens(expression string, functions map[string]ExpressionFunction) ([]ExpressionToken, error) {
+
+ var ret []ExpressionToken
+ var token ExpressionToken
+ var stream *lexerStream
+ var state lexerState
+ var err error
+ var found bool
+
+ stream = newLexerStream(expression)
+ state = validLexerStates[0]
+
+ for stream.canRead() {
+
+ token, err, found = readToken(stream, state, functions)
+
+ if err != nil {
+ return ret, err
+ }
+
+ if !found {
+ break
+ }
+
+ state, err = getLexerStateForToken(token.Kind)
+ if err != nil {
+ return ret, err
+ }
+
+ // append this valid token
+ ret = append(ret, token)
+ }
+
+ err = checkBalance(ret)
+ if err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+func readToken(stream *lexerStream, state lexerState, functions map[string]ExpressionFunction) (ExpressionToken, error, bool) {
+
+ var function ExpressionFunction
+ var ret ExpressionToken
+ var tokenValue interface{}
+ var tokenTime time.Time
+ var tokenString string
+ var kind TokenKind
+ var character rune
+ var found bool
+ var completed bool
+ var err error
+
+ // numeric is 0-9, or . or 0x followed by digits
+ // string starts with '
+ // variable is alphanumeric, always starts with a letter
+ // bracket always means variable
+ // symbols are anything non-alphanumeric
+ // all others read into a buffer until they reach the end of the stream
+ for stream.canRead() {
+
+ character = stream.readCharacter()
+
+ if unicode.IsSpace(character) {
+ continue
+ }
+
+ kind = UNKNOWN
+
+ // numeric constant
+ if isNumeric(character) {
+
+ if stream.canRead() && character == '0' {
+ character = stream.readCharacter()
+
+ if stream.canRead() && character == 'x' {
+ tokenString, _ = readUntilFalse(stream, false, true, true, isHexDigit)
+ tokenValueInt, err := strconv.ParseUint(tokenString, 16, 64)
+
+ if err != nil {
+ errorMsg := fmt.Sprintf("Unable to parse hex value '%v' to uint64\n", tokenString)
+ return ExpressionToken{}, errors.New(errorMsg), false
+ }
+
+ kind = NUMERIC
+ tokenValue = float64(tokenValueInt)
+ break
+ } else {
+ stream.rewind(1)
+ }
+ }
+
+ tokenString = readTokenUntilFalse(stream, isNumeric)
+ tokenValue, err = strconv.ParseFloat(tokenString, 64)
+
+ if err != nil {
+ errorMsg := fmt.Sprintf("Unable to parse numeric value '%v' to float64\n", tokenString)
+ return ExpressionToken{}, errors.New(errorMsg), false
+ }
+ kind = NUMERIC
+ break
+ }
+
+ // comma, separator
+ if character == ',' {
+
+ tokenValue = ","
+ kind = SEPARATOR
+ break
+ }
+
+ // escaped variable
+ if character == '[' {
+
+ tokenValue, completed = readUntilFalse(stream, true, false, true, isNotClosingBracket)
+ kind = VARIABLE
+
+ if !completed {
+ return ExpressionToken{}, errors.New("Unclosed parameter bracket"), false
+ }
+
+ // above method normally rewinds us to the closing bracket, which we want to skip.
+ stream.rewind(-1)
+ break
+ }
+
+ // regular variable - or function?
+ if unicode.IsLetter(character) {
+
+ tokenString = readTokenUntilFalse(stream, isVariableName)
+
+ tokenValue = tokenString
+ kind = VARIABLE
+
+ // boolean?
+ if tokenValue == "true" {
+
+ kind = BOOLEAN
+ tokenValue = true
+ } else {
+
+ if tokenValue == "false" {
+
+ kind = BOOLEAN
+ tokenValue = false
+ }
+ }
+
+ // textual operator?
+ if tokenValue == "in" || tokenValue == "IN" {
+
+ // force lower case for consistency
+ tokenValue = "in"
+ kind = COMPARATOR
+ }
+
+ // function?
+ function, found = functions[tokenString]
+ if found {
+ kind = FUNCTION
+ tokenValue = function
+ }
+
+ // accessor?
+ accessorIndex := strings.Index(tokenString, ".")
+ if accessorIndex > 0 {
+
+ // check that it doesn't end with a hanging period
+ if tokenString[len(tokenString)-1] == '.' {
+ errorMsg := fmt.Sprintf("Hanging accessor on token '%s'", tokenString)
+ return ExpressionToken{}, errors.New(errorMsg), false
+ }
+
+ kind = ACCESSOR
+ splits := strings.Split(tokenString, ".")
+ tokenValue = splits
+
+ // check that none of them are unexported
+ for i := 1; i < len(splits); i++ {
+
+ firstCharacter := getFirstRune(splits[i])
+
+ if unicode.ToUpper(firstCharacter) != firstCharacter {
+ errorMsg := fmt.Sprintf("Unable to access unexported field '%s' in token '%s'", splits[i], tokenString)
+ return ExpressionToken{}, errors.New(errorMsg), false
+ }
+ }
+ }
+ break
+ }
+
+ if !isNotQuote(character) {
+ tokenValue, completed = readUntilFalse(stream, true, false, true, isNotQuote)
+
+ if !completed {
+ return ExpressionToken{}, errors.New("Unclosed string literal"), false
+ }
+
+ // advance the stream one position, since reading until false assumes the terminator is a real token
+ stream.rewind(-1)
+
+ // check to see if this can be parsed as a time.
+ tokenTime, found = tryParseTime(tokenValue.(string))
+ if found {
+ kind = TIME
+ tokenValue = tokenTime
+ } else {
+ kind = STRING
+ }
+ break
+ }
+
+ if character == '(' {
+ tokenValue = character
+ kind = CLAUSE
+ break
+ }
+
+ if character == ')' {
+ tokenValue = character
+ kind = CLAUSE_CLOSE
+ break
+ }
+
+ // must be a known symbol
+ tokenString = readTokenUntilFalse(stream, isNotAlphanumeric)
+ tokenValue = tokenString
+
+ // quick hack for the case where "-" can mean "prefixed negation" or "minus", which are used
+ // very differently.
+ if state.canTransitionTo(PREFIX) {
+ _, found = prefixSymbols[tokenString]
+ if found {
+
+ kind = PREFIX
+ break
+ }
+ }
+ _, found = modifierSymbols[tokenString]
+ if found {
+
+ kind = MODIFIER
+ break
+ }
+
+ _, found = logicalSymbols[tokenString]
+ if found {
+
+ kind = LOGICALOP
+ break
+ }
+
+ _, found = comparatorSymbols[tokenString]
+ if found {
+
+ kind = COMPARATOR
+ break
+ }
+
+ _, found = ternarySymbols[tokenString]
+ if found {
+
+ kind = TERNARY
+ break
+ }
+
+ errorMessage := fmt.Sprintf("Invalid token: '%s'", tokenString)
+ return ret, errors.New(errorMessage), false
+ }
+
+ ret.Kind = kind
+ ret.Value = tokenValue
+
+ return ret, nil, (kind != UNKNOWN)
+}
+
+func readTokenUntilFalse(stream *lexerStream, condition func(rune) bool) string {
+
+ var ret string
+
+ stream.rewind(1)
+ ret, _ = readUntilFalse(stream, false, true, true, condition)
+ return ret
+}
+
+/*
+ Returns the string that was read until the given [condition] was false, or whitespace was broken.
+ Returns false if the stream ended before whitespace was broken or condition was met.
+*/
+func readUntilFalse(stream *lexerStream, includeWhitespace bool, breakWhitespace bool, allowEscaping bool, condition func(rune) bool) (string, bool) {
+
+ var tokenBuffer bytes.Buffer
+ var character rune
+ var conditioned bool
+
+ conditioned = false
+
+ for stream.canRead() {
+
+ character = stream.readCharacter()
+
+ // Use backslashes to escape anything
+ if allowEscaping && character == '\\' {
+
+ character = stream.readCharacter()
+ tokenBuffer.WriteString(string(character))
+ continue
+ }
+
+ if unicode.IsSpace(character) {
+
+ if breakWhitespace && tokenBuffer.Len() > 0 {
+ conditioned = true
+ break
+ }
+ if !includeWhitespace {
+ continue
+ }
+ }
+
+ if condition(character) {
+ tokenBuffer.WriteString(string(character))
+ } else {
+ conditioned = true
+ stream.rewind(1)
+ break
+ }
+ }
+
+ return tokenBuffer.String(), conditioned
+}
+
+/*
+ Checks to see if any optimizations can be performed on the given [tokens], which form a complete, valid expression.
+ The returned slice will represent the optimized (or unmodified) list of tokens to use.
+*/
+func optimizeTokens(tokens []ExpressionToken) ([]ExpressionToken, error) {
+
+ var token ExpressionToken
+ var symbol OperatorSymbol
+ var err error
+ var index int
+
+ for index, token = range tokens {
+
+ // if we find a regex operator, and the right-hand value is a constant, precompile and replace with a pattern.
+ if token.Kind != COMPARATOR {
+ continue
+ }
+
+ symbol = comparatorSymbols[token.Value.(string)]
+ if symbol != REQ && symbol != NREQ {
+ continue
+ }
+
+ index++
+ token = tokens[index]
+ if token.Kind == STRING {
+
+ token.Kind = PATTERN
+ token.Value, err = regexp.Compile(token.Value.(string))
+
+ if err != nil {
+ return tokens, err
+ }
+
+ tokens[index] = token
+ }
+ }
+ return tokens, nil
+}
+
+/*
+ Checks the balance of tokens which have multiple parts, such as parentheses.
+*/
+func checkBalance(tokens []ExpressionToken) error {
+
+ var stream *tokenStream
+ var token ExpressionToken
+ var parens int
+
+ stream = newTokenStream(tokens)
+
+ for stream.hasNext() {
+
+ token = stream.next()
+ if token.Kind == CLAUSE {
+ parens++
+ continue
+ }
+ if token.Kind == CLAUSE_CLOSE {
+ parens--
+ continue
+ }
+ }
+
+ if parens != 0 {
+ return errors.New("Unbalanced parenthesis")
+ }
+ return nil
+}
+
+func isDigit(character rune) bool {
+ return unicode.IsDigit(character)
+}
+
+func isHexDigit(character rune) bool {
+
+ character = unicode.ToLower(character)
+
+ return unicode.IsDigit(character) ||
+ character == 'a' ||
+ character == 'b' ||
+ character == 'c' ||
+ character == 'd' ||
+ character == 'e' ||
+ character == 'f'
+}
+
+func isNumeric(character rune) bool {
+
+ return unicode.IsDigit(character) || character == '.'
+}
+
+func isNotQuote(character rune) bool {
+
+ return character != '\'' && character != '"'
+}
+
+func isNotAlphanumeric(character rune) bool {
+
+ return !(unicode.IsDigit(character) ||
+ unicode.IsLetter(character) ||
+ character == '(' ||
+ character == ')' ||
+ character == '[' ||
+ character == ']' || // starting to feel like there needs to be an `isOperation` func (#59)
+ !isNotQuote(character))
+}
+
+func isVariableName(character rune) bool {
+
+ return unicode.IsLetter(character) ||
+ unicode.IsDigit(character) ||
+ character == '_' ||
+ character == '.'
+}
+
+func isNotClosingBracket(character rune) bool {
+
+ return character != ']'
+}
+
+/*
+ Attempts to parse the [candidate] as a Time.
+ Tries a series of standardized date formats, returns the Time if one applies,
+ otherwise returns false through the second return.
+*/
+func tryParseTime(candidate string) (time.Time, bool) {
+
+ var ret time.Time
+ var found bool
+
+ timeFormats := [...]string{
+ time.ANSIC,
+ time.UnixDate,
+ time.RubyDate,
+ time.Kitchen,
+ time.RFC3339,
+ time.RFC3339Nano,
+ "2006-01-02", // RFC 3339
+ "2006-01-02 15:04", // RFC 3339 with minutes
+ "2006-01-02 15:04:05", // RFC 3339 with seconds
+ "2006-01-02 15:04:05-07:00", // RFC 3339 with seconds and timezone
+ "2006-01-02T15Z0700", // ISO8601 with hour
+ "2006-01-02T15:04Z0700", // ISO8601 with minutes
+ "2006-01-02T15:04:05Z0700", // ISO8601 with seconds
+ "2006-01-02T15:04:05.999999999Z0700", // ISO8601 with nanoseconds
+ }
+
+ for _, format := range timeFormats {
+
+ ret, found = tryParseExactTime(candidate, format)
+ if found {
+ return ret, true
+ }
+ }
+
+ return time.Now(), false
+}
+
+func tryParseExactTime(candidate string, format string) (time.Time, bool) {
+
+ var ret time.Time
+ var err error
+
+ ret, err = time.ParseInLocation(format, candidate, time.Local)
+ if err != nil {
+ return time.Now(), false
+ }
+
+ return ret, true
+}
+
+func getFirstRune(candidate string) rune {
+
+ for _, character := range candidate {
+ return character
+ }
+
+ return 0
+}
diff --git a/vendor/github.com/Knetic/govaluate/sanitizedParameters.go b/vendor/github.com/Knetic/govaluate/sanitizedParameters.go
new file mode 100644
index 000000000..28bd795d9
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/sanitizedParameters.go
@@ -0,0 +1,43 @@
+package govaluate
+
+// sanitizedParameters is a wrapper for Parameters that does sanitization as
+// parameters are accessed.
+type sanitizedParameters struct {
+ orig Parameters
+}
+
+func (p sanitizedParameters) Get(key string) (interface{}, error) {
+ value, err := p.orig.Get(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return castToFloat64(value), nil
+}
+
+func castToFloat64(value interface{}) interface{} {
+ switch value.(type) {
+ case uint8:
+ return float64(value.(uint8))
+ case uint16:
+ return float64(value.(uint16))
+ case uint32:
+ return float64(value.(uint32))
+ case uint64:
+ return float64(value.(uint64))
+ case int8:
+ return float64(value.(int8))
+ case int16:
+ return float64(value.(int16))
+ case int32:
+ return float64(value.(int32))
+ case int64:
+ return float64(value.(int64))
+ case int:
+ return float64(value.(int))
+ case float32:
+ return float64(value.(float32))
+ }
+
+ return value
+}
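
The practical effect, sketched below under the assumption of the public `Evaluate` entry point, is that callers can pass any Go integer or float type as a parameter without converting it themselves; the `bytes` and `threshold` names are illustrative.

package main

import (
	"fmt"

	"github.com/Knetic/govaluate"
)

func main() {
	expr, err := govaluate.NewEvaluableExpression("bytes > threshold")
	if err != nil {
		panic(err)
	}

	// An int64 and an int: both are normalized to float64 (as in castToFloat64
	// above) before the comparison runs.
	result, err := expr.Evaluate(map[string]interface{}{
		"bytes":     int64(2048),
		"threshold": 1024,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // true
}
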
diff --git a/vendor/github.com/Knetic/govaluate/stagePlanner.go b/vendor/github.com/Knetic/govaluate/stagePlanner.go
new file mode 100644
index 000000000..d71ed129d
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/stagePlanner.go
@@ -0,0 +1,724 @@
+package govaluate
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+var stageSymbolMap = map[OperatorSymbol]evaluationOperator{
+ EQ: equalStage,
+ NEQ: notEqualStage,
+ GT: gtStage,
+ LT: ltStage,
+ GTE: gteStage,
+ LTE: lteStage,
+ REQ: regexStage,
+ NREQ: notRegexStage,
+ AND: andStage,
+ OR: orStage,
+ IN: inStage,
+ BITWISE_OR: bitwiseOrStage,
+ BITWISE_AND: bitwiseAndStage,
+ BITWISE_XOR: bitwiseXORStage,
+ BITWISE_LSHIFT: leftShiftStage,
+ BITWISE_RSHIFT: rightShiftStage,
+ PLUS: addStage,
+ MINUS: subtractStage,
+ MULTIPLY: multiplyStage,
+ DIVIDE: divideStage,
+ MODULUS: modulusStage,
+ EXPONENT: exponentStage,
+ NEGATE: negateStage,
+ INVERT: invertStage,
+ BITWISE_NOT: bitwiseNotStage,
+ TERNARY_TRUE: ternaryIfStage,
+ TERNARY_FALSE: ternaryElseStage,
+ COALESCE: ternaryElseStage,
+ SEPARATE: separatorStage,
+}
+
+/*
+ A "precedent" is a function which will recursively parse new evaluationStages from a given stream of tokens.
+ It's called a `precedent` because it is expected to handle exactly one precedence level of operator,
+ and defer to other `precedent`s for other operators.
+*/
+type precedent func(stream *tokenStream) (*evaluationStage, error)
+
+/*
+ A convenience struct for specifying the behavior of a `precedent`.
+ Most `precedent` functions can be described by the same function, just with different type checks, symbols, and error formats.
+ This struct is passed to `makePrecedentFromPlanner` to create a `precedent` function.
+*/
+type precedencePlanner struct {
+ validSymbols map[string]OperatorSymbol
+ validKinds []TokenKind
+
+ typeErrorFormat string
+
+ next precedent
+ nextRight precedent
+}
+
+var planPrefix precedent
+var planExponential precedent
+var planMultiplicative precedent
+var planAdditive precedent
+var planBitwise precedent
+var planShift precedent
+var planComparator precedent
+var planLogicalAnd precedent
+var planLogicalOr precedent
+var planTernary precedent
+var planSeparator precedent
+
+func init() {
+
+ // all these stages can use the same code (in `planPrecedenceLevel`) to execute,
+ // they simply need different type checks, symbols, and recursive precedents.
+ // While not all precedent phases are listed here, most can be represented this way.
+ planPrefix = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: prefixSymbols,
+ validKinds: []TokenKind{PREFIX},
+ typeErrorFormat: prefixErrorFormat,
+ nextRight: planFunction,
+ })
+ planExponential = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: exponentialSymbolsS,
+ validKinds: []TokenKind{MODIFIER},
+ typeErrorFormat: modifierErrorFormat,
+ next: planFunction,
+ })
+ planMultiplicative = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: multiplicativeSymbols,
+ validKinds: []TokenKind{MODIFIER},
+ typeErrorFormat: modifierErrorFormat,
+ next: planExponential,
+ })
+ planAdditive = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: additiveSymbols,
+ validKinds: []TokenKind{MODIFIER},
+ typeErrorFormat: modifierErrorFormat,
+ next: planMultiplicative,
+ })
+ planShift = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: bitwiseShiftSymbols,
+ validKinds: []TokenKind{MODIFIER},
+ typeErrorFormat: modifierErrorFormat,
+ next: planAdditive,
+ })
+ planBitwise = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: bitwiseSymbols,
+ validKinds: []TokenKind{MODIFIER},
+ typeErrorFormat: modifierErrorFormat,
+ next: planShift,
+ })
+ planComparator = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: comparatorSymbols,
+ validKinds: []TokenKind{COMPARATOR},
+ typeErrorFormat: comparatorErrorFormat,
+ next: planBitwise,
+ })
+ planLogicalAnd = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: map[string]OperatorSymbol{"&&": AND},
+ validKinds: []TokenKind{LOGICALOP},
+ typeErrorFormat: logicalErrorFormat,
+ next: planComparator,
+ })
+ planLogicalOr = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: map[string]OperatorSymbol{"||": OR},
+ validKinds: []TokenKind{LOGICALOP},
+ typeErrorFormat: logicalErrorFormat,
+ next: planLogicalAnd,
+ })
+ planTernary = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: ternarySymbols,
+ validKinds: []TokenKind{TERNARY},
+ typeErrorFormat: ternaryErrorFormat,
+ next: planLogicalOr,
+ })
+ planSeparator = makePrecedentFromPlanner(&precedencePlanner{
+ validSymbols: separatorSymbols,
+ validKinds: []TokenKind{SEPARATOR},
+ next: planTernary,
+ })
+}
+
+/*
+ Given a planner, creates a function which will evaluate a specific precedence level of operators,
+ and link it to other `precedent`s which recurse to parse other precedence levels.
+*/
+func makePrecedentFromPlanner(planner *precedencePlanner) precedent {
+
+ var generated precedent
+ var nextRight precedent
+
+ generated = func(stream *tokenStream) (*evaluationStage, error) {
+ return planPrecedenceLevel(
+ stream,
+ planner.typeErrorFormat,
+ planner.validSymbols,
+ planner.validKinds,
+ nextRight,
+ planner.next,
+ )
+ }
+
+ if planner.nextRight != nil {
+ nextRight = planner.nextRight
+ } else {
+ nextRight = generated
+ }
+
+ return generated
+}
+
+/*
+ Creates an `evaluationStageList` object which represents an execution plan (or tree)
+ which is used to completely evaluate a set of tokens at evaluation-time.
+ The three stages of evaluation can be thought of as parsing strings to tokens, then tokens to a stage list, then evaluation with parameters.
+*/
+func planStages(tokens []ExpressionToken) (*evaluationStage, error) {
+
+ stream := newTokenStream(tokens)
+
+ stage, err := planTokens(stream)
+ if err != nil {
+ return nil, err
+ }
+
+ // while the expression is now fully planned, we still need to re-order same-precedence operators.
+ // this could probably be avoided with a different planning method
+ reorderStages(stage)
+
+ stage = elideLiterals(stage)
+ return stage, nil
+}
+
+func planTokens(stream *tokenStream) (*evaluationStage, error) {
+
+ if !stream.hasNext() {
+ return nil, nil
+ }
+
+ return planSeparator(stream)
+}
+
+/*
+ The most common method of parsing an evaluation stage for a given precedence.
+ Most stages use the same logic.
+*/
+func planPrecedenceLevel(
+ stream *tokenStream,
+ typeErrorFormat string,
+ validSymbols map[string]OperatorSymbol,
+ validKinds []TokenKind,
+ rightPrecedent precedent,
+ leftPrecedent precedent) (*evaluationStage, error) {
+
+ var token ExpressionToken
+ var symbol OperatorSymbol
+ var leftStage, rightStage *evaluationStage
+ var checks typeChecks
+ var err error
+ var keyFound bool
+
+ if leftPrecedent != nil {
+
+ leftStage, err = leftPrecedent(stream)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for stream.hasNext() {
+
+ token = stream.next()
+
+ if len(validKinds) > 0 {
+
+ keyFound = false
+ for _, kind := range validKinds {
+ if kind == token.Kind {
+ keyFound = true
+ break
+ }
+ }
+
+ if !keyFound {
+ break
+ }
+ }
+
+ if validSymbols != nil {
+
+ if !isString(token.Value) {
+ break
+ }
+
+ symbol, keyFound = validSymbols[token.Value.(string)]
+ if !keyFound {
+ break
+ }
+ }
+
+ if rightPrecedent != nil {
+ rightStage, err = rightPrecedent(stream)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ checks = findTypeChecks(symbol)
+
+ return &evaluationStage{
+
+ symbol: symbol,
+ leftStage: leftStage,
+ rightStage: rightStage,
+ operator: stageSymbolMap[symbol],
+
+ leftTypeCheck: checks.left,
+ rightTypeCheck: checks.right,
+ typeCheck: checks.combined,
+ typeErrorFormat: typeErrorFormat,
+ }, nil
+ }
+
+ stream.rewind()
+ return leftStage, nil
+}
+
+/*
+ A special case where functions need to be of higher precedence than values, and need a special wrapped execution stage operator.
+*/
+func planFunction(stream *tokenStream) (*evaluationStage, error) {
+
+ var token ExpressionToken
+ var rightStage *evaluationStage
+ var err error
+
+ token = stream.next()
+
+ if token.Kind != FUNCTION {
+ stream.rewind()
+ return planAccessor(stream)
+ }
+
+ rightStage, err = planAccessor(stream)
+ if err != nil {
+ return nil, err
+ }
+
+ return &evaluationStage{
+
+ symbol: FUNCTIONAL,
+ rightStage: rightStage,
+ operator: makeFunctionStage(token.Value.(ExpressionFunction)),
+ typeErrorFormat: "Unable to run function '%v': %v",
+ }, nil
+}
+
+func planAccessor(stream *tokenStream) (*evaluationStage, error) {
+
+ var token, otherToken ExpressionToken
+ var rightStage *evaluationStage
+ var err error
+
+ if !stream.hasNext() {
+ return nil, nil
+ }
+
+ token = stream.next()
+
+ if token.Kind != ACCESSOR {
+ stream.rewind()
+ return planValue(stream)
+ }
+
+ // check if this is meant to be a function or a field.
+ // fields have a clause next to them, functions do not.
+ // if it's a function, parse the arguments. Otherwise leave the right stage null.
+ if stream.hasNext() {
+
+ otherToken = stream.next()
+ if otherToken.Kind == CLAUSE {
+
+ stream.rewind()
+
+ rightStage, err = planTokens(stream)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ stream.rewind()
+ }
+ }
+
+ return &evaluationStage{
+
+ symbol: ACCESS,
+ rightStage: rightStage,
+ operator: makeAccessorStage(token.Value.([]string)),
+ typeErrorFormat: "Unable to access parameter field or method '%v': %v",
+ }, nil
+}
+
+/*
+ A truly special precedence function, this handles all the "lowest-case" errata of the process, including literals, parameters,
+ clauses, and prefixes.
+*/
+func planValue(stream *tokenStream) (*evaluationStage, error) {
+
+ var token ExpressionToken
+ var symbol OperatorSymbol
+ var ret *evaluationStage
+ var operator evaluationOperator
+ var err error
+
+ if !stream.hasNext() {
+ return nil, nil
+ }
+
+ token = stream.next()
+
+ switch token.Kind {
+
+ case CLAUSE:
+
+ ret, err = planTokens(stream)
+ if err != nil {
+ return nil, err
+ }
+
+ // advance past the CLAUSE_CLOSE token. We know that it's a CLAUSE_CLOSE, because at parse-time we check for unbalanced parens.
+ stream.next()
+
+ // the stage we got represents all of the logic contained within the parens
+ // but for technical reasons, we need to wrap this stage in a "noop" stage which breaks long chains of precedence.
+ // see github #33.
+ ret = &evaluationStage{
+ rightStage: ret,
+ operator: noopStageRight,
+ symbol: NOOP,
+ }
+
+ return ret, nil
+
+ case CLAUSE_CLOSE:
+
+ // when functions have empty params, this will be hit. In this case, we don't have any evaluation stage to do,
+ // so we just return nil so that the stage planner continues on its way.
+ stream.rewind()
+ return nil, nil
+
+ case VARIABLE:
+ operator = makeParameterStage(token.Value.(string))
+
+ case NUMERIC:
+ fallthrough
+ case STRING:
+ fallthrough
+ case PATTERN:
+ fallthrough
+ case BOOLEAN:
+ symbol = LITERAL
+ operator = makeLiteralStage(token.Value)
+ case TIME:
+ symbol = LITERAL
+ operator = makeLiteralStage(float64(token.Value.(time.Time).Unix()))
+
+ case PREFIX:
+ stream.rewind()
+ return planPrefix(stream)
+ }
+
+ if operator == nil {
+ errorMsg := fmt.Sprintf("Unable to plan token kind: '%s', value: '%v'", token.Kind.String(), token.Value)
+ return nil, errors.New(errorMsg)
+ }
+
+ return &evaluationStage{
+ symbol: symbol,
+ operator: operator,
+ }, nil
+}
+
+/*
+ Convenience function to pass a triplet of typechecks between `findTypeChecks` and `planPrecedenceLevel`.
+ Each of these members may be nil, which indicates that type does not matter for that value.
+*/
+type typeChecks struct {
+ left stageTypeCheck
+ right stageTypeCheck
+ combined stageCombinedTypeCheck
+}
+
+/*
+ Maps a given [symbol] to a set of typechecks to be used during runtime.
+*/
+func findTypeChecks(symbol OperatorSymbol) typeChecks {
+
+ switch symbol {
+ case GT:
+ fallthrough
+ case LT:
+ fallthrough
+ case GTE:
+ fallthrough
+ case LTE:
+ return typeChecks{
+ combined: comparatorTypeCheck,
+ }
+ case REQ:
+ fallthrough
+ case NREQ:
+ return typeChecks{
+ left: isString,
+ right: isRegexOrString,
+ }
+ case AND:
+ fallthrough
+ case OR:
+ return typeChecks{
+ left: isBool,
+ right: isBool,
+ }
+ case IN:
+ return typeChecks{
+ right: isArray,
+ }
+ case BITWISE_LSHIFT:
+ fallthrough
+ case BITWISE_RSHIFT:
+ fallthrough
+ case BITWISE_OR:
+ fallthrough
+ case BITWISE_AND:
+ fallthrough
+ case BITWISE_XOR:
+ return typeChecks{
+ left: isFloat64,
+ right: isFloat64,
+ }
+ case PLUS:
+ return typeChecks{
+ combined: additionTypeCheck,
+ }
+ case MINUS:
+ fallthrough
+ case MULTIPLY:
+ fallthrough
+ case DIVIDE:
+ fallthrough
+ case MODULUS:
+ fallthrough
+ case EXPONENT:
+ return typeChecks{
+ left: isFloat64,
+ right: isFloat64,
+ }
+ case NEGATE:
+ return typeChecks{
+ right: isFloat64,
+ }
+ case INVERT:
+ return typeChecks{
+ right: isBool,
+ }
+ case BITWISE_NOT:
+ return typeChecks{
+ right: isFloat64,
+ }
+ case TERNARY_TRUE:
+ return typeChecks{
+ left: isBool,
+ }
+
+ // unchecked cases
+ case EQ:
+ fallthrough
+ case NEQ:
+ return typeChecks{}
+ case TERNARY_FALSE:
+ fallthrough
+ case COALESCE:
+ fallthrough
+ default:
+ return typeChecks{}
+ }
+}
+
+/*
+ During stage planning, stages of equal precedence are parsed such that they'll be evaluated in reverse order.
+ For commutative operators like "+" or "*", it's no big deal. But for order-specific operators, it ruins the expected result.
+*/
+func reorderStages(rootStage *evaluationStage) {
+
+ // traverse every rightStage until we find multiples in a row of the same precedence.
+ var identicalPrecedences []*evaluationStage
+ var currentStage, nextStage *evaluationStage
+ var precedence, currentPrecedence operatorPrecedence
+
+ nextStage = rootStage
+ precedence = findOperatorPrecedenceForSymbol(rootStage.symbol)
+
+ for nextStage != nil {
+
+ currentStage = nextStage
+ nextStage = currentStage.rightStage
+
+ // left depth first, since this entire method only looks for precedences down the right side of the tree
+ if currentStage.leftStage != nil {
+ reorderStages(currentStage.leftStage)
+ }
+
+ currentPrecedence = findOperatorPrecedenceForSymbol(currentStage.symbol)
+
+ if currentPrecedence == precedence {
+ identicalPrecedences = append(identicalPrecedences, currentStage)
+ continue
+ }
+
+ // precedence break.
+ // See how many in a row we had, and reorder if there's more than one.
+ if len(identicalPrecedences) > 1 {
+ mirrorStageSubtree(identicalPrecedences)
+ }
+
+ identicalPrecedences = []*evaluationStage{currentStage}
+ precedence = currentPrecedence
+ }
+
+ if len(identicalPrecedences) > 1 {
+ mirrorStageSubtree(identicalPrecedences)
+ }
+}
+
+/*
+ Performs a "mirror" on a subtree of stages.
+ This mirror functionally inverts the order of execution for all members of the [stages] list.
+ That list is assumed to be a root-to-leaf (ordered) list of evaluation stages, where each is a right-hand stage of the last.
+*/
+func mirrorStageSubtree(stages []*evaluationStage) {
+
+ var rootStage, inverseStage, carryStage, frontStage *evaluationStage
+
+ stagesLength := len(stages)
+
+ // reverse all right/left
+ for _, frontStage = range stages {
+
+ carryStage = frontStage.rightStage
+ frontStage.rightStage = frontStage.leftStage
+ frontStage.leftStage = carryStage
+ }
+
+ // end left swaps with root right
+ rootStage = stages[0]
+ frontStage = stages[stagesLength-1]
+
+ carryStage = frontStage.leftStage
+ frontStage.leftStage = rootStage.rightStage
+ rootStage.rightStage = carryStage
+
+ // for all non-root non-end stages, right is swapped with inverse stage right in list
+ for i := 0; i < (stagesLength-2)/2+1; i++ {
+
+ frontStage = stages[i+1]
+ inverseStage = stages[stagesLength-i-1]
+
+ carryStage = frontStage.rightStage
+ frontStage.rightStage = inverseStage.rightStage
+ inverseStage.rightStage = carryStage
+ }
+
+ // swap all other information with inverse stages
+ for i := 0; i < stagesLength/2; i++ {
+
+ frontStage = stages[i]
+ inverseStage = stages[stagesLength-i-1]
+ frontStage.swapWith(inverseStage)
+ }
+}
+
+/*
+ Recurses through all operators in the entire tree, eliding operators where both sides are literals.
+*/
+func elideLiterals(root *evaluationStage) *evaluationStage {
+
+ if root.leftStage != nil {
+ root.leftStage = elideLiterals(root.leftStage)
+ }
+
+ if root.rightStage != nil {
+ root.rightStage = elideLiterals(root.rightStage)
+ }
+
+ return elideStage(root)
+}
+
+/*
+ Elides a specific stage, if possible.
+ Returns the unmodified [root] stage if it cannot or should not be elided.
+ Otherwise, returns a new stage representing the condensed value from the elided stages.
+*/
+func elideStage(root *evaluationStage) *evaluationStage {
+
+ var leftValue, rightValue, result interface{}
+ var err error
+
+ // both the left and right stages must be non-nil literals in order to elide.
+ if root.rightStage == nil ||
+ root.rightStage.symbol != LITERAL ||
+ root.leftStage == nil ||
+ root.leftStage.symbol != LITERAL {
+ return root
+ }
+
+ // don't elide some operators
+ switch root.symbol {
+ case SEPARATE:
+ fallthrough
+ case IN:
+ return root
+ }
+
+ // both sides are values, get their actual values.
+ // errors should be near-impossible here. If we encounter them, just abort this optimization.
+ leftValue, err = root.leftStage.operator(nil, nil, nil)
+ if err != nil {
+ return root
+ }
+
+ rightValue, err = root.rightStage.operator(nil, nil, nil)
+ if err != nil {
+ return root
+ }
+
+ // typecheck, since the grammar checker is a bit loose with which operator symbols go together.
+ err = typeCheck(root.leftTypeCheck, leftValue, root.symbol, root.typeErrorFormat)
+ if err != nil {
+ return root
+ }
+
+ err = typeCheck(root.rightTypeCheck, rightValue, root.symbol, root.typeErrorFormat)
+ if err != nil {
+ return root
+ }
+
+ if root.typeCheck != nil && !root.typeCheck(leftValue, rightValue) {
+ return root
+ }
+
+ // pre-calculate, and return a new stage representing the result.
+ result, err = root.operator(leftValue, rightValue, nil)
+ if err != nil {
+ return root
+ }
+
+ return &evaluationStage{
+ symbol: LITERAL,
+ operator: makeLiteralStage(result),
+ }
+}
diff --git a/vendor/github.com/Knetic/govaluate/test.sh b/vendor/github.com/Knetic/govaluate/test.sh
new file mode 100644
index 000000000..11aa8b332
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/test.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Script that runs tests, code coverage, and benchmarks all at once.
+# Builds a symlink in /tmp, mostly to avoid messing with GOPATH at the user's shell level.
+
+TEMPORARY_PATH="/tmp/govaluate_test"
+SRC_PATH="${TEMPORARY_PATH}/src"
+FULL_PATH="${TEMPORARY_PATH}/src/govaluate"
+
+# set up temporary directory
+rm -rf "${FULL_PATH}"
+mkdir -p "${SRC_PATH}"
+
+ln -s $(pwd) "${FULL_PATH}"
+export GOPATH="${TEMPORARY_PATH}"
+
+pushd "${TEMPORARY_PATH}/src/govaluate"
+
+# run the actual tests.
+export GOVALUATE_TORTURE_TEST="true"
+go test -bench=. -benchmem #-coverprofile coverage.out
+status=$?
+
+if [ "${status}" != 0 ];
+then
+ exit $status
+fi
+
+# coverage
+# disabled because travis go1.4 seems not to support it suddenly?
+#go tool cover -func=coverage.out
+
+popd
diff --git a/vendor/github.com/Knetic/govaluate/tokenStream.go b/vendor/github.com/Knetic/govaluate/tokenStream.go
new file mode 100644
index 000000000..d0029209d
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/tokenStream.go
@@ -0,0 +1,36 @@
+package govaluate
+
+type tokenStream struct {
+ tokens []ExpressionToken
+ index int
+ tokenLength int
+}
+
+func newTokenStream(tokens []ExpressionToken) *tokenStream {
+
+ var ret *tokenStream
+
+ ret = new(tokenStream)
+ ret.tokens = tokens
+ ret.tokenLength = len(tokens)
+ return ret
+}
+
+func (this *tokenStream) rewind() {
+ this.index -= 1
+}
+
+func (this *tokenStream) next() ExpressionToken {
+
+ var token ExpressionToken
+
+ token = this.tokens[this.index]
+
+ this.index += 1
+ return token
+}
+
+func (this tokenStream) hasNext() bool {
+
+ return this.index < this.tokenLength
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore b/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore
new file mode 100644
index 000000000..ef8fdbd5f
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore
@@ -0,0 +1,28 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+coverage.html
+coverage.txt
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+
+# Intellij
+*.iml
+.idea/
+
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md b/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md
new file mode 100644
index 000000000..2b5df641e
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md
@@ -0,0 +1,102 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [v0.5.0] 2024-04-21
+
+### Changed
+
+- opentelemetry updated to 1.25.0
+
+### Removed
+
+- Drop support for [Go 1.20](https://go.dev/doc/go1.20)
+
+## [v0.4.3] 2023-11-02
+
+### Fixed
+
+- fix: race on batch processing (#30)
+
+## [v0.4.2] 2023-10-30
+
+### Fixed
+
+- fix: accept any 2xx status code in otlplogshttp client (#26)
+- fix: show the error body when status code is unknown (#27)
+- fix: grpc rapid-reset vulnerability (#28)
+
+## [v0.4.1] 2023-10-13
+
+### Fixed
+
+- autoconfiguration always emit error message on initialization (#23)
+- fix variables and private methods names (#22)
+- merge the logRecord resources with those provided by the logProvider (#21)
+
+## [v0.4.0] 2023-10-02
+
+### Changed
+
+- opentelemetry updated to 1.19.0
+- drop compatibility guarantee of Go [1.19](https://go.dev/doc/go1.19)
+
+## [v0.3.0] 2023-09-13
+
+### Changed
+
+- opentelemetry updated to 1.18.0
+
+### Fixed
+
+- stdoutlogs writer parameter was ignored
+
+## [v0.2.0] 2023-08-30
+
+### Changed
+
+- opentelemetry updated to 1.17.0
+- `github.com/golang/protobuf/proto` replaced with `google.golang.org/protobuf`
+- `otlp/internal` package moved to `otlp/otlplogs/internal`
+- more unit tests added
+
+## [v0.1.2] 2023-08-05
+
+### Fixed
+
+- reverted to all-in-one package
+- inconsistent v0.1.0 go package
+
+## [v0.1.0] 2023-08-05
+
+### Added
+
+- otlplogsgrpc exporter with `grpc` protocol
+- `http/json` protocol supported in otlplogshttp exporter
+- `stdout` logs logger
+- Package split into separate `otel`, `sdk`, `exporters/otlp/otlplogs` and `exporters/stdout/stdoutlogs` packages
+- `OTEL_EXPORTER_OTLP_PROTOCOL` env variable to configure `grpc`, `http/protobuf` and `http/json` otlp formats with OTEL
+ logs exporter
+- `autoconfigure` sdk package with `OTEL_LOGS_EXPORTER` env variable support with `none`,`otlp` and `logging` options to
+ autoconfigure logger provider
+
+## [v0.0.1] 2023-07-25
+
+### Added
+
+- implementation of [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api) with Stable API and SDK
+ API interfaces.
+- Package all-in-one for logs `github.com/agoda-com/opentelemetry-logs-go`
+- Package module `semconv`
+ with [Logs Exceptions Semantic Conventions](https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#attributes)
+- Package module `logs`
+ with [Stable Log Model](https://opentelemetry.io/docs/specs/otel/logs/data-model), [Logger](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#logger)
+ and [LoggerProvider](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#loggerprovider) interfaces
+- Package module `sdk` with [Logger SDK](https://opentelemetry.io/docs/specs/otel/logs/sdk/) implementation
+- Package module `exporters`
+ with [Built-in processors](https://opentelemetry.io/docs/specs/otel/logs/sdk/#built-in-processors), `otlp` interface
+ and `noop` and `http/protobuf` exporters
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS b/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS
new file mode 100644
index 000000000..9f8212f99
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS
@@ -0,0 +1,13 @@
+#####################################################
+#
+# List of approvers for this repository
+#
+#####################################################
+#
+# Learn about CODEOWNERS file format:
+# https://help.github.com/en/articles/about-code-owners
+#
+
+* @chameleon82
+
+CODEOWNERS @chameleon82
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE b/vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile b/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile
new file mode 100644
index 000000000..df59b790c
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile
@@ -0,0 +1,9 @@
+.PHONY: test-coverage
+
+test-coverage:
+ go test -coverprofile=coverage.out ./...
+ go tool cover -html=coverage.out -o coverage.html
+
+.PHONY: test-race
+test-race:
+ go test -race ./...
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md b/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md
new file mode 100644
index 000000000..69ff62d10
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md
@@ -0,0 +1,121 @@
+# OpenTelemetry-Logs-Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/agoda-com/opentelemetry-logs-go.svg)](https://pkg.go.dev/github.com/agoda-com/opentelemetry-logs-go)
+[![codecov](https://codecov.io/github/agoda-com/opentelemetry-logs-go/graph/badge.svg?token=F1NW0R0W75)](https://codecov.io/github/agoda-com/opentelemetry-logs-go)
+
+OpenTelemetry-Logs-Go is the [Go](https://golang.org) implementation of [OpenTelemetry](https://opentelemetry.io/) Logs.
+It provides an API to send logging data directly to observability platforms. It is an extension of the official
+[open-telemetry/opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go) that adds support for Logs.
+
+## Project Life Cycle
+
+This project was created due to the log module freeze in the
+official [opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go) repository:
+
+```
+The Logs signal development is halted for this project while we stablize the Metrics SDK.
+No Logs Pull Requests are currently being accepted.
+```
+
+This project will be deprecated once the Logs module in the official
+[opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go) repository reaches "Stable" status.
+
+## Compatibility
+
+Minimum supported Go version: `1.21`
+
+## Project packages
+
+| Packages | Description |
+|----------------------------------|----------------------------------------------------------------------------|
+| [autoconfigure](./autoconfigure) | Autoconfiguration SDK. Allows configuring log exporters with environment variables |
+| [sdk](./sdk)                     | OpenTelemetry Logs SDK                                                      |
+| [exporters/otlp](./exporters) | OTLP format exporter |
+| [exporters/stdout](./exporters) | Console exporter |
+
+## Getting Started
+
+This is an implementation of the [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/) and is not
+intended to be used by developers directly. It is provided for logging library authors to build log appenders, which use
+this API to bridge between existing logging libraries and the OpenTelemetry log data model.
+
+The example below shows how a logging library could be instrumented with this API:
+
+```go
+package myInstrumentedLogger
+
+import (
+ otel "github.com/agoda-com/opentelemetry-logs-go"
+ "github.com/agoda-com/opentelemetry-logs-go/logs"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+const (
+ instrumentationName = "otel/zap"
+ instrumentationVersion = "0.0.1"
+)
+
+var (
+ logger = otel.GetLoggerProvider().Logger(
+ instrumentationName,
+ logs.WithInstrumentationVersion(instrumentationVersion),
+ logs.WithSchemaURL(semconv.SchemaURL),
+ )
+)
+
+func (c otlpCore) Write(ent zapcore.Entry, fields []zapcore.Field) error {
+
+ lrc := logs.LogRecordConfig{
+ Body: &ent.Message,
+ ...
+ }
+ logRecord := logs.NewLogRecord(lrc)
+ logger.Emit(logRecord)
+}
+```
+
+and the corresponding application initialization code:
+
+```go
+package main
+
+import (
+ "os"
+ "context"
+ "github.com/agoda-com/opentelemetry-logs-go"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+ sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+)
+
+func newResource() *resource.Resource {
+ host, _ := os.Hostname()
+ return resource.NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ServiceName("otlplogs-example"),
+ semconv.ServiceVersion("0.0.1"),
+ semconv.HostName(host),
+ )
+}
+
+func main() {
+ ctx := context.Background()
+
+ exporter, _ := otlplogs.NewExporter(ctx, otlplogs.WithClient(otlplogshttp.NewClient()))
+ loggerProvider := sdk.NewLoggerProvider(
+ sdk.WithBatcher(exporter),
+ sdk.WithResource(newResource()),
+ )
+ otel.SetLoggerProvider(loggerProvider)
+
+ myInstrumentedLogger.Info("Hello OpenTelemetry")
+}
+```
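+
+The exporter can also be configured through the `OTEL_EXPORTER_OTLP_*` environment variables handled by the
+`envconfig`/`otlpconfig` packages vendored alongside this README. The following is a minimal sketch; the endpoint
+value and collector address are assumptions for illustration, not defaults introduced by this project:
+
+```go
+package main
+
+import (
+	"context"
+	"os"
+
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs"
+)
+
+func main() {
+	// Illustrative values only; point these at a reachable OTLP collector.
+	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4318")
+	os.Setenv("OTEL_EXPORTER_OTLP_PROTOCOL", "http/protobuf")
+
+	// With no explicit client, NewExporter picks the client from
+	// OTEL_EXPORTER_OTLP_PROTOCOL and reads the endpoint from the environment.
+	exporter, err := otlplogs.NewExporter(context.Background())
+	if err != nil {
+		panic(err)
+	}
+	defer exporter.Shutdown(context.Background())
+}
+```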
+
+## References
+
+Logger Bridge API implementations for `zap`, `slog`, `zerolog`, and other
+loggers can be found at https://github.com/agoda-com/opentelemetry-go
+
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go
new file mode 100644
index 000000000..11886f1ed
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go
@@ -0,0 +1,50 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogs
+
+import (
+ "context"
+ logspb "go.opentelemetry.io/proto/otlp/logs/v1"
+)
+
+type Client interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Start should establish connection(s) to endpoint(s). It is
+ // called just once by the exporter, so the implementation
+ // does not need to worry about idempotence and locking.
+ Start(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Stop should close the connections. The function is called
+ // only once by the exporter, so the implementation does not
+ // need to worry about idempotence, but it may be called
+ // concurrently with UploadLogs, so proper
+ // locking is required. The function serves as a
+ // synchronization point - after the function returns, the
+ // process of closing connections is assumed to be finished.
+ Stop(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // UploadLogs should transform the passed logs to the wire
+ // format and send it to the collector. May be called
+ // concurrently.
+ UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error
+}
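
The `Client` contract above is small enough to stub out, for example in tests that must not reach a real collector. A minimal sketch under that assumption (the `noopClient` type and helper below are illustrative, not part of the vendored package):

```go
package otlplogstest

import (
	"context"

	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs"
	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
)

// noopClient satisfies otlplogs.Client but discards every batch.
type noopClient struct{}

func (noopClient) Start(ctx context.Context) error { return nil }
func (noopClient) Stop(ctx context.Context) error  { return nil }
func (noopClient) UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error {
	// Drop the batch; a real client would marshal and send it here.
	return nil
}

// newDiscardingExporter wires the stub in through the regular constructor.
func newDiscardingExporter(ctx context.Context) (*otlplogs.Exporter, error) {
	return otlplogs.NewExporter(ctx, otlplogs.WithClient(noopClient{}))
}
```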
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go
new file mode 100644
index 000000000..8b59295ad
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go
@@ -0,0 +1,112 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogs
+
+import (
+ "context"
+ "errors"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform"
+ logssdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+ "sync"
+)
+
+var (
+ errAlreadyStarted = errors.New("already started")
+)
+
+type Exporter struct {
+ client Client
+
+ mu sync.RWMutex
+ started bool
+
+ startOnce sync.Once
+ stopOnce sync.Once
+}
+
+// Start establishes a connection to the receiving endpoint.
+func (e *Exporter) Start(ctx context.Context) error {
+ var err = errAlreadyStarted
+ e.startOnce.Do(func() {
+ e.mu.Lock()
+ e.started = true
+ e.mu.Unlock()
+ err = e.client.Start(ctx)
+ })
+
+ return err
+}
+
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ e.mu.RLock()
+ started := e.started
+ e.mu.RUnlock()
+
+ if !started {
+ return nil
+ }
+
+ var err error
+
+ e.stopOnce.Do(func() {
+ err = e.client.Stop(ctx)
+ e.mu.Lock()
+ e.started = false
+ e.mu.Unlock()
+ })
+
+ return err
+}
+
+// Export exports a batch of logs.
+func (e *Exporter) Export(ctx context.Context, ll []logssdk.ReadableLogRecord) error {
+ protoLogs := logstransform.Logs(ll)
+ if len(protoLogs) == 0 {
+ return nil
+ }
+
+ err := e.client.UploadLogs(ctx, protoLogs)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// New creates new exporter with client
+// Deprecated: Use NewExporter instead. Will be removed in v0.1.0
+func New(ctx context.Context, client Client) (*Exporter, error) {
+ return NewExporter(ctx, WithClient(client))
+}
+
+// NewExporter creates new Exporter
+func NewExporter(ctx context.Context, options ...ExporterOption) (*Exporter, error) {
+ // Create new client using env variables
+ config := NewExporterConfig(options...)
+
+ for _, opt := range options {
+ config = opt.apply(config)
+ }
+
+ exp := &Exporter{
+ client: config.client,
+ }
+
+ if err := exp.Start(ctx); err != nil {
+ return nil, err
+ }
+ return exp, nil
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go
new file mode 100644
index 000000000..36009a6ba
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/agoda-com/opentelemetry-logs-go/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '="), "parse headers", "input", header)
+ continue
+ }
+ name, err := url.QueryUnescape(n)
+ if err != nil {
+ global.Error(err, "escape header key", "key", n)
+ continue
+ }
+ trimmedName := strings.TrimSpace(name)
+ value, err := url.QueryUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go
new file mode 100644
index 000000000..04b78fcae
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go
@@ -0,0 +1,160 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logstransform
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*commonpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// Iterator transforms an attribute iterator into OTLP key-values.
+func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*commonpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+// ResourceAttributes transforms a Resource into OTLP key-values.
+func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
+ return Iterator(res.Iter())
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
+ return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *commonpb.AnyValue {
+ av := new(commonpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &commonpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &commonpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &commonpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &commonpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &commonpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go
new file mode 100644
index 000000000..8dc278f22
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go
@@ -0,0 +1,126 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logstransform
+
+import (
+ sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+ logspb "go.opentelemetry.io/proto/otlp/logs/v1"
+ resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+ "time"
+)
+
+// Logs transforms OpenTelemetry LogRecords into OTLP ResourceLogs.
+func Logs(sdl []sdk.ReadableLogRecord) []*logspb.ResourceLogs {
+
+ var resourceLogs []*logspb.ResourceLogs
+
+ for _, sd := range sdl {
+
+ lr := logRecord(sd)
+
+ var is *commonpb.InstrumentationScope
+ var schemaURL = ""
+ if sd.InstrumentationScope() != nil {
+ is = &commonpb.InstrumentationScope{
+ Name: sd.InstrumentationScope().Name,
+ Version: sd.InstrumentationScope().Version,
+ }
+ schemaURL = sd.InstrumentationScope().SchemaURL
+ }
+
+ // Create a log resource
+ resourceLog := &logspb.ResourceLogs{
+ Resource: &resourcepb.Resource{
+ Attributes: KeyValues(sd.Resource().Attributes()),
+ },
+ // provide a resource description if available
+ ScopeLogs: []*logspb.ScopeLogs{
+ {
+ Scope: is,
+ SchemaUrl: schemaURL,
+ LogRecords: []*logspb.LogRecord{lr},
+ },
+ },
+ }
+
+ resourceLogs = append(resourceLogs, resourceLog)
+ }
+
+ return resourceLogs
+}
+
+func logRecord(record sdk.ReadableLogRecord) *logspb.LogRecord {
+ var body *commonpb.AnyValue = nil
+ if record.Body() != nil {
+ body = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_StringValue{
+ StringValue: *record.Body(),
+ },
+ }
+ }
+
+ var traceIDBytes []byte
+ if record.TraceId() != nil {
+ tid := *record.TraceId()
+ traceIDBytes = tid[:]
+ }
+ var spanIDBytes []byte
+ if record.SpanId() != nil {
+ sid := *record.SpanId()
+ spanIDBytes = sid[:]
+ }
+ var traceFlags byte = 0
+ if record.TraceFlags() != nil {
+ tf := *record.TraceFlags()
+ traceFlags = byte(tf)
+ }
+ var ts time.Time
+ if record.Timestamp() != nil {
+ ts = *record.Timestamp()
+ } else {
+ ts = record.ObservedTimestamp()
+ }
+
+ var kv []*commonpb.KeyValue
+ if record.Attributes() != nil {
+ kv = KeyValues(*record.Attributes())
+ }
+
+ var st = ""
+ if record.SeverityText() != nil {
+ st = *record.SeverityText()
+ }
+
+ var sn = logspb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
+ if record.SeverityNumber() != nil {
+ sn = logspb.SeverityNumber(*record.SeverityNumber())
+ }
+
+ logRecord := &logspb.LogRecord{
+ TimeUnixNano: uint64(ts.UnixNano()),
+ ObservedTimeUnixNano: uint64(record.ObservedTimestamp().UnixNano()),
+ TraceId: traceIDBytes, // provide the associated trace ID if available
+ SpanId: spanIDBytes, // provide the associated span ID if available
+ Flags: uint32(traceFlags), // provide the associated trace flags
+ Body: body, // provide the associated log body if available
+ Attributes: kv, // provide additional log attributes if available
+ SeverityText: st,
+ SeverityNumber: sn,
+ }
+ return logRecord
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go
new file mode 100644
index 000000000..0827da0c6
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go
@@ -0,0 +1,192 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlpconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+)
+
+// DefaultEnvOptionsReader is the default environments reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+func stringToProtocol(u string) Protocol {
+ switch strings.ToLower(u) {
+ case string(ExporterProtocolGrpc):
+ return ExporterProtocolGrpc
+ case string(ExporterProtocolHttpProtobuf):
+ return ExporterProtocolHttpProtobuf
+ case string(ExporterProtocolHttpJson):
+ return ExporterProtocolHttpJson
+ default:
+ return ExporterProtocolHttpProtobuf
+ }
+}
+
+// ApplyEnvProtocol Apply Protocol from environment to provided default value
+// This function is subject to change or removal in future versions.
+func ApplyEnvProtocol(protocol Protocol) Protocol {
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithString("PROTOCOL", func(s string) {
+ protocol = stringToProtocol(s)
+ }),
+ envconfig.WithString("LOGS_PROTOCOL", func(s string) {
+ protocol = stringToProtocol(s)
+ }),
+ )
+
+ return protocol
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Logs.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and the signals are sent to these paths relative to that.
+ cfg.Logs.URLPath = path.Join(u.Path, DefaultLogsPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("LOGS_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Logs.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if an URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Logs.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithString("PROTOCOL", func(s string) {
+ opts = append(opts, withProtocol(s))
+ }),
+ envconfig.WithString("LOGS_PROTOCOL", func(s string) {
+ opts = append(opts, withProtocol(s))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("LOGS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("LOGS_CLIENT_CERTIFICATE", "LOGS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("LOGS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("LOGS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("LOGS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("LOGS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ )
+
+ return opts
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Logs.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
+
+func withProtocol(b string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.Protocol = stringToProtocol(b)
+ return cfg
+ })
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go
new file mode 100644
index 000000000..83112bba7
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go
@@ -0,0 +1,348 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlpconfig
+
+import (
+ "crypto/tls"
+ "fmt"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+ "go.opentelemetry.io/otel"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+ "path"
+ "strings"
+ "time"
+)
+
+const (
+ // DefaultLogsPath is a default URL path for endpoint that
+ // receives logs.
+ DefaultLogsPath string = "/v1/logs"
+ // DefaultTimeout is a default max waiting time for the backend to process
+ // each logs batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ SignalConfig struct {
+ Endpoint string
+ Protocol Protocol
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Logs SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
+func CleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = fmt.Sprintf("/%s", tmp)
+ }
+ return tmp
+}
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Logs: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultLogsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Logs.URLPath = CleanPath(cfg.Logs.URLPath, DefaultLogsPath)
+ return cfg
+}
+
+func GetUserAgentHeader() string {
+ return "OTel OTLP Exporter Go/" + otel.Version()
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = fmt.Sprintf("/%s", tmp)
+ }
+ return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ cfg := Config{
+ Logs: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultLogsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ DialOptions: []grpc.DialOption{grpc.WithUserAgent(GetUserAgentHeader())},
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Logs.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Logs.GRPCCredentials))
+ } else if cfg.Logs.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Logs.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Logs.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if len(cfg.DialOptions) != 0 {
+ cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logics
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.Endpoint = endpoint
+ return cfg
+ })
+}
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Logs.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Logs.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.Timeout = duration
+ return cfg
+ })
+}
+
+func WithProtocol(protocol Protocol) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Logs.Protocol = protocol
+ return cfg
+ })
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go
new file mode 100644
index 000000000..d361b0383
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,58 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlpconfig
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+type Protocol string
+
+const (
+ ExporterProtocolGrpc Protocol = "grpc"
+ ExporterProtocolHttpProtobuf Protocol = "http/protobuf"
+ ExporterProtocolHttpJson Protocol = "http/json"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+ // MarshalProto tells the driver to send using the protobuf binary format.
+ MarshalProto Marshaler = iota
+ // MarshalJSON tells the driver to send using json format.
+ MarshalJSON
+)
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go
new file mode 100644
index 000000000..88cf551ed
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go
@@ -0,0 +1,20 @@
+package otlpconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+)
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go
new file mode 100644
index 000000000..39a421963
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+var _ error = PartialSuccess{}
+
+// LogRecordPartialSuccessError returns an error describing a partial success
+// response for the log signal.
+func LogRecordPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "logs",
+ }
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go
new file mode 100644
index 000000000..11c76e53c
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go
@@ -0,0 +1,152 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package retry
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether to not retry sending batches in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns if an error is retry-able and if an explicit throttle
+// duration should be honored that was included in the error.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %s", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
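
A minimal sketch of how an exporter client drives this helper, assuming a short custom Config, an always-retryable evaluate function, and a stand-in export call that keeps failing; the package lives under internal/, so it is only reachable from inside this module:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
)

func main() {
	cfg := retry.Config{
		Enabled:         true,
		InitialInterval: 100 * time.Millisecond,
		MaxInterval:     time.Second,
		MaxElapsedTime:  2 * time.Second,
	}

	// Treat every error as retryable, with no explicit throttle hint.
	send := cfg.RequestFunc(func(err error) (bool, time.Duration) { return true, 0 })

	// The export call keeps failing, so after roughly MaxElapsedTime the
	// wrapper gives up with "max retry time elapsed: transient failure".
	err := send(context.Background(), func(ctx context.Context) error {
		return errors.New("transient failure")
	})
	fmt.Println(err)
}
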
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go
new file mode 100644
index 000000000..1ce4df527
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go
@@ -0,0 +1,67 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogs
+
+import (
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
+)
+
+type ExporterConfig struct {
+ client Client
+}
+
+type ExporterOption interface {
+ apply(ExporterConfig) ExporterConfig
+}
+
+type exporterOptionFunc func(ExporterConfig) ExporterConfig
+
+func (fn exporterOptionFunc) apply(config ExporterConfig) ExporterConfig {
+ return fn(config)
+}
+
+// NewExporterConfig creates a new configuration for the exporter.
+func NewExporterConfig(options ...ExporterOption) ExporterConfig {
+
+ config := ExporterConfig{}
+
+ for _, option := range options {
+ config = option.apply(config)
+ }
+
+ if config.client == nil {
+ // Default is http/protobuf client
+ protocol := otlpconfig.ApplyEnvProtocol(otlpconfig.ExporterProtocolHttpProtobuf)
+
+ if protocol == otlpconfig.ExporterProtocolGrpc {
+ config.client = otlplogsgrpc.NewClient()
+ } else {
+ config.client = otlplogshttp.NewClient()
+ }
+ }
+
+ return config
+}
+
+func WithClient(client Client) ExporterOption {
+ return exporterOptionFunc(func(cfg ExporterConfig) ExporterConfig {
+ cfg.client = client
+ return cfg
+ })
+}
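
A small sketch of how this selection logic is meant to be consumed, assuming the HTTP client below satisfies the package's Client interface; the exporter constructor that eventually receives the config is not part of this file:

package main

import (
	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs"
	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
)

func main() {
	// Explicitly select the http/protobuf client; omitting WithClient lets
	// NewExporterConfig fall back to the environment-driven default above.
	cfg := otlplogs.NewExporterConfig(
		otlplogs.WithClient(otlplogshttp.NewClient()),
	)
	_ = cfg // handed to the exporter constructor elsewhere in the package
}
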
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go
new file mode 100644
index 000000000..bd4b19cd8
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go
@@ -0,0 +1,292 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogsgrpc
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+
+ "go.opentelemetry.io/otel"
+ collogspb "go.opentelemetry.io/proto/otlp/collector/logs/v1"
+ logspb "go.opentelemetry.io/proto/otlp/logs/v1"
+
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+type grpcClient struct {
+ endpoint string
+ dialOpts []grpc.DialOption
+ metadata metadata.MD
+ exportTimeout time.Duration
+ requestFunc retry.RequestFunc
+
+ // stopCtx is used as a parent context for all exports. Therefore, when it
+ // is canceled with the stopFunc all exports are canceled.
+ stopCtx context.Context
+ // stopFunc cancels stopCtx, stopping any active exports.
+ stopFunc context.CancelFunc
+
+ // ourConn keeps track of where conn was created: true if created here on
+ // Start, or false if passed with an option. This is important on Shutdown
+ // as the conn should only be closed if created here on start. Otherwise,
+ // it is up to the processes that passed the conn to close it.
+ ourConn bool
+ conn *grpc.ClientConn
+ tscMu sync.RWMutex
+ tsc collogspb.LogsServiceClient
+}
+
+func NewClient(opts ...Option) *grpcClient {
+ cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ c := &grpcClient{
+ endpoint: cfg.Logs.Endpoint,
+ exportTimeout: cfg.Logs.Timeout,
+ requestFunc: cfg.RetryConfig.RequestFunc(retryable),
+ dialOpts: cfg.DialOptions,
+ stopCtx: ctx,
+ stopFunc: cancel,
+ conn: cfg.GRPCConn,
+ }
+
+ if len(cfg.Logs.Headers) > 0 {
+ c.metadata = metadata.New(cfg.Logs.Headers)
+ }
+
+ return c
+}
+
+// Start establishes a gRPC connection to the collector.
+func (c *grpcClient) Start(ctx context.Context) error {
+ if c.conn == nil {
+ // If the caller did not provide a ClientConn when the grpcClient was
+ // created, create one using the configuration they did provide.
+ conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
+ if err != nil {
+ return err
+ }
+ // Keep track that we own the lifecycle of this conn and need to close
+ // it on Shutdown.
+ c.ourConn = true
+ c.conn = conn
+ }
+
+ // The otlplogs.Client interface states this method is called just once,
+ // so no need to check if already started.
+ c.tscMu.Lock()
+ c.tsc = collogspb.NewLogsServiceClient(c.conn)
+ c.tscMu.Unlock()
+
+ return nil
+}
+
+var errAlreadyStopped = errors.New("the grpcClient is already stopped")
+
+// Stop shuts down the grpcClient.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the grpcClient. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// This method synchronizes with the UploadLogs method of the grpcClient. It
+// will wait for any active calls to that method to complete unimpeded, or it
+// will cancel any active calls if ctx expires. If ctx expires, the context
+// error will be forwarded as the returned error. All grpcClient held resources
+// will still be released in this situation.
+//
+// If the grpcClient has already stopped, an error will be returned describing
+// this.
+func (c *grpcClient) Stop(ctx context.Context) error {
+ // Make sure to return context error if the context is done when calling this method.
+ err := ctx.Err()
+
+ // Acquire the c.tscMu lock within the ctx lifetime.
+ acquired := make(chan struct{})
+ go func() {
+ c.tscMu.Lock()
+ close(acquired)
+ }()
+
+ select {
+ case <-ctx.Done():
+ // The Stop timeout is reached. Kill any remaining exports to force
+ // the clear of the lock and save the timeout error to return and
+ // signal the shutdown timed out before cleanly stopping.
+ c.stopFunc()
+ err = ctx.Err()
+
+ // To ensure the grpcClient is not left in a dirty state c.tsc needs to be
+ // set to nil. To avoid the race condition when doing this, ensure
+ // that all the exports are killed (initiated by c.stopFunc).
+ <-acquired
+ case <-acquired:
+ }
+ // Hold the tscMu lock for the rest of the function to ensure no new
+ // exports are started.
+ defer c.tscMu.Unlock()
+
+ // The otlplogs.Client interface states this method is called only
+ // once, but there is no guarantee it is called after Start. Ensure the
+ // grpcClient is started before doing anything and let the caller know if they
+ // made a mistake.
+ if c.tsc == nil {
+ return errAlreadyStopped
+ }
+
+ // Clear c.tsc to signal the grpcClient is stopped.
+ c.tsc = nil
+
+ if c.ourConn {
+ closeErr := c.conn.Close()
+ // A context timeout error takes precedence over this error.
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }
+ return err
+}
+
+var errShutdown = errors.New("the grpcClient is shutdown")
+
+// UploadLogs sends log records.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the grpcClient was created with.
+func (c *grpcClient) UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error {
+ // Hold a read lock to ensure a shut down initiated after this starts does
+ // not abandon the export. This read lock acquire has less priority than a
+ // write lock acquire (i.e. Stop), meaning if the grpcClient is shutting down
+ // this will come after the shut down.
+ c.tscMu.RLock()
+ defer c.tscMu.RUnlock()
+
+ if c.tsc == nil {
+ return errShutdown
+ }
+
+ ctx, cancel := c.exportContext(ctx)
+ defer cancel()
+
+ return c.requestFunc(ctx, func(iCtx context.Context) error {
+ resp, err := c.tsc.Export(iCtx, &collogspb.ExportLogsServiceRequest{
+ ResourceLogs: protoLogs,
+ })
+ if resp != nil && resp.PartialSuccess != nil {
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedLogRecords()
+ if n != 0 || msg != "" {
+ err := internal.LogRecordPartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ // nil is converted to OK.
+ if status.Code(err) == codes.OK {
+ // Success.
+ return nil
+ }
+ return err
+ })
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *grpcClient) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+
+ if c.exportTimeout > 0 {
+ ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(parent)
+ }
+
+ if c.metadata.Len() > 0 {
+ ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+ }
+
+ // Unify the grpcClient stopCtx with the parent.
+ go func() {
+ select {
+ case <-ctx.Done():
+ case <-c.stopCtx.Done():
+ // Cancel the export as the shutdown has timed out.
+ cancel()
+ }
+ }()
+
+ return ctx, cancel
+}
+
+// retryable returns if err identifies a request that can be retried and a
+// duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+ s := status.Convert(err)
+ switch s.Code() {
+ case codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.ResourceExhausted,
+ codes.Aborted,
+ codes.OutOfRange,
+ codes.Unavailable,
+ codes.DataLoss:
+ return true, throttleDelay(s)
+ }
+
+ // Not a retry-able error.
+ return false, 0
+}
+
+// throttleDelay returns a duration to wait for if an explicit throttle time
+// is included in the response status.
+func throttleDelay(s *status.Status) time.Duration {
+ for _, detail := range s.Details() {
+ if t, ok := detail.(*errdetails.RetryInfo); ok {
+ return t.RetryDelay.AsDuration()
+ }
+ }
+ return 0
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *grpcClient) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Endpoint string
+ }{
+ Type: "otlphttpgrpc",
+ Endpoint: c.endpoint,
+ }
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go
new file mode 100644
index 000000000..a049ad1f5
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go
@@ -0,0 +1,191 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogsgrpc
+
+import (
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+ "go.opentelemetry.io/otel"
+)
+
+// Option applies an option to the gRPC driver.
+type Option interface {
+ applyGRPCOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
+ converted := make([]otlpconfig.GRPCOption, len(opts))
+ for i, o := range opts {
+ converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying export of logs batches that
+// failed to be received by the target endpoint.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ otlpconfig.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
+ return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC
+// connection just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note that by
+// default, client transport security is required unless WithInsecure is used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+ return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the exporter will connect to. If
+// unset, localhost:4317 will be used as a default.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithReconnectionPeriod set the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.ReconnectionPeriod = rp
+ return cfg
+ })}
+}
+
+func compressorToCompression(compressor string) otlpconfig.Compression {
+ if compressor == "gzip" {
+ return otlpconfig.GzipCompression
+ }
+
+ otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+ return otlpconfig.NoCompression
+}
+
+// WithCompressor sets the compressor for the gRPC client to use when sending
+// requests. It is the responsibility of the caller to ensure that the
+// compressor set has been registered with google.golang.org/grpc/encoding.
+// This can be done by encoding.RegisterCompressor. Some compressors
+// auto-register on import, such as gzip, which can be registered by calling
+// `import _ "google.golang.org/grpc/encoding/gzip"`.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+ return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTLSCredentials allows the connection to use TLS credentials when
+// talking to the server. It takes in grpc.TransportCredentials instead of say
+// a Certificate file or a tls.Certificate, because the retrieving of these
+// credentials can be done in many ways e.g. plain file, in code tls.Config or
+// by certificate rotation, so it is up to the caller to decide what to use.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.Logs.GRPCCredentials = creds
+ return cfg
+ })}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.ServiceConfig = serviceConfig
+ return cfg
+ })}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when making a
+// connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.DialOptions = opts
+ return cfg
+ })}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The grpcClient
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.GRPCConn = conn
+ return cfg
+ })}
+}
+
+// WithTimeout sets the max amount of time a grpcClient will attempt to export a
+// batch of logs. This takes precedence over any retry settings defined with
+// WithRetry; once this time limit has been reached, the export is abandoned
+// and the batch of logs is dropped.
+//
+// If unset, the default timeout will be set to 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that may be
+// returned by the target endpoint when exporting a batch of logs.
+//
+// If the target endpoint responds with not only a retryable error but also an
+// explicit backoff time in the response, that time takes precedence over
+// these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+ return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
+}
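
A sketch combining the gRPC options above; the endpoint, timeout, and retry values are illustrative, and the resulting client is assumed to be handed to the otlplogs exporter configuration:

package main

import (
	"time"

	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc"
)

func main() {
	// Plaintext connection to a local collector with a custom export
	// timeout and a bounded retry window; all values are illustrative.
	client := otlplogsgrpc.NewClient(
		otlplogsgrpc.WithEndpoint("localhost:4317"),
		otlplogsgrpc.WithInsecure(),
		otlplogsgrpc.WithTimeout(20*time.Second),
		otlplogsgrpc.WithRetry(otlplogsgrpc.RetryConfig{
			Enabled:         true,
			InitialInterval: 5 * time.Second,
			MaxInterval:     30 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	_ = client // typically wrapped with otlplogs.WithClient(client)
}
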
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go
new file mode 100644
index 000000000..c25d0a051
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go
@@ -0,0 +1,371 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogshttp
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+ "go.opentelemetry.io/otel"
+ collogspb "go.opentelemetry.io/proto/otlp/collector/logs/v1"
+ logspb "go.opentelemetry.io/proto/otlp/logs/v1"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+)
+
+const contentTypeProto = "application/x-protobuf"
+const contentTypeJson = "application/json"
+
+var gzPool = sync.Pool{
+ New: func() interface{} {
+ w := gzip.NewWriter(io.Discard)
+ return w
+ },
+}
+
+// Keep it in sync with golang's DefaultTransport from net/http! We
+// have our own copy to avoid handling a situation where the
+// DefaultTransport is overwritten with some different implementation
+// of http.RoundTripper or is modified by another package.
+var ourTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+}
+
+type httpClient struct {
+ name string
+ cfg otlpconfig.SignalConfig
+ generalCfg otlpconfig.Config
+ requestFunc retry.RequestFunc
+ client *http.Client
+ stopCh chan struct{}
+ stopOnce sync.Once
+}
+
+// NewClient creates a new HTTP logs client.
+func NewClient(opts ...Option) *httpClient {
+
+ cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...)
+
+ // Fall back to the default protocol if an unsupported one was provided
+ if cfg.Logs.Protocol != otlpconfig.ExporterProtocolHttpJson && cfg.Logs.Protocol != otlpconfig.ExporterProtocolHttpProtobuf {
+ cfg.Logs.Protocol = otlpconfig.ExporterProtocolHttpProtobuf
+ }
+
+ client := &http.Client{
+ Transport: ourTransport,
+ Timeout: cfg.Logs.Timeout,
+ }
+ if cfg.Logs.TLSCfg != nil {
+ transport := ourTransport.Clone()
+ transport.TLSClientConfig = cfg.Logs.TLSCfg
+ client.Transport = transport
+ }
+
+ stopCh := make(chan struct{})
+ return &httpClient{
+ name: "logs",
+ cfg: cfg.Logs,
+ generalCfg: cfg,
+ requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
+ stopCh: stopCh,
+ client: client,
+ }
+}
+
+// Start does nothing in an HTTP client.
+func (d *httpClient) Start(ctx context.Context) error {
+ // nothing to do
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ return nil
+}
+
+// Stop shuts down the httpClient and interrupts any in-flight requests.
+func (d *httpClient) Stop(ctx context.Context) error {
+ d.stopOnce.Do(func() {
+ close(d.stopCh)
+ })
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ return nil
+}
+
+// retryableError represents a request failure that can be retried.
+type retryableError struct {
+ throttle int64
+}
+
+// evaluate returns if err is retry-able. If it is and it includes an explicit
+// throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+ if err == nil {
+ return false, 0
+ }
+
+ rErr, ok := err.(retryableError)
+ if !ok {
+ return false, 0
+ }
+
+ return true, time.Duration(rErr.throttle)
+}
+
+func (d *httpClient) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
+ // Unify the parent context Done signal with the httpClient's stop
+ // channel.
+ ctx, cancel := context.WithCancel(ctx)
+ go func(ctx context.Context, cancel context.CancelFunc) {
+ select {
+ case <-ctx.Done():
+ // Nothing to do, either cancelled or deadline
+ // happened.
+ case <-d.stopCh:
+ cancel()
+ }
+ }(ctx, cancel)
+ return ctx, cancel
+}
+
+func (d *httpClient) newRequest(body []byte) (request, error) {
+ u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath}
+ r, err := http.NewRequest(http.MethodPost, u.String(), nil)
+ if err != nil {
+ return request{Request: r}, err
+ }
+
+ r.Header.Set("User-Agent", otlpconfig.GetUserAgentHeader())
+
+ for k, v := range d.cfg.Headers {
+ r.Header.Set(k, v)
+ }
+ switch d.cfg.Protocol {
+ case otlpconfig.ExporterProtocolHttpJson:
+ r.Header.Set("Content-Type", contentTypeJson)
+ default:
+ r.Header.Set("Content-Type", contentTypeProto)
+ }
+
+ req := request{Request: r}
+ switch Compression(d.cfg.Compression) {
+ case NoCompression:
+ r.ContentLength = (int64)(len(body))
+ req.bodyReader = bodyReader(body)
+ case GzipCompression:
+ // Ensure the content length is not used.
+ r.ContentLength = -1
+ r.Header.Set("Content-Encoding", "gzip")
+
+ gz := gzPool.Get().(*gzip.Writer)
+ defer gzPool.Put(gz)
+
+ var b bytes.Buffer
+ gz.Reset(&b)
+
+ if _, err := gz.Write(body); err != nil {
+ return req, err
+ }
+ // Close needs to be called to ensure the body is fully written.
+ if err := gz.Close(); err != nil {
+ return req, err
+ }
+
+ req.bodyReader = bodyReader(b.Bytes())
+ }
+
+ return req, nil
+}
+
+// bodyReader returns a closure returning a new reader for buf.
+func bodyReader(buf []byte) func() io.ReadCloser {
+ return func() io.ReadCloser {
+ return io.NopCloser(bytes.NewReader(buf))
+ }
+}
+
+func (d *httpClient) getScheme() string {
+ if d.cfg.Insecure {
+ return "http"
+ }
+ return "https"
+}
+
+// request wraps an http.Request with a resettable body reader.
+type request struct {
+ *http.Request
+
+ // bodyReader allows the same body to be used for multiple requests.
+ bodyReader func() io.ReadCloser
+}
+
+// reset reinitializes the request Body and uses ctx for the request.
+func (r *request) reset(ctx context.Context) {
+ r.Body = r.bodyReader()
+ r.Request = r.Request.WithContext(ctx)
+}
+
+// newResponseError returns a retryableError and will extract any explicit
+// throttle delay contained in headers.
+func newResponseError(header http.Header) error {
+ var rErr retryableError
+ if s, ok := header["Retry-After"]; ok {
+ if t, err := strconv.ParseInt(s[0], 10, 64); err == nil {
+ rErr.throttle = t
+ }
+ }
+ return rErr
+}
+
+func (e retryableError) Error() string {
+ return "retry-able request failure"
+}
+
+func (d *httpClient) UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error {
+
+ // Export the logs using the OTLP logs exporter httpClient
+ exportLogs := &collogspb.ExportLogsServiceRequest{
+ ResourceLogs: protoLogs,
+ }
+
+ // Serialize the OTLP logs payload
+ var rawRequest []byte
+ switch d.cfg.Protocol {
+ case otlpconfig.ExporterProtocolHttpJson:
+ rawRequest, _ = protojson.MarshalOptions{
+ UseProtoNames: false,
+ }.Marshal(exportLogs)
+ default:
+ rawRequest, _ = proto.Marshal(exportLogs)
+ }
+
+ ctx, cancel := d.contextWithStop(ctx)
+ defer cancel()
+
+ request, err := d.newRequest(rawRequest)
+ if err != nil {
+ return err
+ }
+
+ return d.requestFunc(ctx, func(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ request.reset(ctx)
+ resp, err := d.client.Do(request.Request)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil && resp.Body != nil {
+ defer func() {
+ if err := resp.Body.Close(); err != nil {
+ otel.Handle(err)
+ }
+ }()
+ }
+
+ switch sc := resp.StatusCode; {
+ case sc >= 200 && sc <= 299:
+ // Success, do not retry.
+ // Read the partial success message, if any.
+ var respData bytes.Buffer
+ if _, err := io.Copy(&respData, resp.Body); err != nil {
+ return err
+ }
+
+ if respData.Len() != 0 {
+ var respProto collogspb.ExportLogsServiceResponse
+ switch d.cfg.Protocol {
+ case otlpconfig.ExporterProtocolHttpJson:
+ if err := protojson.Unmarshal(respData.Bytes(), &respProto); err != nil {
+ return err
+ }
+ default:
+ if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
+ return err
+ }
+ }
+
+ // TODO: partialsuccess can't be handled properly by OTEL as current otlp.internal.PartialSuccess is custom
+ // need to have that interface in official OTEL otlp.internal package
+ if respProto.PartialSuccess != nil {
+ msg := respProto.PartialSuccess.GetErrorMessage()
+ n := respProto.PartialSuccess.GetRejectedLogRecords()
+ if n != 0 || msg != "" {
+ err := internal.LogRecordPartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ }
+ return nil
+ case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable:
+ // Retry-able failures. Drain the body to reuse the connection.
+ if _, err := io.Copy(io.Discard, resp.Body); err != nil {
+ otel.Handle(err)
+ }
+ return newResponseError(resp.Header)
+ default:
+ buffer := make([]byte, 4096)
+ _, _ = resp.Body.Read(buffer)
+ if len(buffer) == 0 {
+ return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status)
+ }
+ return fmt.Errorf("failed to send to %s: %s\n%s", request.URL, resp.Status, buffer)
+ }
+ })
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (d *httpClient) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Endpoint string
+ Insecure bool
+ }{
+ Type: string(d.cfg.Protocol),
+ Endpoint: d.cfg.Endpoint,
+ Insecure: d.cfg.Insecure,
+ }
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go
new file mode 100644
index 000000000..7d0e4da00
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go
@@ -0,0 +1,127 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogshttp
+
+import (
+ "crypto/tls"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+ "time"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression otlpconfig.Compression
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression = Compression(otlpconfig.NoCompression)
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression = Compression(otlpconfig.GzipCompression)
+)
+
+// Option applies an option to the HTTP httpClient.
+type Option interface {
+ applyHTTPOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption {
+ converted := make([]otlpconfig.HTTPOption, len(opts))
+ for i, o := range opts {
+ converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying batches in case of export
+// failure using an exponential backoff.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ otlpconfig.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config {
+ return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint allows one to set the address of the collector
+// endpoint that the driver will use to send logs. If
+// unset, it will instead try to use
+// the default endpoint (localhost:4318). Note that the endpoint
+// must not contain any URL path.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithJsonProtocol applies the http/json protocol to the HTTP client.
+func WithJsonProtocol() Option {
+ return wrappedOption{otlpconfig.WithProtocol(otlpconfig.ExporterProtocolHttpJson)}
+}
+
+// WithProtobufProtocol applies the http/protobuf protocol to the HTTP client.
+func WithProtobufProtocol() Option {
+ return wrappedOption{otlpconfig.WithProtocol(otlpconfig.ExporterProtocolHttpProtobuf)}
+}
+
+// WithCompression tells the driver to compress the sent data.
+func WithCompression(compression Compression) Option {
+ return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
+}
+
+// WithURLPath allows one to override the default URL path used
+// for sending logs. If unset, default ("/v1/logs") will be used.
+func WithURLPath(urlPath string) Option {
+ return wrappedOption{otlpconfig.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig can be used to set up a custom TLS
+// configuration for the httpClient used to send payloads to the
+// collector. Use it if you want to use a custom certificate.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+ return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure tells the driver to connect to the collector using the
+// HTTP scheme, instead of HTTPS.
+func WithInsecure() Option {
+ return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithHeaders allows one to tell the driver to send additional HTTP
+// headers with the payloads. Specifying headers like Content-Length,
+// Content-Encoding and Content-Type may result in a broken driver.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTimeout tells the driver the max waiting time for the backend to process
+// each logs batch. If unset, the default will be 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry configures the retry policy for transient errors that may occur
+// when exporting logs. An exponential back-off algorithm is used to ensure
+// endpoints are not overwhelmed with retries. If unset, the default retry
+// policy will retry after 5 seconds and increase exponentially after each
+// error for a total of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+ return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
+}
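
A sketch combining these HTTP options; the endpoint, header, and path values are purely illustrative:

package main

import (
	"time"

	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
)

func main() {
	client := otlplogshttp.NewClient(
		otlplogshttp.WithEndpoint("collector.example:4318"), // host:port only, no URL path
		otlplogshttp.WithURLPath("/v1/logs"),
		otlplogshttp.WithProtobufProtocol(),
		otlplogshttp.WithCompression(otlplogshttp.GzipCompression),
		otlplogshttp.WithHeaders(map[string]string{"x-tenant": "example"}),
		otlplogshttp.WithTimeout(10 * time.Second),
	)
	_ = client // typically wrapped with otlplogs.WithClient(client)
}
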
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go
new file mode 100644
index 000000000..801f40f20
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // Package global import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "log"
+ "os"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel api and sdk to provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger unsafe.Pointer
+
+func init() {
+ SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
+
+// SetLogger overrides the globalLogger with l.
+//
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`
+// To see Info messages use a logger with `l.V(4).Enabled() == true`
+// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+ atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
+}
+
+func getLogger() logr.Logger {
+ return *(*logr.Logger)(atomic.LoadPointer(&globalLogger))
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+ getLogger().V(4).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+ getLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+ getLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+ getLogger().V(1).Info(msg, keysAndValues...)
+}
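
A minimal sketch of the verbosity thresholds described above, assuming code running inside this module (the package is internal, so outside callers would have to go through whatever public wrapper the library exposes, which is not part of this file):

package main

import (
	"log"
	"os"

	"github.com/agoda-com/opentelemetry-logs-go/internal/global"
	"github.com/go-logr/stdr"
)

func main() {
	// stdr verbosity gates the V(n) levels used above: 1 surfaces Warn,
	// 4 adds Info, 8 adds Debug. Error is always emitted.
	stdr.SetVerbosity(8)
	global.SetLogger(stdr.New(log.New(os.Stderr, "otel-logs: ", log.LstdFlags)))

	global.Warn("warn message")
	global.Info("info message")
	global.Debug("debug message")
}
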
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go
new file mode 100644
index 000000000..ab78fd29b
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go
@@ -0,0 +1,128 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package global
+
+import (
+ "github.com/agoda-com/opentelemetry-logs-go/logs"
+ "sync"
+ "sync/atomic"
+)
+
+// loggerProvider is a placeholder for a configured SDK LoggerProvider.
+//
+// All LoggerProvider functionality is forwarded to a delegate once
+// configured.
+type loggerProvider struct {
+ mtx sync.Mutex
+ loggers map[il]*logger
+ delegate logs.LoggerProvider
+}
+
+// Compile-time guarantee that loggerProvider implements the LoggerProvider
+// interface.
+var _ logs.LoggerProvider = &loggerProvider{}
+
+// setDelegate configures p to delegate all LoggerProvider functionality to
+// provider.
+//
+// All Loggers provided prior to this function call are switched out to be
+// Loggers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *loggerProvider) setDelegate(provider logs.LoggerProvider) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ p.delegate = provider
+
+ if len(p.loggers) == 0 {
+ return
+ }
+
+ for _, t := range p.loggers {
+ t.setDelegate(provider)
+ }
+
+ p.loggers = nil
+}
+
+// Logger implements LoggerProvider.
+func (p *loggerProvider) Logger(name string, opts ...logs.LoggerOption) logs.Logger {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ if p.delegate != nil {
+ return p.delegate.Logger(name, opts...)
+ }
+
+ // At this moment it is guaranteed that no sdk is installed, save the logger in the loggers map.
+
+ c := logs.NewLoggerConfig(opts...)
+ key := il{
+ name: name,
+ version: c.InstrumentationVersion(),
+ }
+
+ if p.loggers == nil {
+ p.loggers = make(map[il]*logger)
+ }
+
+ if val, ok := p.loggers[key]; ok {
+ return val
+ }
+
+ t := &logger{name: name, opts: opts, provider: p}
+ p.loggers[key] = t
+ return t
+}
+
+type il struct {
+ name string
+ version string
+}
+
+// logger is a placeholder for a logs.Logger.
+//
+// All Logger functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopLogger.
+type logger struct {
+ name string
+ opts []logs.LoggerOption
+ provider *loggerProvider
+
+ delegate atomic.Value
+}
+
+// Compile-time guarantee that logger implements the logs.Logger interface.
+var _ logs.Logger = &logger{}
+
+func (t *logger) Emit(logRecord logs.LogRecord) {
+ delegate := t.delegate.Load()
+ if delegate != nil {
+ delegate.(logs.Logger).Emit(logRecord)
+ }
+}
+
+// setDelegate configures t to delegate all Logger functionality to Loggers
+// created by provider.
+//
+// All subsequent calls to the Logger methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (t *logger) setDelegate(provider logs.LoggerProvider) {
+ t.delegate.Store(provider.Logger(t.name, t.opts...))
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go
new file mode 100644
index 000000000..53219030f
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go
@@ -0,0 +1,66 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package global
+
+import (
+ "errors"
+ "github.com/agoda-com/opentelemetry-logs-go/logs"
+ "sync"
+ "sync/atomic"
+)
+
+type (
+ loggerProviderHolder struct {
+ lp logs.LoggerProvider
+ }
+)
+
+var (
+ globalOtelLogger = defaultLoggerValue()
+
+ delegateLoggerOnce sync.Once
+)
+
+// LoggerProvider is the internal implementation for global.LoggerProvider.
+func LoggerProvider() logs.LoggerProvider {
+ return globalOtelLogger.Load().(loggerProviderHolder).lp
+}
+
+// SetLoggerProvider is the internal implementation for global.SetLoggerProvider.
+func SetLoggerProvider(lp logs.LoggerProvider) {
+ current := LoggerProvider()
+
+ if _, cOk := current.(*loggerProvider); cOk {
+ if _, tpOk := lp.(*loggerProvider); tpOk && current == lp {
+ // Do not assign the default delegating LoggerProvider to delegate
+ // to itself.
+ Error(
+ errors.New("no delegate configured in logger provider"),
+ "Setting logger provider to it's current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ globalOtelLogger.Store(loggerProviderHolder{lp: lp})
+}
+
+func defaultLoggerValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(loggerProviderHolder{lp: &loggerProvider{}})
+ return v
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go
new file mode 100644
index 000000000..490ba903f
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go
@@ -0,0 +1,35 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otel
+
+import (
+ "github.com/agoda-com/opentelemetry-logs-go/internal/global"
+ "github.com/agoda-com/opentelemetry-logs-go/logs"
+)
+
+// GetLoggerProvider returns the registered global logger provider.
+// If none is registered then an instance of NoopLoggerProvider is returned.
+//
+// loggerProvider := otel.GetLoggerProvider()
+func GetLoggerProvider() logs.LoggerProvider {
+ return global.LoggerProvider()
+}
+
+// SetLoggerProvider registers `lp` as the global logger provider.
+func SetLoggerProvider(lp logs.LoggerProvider) {
+ global.SetLoggerProvider(lp)
+}
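
For illustration, the global registration round trip using the no-op provider defined in the logs package; a real setup would register an SDK LoggerProvider instead, which is outside this file:

package main

import (
	otel "github.com/agoda-com/opentelemetry-logs-go"
	"github.com/agoda-com/opentelemetry-logs-go/logs"
)

func main() {
	// Register a provider globally, then hand out loggers from it.
	otel.SetLoggerProvider(logs.NewNoopLoggerProvider())

	logger := otel.GetLoggerProvider().Logger(
		"instrumentation/example",
		logs.WithInstrumentationVersion("0.1.0"),
	)
	logger.Emit(logs.NewLogRecord(logs.LogRecordConfig{})) // no-op with this provider
}
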
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go
new file mode 100644
index 000000000..b8ca22273
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go
@@ -0,0 +1,89 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs // Package logs import "github.com/agoda-com/opentelemetry-logs-go/logs"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// LoggerConfig is a group of options for a Logger.
+type LoggerConfig struct {
+ instrumentationVersion string
+ // Schema URL of the telemetry emitted by the Logger.
+ schemaURL string
+ attrs attribute.Set
+}
+
+// InstrumentationVersion returns the version of the library providing instrumentation.
+func (t *LoggerConfig) InstrumentationVersion() string {
+ return t.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (t *LoggerConfig) InstrumentationAttributes() attribute.Set {
+ return t.attrs
+}
+
+// SchemaURL returns the Schema URL of the telemetry emitted by the Logger.
+func (t *LoggerConfig) SchemaURL() string {
+ return t.schemaURL
+}
+
+// NewLoggerConfig applies all the options to a returned LoggerConfig.
+func NewLoggerConfig(options ...LoggerOption) LoggerConfig {
+ var config LoggerConfig
+ for _, option := range options {
+ config = option.apply(config)
+ }
+ return config
+}
+
+// LoggerOption applies an option to a LoggerConfig.
+type LoggerOption interface {
+ apply(LoggerConfig) LoggerConfig
+}
+
+type loggerOptionFunc func(LoggerConfig) LoggerConfig
+
+func (fn loggerOptionFunc) apply(cfg LoggerConfig) LoggerConfig {
+ return fn(cfg)
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) LoggerOption {
+ return loggerOptionFunc(func(cfg LoggerConfig) LoggerConfig {
+ cfg.instrumentationVersion = version
+ return cfg
+ })
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) LoggerOption {
+ return loggerOptionFunc(func(config LoggerConfig) LoggerConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
+// WithSchemaURL sets the schema URL for the Logger.
+func WithSchemaURL(schemaURL string) LoggerOption {
+ return loggerOptionFunc(func(cfg LoggerConfig) LoggerConfig {
+ cfg.schemaURL = schemaURL
+ return cfg
+ })
+}
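
A small sketch of the option plumbing above, showing how a LoggerProvider implementation might collapse the variadic options it receives into a LoggerConfig; the version, schema URL, and attribute values are illustrative:

package main

import (
	"fmt"

	"github.com/agoda-com/opentelemetry-logs-go/logs"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Inside a LoggerProvider.Logger implementation, the passed options
	// are typically materialized like this.
	cfg := logs.NewLoggerConfig(
		logs.WithInstrumentationVersion("0.1.0"),
		logs.WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
		logs.WithInstrumentationAttributes(attribute.String("library", "example")),
	)
	fmt.Println(cfg.InstrumentationVersion(), cfg.SchemaURL())
}
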
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go
new file mode 100644
index 000000000..db5f43546
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go
@@ -0,0 +1,68 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package logs provides an implementation of the logging part of the
+OpenTelemetry API.
+
+This package defines a log backend API. The API is not intended to be called by application developers directly.
+It is provided for logging library authors to build log appenders, which use this API to bridge between existing
+logging libraries and the OpenTelemetry log data model.
+
+To participate in logging a LogRecord needs to be created for the
+operation being performed as part of a logging workflow. In its simplest form:
+
+ var logger logger.Logger
+
+ func init() {
+ logger = otel.Logger()
+ }
+
+ func operation(ctx context.Context) {
+ logRecord := logger.NewLogRecord(..)
+ logger.Emit(logRecord)
+ }
+
+A Logger is unique to the instrumentation and is used to create Logs.
+Instrumentation should be designed to accept a LoggerProvider from which it
+can create its own unique Logger. Alternatively, the registered global
+LoggerProvider from the github.com/agoda-com/opentelemetry-logs-go package can be used as
+a default.
+
+ const (
+ name = "instrumentation/package/name"
+ version = "0.1.0"
+ )
+
+ type Instrumentation struct {
+ logger logs.Logger
+ }
+
+ func NewInstrumentation(lp logs.LoggerProvider) *Instrumentation {
+ if lp == nil {
+ lp = otel.GetLoggerProvider()
+ }
+ return &Instrumentation{
+ logger: lp.Logger(name, logs.WithInstrumentationVersion(version)),
+ }
+ }
+
+ func operation(ctx context.Context, inst *Instrumentation) {
+
+ // ...
+ }
+*/
+package logs // import "github.com/agoda-com/opentelemetry-logs-go/logs"
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go
new file mode 100644
index 000000000..cfe454e4b
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go
@@ -0,0 +1,167 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+ "time"
+)
+
+// LogRecordConfig contains mutable fields usable for constructing
+// an immutable LogRecord.
+type LogRecordConfig struct {
+ Timestamp *time.Time
+ ObservedTimestamp time.Time
+ TraceId *trace.TraceID
+ SpanId *trace.SpanID
+ TraceFlags *trace.TraceFlags
+ SeverityText *string
+ SeverityNumber *SeverityNumber
+ Body *string
+ Resource *resource.Resource
+ InstrumentationScope *instrumentation.Scope
+ Attributes *[]attribute.KeyValue
+}
+
+// NewLogRecord constructs a LogRecord using values from the provided
+// LogRecordConfig.
+func NewLogRecord(config LogRecordConfig) LogRecord {
+ return LogRecord{
+ timestamp: config.Timestamp,
+ observedTimestamp: config.ObservedTimestamp,
+ traceId: config.TraceId,
+ spanId: config.SpanId,
+ traceFlags: config.TraceFlags,
+ severityText: config.SeverityText,
+ severityNumber: config.SeverityNumber,
+ body: config.Body,
+ resource: config.Resource,
+ instrumentationScope: config.InstrumentationScope,
+ attributes: config.Attributes,
+ }
+}
+
+// LogRecord is an implementation of the OpenTelemetry Log API
+// representing the individual component of a log.
+// see https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition
+type LogRecord struct {
+ timestamp *time.Time
+ observedTimestamp time.Time
+ traceId *trace.TraceID
+ spanId *trace.SpanID
+ traceFlags *trace.TraceFlags
+ severityText *string
+ severityNumber *SeverityNumber
+ body *string
+ resource *resource.Resource
+ instrumentationScope *instrumentation.Scope
+ attributes *[]attribute.KeyValue
+}
+
+func (l LogRecord) Timestamp() *time.Time { return l.timestamp }
+func (l LogRecord) ObservedTimestamp() time.Time { return l.observedTimestamp }
+func (l LogRecord) TraceId() *trace.TraceID { return l.traceId }
+func (l LogRecord) SpanId() *trace.SpanID { return l.spanId }
+func (l LogRecord) TraceFlags() *trace.TraceFlags { return l.traceFlags }
+func (l LogRecord) SeverityText() *string { return l.severityText }
+func (l LogRecord) SeverityNumber() *SeverityNumber { return l.severityNumber }
+func (l LogRecord) Body() *string { return l.body }
+func (l LogRecord) Resource() *resource.Resource { return l.resource }
+func (l LogRecord) InstrumentationScope() *instrumentation.Scope { return l.instrumentationScope }
+func (l LogRecord) Attributes() *[]attribute.KeyValue { return l.attributes }
+func (l LogRecord) private() {}
+
+// SeverityNumber enumerates the possible values for LogRecord.SeverityNumber.
+type SeverityNumber int32
+
+const (
+ // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.
+ UNSPECIFIED SeverityNumber = 0
+ TRACE SeverityNumber = 1
+ TRACE2 SeverityNumber = 2
+ TRACE3 SeverityNumber = 3
+ TRACE4 SeverityNumber = 4
+ DEBUG SeverityNumber = 5
+ DEBUG2 SeverityNumber = 6
+ DEBUG3 SeverityNumber = 7
+ DEBUG4 SeverityNumber = 8
+ INFO SeverityNumber = 9
+ INFO2 SeverityNumber = 10
+ INFO3 SeverityNumber = 11
+ INFO4 SeverityNumber = 12
+ WARN SeverityNumber = 13
+ WARN2 SeverityNumber = 14
+ WARN3 SeverityNumber = 15
+ WARN4 SeverityNumber = 16
+ ERROR SeverityNumber = 17
+ ERROR2 SeverityNumber = 18
+ ERROR3 SeverityNumber = 19
+ ERROR4 SeverityNumber = 20
+ FATAL SeverityNumber = 21
+ FATAL2 SeverityNumber = 22
+ FATAL3 SeverityNumber = 23
+ FATAL4 SeverityNumber = 24
+)
+
+// Logger is the creator of Logs
+type Logger interface {
+ // Emit emits a log record
+ Emit(logRecord LogRecord)
+}
+
+// LoggerProvider provides Loggers that are used by instrumentation code to
+// log computational workflows.
+//
+// A LoggerProvider is the collection destination of the logs produced by the
+// Loggers it provides; it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Logs are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Loggers to instrument code.
+type LoggerProvider interface {
+ // Logger returns a unique Logger scoped to be used by instrumentation code
+ // to log computational workflows. The scope and identity of that
+ // instrumentation code is uniquely defined by the name and options passed.
+ //
+ // The passed name needs to uniquely identify instrumentation code.
+ // Therefore, it is recommended that name is the Go package name of the
+ // library providing instrumentation (note: not the code being
+ // instrumented). Instrumentation libraries can have multiple versions,
+ // therefore, the WithInstrumentationVersion option should be used to
+ // distinguish these different codebases. Additionally, instrumentation
+ // libraries may sometimes use logs to communicate different domains of
+ // workflow data (i.e. using logs to communicate workflow events only). If
+ // this is the case, the WithInstrumentationAttributes option should be used to
+ // uniquely identify Loggers that handle the different domains of workflow
+ // data.
+ //
+ // If the same name and options are passed multiple times, the same Logger
+ // will be returned (it is up to the implementation if this will be the
+ // same underlying instance of that Logger or not). It is not necessary to
+ // call this multiple times with the same name and options to get an
+ // up-to-date Logger. All implementations will ensure any LoggerProvider
+ // configuration changes are propagated to all provided Loggers.
+ //
+ // If name is empty, then an implementation defined default name will be
+ // used instead.
+ //
+ // This method is safe to call concurrently.
+ Logger(name string, options ...LoggerOption) Logger
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go
new file mode 100644
index 000000000..3f4bb1d2d
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go
@@ -0,0 +1,38 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+// NewNoopLoggerProvider returns an implementation of LoggerProvider that
+// performs no operations. The Loggers created from the returned
+// LoggerProvider also perform no operations.
+func NewNoopLoggerProvider() LoggerProvider {
+ return noopLoggerProvider{}
+}
+
+type noopLoggerProvider struct{}
+
+var _ LoggerProvider = noopLoggerProvider{}
+
+func (p noopLoggerProvider) Logger(string, ...LoggerOption) Logger {
+ return noopLogger{}
+}
+
+type noopLogger struct{}
+
+var _ Logger = noopLogger{}
+
+func (n noopLogger) Emit(logRecord LogRecord) {}
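The Logger and LoggerProvider contracts above, together with this no-op implementation, are enough to sketch how instrumentation code obtains and uses a Logger. The snippet below is a minimal, hypothetical example: the package path `github.com/example/mylib` is invented, and a zero-value `logs.LogRecord{}` is used only to exercise the no-op path.

```go
package main

import "github.com/agoda-com/opentelemetry-logs-go/logs"

func main() {
	// A no-op provider satisfies the LoggerProvider contract without exporting anything.
	provider := logs.NewNoopLoggerProvider()

	// The name should identify the instrumenting library, not the code being instrumented.
	logger := provider.Logger("github.com/example/mylib")

	// Emit accepts a logs.LogRecord; a zero value is enough for the no-op path.
	logger.Emit(logs.LogRecord{})
}
```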
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go
new file mode 100644
index 000000000..80fff0675
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go
@@ -0,0 +1,105 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package env
+
+import (
+ "os"
+ "strconv"
+)
+
+// Environment variable names.
+const (
+	// BatchLogsProcessorScheduleDelayKey is the delay interval in milliseconds between
+	// two consecutive exports (default: 5000).
+	BatchLogsProcessorScheduleDelayKey = "OTEL_BLRP_SCHEDULE_DELAY"
+	// BatchLogsProcessorExportTimeoutKey is the maximum allowed time in milliseconds to
+	// export data (default: 30000).
+	BatchLogsProcessorExportTimeoutKey = "OTEL_BLRP_EXPORT_TIMEOUT"
+	// BatchLogsProcessorMaxQueueSizeKey is the maximum queue size (default: 2048).
+	BatchLogsProcessorMaxQueueSizeKey = "OTEL_BLRP_MAX_QUEUE_SIZE"
+	// BatchLogsProcessorMaxExportBatchSizeKey is the maximum batch size (default: 512).
+	// Note: it must be less than or equal to
+	// BatchLogsProcessorMaxQueueSizeKey.
+	BatchLogsProcessorMaxExportBatchSizeKey = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
+)
+
+// firstInt returns the value of the first matching environment variable from
+// keys. If the value is not an integer or no match is found, defaultValue is
+// returned.
+func firstInt(defaultValue int, keys ...string) int {
+ for _, key := range keys {
+ value := os.Getenv(key)
+ if value == "" {
+ continue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ //envconfig.Info("Got invalid value, number value expected.", key, value)
+ return defaultValue
+ }
+
+ return intValue
+ }
+
+ return defaultValue
+}
+
+// IntEnvOr returns the int value of the environment variable with name key if
+// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned.
+func IntEnvOr(key string, defaultValue int) int {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ //global.Info("Got invalid value, number value expected.", key, value)
+ return defaultValue
+ }
+
+ return intValue
+}
+
+// BatchLogsProcessorScheduleDelay returns the environment variable value for
+// the OTEL_BLRP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
+// returned.
+func BatchLogsProcessorScheduleDelay(defaultValue int) int {
+ return IntEnvOr(BatchLogsProcessorScheduleDelayKey, defaultValue)
+}
+
+// BatchLogsProcessorExportTimeout returns the environment variable value for
+// the OTEL_BLRP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
+// returned.
+func BatchLogsProcessorExportTimeout(defaultValue int) int {
+ return IntEnvOr(BatchLogsProcessorExportTimeoutKey, defaultValue)
+}
+
+// BatchLogsProcessorMaxQueueSize returns the environment variable value for
+// the OTEL_BLRP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
+// returned.
+func BatchLogsProcessorMaxQueueSize(defaultValue int) int {
+ return IntEnvOr(BatchLogsProcessorMaxQueueSizeKey, defaultValue)
+}
+
+// BatchLogsProcessorMaxExportBatchSize returns the environment variable value for
+// the OTEL_BLRP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
+// is returned.
+func BatchLogsProcessorMaxExportBatchSize(defaultValue int) int {
+ return IntEnvOr(BatchLogsProcessorMaxExportBatchSizeKey, defaultValue)
+}
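These helpers are internal, but their effect is visible to SDK users: the `OTEL_BLRP_*` variables seed the batch processor defaults before any explicit options are applied. A minimal sketch follows; the values are illustrative, and a nil exporter is passed only to keep the example self-contained (the processor then performs no action).

```go
package main

import (
	"os"

	sdklogs "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
)

func main() {
	// Durations are in milliseconds, sizes in record counts, mirroring the keys above.
	os.Setenv("OTEL_BLRP_SCHEDULE_DELAY", "2000")        // flush every 2s instead of the 5s default
	os.Setenv("OTEL_BLRP_MAX_QUEUE_SIZE", "4096")        // buffer up to 4096 records
	os.Setenv("OTEL_BLRP_MAX_EXPORT_BATCH_SIZE", "1024") // export at most 1024 records per batch

	// The environment is read inside NewBatchLogRecordProcessor, before options are applied.
	_ = sdklogs.NewBatchLogRecordProcessor(nil)
}
```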
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go
new file mode 100644
index 000000000..0691f6ac6
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go
@@ -0,0 +1,435 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+ "context"
+ "github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env"
+ "go.opentelemetry.io/otel"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Defaults for BatchLogRecordProcessorOptions.
+const (
+ DefaultMaxQueueSize = 2048
+ DefaultScheduleDelay = 5000
+ DefaultExportTimeout = 30000
+ DefaultMaxExportBatchSize = 512
+)
+
+// BatchLogRecordProcessorOption configures a BatchLogRecordProcessor.
+type BatchLogRecordProcessorOption func(o *BatchLogRecordProcessorOptions)
+
+// BatchLogRecordProcessorOptions holds the configuration settings for a
+// BatchLogRecordProcessor.
+type BatchLogRecordProcessorOptions struct {
+ // MaxQueueSize is the maximum queue size to buffer logs for delayed processing. If the
+ // queue gets full it drops the logs. Use BlockOnQueueFull to change this behavior.
+ // The default value of MaxQueueSize is 2048.
+ MaxQueueSize int
+
+ // BatchTimeout is the maximum duration for constructing a batch. Processor
+ // forcefully sends available logs when timeout is reached.
+ // The default value of BatchTimeout is 5000 msec.
+ BatchTimeout time.Duration
+
+ // ExportTimeout specifies the maximum duration for exporting logs. If the timeout
+ // is reached, the export will be cancelled.
+ // The default value of ExportTimeout is 30000 msec.
+ ExportTimeout time.Duration
+
+ // MaxExportBatchSize is the maximum number of logs to process in a single batch.
+ // If there are more than one batch worth of logs then it processes multiple batches
+ // of logs one batch after the other without any delay.
+ // The default value of MaxExportBatchSize is 512.
+ MaxExportBatchSize int
+
+	// BlockOnQueueFull blocks the OnEmit method if the queue is full
+	// AND BlockOnQueueFull is set to true.
+	// The blocking option should be used carefully, as it can severely affect the
+	// performance of an application.
+ BlockOnQueueFull bool
+}
+
+// WithMaxQueueSize returns a BatchLogRecordProcessorOption that configures the
+// maximum queue size allowed for a BatchLogRecordProcessor.
+func WithMaxQueueSize(size int) BatchLogRecordProcessorOption {
+ return func(o *BatchLogRecordProcessorOptions) {
+ o.MaxQueueSize = size
+ }
+}
+
+// WithMaxExportBatchSize returns a BatchLogRecordProcessorOption that configures
+// the maximum export batch size allowed for a BatchLogRecordProcessor.
+func WithMaxExportBatchSize(size int) BatchLogRecordProcessorOption {
+ return func(o *BatchLogRecordProcessorOptions) {
+ o.MaxExportBatchSize = size
+ }
+}
+
+// WithBatchTimeout returns a BatchLogRecordProcessorOption that configures the
+// maximum delay allowed for a BatchLogRecordProcessor before it will export any
+// held log (whether the queue is full or not).
+func WithBatchTimeout(delay time.Duration) BatchLogRecordProcessorOption {
+ return func(o *BatchLogRecordProcessorOptions) {
+ o.BatchTimeout = delay
+ }
+}
+
+// WithExportTimeout returns a BatchLogRecordProcessorOption that configures the
+// amount of time a BatchLogRecordProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchLogRecordProcessorOption {
+ return func(o *BatchLogRecordProcessorOptions) {
+ o.ExportTimeout = timeout
+ }
+}
+
+// WithBlocking returns a BatchLogRecordProcessorOption that configures a
+// BatchLogRecordProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchLogRecordProcessorOption {
+ return func(o *BatchLogRecordProcessorOptions) {
+ o.BlockOnQueueFull = true
+ }
+}
+
+// batchLogRecordProcessor is a LogRecordProcessor that batches asynchronously-received
+// logs and sends them to a LogRecordExporter when a batch is complete.
+type batchLogRecordProcessor struct {
+ e LogRecordExporter
+ o BatchLogRecordProcessorOptions
+
+ queue chan ReadableLogRecord
+ dropped uint32
+
+ batch []ReadableLogRecord
+ batchMutex sync.Mutex
+ timer *time.Timer
+ stopWait sync.WaitGroup
+ stopOnce sync.Once
+ stopCh chan struct{}
+ stopped atomic.Bool
+}
+
+func (lrp *batchLogRecordProcessor) Shutdown(ctx context.Context) error {
+ var err error
+ lrp.stopOnce.Do(func() {
+ lrp.stopped.Store(true)
+ wait := make(chan struct{})
+ go func() {
+ close(lrp.stopCh)
+ lrp.stopWait.Wait()
+ if lrp.e != nil {
+ if err := lrp.e.Shutdown(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ close(wait)
+ }()
+ // Wait until the wait group is done or the context is cancelled
+ select {
+ case <-wait:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ })
+ return err
+}
+
+var _ LogRecordProcessor = (*batchLogRecordProcessor)(nil)
+
+// NewBatchLogRecordProcessor creates a new LogRecordProcessor that will send completed
+// log batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the logs processor will perform no action.
+// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#batching-processor
+func NewBatchLogRecordProcessor(exporter LogRecordExporter, options ...BatchLogRecordProcessorOption) LogRecordProcessor {
+ maxQueueSize := env.BatchLogsProcessorMaxQueueSize(DefaultMaxQueueSize)
+ maxExportBatchSize := env.BatchLogsProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+ if maxExportBatchSize > maxQueueSize {
+ if DefaultMaxExportBatchSize > maxQueueSize {
+ maxExportBatchSize = maxQueueSize
+ } else {
+ maxExportBatchSize = DefaultMaxExportBatchSize
+ }
+ }
+
+ o := BatchLogRecordProcessorOptions{
+ BatchTimeout: time.Duration(env.BatchLogsProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+ ExportTimeout: time.Duration(env.BatchLogsProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+ MaxQueueSize: maxQueueSize,
+ MaxExportBatchSize: maxExportBatchSize,
+ }
+ for _, opt := range options {
+ opt(&o)
+ }
+ blp := &batchLogRecordProcessor{
+ e: exporter,
+ o: o,
+ batch: make([]ReadableLogRecord, 0, o.MaxExportBatchSize),
+ timer: time.NewTimer(o.BatchTimeout),
+ queue: make(chan ReadableLogRecord, o.MaxQueueSize),
+ stopCh: make(chan struct{}),
+ }
+
+ blp.stopWait.Add(1)
+ go func() {
+ defer blp.stopWait.Done()
+ blp.processQueue()
+ blp.drainQueue()
+ }()
+
+ return blp
+}
+
+func (lrp *batchLogRecordProcessor) OnEmit(rol ReadableLogRecord) {
+
+	// Do not enqueue logs after Shutdown.
+ if lrp.stopped.Load() {
+ return
+ }
+ // Do not enqueue logs if we are just going to drop them.
+ if lrp.e == nil {
+ return
+ }
+
+ lrp.enqueue(rol)
+}
+
+type forceFlushLogs struct {
+ ReadableLogRecord
+ flushed chan struct{}
+}
+
+// processQueue removes logs from the `queue` channel until processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize
+// waiting up to BatchTimeout to form a batch.
+func (lrp *batchLogRecordProcessor) processQueue() {
+ defer lrp.timer.Stop()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ for {
+ select {
+ case <-lrp.stopCh:
+ return
+ case <-lrp.timer.C:
+ if err := lrp.exportLogs(ctx); err != nil {
+ otel.Handle(err)
+ }
+ case sd := <-lrp.queue:
+ if ffs, ok := sd.(forceFlushLogs); ok {
+ close(ffs.flushed)
+ continue
+ }
+ lrp.batchMutex.Lock()
+ lrp.batch = append(lrp.batch, sd)
+ shouldExport := len(lrp.batch) >= lrp.o.MaxExportBatchSize
+ lrp.batchMutex.Unlock()
+ if shouldExport {
+ if !lrp.timer.Stop() {
+ <-lrp.timer.C
+ }
+ if err := lrp.exportLogs(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ }
+ }
+}
+
+// drainQueue awaits any caller that had added to lrp.stopWait
+// to finish enqueueing, then exports the final batch.
+func (lrp *batchLogRecordProcessor) drainQueue() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ for {
+ select {
+ case sd := <-lrp.queue:
+ if sd == nil {
+ if err := lrp.exportLogs(ctx); err != nil {
+ otel.Handle(err)
+ }
+ return
+ }
+
+ lrp.batchMutex.Lock()
+ lrp.batch = append(lrp.batch, sd)
+ shouldExport := len(lrp.batch) == lrp.o.MaxExportBatchSize
+ lrp.batchMutex.Unlock()
+
+ if shouldExport {
+ if err := lrp.exportLogs(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ default:
+ close(lrp.queue)
+ }
+ }
+}
+
+// exportLogs is a subroutine of processing and draining the queue.
+func (lrp *batchLogRecordProcessor) exportLogs(ctx context.Context) error {
+ lrp.timer.Reset(lrp.o.BatchTimeout)
+
+ lrp.batchMutex.Lock()
+ defer lrp.batchMutex.Unlock()
+
+ if lrp.o.ExportTimeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, lrp.o.ExportTimeout)
+ defer cancel()
+ }
+
+ if l := len(lrp.batch); l > 0 {
+ //global.Debug("exporting logs", "count", len(lrp.batch), "total_dropped", atomic.LoadUint32(&lrp.dropped))
+ err := lrp.e.Export(ctx, lrp.batch)
+
+ // A new batch is always created after exporting, even if the batch failed to be exported.
+ //
+ // It is up to the exporter to implement any type of retry logic if a batch is failing
+ // to be exported, since it is specific to the protocol and backend being sent to.
+ lrp.batch = lrp.batch[:0]
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (lrp *batchLogRecordProcessor) enqueue(sd ReadableLogRecord) {
+ ctx := context.TODO()
+ if lrp.o.BlockOnQueueFull {
+ lrp.enqueueBlockOnQueueFull(ctx, sd)
+ } else {
+ lrp.enqueueDrop(ctx, sd)
+ }
+}
+
+// ForceFlush exports all ended logs that have not yet been exported.
+func (lrp *batchLogRecordProcessor) ForceFlush(ctx context.Context) error {
+
+ // Interrupt if context is already canceled.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+	// Do nothing after Shutdown.
+ if lrp.stopped.Load() {
+ return nil
+ }
+
+ var err error
+ if lrp.e != nil {
+ flushCh := make(chan struct{})
+ if lrp.enqueueBlockOnQueueFull(ctx, forceFlushLogs{flushed: flushCh}) {
+ select {
+ case <-flushCh:
+ // Processed any items in queue prior to ForceFlush being called
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ wait := make(chan error)
+ go func() {
+ wait <- lrp.exportLogs(ctx)
+ close(wait)
+ }()
+ // Wait until the export is finished or the context is cancelled/timed out
+ select {
+ case err = <-wait:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ }
+ return err
+}
+
+func recoverSendOnClosedChan() {
+ x := recover()
+ switch err := x.(type) {
+ case nil:
+ return
+ case runtime.Error:
+ if err.Error() == "send on closed channel" {
+ return
+ }
+ }
+ panic(x)
+}
+
+func (lrp *batchLogRecordProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadableLogRecord) bool {
+
+	// This ensures the send on lrp.queue below does not panic as the
+	// processor shuts down.
+ defer recoverSendOnClosedChan()
+
+ select {
+ case <-lrp.stopCh:
+ return false
+ default:
+ }
+
+ select {
+ case lrp.queue <- sd:
+ return true
+ case <-ctx.Done():
+ return false
+ }
+}
+
+func (lrp *batchLogRecordProcessor) enqueueDrop(ctx context.Context, ld ReadableLogRecord) bool {
+
+	// This ensures the send on lrp.queue below does not panic as the
+	// processor shuts down.
+ defer recoverSendOnClosedChan()
+
+ select {
+ case <-lrp.stopCh:
+ return false
+ default:
+ }
+
+ select {
+ case lrp.queue <- ld:
+ return true
+ default:
+ atomic.AddUint32(&lrp.dropped, 1)
+ }
+ return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Processor.
+func (lrp *batchLogRecordProcessor) MarshalLog() interface{} {
+ return struct {
+ Type string
+ LogRecordExporter LogRecordExporter
+ Config BatchLogRecordProcessorOptions
+ }{
+ Type: "BatchLogRecordProcessor",
+ LogRecordExporter: lrp.e,
+ Config: lrp.o,
+ }
+}
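The functional options above compose. The sketch below tunes the processor more aggressively; the numbers are illustrative only, and a nil exporter keeps it self-contained (the processor then performs no action).

```go
package main

import (
	"time"

	sdklogs "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
)

func main() {
	_ = sdklogs.NewBatchLogRecordProcessor(
		nil, // nil exporter: the processor performs no action
		sdklogs.WithMaxQueueSize(8192),
		sdklogs.WithMaxExportBatchSize(1024),
		sdklogs.WithBatchTimeout(2*time.Second),
		sdklogs.WithExportTimeout(10*time.Second),
		sdklogs.WithBlocking(), // block callers instead of dropping records when the queue is full
	)
}
```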
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go
new file mode 100644
index 000000000..295270847
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go
@@ -0,0 +1,53 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+ "context"
+)
+
+// LogRecordExporter is the interface implemented by log record exporters.
+// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter
+type LogRecordExporter interface {
+
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Export exports a batch of logs.
+ //
+ // This function is called synchronously, so there is no concurrency
+ // safety requirement. However, due to the synchronous calling pattern,
+ // it is critical that all timeouts and cancellations contained in the
+ // passed context must be honored.
+ //
+ // Any retry logic must be contained in this function. The SDK that
+ // calls this function will not implement any retry logic. All errors
+ // returned by this function are considered unrecoverable and will be
+ // reported to a configured error Handler.
+ Export(ctx context.Context, batch []ReadableLogRecord) error
+
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown notifies the exporter of a pending halt to operations. The
+ // exporter is expected to perform any cleanup or synchronization it
+ // requires while honoring all timeouts and cancellations contained in
+ // the passed context.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
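The exporter contract above can be satisfied by a very small custom type. The sketch below is a hypothetical stdout exporter (not part of the library) wired through the batch processor defined earlier; the batch size of 64 is arbitrary.

```go
package main

import (
	"context"
	"fmt"

	sdklogs "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
)

// stdoutExporter is a hypothetical, minimal exporter that prints log record bodies.
type stdoutExporter struct{}

func (e *stdoutExporter) Export(ctx context.Context, batch []sdklogs.ReadableLogRecord) error {
	for _, r := range batch {
		if body := r.Body(); body != nil {
			fmt.Println(*body)
		}
	}
	return nil
}

func (e *stdoutExporter) Shutdown(ctx context.Context) error { return nil }

var _ sdklogs.LogRecordExporter = (*stdoutExporter)(nil)

func main() {
	// Records emitted through this processor are printed in batches of up to 64.
	processor := sdklogs.NewBatchLogRecordProcessor(&stdoutExporter{}, sdklogs.WithMaxExportBatchSize(64))
	defer processor.Shutdown(context.Background())
}
```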
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go
new file mode 100644
index 000000000..c924ccef2
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go
@@ -0,0 +1,67 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+ "context"
+ "sync"
+)
+
+// LogRecordProcessor is an interface which allows hooks for LogRecord emitting.
+// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor
+type LogRecordProcessor interface {
+
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+	// OnEmit is called when a log record is emitted. It is called synchronously
+	// and hence should not block.
+ OnEmit(rol ReadableLogRecord)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown is called when the SDK shuts down. Any cleanup or release of
+ // resources held by the processor should be done in this call.
+ //
+	// Calls to OnEmit or ForceFlush after this has been called
+	// should be ignored.
+ //
+ // All timeouts and cancellations contained in ctx must be honored, this
+ // should not block indefinitely.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ForceFlush exports all ended logs to the configured Exporter that have not yet
+ // been exported. It should only be called when absolutely necessary, such as when
+ // using a FaaS provider that may suspend the process after an invocation, but before
+ // the Processor can export the completed logs.
+ ForceFlush(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type logRecordProcessorState struct {
+ lp LogRecordProcessor
+ state sync.Once
+}
+
+func newLogsProcessorState(lp LogRecordProcessor) *logRecordProcessorState {
+ return &logRecordProcessorState{lp: lp}
+}
+
+type logRecordProcessorStates []*logRecordProcessorState
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go
new file mode 100644
index 000000000..37eeb0675
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go
@@ -0,0 +1,191 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+ "github.com/agoda-com/opentelemetry-logs-go/logs"
+ "github.com/agoda-com/opentelemetry-logs-go/semconv"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+ "sync"
+ "time"
+)
+
+type logger struct {
+ provider *LoggerProvider
+ instrumentationScope instrumentation.Scope
+}
+
+var _ logs.Logger = &logger{}
+
+func (l logger) Emit(logRecord logs.LogRecord) {
+ lps := l.provider.getLogRecordProcessorStates()
+ if len(lps) == 0 {
+ return
+ }
+
+ pr, err := resource.Merge(l.provider.resource, logRecord.Resource())
+ if err != nil {
+ return
+ }
+
+ elr := &exportableLogRecord{
+ timestamp: logRecord.Timestamp(),
+ observedTimestamp: logRecord.ObservedTimestamp(),
+ traceId: logRecord.TraceId(),
+ spanId: logRecord.SpanId(),
+ traceFlags: logRecord.TraceFlags(),
+ severityText: logRecord.SeverityText(),
+ severityNumber: logRecord.SeverityNumber(),
+ body: logRecord.Body(),
+ resource: pr,
+ instrumentationScope: logRecord.InstrumentationScope(),
+ attributes: logRecord.Attributes(),
+ }
+
+ for _, lp := range lps {
+ lp.lp.OnEmit(elr)
+ }
+}
+
+// ReadableLogRecord Log structure
+// see https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition
+// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#readablelogrecord
+type ReadableLogRecord interface {
+ // Timestamp Time when the event occurred.
+ Timestamp() *time.Time
+ // ObservedTimestamp Time when the event was observed.
+ ObservedTimestamp() time.Time
+ // TraceId Request trace id.
+ TraceId() *trace.TraceID
+ // SpanId Request span id.
+ SpanId() *trace.SpanID
+ // TraceFlags W3C trace flag.
+ TraceFlags() *trace.TraceFlags
+ // SeverityText This is the original string representation of the severityNumber as it is known at the source
+ SeverityText() *string
+ // SeverityNumber Numerical value of the severityNumber.
+ SeverityNumber() *logs.SeverityNumber
+ // Body The body of the log record.
+ Body() *string
+ // Resource Describes the source of the log.
+ Resource() *resource.Resource
+ // InstrumentationScope returns information about the instrumentation
+ // scope that created the log.
+ InstrumentationScope() *instrumentation.Scope
+ // Attributes describe the aspects of the event.
+ Attributes() *[]attribute.KeyValue
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+}
+
+type ReadWriteLogRecord interface {
+ SetResource(resource *resource.Resource)
+	// RecordException records an exception using the given message, stacktrace, and type.
+ RecordException(*string, *string, *string)
+ ReadableLogRecord
+}
+
+// exportableLogRecord is an implementation of the OpenTelemetry Log API
+// representing the individual component of a log.
+type exportableLogRecord struct {
+ // mu protects the contents of this log.
+ mu sync.Mutex
+ timestamp *time.Time
+ observedTimestamp time.Time
+ traceId *trace.TraceID
+ spanId *trace.SpanID
+ traceFlags *trace.TraceFlags
+ severityText *string
+ severityNumber *logs.SeverityNumber
+ body *string
+ resource *resource.Resource
+ instrumentationScope *instrumentation.Scope
+ attributes *[]attribute.KeyValue
+}
+
+// newReadWriteLogRecord creates a new ReadWriteLogRecord.
+// This method may change in the future.
+func newReadWriteLogRecord(
+ ctx *trace.SpanContext,
+ body *string,
+ severityText *string,
+ severityNumber *logs.SeverityNumber,
+ resource *resource.Resource,
+ instrumentationScope *instrumentation.Scope,
+ attributes *[]attribute.KeyValue,
+ timestamp *time.Time) ReadWriteLogRecord {
+
+ traceId := ctx.TraceID()
+ spanId := ctx.SpanID()
+ traceFlags := ctx.TraceFlags()
+
+ return &exportableLogRecord{
+ timestamp: timestamp,
+ observedTimestamp: time.Now(),
+ traceId: &traceId,
+ spanId: &spanId,
+ traceFlags: &traceFlags,
+ severityText: severityText,
+ severityNumber: severityNumber,
+ body: body,
+ resource: resource,
+ instrumentationScope: instrumentationScope,
+ attributes: attributes,
+ }
+}
+
+func (r *exportableLogRecord) SetResource(resource *resource.Resource) { r.resource = resource }
+
+// RecordException helper to add Exception related information as attributes of Log Record
+// see https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#recording-an-exception
+func (r *exportableLogRecord) RecordException(message *string, stacktrace *string, exceptionType *string) {
+ if message == nil && exceptionType == nil {
+		// at least one of message or exceptionType must be present
+ return
+ }
+ if message != nil {
+ *r.attributes = append(*r.attributes, semconv.ExceptionMessage(*message))
+ }
+ if stacktrace != nil {
+ *r.attributes = append(*r.attributes, semconv.ExceptionStacktrace(*stacktrace))
+ }
+ if exceptionType != nil {
+ *r.attributes = append(*r.attributes, semconv.ExceptionType(*exceptionType))
+ }
+}
+
+func (r *exportableLogRecord) Timestamp() *time.Time { return r.timestamp }
+func (r *exportableLogRecord) ObservedTimestamp() time.Time { return r.observedTimestamp }
+func (r *exportableLogRecord) TraceId() *trace.TraceID { return r.traceId }
+
+func (r *exportableLogRecord) SpanId() *trace.SpanID { return r.spanId }
+func (r *exportableLogRecord) TraceFlags() *trace.TraceFlags { return r.traceFlags }
+func (r *exportableLogRecord) InstrumentationScope() *instrumentation.Scope {
+ return r.instrumentationScope
+}
+func (r *exportableLogRecord) SeverityText() *string { return r.severityText }
+func (r *exportableLogRecord) SeverityNumber() *logs.SeverityNumber { return r.severityNumber }
+func (r *exportableLogRecord) Body() *string { return r.body }
+func (r *exportableLogRecord) Resource() *resource.Resource { return r.resource }
+func (r *exportableLogRecord) Attributes() *[]attribute.KeyValue { return r.attributes }
+func (r *exportableLogRecord) private() {}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go
new file mode 100644
index 000000000..c0e41e7e2
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go
@@ -0,0 +1,274 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs // Package logs import "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+
+import (
+ "context"
+ "fmt"
+ "github.com/agoda-com/opentelemetry-logs-go/internal/global"
+ "github.com/agoda-com/opentelemetry-logs-go/logs"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "sync"
+ "sync/atomic"
+)
+
+const (
+ defaultLoggerName = "github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider"
+)
+
+// loggerProviderConfig Configuration for Logger Provider
+type loggerProviderConfig struct {
+ processors []LogRecordProcessor
+ // resource contains attributes representing an entity that produces telemetry.
+ resource *resource.Resource
+}
+
+// LoggerProviderOption configures a LoggerProvider.
+type LoggerProviderOption interface {
+ apply(loggerProviderConfig) loggerProviderConfig
+}
+type loggerProviderOptionFunc func(loggerProviderConfig) loggerProviderConfig
+
+func (fn loggerProviderOptionFunc) apply(cfg loggerProviderConfig) loggerProviderConfig {
+ return fn(cfg)
+}
+
+// WithLogRecordProcessor will configure processor to process logs
+func WithLogRecordProcessor(logsProcessor LogRecordProcessor) LoggerProviderOption {
+ return loggerProviderOptionFunc(func(cfg loggerProviderConfig) loggerProviderConfig {
+ cfg.processors = append(cfg.processors, logsProcessor)
+ return cfg
+ })
+}
+
+// WithSyncer registers the exporter with the LoggerProvider using a
+// SimpleLogRecordProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleLogRecordProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computation resource usage overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e LogRecordExporter) LoggerProviderOption {
+ return WithLogRecordProcessor(NewSimpleLogRecordProcessor(e))
+}
+
+// WithBatcher registers the exporter with the LoggerProvider using a
+// BatchLogRecordProcessor configured with the passed opts.
+func WithBatcher(e LogRecordExporter, opts ...BatchLogRecordProcessorOption) LoggerProviderOption {
+ return WithLogRecordProcessor(NewBatchLogRecordProcessor(e, opts...))
+}
+
+// WithResource will configure OTLP logger with common resource attributes.
+//
+// Parameters:
+// r (*resource.Resource): resource attributes that will be added to every log record as resource-level tags
+func WithResource(r *resource.Resource) LoggerProviderOption {
+ return loggerProviderOptionFunc(func(cfg loggerProviderConfig) loggerProviderConfig {
+ var err error
+ cfg.resource, err = resource.Merge(resource.Environment(), r)
+ if err != nil {
+ otel.Handle(err)
+ }
+ return cfg
+ })
+}
+
+// LoggerProvider provides access to Loggers. The API is not intended to be called by application developers directly.
+// see https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#loggerprovider
+type LoggerProvider struct {
+ mu sync.Mutex
+ namedLogger map[instrumentation.Scope]*logger
+ //cfg loggerProviderConfig
+
+ logProcessors atomic.Pointer[logRecordProcessorStates]
+ isShutdown atomic.Bool
+
+ // These fields are not protected by the lock mu. They are assumed to be
+ // immutable after creation of the LoggerProvider.
+ resource *resource.Resource
+}
+
+var _ logs.LoggerProvider = &LoggerProvider{}
+
+func (lp *LoggerProvider) Logger(name string, opts ...logs.LoggerOption) logs.Logger {
+
+ if lp.isShutdown.Load() {
+ return logs.NewNoopLoggerProvider().Logger(name, opts...)
+ }
+
+ c := logs.NewLoggerConfig(opts...)
+
+ if name == "" {
+ name = defaultLoggerName
+ }
+
+ is := instrumentation.Scope{
+ Name: name,
+ Version: c.InstrumentationVersion(),
+ SchemaURL: c.SchemaURL(),
+ }
+
+ t, ok := func() (logs.Logger, bool) {
+ lp.mu.Lock()
+ defer lp.mu.Unlock()
+ // Must check the flag after acquiring the mutex to avoid returning a valid logger if Shutdown() ran
+ // after the first check above but before we acquired the mutex.
+ if lp.isShutdown.Load() {
+ return logs.NewNoopLoggerProvider().Logger(name, opts...), true
+ }
+
+ t, ok := lp.namedLogger[is]
+ if !ok {
+ t = &logger{
+ provider: lp,
+ instrumentationScope: is,
+ }
+ }
+ return t, ok
+ }()
+ if !ok {
+ // This code is outside the mutex to not hold the lock while calling third party logging code:
+ // - That code may do slow things like I/O, which would prolong the duration the lock is held,
+		//   slowing down all logging consumers.
+ // - Logging code may be instrumented with logging and deadlock because it could try
+ // acquiring the same non-reentrant mutex.
+ global.Info("Logger created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL)
+
+ }
+ return t
+}
+
+var _ logs.LoggerProvider = &LoggerProvider{}
+
+func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider {
+ o := loggerProviderConfig{}
+
+ o = applyLoggerProviderEnvConfigs(o)
+
+ for _, opt := range opts {
+ o = opt.apply(o)
+ }
+
+ o = ensureValidLoggerProviderConfig(o)
+
+ lp := &LoggerProvider{
+ namedLogger: make(map[instrumentation.Scope]*logger),
+ resource: o.resource,
+ }
+
+ global.Info("LoggerProvider created", "config", o)
+
+ lrpss := make(logRecordProcessorStates, 0, len(o.processors))
+ for _, lrp := range o.processors {
+ lrpss = append(lrpss, newLogsProcessorState(lrp))
+ }
+ lp.logProcessors.Store(&lrpss)
+
+ return lp
+
+}
+
+func (p *LoggerProvider) getLogRecordProcessorStates() logRecordProcessorStates {
+ return *(p.logProcessors.Load())
+}
+
+func (p *LoggerProvider) Shutdown(ctx context.Context) error {
+ // This check prevents deadlocks in case of recursive shutdown.
+ if p.isShutdown.Load() {
+ return nil
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown has already been done concurrently.
+ if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+ return nil
+ }
+
+ var retErr error
+ for _, lrps := range p.getLogRecordProcessorStates() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var err error
+ lrps.state.Do(func() {
+ err = lrps.lp.Shutdown(ctx)
+ })
+ if err != nil {
+ if retErr == nil {
+ retErr = err
+ } else {
+ // Poor man's list of errors
+ retErr = fmt.Errorf("%v; %v", retErr, err)
+ }
+ }
+ }
+ p.logProcessors.Store(&logRecordProcessorStates{})
+ return retErr
+
+}
+
+// ForceFlush immediately exports all logs that have not yet been exported for
+// all the registered log processors.
+func (p *LoggerProvider) ForceFlush(ctx context.Context) error {
+ lrpss := p.getLogRecordProcessorStates()
+ if len(lrpss) == 0 {
+ return nil
+ }
+
+ for _, lrps := range lrpss {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if err := lrps.lp.ForceFlush(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func applyLoggerProviderEnvConfigs(cfg loggerProviderConfig) loggerProviderConfig {
+ for _, opt := range loggerProviderOptionsFromEnv() {
+ cfg = opt.apply(cfg)
+ }
+
+ return cfg
+}
+
+func loggerProviderOptionsFromEnv() []LoggerProviderOption {
+ var opts []LoggerProviderOption
+
+ return opts
+}
+
+// ensureValidLoggerProviderConfig ensures that given LoggerProviderConfig is valid.
+func ensureValidLoggerProviderConfig(cfg loggerProviderConfig) loggerProviderConfig {
+
+ if cfg.resource == nil {
+ cfg.resource = resource.Default()
+ }
+
+ return cfg
+}
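Putting the pieces together, a provider is typically built once at startup, handed an exporter via WithBatcher, and shut down on exit. A minimal sketch follows; the nil exporter (a real OTLP exporter would be supplied in practice), the service name "checkout", and the package path are all invented for illustration.

```go
package main

import (
	"context"

	sdklogs "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	ctx := context.Background()

	provider := sdklogs.NewLoggerProvider(
		sdklogs.WithBatcher(nil), // with a nil exporter the batch processor performs no action
		sdklogs.WithResource(resource.NewSchemaless(attribute.String("service.name", "checkout"))),
	)
	defer provider.Shutdown(ctx)

	// Instrumentation code asks the provider for a named Logger and emits records through it.
	_ = provider.Logger("github.com/example/mylib")

	// ForceFlush drains any buffered records, for example before a FaaS runtime suspends the process.
	_ = provider.ForceFlush(ctx)
}
```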
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go
new file mode 100644
index 000000000..453da09e1
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go
@@ -0,0 +1,78 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+ "context"
+ "go.opentelemetry.io/otel"
+ "log"
+ "sync"
+)
+
+type simpleLogRecordProcessor struct {
+ exporterMu sync.Mutex
+ stopOnce sync.Once
+ exporter LogRecordExporter
+}
+
+func (lrp *simpleLogRecordProcessor) Shutdown(ctx context.Context) error {
+ return nil
+}
+
+func (lrp *simpleLogRecordProcessor) ForceFlush(ctx context.Context) error {
+ return nil
+}
+
+var _ LogRecordProcessor = (*simpleLogRecordProcessor)(nil)
+
+// NewSimpleLogRecordProcessor returns a new LogRecordProcessor that will synchronously
+// send completed logs to the exporter immediately.
+//
+// This LogRecordProcessor is not recommended for production use. The synchronous
+// nature of this LogRecordProcessor makes it good for testing, debugging, or
+// showing examples of other features, but it will be slow and have a high
+// computation resource usage overhead. The BatchLogRecordProcessor is recommended
+// for production use instead.
+func NewSimpleLogRecordProcessor(exporter LogRecordExporter) LogRecordProcessor {
+ slp := &simpleLogRecordProcessor{
+ exporter: exporter,
+ }
+ log.Printf("SimpleLogsProcessor is not recommended for production use, consider using BatchLogRecordProcessor instead.")
+
+ return slp
+}
+
+// OnEmit immediately exports a LogRecord.
+func (lrp *simpleLogRecordProcessor) OnEmit(rol ReadableLogRecord) {
+ lrp.exporterMu.Lock()
+ defer lrp.exporterMu.Unlock()
+
+ if err := lrp.exporter.Export(context.Background(), []ReadableLogRecord{rol}); err != nil {
+ otel.Handle(err)
+ }
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this LogRecord Processor.
+func (lrp *simpleLogRecordProcessor) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Exporter LogRecordExporter
+ }{
+ Type: "SimpleLogRecordProcessor",
+ Exporter: lrp.exporter,
+ }
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go
new file mode 100644
index 000000000..4f7215d71
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go
@@ -0,0 +1,70 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package semconv
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes Log Record attributes.
+// see also https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#attributes
+const (
+ // ExceptionMessageKey is the attribute Key conforming to the "exception.message"
+ // semantic conventions. It represents the exception message.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the "exception.stacktrace"
+	// semantic conventions. It represents the stack trace of the exception.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of exception
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message
+// Examples: Division by zero; Can't convert 'int' object to str implicitly
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents the exception
+// stacktrace
+// Examples: Exception in thread "main" java.lang.RuntimeException: ...
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the exception type
+// Examples: java.net.ConnectException; OSError
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
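These helpers just wrap attribute keys, so they can also be used on their own when building attribute sets. A small sketch with invented example values:

```go
package main

import (
	"fmt"

	"github.com/agoda-com/opentelemetry-logs-go/semconv"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Attributes describing a failed operation, following the exception semantic conventions.
	attrs := []attribute.KeyValue{
		semconv.ExceptionType("runtime.Error"),
		semconv.ExceptionMessage("integer divide by zero"),
		semconv.ExceptionStacktrace("main.divide(...)\n\tmain.go:42"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.AsString())
	}
}
```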
diff --git a/vendor/github.com/benbjohnson/clock/LICENSE b/vendor/github.com/benbjohnson/clock/LICENSE
new file mode 100644
index 000000000..ce212cb1c
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/benbjohnson/clock/README.md b/vendor/github.com/benbjohnson/clock/README.md
new file mode 100644
index 000000000..4f1f82fc6
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/README.md
@@ -0,0 +1,105 @@
+clock
+=====
+
+[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/benbjohnson/clock)
+
+Clock is a small library for mocking time in Go. It provides an interface
+around the standard library's [`time`][time] package so that the application
+can use the realtime clock while tests can use the mock clock.
+
+The module is currently maintained by @djmitche.
+
+[time]: https://pkg.go.dev/time
+
+## Usage
+
+### Realtime Clock
+
+Your application can maintain a `Clock` variable that will allow realtime and
+mock clocks to be interchangeable. For example, if you had an `Application` type:
+
+```go
+import "github.com/benbjohnson/clock"
+
+type Application struct {
+ Clock clock.Clock
+}
+```
+
+You could initialize it to use the realtime clock like this:
+
+```go
+var app Application
+app.Clock = clock.New()
+...
+```
+
+Then all timers and time-related functionality should be performed from the
+`Clock` variable.
+
+
+### Mocking time
+
+In your tests, you will want to use a `Mock` clock:
+
+```go
+import (
+ "testing"
+
+ "github.com/benbjohnson/clock"
+)
+
+func TestApplication_DoSomething(t *testing.T) {
+ mock := clock.NewMock()
+ app := Application{Clock: mock}
+ ...
+}
+```
+
+Now that you've initialized your application to use the mock clock, you can
+adjust the time programmatically. The mock clock always starts from the Unix
+epoch (midnight UTC on Jan 1, 1970).
+
+
+### Controlling time
+
+The mock clock provides the same functions that the standard library's `time`
+package provides. For example, to find the current time, you use the `Now()`
+function:
+
+```go
+mock := clock.NewMock()
+
+// Find the current time.
+mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC
+
+// Move the clock forward.
+mock.Add(2 * time.Hour)
+
+// Check the time again. It's 2 hours later!
+mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC
+```
+
+Timers and Tickers are also controlled by this same mock clock. They will only
+execute when the clock is moved forward:
+
+```go
+mock := clock.NewMock()
+count := 0
+
+// Kick off a timer to increment every 1 mock second.
+go func() {
+ ticker := mock.Ticker(1 * time.Second)
+ for {
+ <-ticker.C
+ count++
+ }
+}()
+runtime.Gosched()
+
+// Move the clock forward 10 seconds.
+mock.Add(10 * time.Second)
+
+// This prints 10.
+fmt.Println(count)
+```
diff --git a/vendor/github.com/benbjohnson/clock/clock.go b/vendor/github.com/benbjohnson/clock/clock.go
new file mode 100644
index 000000000..14ddc0795
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/clock.go
@@ -0,0 +1,422 @@
+package clock
+
+import (
+ "context"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Re-export of time.Duration
+type Duration = time.Duration
+
+// Clock represents an interface to the functions in the standard library time
+// package. Two implementations are available in the clock package. The first
+// is a real-time clock which simply wraps the time package's functions. The
+// second is a mock clock which will only change when
+// programmatically adjusted.
+type Clock interface {
+ After(d time.Duration) <-chan time.Time
+ AfterFunc(d time.Duration, f func()) *Timer
+ Now() time.Time
+ Since(t time.Time) time.Duration
+ Until(t time.Time) time.Duration
+ Sleep(d time.Duration)
+ Tick(d time.Duration) <-chan time.Time
+ Ticker(d time.Duration) *Ticker
+ Timer(d time.Duration) *Timer
+ WithDeadline(parent context.Context, d time.Time) (context.Context, context.CancelFunc)
+ WithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc)
+}
+
+// New returns an instance of a real-time clock.
+func New() Clock {
+ return &clock{}
+}
+
+// clock implements a real-time clock by simply wrapping the time package functions.
+type clock struct{}
+
+func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) }
+
+func (c *clock) AfterFunc(d time.Duration, f func()) *Timer {
+ return &Timer{timer: time.AfterFunc(d, f)}
+}
+
+func (c *clock) Now() time.Time { return time.Now() }
+
+func (c *clock) Since(t time.Time) time.Duration { return time.Since(t) }
+
+func (c *clock) Until(t time.Time) time.Duration { return time.Until(t) }
+
+func (c *clock) Sleep(d time.Duration) { time.Sleep(d) }
+
+func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) }
+
+func (c *clock) Ticker(d time.Duration) *Ticker {
+ t := time.NewTicker(d)
+ return &Ticker{C: t.C, ticker: t}
+}
+
+func (c *clock) Timer(d time.Duration) *Timer {
+ t := time.NewTimer(d)
+ return &Timer{C: t.C, timer: t}
+}
+
+func (c *clock) WithDeadline(parent context.Context, d time.Time) (context.Context, context.CancelFunc) {
+ return context.WithDeadline(parent, d)
+}
+
+func (c *clock) WithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc) {
+ return context.WithTimeout(parent, t)
+}
+
+// Mock represents a mock clock that only moves forward programmatically.
+// It can be preferable to a real-time clock when testing time-based functionality.
+type Mock struct {
+ // mu protects all other fields in this struct, and the data that they
+ // point to.
+ mu sync.Mutex
+
+ now time.Time // current time
+ timers clockTimers // tickers & timers
+}
+
+// NewMock returns an instance of a mock clock.
+// The current time of the mock clock on initialization is the Unix epoch.
+func NewMock() *Mock {
+ return &Mock{now: time.Unix(0, 0)}
+}
+
+// Add moves the current time of the mock clock forward by the specified duration.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Add(d time.Duration) {
+ // Calculate the final current time.
+ m.mu.Lock()
+ t := m.now.Add(d)
+ m.mu.Unlock()
+
+ // Continue to execute timers until there are no more before the new time.
+ for {
+ if !m.runNextTimer(t) {
+ break
+ }
+ }
+
+ // Ensure that we end with the new time.
+ m.mu.Lock()
+ m.now = t
+ m.mu.Unlock()
+
+ // Give a small buffer to make sure that other goroutines get handled.
+ gosched()
+}
+
+// Set sets the current time of the mock clock to a specific one.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Set(t time.Time) {
+ // Continue to execute timers until there are no more before the new time.
+ for {
+ if !m.runNextTimer(t) {
+ break
+ }
+ }
+
+ // Ensure that we end with the new time.
+ m.mu.Lock()
+ m.now = t
+ m.mu.Unlock()
+
+ // Give a small buffer to make sure that other goroutines get handled.
+ gosched()
+}
+
+// WaitForAllTimers advances the clock until all timers have expired and returns the resulting time.
+func (m *Mock) WaitForAllTimers() time.Time {
+ // Continue to execute timers until there are no more
+ for {
+ m.mu.Lock()
+ if len(m.timers) == 0 {
+ m.mu.Unlock()
+ return m.Now()
+ }
+
+ sort.Sort(m.timers)
+ next := m.timers[len(m.timers)-1].Next()
+ m.mu.Unlock()
+ m.Set(next)
+ }
+}
+
+// runNextTimer executes the next timer in chronological order and moves the
+// current time to the timer's next tick time. The timer is not executed if
+// its next tick time is after the max time. Returns true if a timer was executed.
+func (m *Mock) runNextTimer(max time.Time) bool {
+ m.mu.Lock()
+
+ // Sort timers by time.
+ sort.Sort(m.timers)
+
+ // If we have no more timers then exit.
+ if len(m.timers) == 0 {
+ m.mu.Unlock()
+ return false
+ }
+
+ // Retrieve next timer. Exit if next tick is after new time.
+ t := m.timers[0]
+ if t.Next().After(max) {
+ m.mu.Unlock()
+ return false
+ }
+
+ // Move "now" forward and unlock clock.
+ m.now = t.Next()
+ now := m.now
+ m.mu.Unlock()
+
+ // Execute timer.
+ t.Tick(now)
+ return true
+}
+
+// After waits for the duration to elapse and then sends the current time on the returned channel.
+func (m *Mock) After(d time.Duration) <-chan time.Time {
+ return m.Timer(d).C
+}
+
+// AfterFunc waits for the duration to elapse and then executes a function in its own goroutine.
+// A Timer is returned that can be stopped.
+func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ ch := make(chan time.Time, 1)
+ t := &Timer{
+ c: ch,
+ fn: f,
+ mock: m,
+ next: m.now.Add(d),
+ stopped: false,
+ }
+ m.timers = append(m.timers, (*internalTimer)(t))
+ return t
+}
+
+// Now returns the current wall time on the mock clock.
+func (m *Mock) Now() time.Time {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.now
+}
+
+// Since returns time since `t` using the mock clock's wall time.
+func (m *Mock) Since(t time.Time) time.Duration {
+ return m.Now().Sub(t)
+}
+
+// Until returns time until `t` using the mock clock's wall time.
+func (m *Mock) Until(t time.Time) time.Duration {
+ return t.Sub(m.Now())
+}
+
+// Sleep pauses the goroutine for the given duration on the mock clock.
+// The clock must be moved forward in a separate goroutine.
+func (m *Mock) Sleep(d time.Duration) {
+ <-m.After(d)
+}
+
+// Tick is a convenience function for Ticker().
+// It will return a ticker channel that cannot be stopped.
+func (m *Mock) Tick(d time.Duration) <-chan time.Time {
+ return m.Ticker(d).C
+}
+
+// Ticker creates a new instance of Ticker.
+func (m *Mock) Ticker(d time.Duration) *Ticker {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ ch := make(chan time.Time, 1)
+ t := &Ticker{
+ C: ch,
+ c: ch,
+ mock: m,
+ d: d,
+ next: m.now.Add(d),
+ }
+ m.timers = append(m.timers, (*internalTicker)(t))
+ return t
+}
+
+// Timer creates a new instance of Timer.
+func (m *Mock) Timer(d time.Duration) *Timer {
+ m.mu.Lock()
+ ch := make(chan time.Time, 1)
+ t := &Timer{
+ C: ch,
+ c: ch,
+ mock: m,
+ next: m.now.Add(d),
+ stopped: false,
+ }
+ m.timers = append(m.timers, (*internalTimer)(t))
+ now := m.now
+ m.mu.Unlock()
+ m.runNextTimer(now)
+ return t
+}
+
+// removeClockTimer removes a timer from m.timers. m.mu MUST be held
+// when this method is called.
+func (m *Mock) removeClockTimer(t clockTimer) {
+ for i, timer := range m.timers {
+ if timer == t {
+ copy(m.timers[i:], m.timers[i+1:])
+ m.timers[len(m.timers)-1] = nil
+ m.timers = m.timers[:len(m.timers)-1]
+ break
+ }
+ }
+ sort.Sort(m.timers)
+}
+
+// clockTimer represents an object with an associated start time.
+type clockTimer interface {
+ Next() time.Time
+ Tick(time.Time)
+}
+
+// clockTimers represents a list of sortable timers.
+type clockTimers []clockTimer
+
+func (a clockTimers) Len() int { return len(a) }
+func (a clockTimers) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) }
+
+// Timer represents a single event.
+// The current time will be sent on C, unless the timer was created by AfterFunc.
+type Timer struct {
+ C <-chan time.Time
+ c chan time.Time
+ timer *time.Timer // realtime impl, if set
+ next time.Time // next tick time
+ mock *Mock // mock clock, if set
+ fn func() // AfterFunc function, if set
+ stopped bool // True if stopped, false if running
+}
+
+// Stop turns off the timer.
+func (t *Timer) Stop() bool {
+ if t.timer != nil {
+ return t.timer.Stop()
+ }
+
+ t.mock.mu.Lock()
+ registered := !t.stopped
+ t.mock.removeClockTimer((*internalTimer)(t))
+ t.stopped = true
+ t.mock.mu.Unlock()
+ return registered
+}
+
+// Reset changes the expiry time of the timer.
+func (t *Timer) Reset(d time.Duration) bool {
+ if t.timer != nil {
+ return t.timer.Reset(d)
+ }
+
+ t.mock.mu.Lock()
+ t.next = t.mock.now.Add(d)
+ defer t.mock.mu.Unlock()
+
+ registered := !t.stopped
+ if t.stopped {
+ t.mock.timers = append(t.mock.timers, (*internalTimer)(t))
+ }
+
+ t.stopped = false
+ return registered
+}
+
+type internalTimer Timer
+
+func (t *internalTimer) Next() time.Time { return t.next }
+func (t *internalTimer) Tick(now time.Time) {
+ // a gosched() after ticking, to allow any consequences of the
+ // tick to complete
+ defer gosched()
+
+ t.mock.mu.Lock()
+ if t.fn != nil {
+ // defer function execution until the lock is released, and run it in its own goroutine
+ defer func() { go t.fn() }()
+ } else {
+ t.c <- now
+ }
+ t.mock.removeClockTimer((*internalTimer)(t))
+ t.stopped = true
+ t.mock.mu.Unlock()
+}
+
+// Ticker holds a channel that receives "ticks" at regular intervals.
+type Ticker struct {
+ C <-chan time.Time
+ c chan time.Time
+ ticker *time.Ticker // realtime impl, if set
+ next time.Time // next tick time
+ mock *Mock // mock clock, if set
+ d time.Duration // time between ticks
+ stopped bool // True if stopped, false if running
+}
+
+// Stop turns off the ticker.
+func (t *Ticker) Stop() {
+ if t.ticker != nil {
+ t.ticker.Stop()
+ } else {
+ t.mock.mu.Lock()
+ t.mock.removeClockTimer((*internalTicker)(t))
+ t.stopped = true
+ t.mock.mu.Unlock()
+ }
+}
+
+// Reset resets the ticker to a new duration.
+func (t *Ticker) Reset(dur time.Duration) {
+ if t.ticker != nil {
+ t.ticker.Reset(dur)
+ return
+ }
+
+ t.mock.mu.Lock()
+ defer t.mock.mu.Unlock()
+
+ if t.stopped {
+ t.mock.timers = append(t.mock.timers, (*internalTicker)(t))
+ t.stopped = false
+ }
+
+ t.d = dur
+ t.next = t.mock.now.Add(dur)
+}
+
+type internalTicker Ticker
+
+func (t *internalTicker) Next() time.Time { return t.next }
+func (t *internalTicker) Tick(now time.Time) {
+ select {
+ case t.c <- now:
+ default:
+ }
+ t.mock.mu.Lock()
+ t.next = now.Add(t.d)
+ t.mock.mu.Unlock()
+ gosched()
+}
+
+// Sleep momentarily so that other goroutines can process.
+func gosched() { time.Sleep(1 * time.Millisecond) }
+
+var (
+ // type checking
+ _ Clock = &Mock{}
+)
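For context on the mock clock vendored above, here is a minimal usage sketch. It assumes the `clock.NewMock` constructor and the `Add` method defined earlier in this vendored file (not shown in this excerpt):

```go
package main

import (
	"fmt"
	"time"

	"github.com/benbjohnson/clock"
)

func main() {
	mock := clock.NewMock() // mock time starts at the Unix epoch

	done := make(chan struct{})
	mock.AfterFunc(10*time.Second, func() { close(done) })

	// Advance the mock clock past the timer's deadline; the callback fires.
	mock.Add(15 * time.Second)

	<-done
	fmt.Println("fired at mock time:", mock.Now())
}
```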
diff --git a/vendor/github.com/benbjohnson/clock/context.go b/vendor/github.com/benbjohnson/clock/context.go
new file mode 100644
index 000000000..eb67594f2
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/context.go
@@ -0,0 +1,86 @@
+package clock
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+)
+
+func (m *Mock) WithTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+ return m.WithDeadline(parent, m.Now().Add(timeout))
+}
+
+func (m *Mock) WithDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return context.WithCancel(parent)
+ }
+ ctx := &timerCtx{clock: m, parent: parent, deadline: deadline, done: make(chan struct{})}
+ propagateCancel(parent, ctx)
+ dur := m.Until(deadline)
+ if dur <= 0 {
+ ctx.cancel(context.DeadlineExceeded) // deadline has already passed
+ return ctx, func() {}
+ }
+ ctx.Lock()
+ defer ctx.Unlock()
+ if ctx.err == nil {
+ ctx.timer = m.AfterFunc(dur, func() {
+ ctx.cancel(context.DeadlineExceeded)
+ })
+ }
+ return ctx, func() { ctx.cancel(context.Canceled) }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent context.Context, child *timerCtx) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(parent.Err())
+ case <-child.Done():
+ }
+ }()
+}
+
+type timerCtx struct {
+ sync.Mutex
+
+ clock Clock
+ parent context.Context
+ deadline time.Time
+ done chan struct{}
+
+ err error
+ timer *Timer
+}
+
+func (c *timerCtx) cancel(err error) {
+ c.Lock()
+ defer c.Unlock()
+ if c.err != nil {
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { return c.deadline, true }
+
+func (c *timerCtx) Done() <-chan struct{} { return c.done }
+
+func (c *timerCtx) Err() error { return c.err }
+
+func (c *timerCtx) Value(key interface{}) interface{} { return c.parent.Value(key) }
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("clock.WithDeadline(%s [%s])", c.deadline, c.deadline.Sub(c.clock.Now()))
+}
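A short sketch of driving a context deadline from the mock clock; as above, `clock.NewMock` and `Add` are assumed from the mock implementation earlier in this vendored package:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/benbjohnson/clock"
)

func main() {
	mock := clock.NewMock()

	ctx, cancel := mock.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Moving the mock clock past the deadline cancels the context
	// with context.DeadlineExceeded.
	mock.Add(time.Minute)

	<-ctx.Done()
	fmt.Println(ctx.Err())
}
```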
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore
new file mode 100644
index 000000000..50d95c548
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE
new file mode 100644
index 000000000..89b817996
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 000000000..9433004a2
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,30 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retry intervals increase exponentially and stop increasing once a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 000000000..3676ee405
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+ // NextBackOff returns the duration to wait before retrying the operation,
+ // or backoff.Stop to indicate that no more retries should be made.
+ //
+ // Example usage:
+ //
+ // duration := backoff.NextBackOff();
+ // if (duration == backoff.Stop) {
+ // // Do not retry operation.
+ // } else {
+ // // Sleep for duration and retry operation.
+ // }
+ //
+ NextBackOff() time.Duration
+
+ // Reset to initial state.
+ Reset()
+}
+
+// Stop is the duration returned by NextBackOff() to indicate that no more retries should be made.
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+ Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset() {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+ return &ConstantBackOff{Interval: d}
+}
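To illustrate the `BackOff` interface and the fixed policies above, a hand-rolled retry loop might look like this (a sketch; the `Retry` helper vendored later in this diff wraps the same pattern):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	var policy backoff.BackOff = backoff.NewConstantBackOff(200 * time.Millisecond)

	attempt := 0
	op := func() error {
		attempt++
		if attempt < 3 {
			return errors.New("not ready yet")
		}
		return nil
	}

	// Hand-rolled loop over the BackOff interface.
	for {
		if err := op(); err == nil {
			break
		}
		next := policy.NextBackOff()
		if next == backoff.Stop {
			break
		}
		time.Sleep(next)
	}
	fmt.Println("succeeded after", attempt, "attempts")
}
```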
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
new file mode 100644
index 000000000..48482330e
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/context.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+ "context"
+ "time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint
+ BackOff
+ Context() context.Context
+}
+
+type backOffContext struct {
+ BackOff
+ ctx context.Context
+}
+
+// WithContext returns a BackOffContext with context ctx
+//
+// ctx must not be nil
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
+ if ctx == nil {
+ panic("nil context")
+ }
+
+ if b, ok := b.(*backOffContext); ok {
+ return &backOffContext{
+ BackOff: b.BackOff,
+ ctx: ctx,
+ }
+ }
+
+ return &backOffContext{
+ BackOff: b,
+ ctx: ctx,
+ }
+}
+
+func getContext(b BackOff) context.Context {
+ if cb, ok := b.(BackOffContext); ok {
+ return cb.Context()
+ }
+ if tb, ok := b.(*backOffTries); ok {
+ return getContext(tb.delegate)
+ }
+ return context.Background()
+}
+
+func (b *backOffContext) Context() context.Context {
+ return b.ctx
+}
+
+func (b *backOffContext) NextBackOff() time.Duration {
+ select {
+ case <-b.ctx.Done():
+ return Stop
+ default:
+ return b.BackOff.NextBackOff()
+ }
+}
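A sketch of `WithContext` combined with the `Retry` helper vendored later in this diff, so that retries stop as soon as the context is done:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Wrap any policy with a context: NextBackOff returns Stop once ctx is done,
	// and Retry reports the context error.
	b := backoff.WithContext(backoff.NewConstantBackOff(500*time.Millisecond), ctx)

	err := backoff.Retry(func() error {
		return errors.New("still failing")
	}, b)
	fmt.Println(err) // context.DeadlineExceeded after roughly 2s
}
```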
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
new file mode 100644
index 000000000..aac99f196
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -0,0 +1,216 @@
+package backoff
+
+import (
+ "math/rand"
+ "time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+ RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the following default arguments, and assuming we go over the
+MaxElapsedTime on the 10th try, the sequence for 10 tries will be:
+
+ Request # RetryInterval (seconds) Randomized Interval (seconds)
+
+ 1 0.5 [0.25, 0.75]
+ 2 0.75 [0.375, 1.125]
+ 3 1.125 [0.562, 1.687]
+ 4 1.687 [0.8435, 2.53]
+ 5 2.53 [1.265, 3.795]
+ 6 3.795 [1.897, 5.692]
+ 7 5.692 [2.846, 8.538]
+ 8 8.538 [4.269, 12.807]
+ 9 12.807 [6.403, 19.210]
+ 10 19.210 backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+ InitialInterval time.Duration
+ RandomizationFactor float64
+ Multiplier float64
+ MaxInterval time.Duration
+ // After MaxElapsedTime the ExponentialBackOff returns Stop.
+ // It never stops if MaxElapsedTime == 0.
+ MaxElapsedTime time.Duration
+ Stop time.Duration
+ Clock Clock
+
+ currentInterval time.Duration
+ startTime time.Time
+}
+
+// Clock is an interface that returns current time for BackOff.
+type Clock interface {
+ Now() time.Time
+}
+
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
+// Default values for ExponentialBackOff.
+const (
+ DefaultInitialInterval = 500 * time.Millisecond
+ DefaultRandomizationFactor = 0.5
+ DefaultMultiplier = 1.5
+ DefaultMaxInterval = 60 * time.Second
+ DefaultMaxElapsedTime = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
+ b := &ExponentialBackOff{
+ InitialInterval: DefaultInitialInterval,
+ RandomizationFactor: DefaultRandomizationFactor,
+ Multiplier: DefaultMultiplier,
+ MaxInterval: DefaultMaxInterval,
+ MaxElapsedTime: DefaultMaxElapsedTime,
+ Stop: Stop,
+ Clock: SystemClock,
+ }
+ for _, fn := range opts {
+ fn(b)
+ }
+ b.Reset()
+ return b
+}
+
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.InitialInterval = duration
+ }
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.RandomizationFactor = randomizationFactor
+ }
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Multiplier = multiplier
+ }
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxInterval = duration
+ }
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxElapsedTime = duration
+ }
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Stop = duration
+ }
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Clock = clock
+ }
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+ return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+ b.currentInterval = b.InitialInterval
+ b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+ // Make sure we have not gone over the maximum elapsed time.
+ elapsed := b.GetElapsedTime()
+ next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+ b.incrementCurrentInterval()
+ if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+ return b.Stop
+ }
+ return next
+}
+
+// GetElapsedTime returns the elapsed time since the ExponentialBackOff instance
+// was created; it is reset when Reset() is called.
+//
+// The elapsed time is computed using the configured Clock. It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+ return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+ // Check for overflow, if overflow is detected set the current interval to the max interval.
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+ b.currentInterval = b.MaxInterval
+ } else {
+ b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+ }
+}
+
+// Returns a random value from the following interval:
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+ if randomizationFactor == 0 {
+ return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+ }
+ var delta = randomizationFactor * float64(currentInterval)
+ var minInterval = float64(currentInterval) - delta
+ var maxInterval = float64(currentInterval) + delta
+
+ // Get a random value from the range [minInterval, maxInterval].
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+ // we want a 33% chance for selecting either 1, 2 or 3.
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
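A minimal sketch of configuring `ExponentialBackOff` with the functional options above and sampling a few randomized intervals:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(100*time.Millisecond),
		backoff.WithMultiplier(2.0),
		backoff.WithMaxInterval(2*time.Second),
		backoff.WithMaxElapsedTime(10*time.Second),
	)

	// Sample a few intervals; values vary around 100ms, 200ms, 400ms, ...
	// because of the default 0.5 randomization factor.
	for i := 0; i < 5; i++ {
		d := b.NextBackOff()
		if d == backoff.Stop {
			break
		}
		fmt.Println(d)
	}
}
```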
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 000000000..b9c0c51cd
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+ "errors"
+ "time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+ return func() (struct{}, error) {
+ return struct{}{}, o()
+ }
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy states to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+ return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+ return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+ return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+ return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+ _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+ return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ var (
+ err error
+ next time.Duration
+ res T
+ )
+ if t == nil {
+ t = &defaultTimer{}
+ }
+
+ defer func() {
+ t.Stop()
+ }()
+
+ ctx := getContext(b)
+
+ b.Reset()
+ for {
+ res, err = operation()
+ if err == nil {
+ return res, nil
+ }
+
+ var permanent *PermanentError
+ if errors.As(err, &permanent) {
+ return res, permanent.Err
+ }
+
+ if next = b.NextBackOff(); next == Stop {
+ if cerr := ctx.Err(); cerr != nil {
+ return res, cerr
+ }
+
+ return res, err
+ }
+
+ if notify != nil {
+ notify(err, next)
+ }
+
+ t.Start(next)
+
+ select {
+ case <-ctx.Done():
+ return res, ctx.Err()
+ case <-t.C():
+ }
+ }
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+ Err error
+}
+
+func (e *PermanentError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+ return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+ _, ok := target.(*PermanentError)
+ return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &PermanentError{
+ Err: err,
+ }
+}
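A sketch of `Retry`-family usage with `Permanent`: a permanent error short-circuits the retry loop and the wrapped error is returned as-is. The `fetch` function is a stand-in for a real operation:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// fetch is a stand-in operation; a 4xx-style failure is not worth retrying,
// so it is wrapped with backoff.Permanent.
func fetch() (string, error) {
	return "", backoff.Permanent(errors.New("bad request"))
}

func main() {
	var op backoff.OperationWithData[string] = fetch

	res, err := backoff.RetryNotifyWithData(op, backoff.NewExponentialBackOff(),
		func(err error, d time.Duration) { fmt.Println("retrying in", d, "after", err) },
	)
	// The permanent error stops retries immediately; notify is never called
	// and the wrapped error is returned.
	fmt.Println(res, err)
}
```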
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go
new file mode 100644
index 000000000..df9d68bce
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+ C <-chan time.Time
+ c chan time.Time
+ b BackOff
+ ctx context.Context
+ timer Timer
+ stop chan struct{}
+ stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once. The channel is closed when the Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+ return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+ if timer == nil {
+ timer = &defaultTimer{}
+ }
+ c := make(chan time.Time)
+ t := &Ticker{
+ C: c,
+ c: c,
+ b: b,
+ ctx: getContext(b),
+ timer: timer,
+ stop: make(chan struct{}),
+ }
+ t.b.Reset()
+ go t.run()
+ return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+ t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+ c := t.c
+ defer close(c)
+
+ // Ticker is guaranteed to tick at least once.
+ afterC := t.send(time.Now())
+
+ for {
+ if afterC == nil {
+ return
+ }
+
+ select {
+ case tick := <-afterC:
+ afterC = t.send(tick)
+ case <-t.stop:
+ t.c = nil // Prevent future ticks from being sent to the channel.
+ return
+ case <-t.ctx.Done():
+ return
+ }
+ }
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+ select {
+ case t.c <- tick:
+ case <-t.stop:
+ return nil
+ }
+
+ next := t.b.NextBackOff()
+ if next == Stop {
+ t.Stop()
+ return nil
+ }
+
+ t.timer.Start(next)
+ return t.timer.C()
+}
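A sketch of the channel-based `Ticker`: each receive on `C` is one retry attempt, spaced by the backoff policy (the success-on-third-attempt condition is purely illustrative):

```go
package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	attempt := 0
	for range ticker.C {
		attempt++
		// Pretend the operation succeeds on the third try.
		if attempt == 3 {
			fmt.Println("succeeded on attempt", attempt)
			break
		}
		fmt.Println("attempt", attempt, "failed, waiting for next tick")
	}
}
```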
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go
new file mode 100644
index 000000000..8120d0213
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/timer.go
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+type Timer interface {
+ Start(duration time.Duration)
+ Stop()
+ C() <-chan time.Time
+}
+
+// defaultTimer implements the Timer interface using time.Timer.
+type defaultTimer struct {
+ timer *time.Timer
+}
+
+// C returns the timer's channel, which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+ return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration
+func (t *defaultTimer) Start(duration time.Duration) {
+ if t.timer == nil {
+ t.timer = time.NewTimer(duration)
+ } else {
+ t.timer.Reset(duration)
+ }
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+ if t.timer != nil {
+ t.timer.Stop()
+ }
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
new file mode 100644
index 000000000..28d58ca37
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/tries.go
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop if NextBackOff() has been called too many times since
+the last time Reset() was called.
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+ return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+ delegate BackOff
+ maxTries uint64
+ numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+ if b.maxTries == 0 {
+ return Stop
+ }
+ if b.maxTries > 0 {
+ if b.maxTries <= b.numTries {
+ return Stop
+ }
+ b.numTries++
+ }
+ return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+ b.numTries = 0
+ b.delegate.Reset()
+}
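A sketch showing `WithMaxRetries` capping the number of attempts regardless of the delegate policy:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Allow at most 3 retries (4 attempts in total), whatever the delegate policy says.
	b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3)

	attempts := 0
	err := backoff.Retry(func() error {
		attempts++
		return errors.New("always failing")
	}, b)
	fmt.Println(attempts, err) // 4 always failing
}
```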
diff --git a/vendor/github.com/cenkalti/hub/.gitignore b/vendor/github.com/cenkalti/hub/.gitignore
new file mode 100644
index 000000000..00268614f
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/cenkalti/hub/.travis.yml b/vendor/github.com/cenkalti/hub/.travis.yml
new file mode 100644
index 000000000..b05e4c53f
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go: 1.2
+
diff --git a/vendor/github.com/cenkalti/hub/LICENSE b/vendor/github.com/cenkalti/hub/LICENSE
new file mode 100644
index 000000000..89b817996
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/hub/README.md b/vendor/github.com/cenkalti/hub/README.md
new file mode 100644
index 000000000..d3f211818
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/README.md
@@ -0,0 +1,5 @@
+hub
+===
+
+[![GoDoc](https://godoc.org/github.com/cenkalti/hub?status.png)](https://godoc.org/github.com/cenkalti/hub)
+[![Build Status](https://travis-ci.org/cenkalti/hub.png)](https://travis-ci.org/cenkalti/hub)
diff --git a/vendor/github.com/cenkalti/hub/hub.go b/vendor/github.com/cenkalti/hub/hub.go
new file mode 100644
index 000000000..24c5efa86
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/hub.go
@@ -0,0 +1,82 @@
+// Package hub provides a simple event dispatcher for publish/subscribe pattern.
+package hub
+
+import "sync"
+
+type Kind int
+
+// Event is an interface for published events.
+type Event interface {
+ Kind() Kind
+}
+
+// Hub is an event dispatcher; it publishes events to the subscribers
+// that are subscribed to a specific event type.
+// It is optimized for publish calls.
+// The handlers may be called in an order different from the one in which they are registered.
+type Hub struct {
+ subscribers map[Kind][]handler
+ m sync.RWMutex
+ seq uint64
+}
+
+type handler struct {
+ f func(Event)
+ id uint64
+}
+
+// Subscribe registers f for the event of a specific kind.
+func (h *Hub) Subscribe(kind Kind, f func(Event)) (cancel func()) {
+ var cancelled bool
+ h.m.Lock()
+ h.seq++
+ id := h.seq
+ if h.subscribers == nil {
+ h.subscribers = make(map[Kind][]handler)
+ }
+ h.subscribers[kind] = append(h.subscribers[kind], handler{id: id, f: f})
+ h.m.Unlock()
+ return func() {
+ h.m.Lock()
+ if cancelled {
+ h.m.Unlock()
+ return
+ }
+ cancelled = true
+ a := h.subscribers[kind]
+ for i, f := range a {
+ if f.id == id {
+ a[i], h.subscribers[kind] = a[len(a)-1], a[:len(a)-1]
+ break
+ }
+ }
+ if len(a) == 0 {
+ delete(h.subscribers, kind)
+ }
+ h.m.Unlock()
+ }
+}
+
+// Publish an event to the subscribers.
+func (h *Hub) Publish(e Event) {
+ h.m.RLock()
+ if handlers, ok := h.subscribers[e.Kind()]; ok {
+ for _, h := range handlers {
+ h.f(e)
+ }
+ }
+ h.m.RUnlock()
+}
+
+// DefaultHub is the default Hub used by Publish and Subscribe.
+var DefaultHub Hub
+
+// Subscribe registers f for the event of a specific kind in the DefaultHub.
+func Subscribe(kind Kind, f func(Event)) (cancel func()) {
+ return DefaultHub.Subscribe(kind, f)
+}
+
+// Publish an event to the subscribers in DefaultHub.
+func Publish(e Event) {
+ DefaultHub.Publish(e)
+}
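A minimal sketch of the hub package above, with a hypothetical `orderPlaced` event kind and event type:

```go
package main

import (
	"fmt"

	"github.com/cenkalti/hub"
)

const orderPlaced hub.Kind = iota

// orderEvent is a hypothetical event type implementing hub.Event.
type orderEvent struct{ id int }

func (orderEvent) Kind() hub.Kind { return orderPlaced }

func main() {
	var h hub.Hub

	cancel := h.Subscribe(orderPlaced, func(e hub.Event) {
		fmt.Println("order placed:", e.(orderEvent).id)
	})
	defer cancel()

	// Publish runs the handlers synchronously.
	h.Publish(orderEvent{id: 42})
}
```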
diff --git a/vendor/github.com/cenkalti/rpc2/.gitignore b/vendor/github.com/cenkalti/rpc2/.gitignore
new file mode 100644
index 000000000..836562412
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/cenkalti/rpc2/.travis.yml b/vendor/github.com/cenkalti/rpc2/.travis.yml
new file mode 100644
index 000000000..ae8233c2b
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+ - 1.15
+ - tip
+
+arch:
+ - amd64
+ - ppc64le
diff --git a/vendor/github.com/cenkalti/rpc2/LICENSE b/vendor/github.com/cenkalti/rpc2/LICENSE
new file mode 100644
index 000000000..d565b1b1f
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/cenkalti/rpc2/README.md b/vendor/github.com/cenkalti/rpc2/README.md
new file mode 100644
index 000000000..3dffd26e4
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/README.md
@@ -0,0 +1,82 @@
+rpc2
+====
+
+[![GoDoc](https://godoc.org/github.com/cenkalti/rpc2?status.png)](https://godoc.org/github.com/cenkalti/rpc2)
+[![Build Status](https://travis-ci.org/cenkalti/rpc2.png)](https://travis-ci.org/cenkalti/rpc2)
+
+rpc2 is a fork of the net/rpc package in the standard library.
+The main goal is to add bi-directional support to calls,
+which means the server can call the methods of the client.
+This is not possible with the net/rpc package.
+In order to do this, rpc2 adds a `*Client` argument to method signatures.
+
+Install
+--------
+
+ go get github.com/cenkalti/rpc2
+
+Example server
+---------------
+
+```go
+package main
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/cenkalti/rpc2"
+)
+
+type Args struct{ A, B int }
+type Reply int
+
+func main() {
+ srv := rpc2.NewServer()
+ srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error {
+
+ // Reversed call (server to client)
+ var rep Reply
+ client.Call("mult", Args{2, 3}, &rep)
+ fmt.Println("mult result:", rep)
+
+ *reply = Reply(args.A + args.B)
+ return nil
+ })
+
+ lis, _ := net.Listen("tcp", "127.0.0.1:5000")
+ srv.Accept(lis)
+}
+```
+
+Example Client
+---------------
+
+```go
+package main
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/cenkalti/rpc2"
+)
+
+type Args struct{ A, B int }
+type Reply int
+
+func main() {
+ conn, _ := net.Dial("tcp", "127.0.0.1:5000")
+
+ clt := rpc2.NewClient(conn)
+ clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error {
+ *reply = Reply(args.A * args.B)
+ return nil
+ })
+ go clt.Run()
+
+ var rep Reply
+ clt.Call("add", Args{1, 2}, &rep)
+ fmt.Println("add result:", rep)
+}
+```
diff --git a/vendor/github.com/cenkalti/rpc2/client.go b/vendor/github.com/cenkalti/rpc2/client.go
new file mode 100644
index 000000000..cc9956976
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/client.go
@@ -0,0 +1,364 @@
+// Package rpc2 provides bi-directional RPC client and server similar to net/rpc.
+package rpc2
+
+import (
+ "context"
+ "errors"
+ "io"
+ "log"
+ "reflect"
+ "sync"
+)
+
+// Client represents an RPC Client.
+// There may be multiple outstanding Calls associated
+// with a single Client, and a Client may be used by
+// multiple goroutines simultaneously.
+type Client struct {
+ mutex sync.Mutex // protects pending, seq, request
+ sending sync.Mutex
+ request Request // temp area used in send()
+ seq uint64
+ pending map[uint64]*Call
+ closing bool
+ shutdown bool
+ server bool
+ codec Codec
+ handlers map[string]*handler
+ disconnect chan struct{}
+ State *State // additional information to associate with client
+ blocking bool // whether to block request handling
+}
+
+// NewClient returns a new Client to handle requests to the
+// set of services at the other end of the connection.
+// It adds a buffer to the write side of the connection so
+// the header and payload are sent as a unit.
+func NewClient(conn io.ReadWriteCloser) *Client {
+ return NewClientWithCodec(NewGobCodec(conn))
+}
+
+// NewClientWithCodec is like NewClient but uses the specified
+// codec to encode requests and decode responses.
+func NewClientWithCodec(codec Codec) *Client {
+ return &Client{
+ codec: codec,
+ pending: make(map[uint64]*Call),
+ handlers: make(map[string]*handler),
+ disconnect: make(chan struct{}),
+ seq: 1, // 0 means notification.
+ }
+}
+
+// SetBlocking puts the client in blocking mode.
+// In blocking mode, received requests are processes synchronously.
+// If you have methods that may take a long time, other subsequent requests may time out.
+func (c *Client) SetBlocking(blocking bool) {
+ c.blocking = blocking
+}
+
+// Run the client's read loop.
+// You must run this method before calling any methods on the server.
+func (c *Client) Run() {
+ c.readLoop()
+}
+
+// DisconnectNotify returns a channel that is closed
+// when the client connection has gone away.
+func (c *Client) DisconnectNotify() chan struct{} {
+ return c.disconnect
+}
+
+// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics.
+func (c *Client) Handle(method string, handlerFunc interface{}) {
+ addHandler(c.handlers, method, handlerFunc)
+}
+
+// readLoop reads messages from codec.
+// It reads a request or a response to a previous request.
+// If the message is a request, it calls the handler function.
+// If the message is a response, it sends the reply to the associated call.
+func (c *Client) readLoop() {
+ var err error
+ var req Request
+ var resp Response
+ for err == nil {
+ req = Request{}
+ resp = Response{}
+ if err = c.codec.ReadHeader(&req, &resp); err != nil {
+ break
+ }
+
+ if req.Method != "" {
+ // request comes to server
+ if err = c.readRequest(&req); err != nil {
+ debugln("rpc2: error reading request:", err.Error())
+ }
+ } else {
+ // response comes to client
+ if err = c.readResponse(&resp); err != nil {
+ debugln("rpc2: error reading response:", err.Error())
+ }
+ }
+ }
+ // Terminate pending calls.
+ c.sending.Lock()
+ c.mutex.Lock()
+ c.shutdown = true
+ closing := c.closing
+ if err == io.EOF {
+ if closing {
+ err = ErrShutdown
+ } else {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ for _, call := range c.pending {
+ call.Error = err
+ call.done()
+ }
+ c.mutex.Unlock()
+ c.sending.Unlock()
+ if err != io.EOF && !closing && !c.server {
+ debugln("rpc2: client protocol error:", err)
+ }
+ close(c.disconnect)
+ if !closing {
+ c.codec.Close()
+ }
+}
+
+func (c *Client) handleRequest(req Request, method *handler, argv reflect.Value) {
+ // Invoke the method, providing a new value for the reply.
+ replyv := reflect.New(method.replyType.Elem())
+
+ returnValues := method.fn.Call([]reflect.Value{reflect.ValueOf(c), argv, replyv})
+
+ // Do not send response if request is a notification.
+ if req.Seq == 0 {
+ return
+ }
+
+ // The return value for the method is an error.
+ errInter := returnValues[0].Interface()
+ errmsg := ""
+ if errInter != nil {
+ errmsg = errInter.(error).Error()
+ }
+ resp := &Response{
+ Seq: req.Seq,
+ Error: errmsg,
+ }
+ if err := c.codec.WriteResponse(resp, replyv.Interface()); err != nil {
+ debugln("rpc2: error writing response:", err.Error())
+ }
+}
+
+func (c *Client) readRequest(req *Request) error {
+ method, ok := c.handlers[req.Method]
+ if !ok {
+ resp := &Response{
+ Seq: req.Seq,
+ Error: "rpc2: can't find method " + req.Method,
+ }
+ return c.codec.WriteResponse(resp, resp)
+ }
+
+ // Decode the argument value.
+ var argv reflect.Value
+ argIsValue := false // if true, need to indirect before calling.
+ if method.argType.Kind() == reflect.Ptr {
+ argv = reflect.New(method.argType.Elem())
+ } else {
+ argv = reflect.New(method.argType)
+ argIsValue = true
+ }
+ // argv guaranteed to be a pointer now.
+ if err := c.codec.ReadRequestBody(argv.Interface()); err != nil {
+ return err
+ }
+ if argIsValue {
+ argv = argv.Elem()
+ }
+
+ if c.blocking {
+ c.handleRequest(*req, method, argv)
+ } else {
+ go c.handleRequest(*req, method, argv)
+ }
+
+ return nil
+}
+
+func (c *Client) readResponse(resp *Response) error {
+ seq := resp.Seq
+ c.mutex.Lock()
+ call := c.pending[seq]
+ delete(c.pending, seq)
+ c.mutex.Unlock()
+
+ var err error
+ switch {
+ case call == nil:
+ // We've got no pending call. That usually means that
+ // WriteRequest partially failed, and call was already
+ // removed; response is a server telling us about an
+ // error reading request body. We should still attempt
+ // to read error body, but there's no one to give it to.
+ err = c.codec.ReadResponseBody(nil)
+ if err != nil {
+ err = errors.New("reading error body: " + err.Error())
+ }
+ case resp.Error != "":
+ // We've got an error response. Give this to the request;
+ // any subsequent requests will get the ReadResponseBody
+ // error if there is one.
+ call.Error = ServerError(resp.Error)
+ err = c.codec.ReadResponseBody(nil)
+ if err != nil {
+ err = errors.New("reading error body: " + err.Error())
+ }
+ call.done()
+ default:
+ err = c.codec.ReadResponseBody(call.Reply)
+ if err != nil {
+ call.Error = errors.New("reading body " + err.Error())
+ }
+ call.done()
+ }
+
+ return err
+}
+
+// Close waits for active calls to finish and closes the codec.
+func (c *Client) Close() error {
+ c.mutex.Lock()
+ if c.shutdown || c.closing {
+ c.mutex.Unlock()
+ return ErrShutdown
+ }
+ c.closing = true
+ c.mutex.Unlock()
+ return c.codec.Close()
+}
+
+// Go invokes the function asynchronously. It returns the Call structure representing
+// the invocation. The done channel will signal when the call is complete by returning
+// the same Call object. If done is nil, Go will allocate a new channel.
+// If non-nil, done must be buffered or Go will deliberately crash.
+func (c *Client) Go(method string, args interface{}, reply interface{}, done chan *Call) *Call {
+ call := new(Call)
+ call.Method = method
+ call.Args = args
+ call.Reply = reply
+ if done == nil {
+ done = make(chan *Call, 10) // buffered.
+ } else {
+ // If caller passes done != nil, it must arrange that
+ // done has enough buffer for the number of simultaneous
+ // RPCs that will be using that channel. If the channel
+ // is totally unbuffered, it's best not to run at all.
+ if cap(done) == 0 {
+ log.Panic("rpc2: done channel is unbuffered")
+ }
+ }
+ call.Done = done
+ c.send(call)
+ return call
+}
+
+// CallWithContext invokes the named function, waits for it to complete, and
+// returns its error status, or an error from Context timeout.
+func (c *Client) CallWithContext(ctx context.Context, method string, args interface{}, reply interface{}) error {
+ call := c.Go(method, args, reply, make(chan *Call, 1))
+ select {
+ case <-call.Done:
+ return call.Error
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// Call invokes the named function, waits for it to complete, and returns its error status.
+func (c *Client) Call(method string, args interface{}, reply interface{}) error {
+ return c.CallWithContext(context.Background(), method, args, reply)
+}
+
+func (call *Call) done() {
+ select {
+ case call.Done <- call:
+ // ok
+ default:
+ // We don't want to block here. It is the caller's responsibility to make
+ // sure the channel has enough buffer space. See comment in Go().
+ debugln("rpc2: discarding Call reply due to insufficient Done chan capacity")
+ }
+}
+
+// ServerError represents an error that has been returned from
+// the remote side of the RPC connection.
+type ServerError string
+
+func (e ServerError) Error() string {
+ return string(e)
+}
+
+// ErrShutdown is returned when the connection is closing or closed.
+var ErrShutdown = errors.New("connection is shut down")
+
+// Call represents an active RPC.
+type Call struct {
+ Method string // The name of the service and method to call.
+ Args interface{} // The argument to the function (*struct).
+ Reply interface{} // The reply from the function (*struct).
+ Error error // After completion, the error status.
+ Done chan *Call // Strobes when call is complete.
+}
+
+func (c *Client) send(call *Call) {
+ c.sending.Lock()
+ defer c.sending.Unlock()
+
+ // Register this call.
+ c.mutex.Lock()
+ if c.shutdown || c.closing {
+ call.Error = ErrShutdown
+ c.mutex.Unlock()
+ call.done()
+ return
+ }
+ seq := c.seq
+ c.seq++
+ c.pending[seq] = call
+ c.mutex.Unlock()
+
+ // Encode and send the request.
+ c.request.Seq = seq
+ c.request.Method = call.Method
+ err := c.codec.WriteRequest(&c.request, call.Args)
+ if err != nil {
+ c.mutex.Lock()
+ call = c.pending[seq]
+ delete(c.pending, seq)
+ c.mutex.Unlock()
+ if call != nil {
+ call.Error = err
+ call.done()
+ }
+ }
+}
+
+// Notify sends a request to the receiver but does not wait for a return value.
+func (c *Client) Notify(method string, args interface{}) error {
+ c.sending.Lock()
+ defer c.sending.Unlock()
+
+ if c.shutdown || c.closing {
+ return ErrShutdown
+ }
+
+ c.request.Seq = 0
+ c.request.Method = method
+ return c.codec.WriteRequest(&c.request, args)
+}
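Beyond the README examples, a sketch of the context-aware call and the fire-and-forget `Notify` exposed by this vendored client; the `sum` and `log` method names and the address are hypothetical and assumed to be served elsewhere:

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/cenkalti/rpc2"
)

type Args struct{ A, B int }
type Reply int

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:5000")
	if err != nil {
		log.Fatal(err)
	}
	clt := rpc2.NewClient(conn)
	go clt.Run()

	// Bounded call: returns ctx.Err() if the server does not answer in time.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	var rep Reply
	if err := clt.CallWithContext(ctx, "sum", Args{1, 2}, &rep); err != nil {
		log.Println("call failed:", err)
	}

	// Notification: sequence number 0, so the server sends no response back.
	_ = clt.Notify("log", "client finished")
}
```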
diff --git a/vendor/github.com/cenkalti/rpc2/codec.go b/vendor/github.com/cenkalti/rpc2/codec.go
new file mode 100644
index 000000000..b097d9aaa
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/codec.go
@@ -0,0 +1,125 @@
+package rpc2
+
+import (
+ "bufio"
+ "encoding/gob"
+ "io"
+ "sync"
+)
+
+// A Codec implements reading and writing of RPC requests and responses.
+// The client calls ReadHeader to read a message header.
+// The implementation must populate either Request or Response argument.
+// Depending on which argument is populated, ReadRequestBody or
+// ReadResponseBody is called right after ReadHeader.
+// ReadRequestBody and ReadResponseBody may be called with a nil
+// argument to force the body to be read and then discarded.
+type Codec interface {
+ // ReadHeader must read a message and populate either the request
+ // or the response by inspecting the incoming message.
+ ReadHeader(*Request, *Response) error
+
+ // ReadRequestBody into args argument of handler function.
+ ReadRequestBody(interface{}) error
+
+ // ReadResponseBody into reply argument of handler function.
+ ReadResponseBody(interface{}) error
+
+ // WriteRequest must be safe for concurrent use by multiple goroutines.
+ WriteRequest(*Request, interface{}) error
+
+ // WriteResponse must be safe for concurrent use by multiple goroutines.
+ WriteResponse(*Response, interface{}) error
+
+ // Close is called when client/server finished with the connection.
+ Close() error
+}
+
+// Request is a header written before every RPC call.
+type Request struct {
+ Seq uint64 // sequence number chosen by client
+ Method string
+}
+
+// Response is a header written before every RPC return.
+type Response struct {
+ Seq uint64 // echoes that of the request
+ Error string // error, if any.
+}
+
+type gobCodec struct {
+ rwc io.ReadWriteCloser
+ dec *gob.Decoder
+ enc *gob.Encoder
+ encBuf *bufio.Writer
+ mutex sync.Mutex
+}
+
+type message struct {
+ Seq uint64
+ Method string
+ Error string
+}
+
+// NewGobCodec returns a new rpc2.Codec using gob encoding/decoding on conn.
+func NewGobCodec(conn io.ReadWriteCloser) Codec {
+ buf := bufio.NewWriter(conn)
+ return &gobCodec{
+ rwc: conn,
+ dec: gob.NewDecoder(conn),
+ enc: gob.NewEncoder(buf),
+ encBuf: buf,
+ }
+}
+
+func (c *gobCodec) ReadHeader(req *Request, resp *Response) error {
+ var msg message
+ if err := c.dec.Decode(&msg); err != nil {
+ return err
+ }
+
+ if msg.Method != "" {
+ req.Seq = msg.Seq
+ req.Method = msg.Method
+ } else {
+ resp.Seq = msg.Seq
+ resp.Error = msg.Error
+ }
+ return nil
+}
+
+func (c *gobCodec) ReadRequestBody(body interface{}) error {
+ return c.dec.Decode(body)
+}
+
+func (c *gobCodec) ReadResponseBody(body interface{}) error {
+ return c.dec.Decode(body)
+}
+
+func (c *gobCodec) WriteRequest(r *Request, body interface{}) (err error) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ if err = c.enc.Encode(r); err != nil {
+ return
+ }
+ if err = c.enc.Encode(body); err != nil {
+ return
+ }
+ return c.encBuf.Flush()
+}
+
+func (c *gobCodec) WriteResponse(r *Response, body interface{}) (err error) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ if err = c.enc.Encode(r); err != nil {
+ return
+ }
+ if err = c.enc.Encode(body); err != nil {
+ return
+ }
+ return c.encBuf.Flush()
+}
+
+func (c *gobCodec) Close() error {
+ return c.rwc.Close()
+}
diff --git a/vendor/github.com/cenkalti/rpc2/debug.go b/vendor/github.com/cenkalti/rpc2/debug.go
new file mode 100644
index 000000000..ec1b62521
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/debug.go
@@ -0,0 +1,12 @@
+package rpc2
+
+import "log"
+
+// DebugLog controls the printing of internal and I/O errors.
+var DebugLog = false
+
+func debugln(v ...interface{}) {
+ if DebugLog {
+ log.Println(v...)
+ }
+}
diff --git a/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go
new file mode 100644
index 000000000..87e116887
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go
@@ -0,0 +1,226 @@
+// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec for the rpc2 package.
+//
+// Besides struct types, JSONCodec allows using positional arguments.
+// Use []interface{} as the type of argument when sending and receiving methods.
+//
+// Positional arguments example:
+// server.Handle("add", func(client *rpc2.Client, args []interface{}, result *float64) error {
+// *result = args[0].(float64) + args[1].(float64)
+// return nil
+// })
+//
+// var result float64
+// client.Call("add", []interface{}{1, 2}, &result)
+//
+package jsonrpc
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sync"
+
+ "github.com/cenkalti/rpc2"
+)
+
+type jsonCodec struct {
+ dec *json.Decoder // for reading JSON values
+ enc *json.Encoder // for writing JSON values
+ c io.Closer
+
+ // temporary work space
+ msg message
+ serverRequest serverRequest
+ clientResponse clientResponse
+
+ // JSON-RPC clients can use arbitrary json values as request IDs.
+ // Package rpc expects uint64 request IDs.
+ // We assign uint64 sequence numbers to incoming requests
+ // but save the original request ID in the pending map.
+ // When rpc responds, we use the sequence number in
+ // the response to find the original request ID.
+ mutex sync.Mutex // protects seq, pending
+ pending map[uint64]*json.RawMessage
+ seq uint64
+}
+
+// NewJSONCodec returns a new rpc2.Codec using JSON-RPC on conn.
+func NewJSONCodec(conn io.ReadWriteCloser) rpc2.Codec {
+ return &jsonCodec{
+ dec: json.NewDecoder(conn),
+ enc: json.NewEncoder(conn),
+ c: conn,
+ pending: make(map[uint64]*json.RawMessage),
+ }
+}
+
+// serverRequest and clientResponse combined
+type message struct {
+ Method string `json:"method"`
+ Params *json.RawMessage `json:"params"`
+ Id *json.RawMessage `json:"id"`
+ Result *json.RawMessage `json:"result"`
+ Error interface{} `json:"error"`
+}
+
+// Unmarshal to
+type serverRequest struct {
+ Method string `json:"method"`
+ Params *json.RawMessage `json:"params"`
+ Id *json.RawMessage `json:"id"`
+}
+type clientResponse struct {
+ Id uint64 `json:"id"`
+ Result *json.RawMessage `json:"result"`
+ Error interface{} `json:"error"`
+}
+
+// to Marshal
+type serverResponse struct {
+ Id *json.RawMessage `json:"id"`
+ Result interface{} `json:"result"`
+ Error interface{} `json:"error"`
+}
+type clientRequest struct {
+ Method string `json:"method"`
+ Params interface{} `json:"params"`
+ Id *uint64 `json:"id"`
+}
+
+func (c *jsonCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error {
+ c.msg = message{}
+ if err := c.dec.Decode(&c.msg); err != nil {
+ return err
+ }
+
+ if c.msg.Method != "" {
+ // request comes to server
+ c.serverRequest.Id = c.msg.Id
+ c.serverRequest.Method = c.msg.Method
+ c.serverRequest.Params = c.msg.Params
+
+ req.Method = c.serverRequest.Method
+
+ // JSON request id can be any JSON value;
+ // RPC package expects uint64. Translate to
+ // internal uint64 and save JSON on the side.
+ if c.serverRequest.Id == nil {
+ // Notification
+ } else {
+ c.mutex.Lock()
+ c.seq++
+ c.pending[c.seq] = c.serverRequest.Id
+ c.serverRequest.Id = nil
+ req.Seq = c.seq
+ c.mutex.Unlock()
+ }
+ } else {
+ // response comes to client
+ err := json.Unmarshal(*c.msg.Id, &c.clientResponse.Id)
+ if err != nil {
+ return err
+ }
+ c.clientResponse.Result = c.msg.Result
+ c.clientResponse.Error = c.msg.Error
+
+ resp.Error = ""
+ resp.Seq = c.clientResponse.Id
+ if c.clientResponse.Error != nil || c.clientResponse.Result == nil {
+ x, ok := c.clientResponse.Error.(string)
+ if !ok {
+ return fmt.Errorf("invalid error %v", c.clientResponse.Error)
+ }
+ if x == "" {
+ x = "unspecified error"
+ }
+ resp.Error = x
+ }
+ }
+ return nil
+}
+
+var errMissingParams = errors.New("jsonrpc: request body missing params")
+
+func (c *jsonCodec) ReadRequestBody(x interface{}) error {
+ if x == nil {
+ return nil
+ }
+ if c.serverRequest.Params == nil {
+ return errMissingParams
+ }
+
+ var err error
+
+ // Check if x points to a slice of any kind
+ rt := reflect.TypeOf(x)
+ if rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Slice {
+ // If it's a slice, unmarshal as is
+ err = json.Unmarshal(*c.serverRequest.Params, x)
+ } else {
+ // For anything else, unmarshal into a slice containing x
+ params := &[]interface{}{x}
+ err = json.Unmarshal(*c.serverRequest.Params, params)
+ }
+
+ return err
+}
+
+func (c *jsonCodec) ReadResponseBody(x interface{}) error {
+ if x == nil {
+ return nil
+ }
+ return json.Unmarshal(*c.clientResponse.Result, x)
+}
+
+func (c *jsonCodec) WriteRequest(r *rpc2.Request, param interface{}) error {
+ req := &clientRequest{Method: r.Method}
+
+ // Check if param is a slice of any kind
+ if param != nil && reflect.TypeOf(param).Kind() == reflect.Slice {
+ // If it's a slice, leave as is
+ req.Params = param
+ } else {
+ // Put anything else into a slice
+ req.Params = []interface{}{param}
+ }
+
+ if r.Seq == 0 {
+ // Notification
+ req.Id = nil
+ } else {
+ seq := r.Seq
+ req.Id = &seq
+ }
+ return c.enc.Encode(req)
+}
+
+var null = json.RawMessage([]byte("null"))
+
+func (c *jsonCodec) WriteResponse(r *rpc2.Response, x interface{}) error {
+ c.mutex.Lock()
+ b, ok := c.pending[r.Seq]
+ if !ok {
+ c.mutex.Unlock()
+ return errors.New("invalid sequence number in response")
+ }
+ delete(c.pending, r.Seq)
+ c.mutex.Unlock()
+
+ if b == nil {
+ // Invalid request so no id. Use JSON null.
+ b = &null
+ }
+ resp := serverResponse{Id: b}
+ if r.Error == "" {
+ resp.Result = x
+ } else {
+ resp.Error = r.Error
+ }
+ return c.enc.Encode(resp)
+}
+
+func (c *jsonCodec) Close() error {
+ return c.c.Close()
+}
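
For orientation, the struct tags above fix the wire format this codec speaks. The following minimal, self-contained sketch reproduces the request shape that WriteRequest emits; the method name "Arith.Add" and the argument struct are made up for illustration, and the anonymous struct simply mirrors the unexported clientRequest type.

package main

import (
	"encoding/json"
	"os"
)

func main() {
	// A non-slice parameter is wrapped in a one-element "params" array and the
	// client sequence number travels as the JSON-RPC "id" (a nil id marks a notification).
	id := uint64(1)
	req := struct {
		Method string        `json:"method"`
		Params []interface{} `json:"params"`
		Id     *uint64       `json:"id"`
	}{
		Method: "Arith.Add",
		Params: []interface{}{map[string]int{"A": 1, "B": 2}},
		Id:     &id,
	}
	// Prints: {"method":"Arith.Add","params":[{"A":1,"B":2}],"id":1}
	json.NewEncoder(os.Stdout).Encode(req)
}

Responses travel in the mirrored serverResponse shape ({"id": ..., "result": ..., "error": ...}), which is what ReadHeader dispatches on: a message with a non-empty "method" is handled as a request, anything else as a response.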
diff --git a/vendor/github.com/cenkalti/rpc2/server.go b/vendor/github.com/cenkalti/rpc2/server.go
new file mode 100644
index 000000000..2a5be7ed6
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/server.go
@@ -0,0 +1,181 @@
+package rpc2
+
+import (
+ "io"
+ "log"
+ "net"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/cenkalti/hub"
+)
+
+// Precompute the reflect type for error. Can't use error directly
+// because Typeof takes an empty interface value. This is annoying.
+var typeOfError = reflect.TypeOf((*error)(nil)).Elem()
+var typeOfClient = reflect.TypeOf((*Client)(nil))
+
+const (
+ clientConnected hub.Kind = iota
+ clientDisconnected
+)
+
+// Server responds to RPC requests made by Client.
+type Server struct {
+ handlers map[string]*handler
+ eventHub *hub.Hub
+}
+
+type handler struct {
+ fn reflect.Value
+ argType reflect.Type
+ replyType reflect.Type
+}
+
+type connectionEvent struct {
+ Client *Client
+}
+
+type disconnectionEvent struct {
+ Client *Client
+}
+
+func (connectionEvent) Kind() hub.Kind { return clientConnected }
+func (disconnectionEvent) Kind() hub.Kind { return clientDisconnected }
+
+// NewServer returns a new Server.
+func NewServer() *Server {
+ return &Server{
+ handlers: make(map[string]*handler),
+ eventHub: &hub.Hub{},
+ }
+}
+
+// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics.
+func (s *Server) Handle(method string, handlerFunc interface{}) {
+ addHandler(s.handlers, method, handlerFunc)
+}
+
+func addHandler(handlers map[string]*handler, mname string, handlerFunc interface{}) {
+ if _, ok := handlers[mname]; ok {
+ panic("rpc2: multiple registrations for " + mname)
+ }
+
+ method := reflect.ValueOf(handlerFunc)
+ mtype := method.Type()
+ // Method needs three ins: *client, *args, *reply.
+ if mtype.NumIn() != 3 {
+ log.Panicln("method", mname, "has wrong number of ins:", mtype.NumIn())
+ }
+ // First arg must be a pointer to rpc2.Client.
+ clientType := mtype.In(0)
+ if clientType.Kind() != reflect.Ptr {
+ log.Panicln("method", mname, "client type not a pointer:", clientType)
+ }
+ if clientType != typeOfClient {
+ log.Panicln("method", mname, "first argument", clientType.String(), "not *rpc2.Client")
+ }
+ // Second arg need not be a pointer.
+ argType := mtype.In(1)
+ if !isExportedOrBuiltinType(argType) {
+ log.Panicln(mname, "argument type not exported:", argType)
+ }
+ // Third arg must be a pointer.
+ replyType := mtype.In(2)
+ if replyType.Kind() != reflect.Ptr {
+ log.Panicln("method", mname, "reply type not a pointer:", replyType)
+ }
+ // Reply type must be exported.
+ if !isExportedOrBuiltinType(replyType) {
+ log.Panicln("method", mname, "reply type not exported:", replyType)
+ }
+ // Method needs one out.
+ if mtype.NumOut() != 1 {
+ log.Panicln("method", mname, "has wrong number of outs:", mtype.NumOut())
+ }
+ // The return type of the method must be error.
+ if returnType := mtype.Out(0); returnType != typeOfError {
+ log.Panicln("method", mname, "returns", returnType.String(), "not error")
+ }
+ handlers[mname] = &handler{
+ fn: method,
+ argType: argType,
+ replyType: replyType,
+ }
+}
+
+// Is this type exported or a builtin?
+func isExportedOrBuiltinType(t reflect.Type) bool {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ // PkgPath will be non-empty even for an exported type,
+ // so we need to check the type name as well.
+ return isExported(t.Name()) || t.PkgPath() == ""
+}
+
+// Is this an exported - upper case - name?
+func isExported(name string) bool {
+ rune, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(rune)
+}
+
+// OnConnect registers a function to run when a client connects.
+func (s *Server) OnConnect(f func(*Client)) {
+ s.eventHub.Subscribe(clientConnected, func(e hub.Event) {
+ go f(e.(connectionEvent).Client)
+ })
+}
+
+// OnDisconnect registers a function to run when a client disconnects.
+func (s *Server) OnDisconnect(f func(*Client)) {
+ s.eventHub.Subscribe(clientDisconnected, func(e hub.Event) {
+ go f(e.(disconnectionEvent).Client)
+ })
+}
+
+// Accept accepts connections on the listener and serves requests
+// for each incoming connection. Accept blocks; the caller typically
+// invokes it in a go statement.
+func (s *Server) Accept(lis net.Listener) {
+ for {
+ conn, err := lis.Accept()
+ if err != nil {
+ log.Print("rpc.Serve: accept:", err.Error())
+ return
+ }
+ go s.ServeConn(conn)
+ }
+}
+
+// ServeConn runs the server on a single connection.
+// ServeConn blocks, serving the connection until the client hangs up.
+// The caller typically invokes ServeConn in a go statement.
+// ServeConn uses the gob wire format (see package gob) on the
+// connection. To use an alternate codec, use ServeCodec.
+func (s *Server) ServeConn(conn io.ReadWriteCloser) {
+ s.ServeCodec(NewGobCodec(conn))
+}
+
+// ServeCodec is like ServeConn but uses the specified codec to
+// decode requests and encode responses.
+func (s *Server) ServeCodec(codec Codec) {
+ s.ServeCodecWithState(codec, NewState())
+}
+
+// ServeCodecWithState is like ServeCodec but also gives the ability to
+// associate a state variable with the client that persists across RPC calls.
+func (s *Server) ServeCodecWithState(codec Codec, state *State) {
+ defer codec.Close()
+
+ // Client also handles the incoming connections.
+ c := NewClientWithCodec(codec)
+ c.server = true
+ c.handlers = s.handlers
+ c.State = state
+
+ s.eventHub.Publish(connectionEvent{c})
+ c.Run()
+ s.eventHub.Publish(disconnectionEvent{c})
+}
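
The signature checks in addHandler pin every handler to the shape func(*rpc2.Client, argType, *replyType) error. A minimal usage sketch follows, with the method name "add" and the listen address chosen purely for illustration; the per-client State set up by ServeCodecWithState is reachable from inside the handler.

package main

import (
	"log"
	"net"

	"github.com/cenkalti/rpc2"
)

type Args struct{ A, B int }

func main() {
	srv := rpc2.NewServer()

	// Handlers receive the calling client, so the server can call back or
	// stash per-connection state that survives across requests.
	srv.Handle("add", func(client *rpc2.Client, args *Args, reply *int) error {
		client.State.Set("lastOp", "add")
		*reply = args.A + args.B
		return nil
	})

	srv.OnConnect(func(c *rpc2.Client) { log.Println("client connected") })

	lis, err := net.Listen("tcp", "127.0.0.1:5000")
	if err != nil {
		log.Fatal(err)
	}
	srv.Accept(lis) // blocks; each connection is served in its own goroutine
}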
diff --git a/vendor/github.com/cenkalti/rpc2/state.go b/vendor/github.com/cenkalti/rpc2/state.go
new file mode 100644
index 000000000..7a4f23e6d
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/state.go
@@ -0,0 +1,25 @@
+package rpc2
+
+import "sync"
+
+type State struct {
+ store map[string]interface{}
+ m sync.RWMutex
+}
+
+func NewState() *State {
+ return &State{store: make(map[string]interface{})}
+}
+
+func (s *State) Get(key string) (value interface{}, ok bool) {
+ s.m.RLock()
+ value, ok = s.store[key]
+ s.m.RUnlock()
+ return
+}
+
+func (s *State) Set(key string, value interface{}) {
+ s.m.Lock()
+ s.store[key] = value
+ s.m.Unlock()
+}
diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format
new file mode 100644
index 000000000..0ff425760
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.clang-format
@@ -0,0 +1,25 @@
+---
+Language: Cpp
+BasedOnStyle: LLVM
+AlignAfterOpenBracket: DontAlign
+AlignConsecutiveAssignments: true
+AlignEscapedNewlines: DontAlign
+# mkdocs annotations in source code are written as trailing comments
+# and alignment pushes these really far away from the content.
+AlignTrailingComments: false
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: false
+BreakBeforeBraces: Attach
+IndentWidth: 4
+KeepEmptyLinesAtTheStartOfBlocks: false
+TabWidth: 4
+UseTab: ForContinuationAndIndentation
+ColumnLimit: 1000
+# Go compiler comments need to stay unindented.
+CommentPragmas: '^go:.*'
+# linux/bpf.h needs to be included before bpf/bpf_helpers.h for types like __u64
+# and sorting makes this impossible.
+SortIncludes: false
+...
diff --git a/vendor/github.com/cilium/ebpf/.gitattributes b/vendor/github.com/cilium/ebpf/.gitattributes
new file mode 100644
index 000000000..113f97b98
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.gitattributes
@@ -0,0 +1 @@
+internal/sys/types.go linguist-generated=false
diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore
new file mode 100644
index 000000000..b46162b8e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+*.o
+!*_bpf*.o
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml
new file mode 100644
index 000000000..65f91b910
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.golangci.yaml
@@ -0,0 +1,13 @@
+---
+linters:
+ disable-all: true
+ enable:
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - staticcheck
+ - typecheck
+ - unused
+ - gofmt
diff --git a/vendor/github.com/cilium/ebpf/.vimto.toml b/vendor/github.com/cilium/ebpf/.vimto.toml
new file mode 100644
index 000000000..49a12dbc0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.vimto.toml
@@ -0,0 +1,12 @@
+kernel="ghcr.io/cilium/ci-kernels:stable"
+smp="cpus=2"
+memory="1G"
+user="root"
+setup=[
+ "mount -t cgroup2 -o nosuid,noexec,nodev cgroup2 /sys/fs/cgroup",
+ "/bin/sh -c 'modprobe bpf_testmod || true'",
+ "dmesg --clear",
+]
+teardown=[
+ "dmesg --read-clear",
+]
diff --git a/vendor/github.com/cilium/ebpf/CODEOWNERS b/vendor/github.com/cilium/ebpf/CODEOWNERS
new file mode 100644
index 000000000..ca65d23c0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/CODEOWNERS
@@ -0,0 +1,11 @@
+* @cilium/ebpf-lib-maintainers
+
+features/ @rgo3
+link/ @mmat11
+
+perf/ @florianl
+ringbuf/ @florianl
+
+btf/ @dylandreimerink
+
+cmd/bpf2go/ @mejedi
diff --git a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..8e42838c5
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
new file mode 100644
index 000000000..673a9ac29
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
@@ -0,0 +1,5 @@
+# Contributing to ebpf-go
+
+Want to contribute to ebpf-go? There are a few things you need to know.
+
+We wrote a [contribution guide](https://ebpf-go.dev/contributing/) to help you get started.
diff --git a/vendor/github.com/cilium/ebpf/LICENSE b/vendor/github.com/cilium/ebpf/LICENSE
new file mode 100644
index 000000000..c637ae99c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2017 Nathan Sweet
+Copyright (c) 2018, 2019 Cloudflare
+Copyright (c) 2019 Authors of Cilium
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
new file mode 100644
index 000000000..a56a03e39
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
@@ -0,0 +1,3 @@
+# Maintainers
+
+Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md)
diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile
new file mode 100644
index 000000000..d355eea71
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/Makefile
@@ -0,0 +1,112 @@
+# The development version of clang is distributed as the 'clang' binary,
+# while stable/released versions have a version number attached.
+# Pin the default clang to a stable version.
+CLANG ?= clang-17
+STRIP ?= llvm-strip-17
+OBJCOPY ?= llvm-objcopy-17
+CFLAGS := -O2 -g -Wall -Werror $(CFLAGS)
+
+CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/
+
+# Obtain an absolute path to the directory of the Makefile.
+# Assume the Makefile is in the root of the repository.
+REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+UIDGID := $(shell stat -c '%u:%g' ${REPODIR})
+
+# Prefer podman if installed, otherwise use docker.
+# Note: Setting the var at runtime will always override.
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker)
+CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}")
+
+IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
+VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)
+
+TARGETS := \
+ testdata/loader-clang-11 \
+ testdata/loader-clang-14 \
+ testdata/loader-$(CLANG) \
+ testdata/manyprogs \
+ testdata/btf_map_init \
+ testdata/invalid_map \
+ testdata/raw_tracepoint \
+ testdata/invalid_map_static \
+ testdata/invalid_btf_map_init \
+ testdata/strings \
+ testdata/freplace \
+ testdata/fentry_fexit \
+ testdata/iproute2_map_compat \
+ testdata/map_spin_lock \
+ testdata/subprog_reloc \
+ testdata/fwd_decl \
+ testdata/kconfig \
+ testdata/kconfig_config \
+ testdata/kfunc \
+ testdata/invalid-kfunc \
+ testdata/kfunc-kmod \
+ testdata/constants \
+ testdata/errors \
+ btf/testdata/relocs \
+ btf/testdata/relocs_read \
+ btf/testdata/relocs_read_tgt \
+ btf/testdata/relocs_enum \
+ cmd/bpf2go/testdata/minimal
+
+.PHONY: all clean container-all container-shell generate
+
+.DEFAULT_TARGET = container-all
+
+# Build all ELF binaries using a containerized LLVM toolchain.
+container-all:
+ +${CONTAINER_ENGINE} run --rm -t ${CONTAINER_RUN_ARGS} \
+ -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
+ --env HOME="/tmp" \
+ --env BPF2GO_CC="$(CLANG)" \
+ --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \
+ "${IMAGE}:${VERSION}" \
+ make all
+
+# (debug) Drop the user into a shell inside the container as root.
+# Set BPF2GO_ envs to make 'make generate' just work.
+container-shell:
+ ${CONTAINER_ENGINE} run --rm -ti \
+ -v "${REPODIR}":/ebpf -w /ebpf \
+ --env BPF2GO_CC="$(CLANG)" \
+ --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \
+ "${IMAGE}:${VERSION}"
+
+clean:
+ find "$(CURDIR)" -name "*.elf" -delete
+ find "$(CURDIR)" -name "*.o" -delete
+
+format:
+ find . -type f -name "*.c" | xargs clang-format -i
+
+all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate
+ ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
+ ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
+
+generate:
+ go generate -run "internal/cmd/gentypes" ./...
+ go generate -skip "internal/cmd/gentypes" ./...
+
+testdata/loader-%-el.elf: testdata/loader.c
+ $* $(CFLAGS) -target bpfel -c $< -o $@
+ $(STRIP) -g $@
+
+testdata/loader-%-eb.elf: testdata/loader.c
+ $* $(CFLAGS) -target bpfeb -c $< -o $@
+ $(STRIP) -g $@
+
+%-el.elf: %.c
+ $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@
+ $(STRIP) -g $@
+
+%-eb.elf : %.c
+ $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@
+ $(STRIP) -g $@
+
+.PHONY: update-kernel-deps
+update-kernel-deps: export KERNEL_VERSION?=6.8
+update-kernel-deps:
+ ./testdata/sh/update-kernel-deps.sh
+ $(MAKE) container-all
diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md
new file mode 100644
index 000000000..85871db1a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/README.md
@@ -0,0 +1,72 @@
+# eBPF
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf)
+
+![HoneyGopher](docs/ebpf/ebpf-go.png)
+
+ebpf-go is a pure Go library that provides utilities for loading, compiling, and
+debugging eBPF programs. It has minimal external dependencies and is intended to
+be used in long running processes.
+
+See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF
+ecosystem.
+
+## Getting Started
+
+Please take a look at our [Getting Started] guide.
+
+[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of
+eBPF and the library, and help shape the future of the project.
+
+## Getting Help
+
+The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page.
+Please search for existing threads before starting a new one. Refrain from
+opening issues on the bug tracker if you're just starting out or if you're not
+sure if something is a bug in the library code.
+
+Alternatively, [join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you
+have other questions regarding the project. Note that this channel is ephemeral
+and has its history erased past a certain point, which is less helpful for
+others running into the same problem later.
+
+## Packages
+
+This library includes the following packages:
+
+* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
+ assembler, allowing you to write eBPF assembly instructions directly
+ within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.)
+* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
+ compiling and embedding eBPF programs written in C within Go code. As well as
+ compiling the C code, it auto-generates Go code for loading and manipulating
+ the eBPF program and map objects.
+* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
+ to various hooks
+* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
+ `PERF_EVENT_ARRAY`
+* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a
+ `BPF_MAP_TYPE_RINGBUF` map
+* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent
+ of `bpftool feature probe` for discovering BPF-related kernel features using native Go.
+* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift
+ the `RLIMIT_MEMLOCK` constraint on kernels before 5.11.
+* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format.
+
+## Requirements
+
+* A version of Go that is [supported by
+ upstream](https://golang.org/doc/devel/release.html#policy)
+* CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed versions
+ are not supported.
+
+## License
+
+MIT
+
+### eBPF Gopher
+
+The eBPF honeygopher is based on the Go gopher designed by Renee French.
+
+[Getting Started]: https://ebpf-go.dev/guides/getting-started/
diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go
new file mode 100644
index 000000000..282233d32
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/alu.go
@@ -0,0 +1,180 @@
+package asm
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output alu_string.go -type=Source,Endianness,ALUOp
+
+// Source of ALU / ALU64 / Branch operations
+//
+// msb lsb
+// +------------+-+---+
+// | op |S|cls|
+// +------------+-+---+
+type Source uint16
+
+const sourceMask OpCode = 0x0008
+
+// Source bitmask
+const (
+ // InvalidSource is returned by getters when invoked
+ // on non ALU / branch OpCodes.
+ InvalidSource Source = 0xffff
+ // ImmSource src is from constant
+ ImmSource Source = 0x0000
+ // RegSource src is from register
+ RegSource Source = 0x0008
+)
+
+// The Endianness of a byte swap instruction.
+type Endianness uint8
+
+const endianMask = sourceMask
+
+// Endian flags
+const (
+ InvalidEndian Endianness = 0xff
+ // Convert to little endian
+ LE Endianness = 0x00
+ // Convert to big endian
+ BE Endianness = 0x08
+)
+
+// ALUOp are ALU / ALU64 operations
+//
+// msb lsb
+// +-------+----+-+---+
+// | EXT | OP |s|cls|
+// +-------+----+-+---+
+type ALUOp uint16
+
+const aluMask OpCode = 0x3ff0
+
+const (
+ // InvalidALUOp is returned by getters when invoked
+ // on non ALU OpCodes
+ InvalidALUOp ALUOp = 0xffff
+ // Add - addition
+ Add ALUOp = 0x0000
+ // Sub - subtraction
+ Sub ALUOp = 0x0010
+ // Mul - multiplication
+ Mul ALUOp = 0x0020
+ // Div - division
+ Div ALUOp = 0x0030
+ // SDiv - signed division
+ SDiv ALUOp = Div + 0x0100
+ // Or - bitwise or
+ Or ALUOp = 0x0040
+ // And - bitwise and
+ And ALUOp = 0x0050
+ // LSh - bitwise shift left
+ LSh ALUOp = 0x0060
+ // RSh - bitwise shift right
+ RSh ALUOp = 0x0070
+	// Neg - negation (dst = -dst)
+ Neg ALUOp = 0x0080
+ // Mod - modulo
+ Mod ALUOp = 0x0090
+ // SMod - signed modulo
+ SMod ALUOp = Mod + 0x0100
+ // Xor - bitwise xor
+ Xor ALUOp = 0x00a0
+ // Mov - move value from one place to another
+ Mov ALUOp = 0x00b0
+ // MovSX8 - move lower 8 bits, sign extended upper bits of target
+ MovSX8 ALUOp = Mov + 0x0100
+ // MovSX16 - move lower 16 bits, sign extended upper bits of target
+ MovSX16 ALUOp = Mov + 0x0200
+ // MovSX32 - move lower 32 bits, sign extended upper bits of target
+ MovSX32 ALUOp = Mov + 0x0300
+ // ArSh - arithmetic shift
+ ArSh ALUOp = 0x00c0
+ // Swap - endian conversions
+ Swap ALUOp = 0x00d0
+)
+
+// HostTo converts from host to another endianness.
+func HostTo(endian Endianness, dst Register, size Size) Instruction {
+ var imm int64
+ switch size {
+ case Half:
+ imm = 16
+ case Word:
+ imm = 32
+ case DWord:
+ imm = 64
+ default:
+ return Instruction{OpCode: InvalidOpCode}
+ }
+
+ return Instruction{
+ OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)),
+ Dst: dst,
+ Constant: imm,
+ }
+}
+
+// BSwap unconditionally reverses the order of bytes in a register.
+func BSwap(dst Register, size Size) Instruction {
+ var imm int64
+ switch size {
+ case Half:
+ imm = 16
+ case Word:
+ imm = 32
+ case DWord:
+ imm = 64
+ default:
+ return Instruction{OpCode: InvalidOpCode}
+ }
+
+ return Instruction{
+ OpCode: OpCode(ALU64Class).SetALUOp(Swap),
+ Dst: dst,
+ Constant: imm,
+ }
+}
+
+// Op returns the OpCode for an ALU operation with a given source.
+func (op ALUOp) Op(source Source) OpCode {
+ return OpCode(ALU64Class).SetALUOp(op).SetSource(source)
+}
+
+// Reg emits `dst (op) src`.
+func (op ALUOp) Reg(dst, src Register) Instruction {
+ return Instruction{
+ OpCode: op.Op(RegSource),
+ Dst: dst,
+ Src: src,
+ }
+}
+
+// Imm emits `dst (op) value`.
+func (op ALUOp) Imm(dst Register, value int32) Instruction {
+ return Instruction{
+ OpCode: op.Op(ImmSource),
+ Dst: dst,
+ Constant: int64(value),
+ }
+}
+
+// Op32 returns the OpCode for a 32-bit ALU operation with a given source.
+func (op ALUOp) Op32(source Source) OpCode {
+ return OpCode(ALUClass).SetALUOp(op).SetSource(source)
+}
+
+// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst.
+func (op ALUOp) Reg32(dst, src Register) Instruction {
+ return Instruction{
+ OpCode: op.Op32(RegSource),
+ Dst: dst,
+ Src: src,
+ }
+}
+
+// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst.
+func (op ALUOp) Imm32(dst Register, value int32) Instruction {
+ return Instruction{
+ OpCode: op.Op32(ImmSource),
+ Dst: dst,
+ Constant: int64(value),
+ }
+}
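
Taken together, the Op/Reg/Imm helpers above let eBPF instructions be written directly in Go. A small sketch, assuming the register constants (asm.R0, asm.R1), the asm.Half size, asm.Return, and the asm.Instructions slice type defined elsewhere in this package:

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 1),               // r0 = 1
		asm.Add.Reg(asm.R0, asm.R1),          // r0 += r1
		asm.HostTo(asm.BE, asm.R0, asm.Half), // convert low 16 bits of r0 to big endian
		asm.Return(),                         // exit
	}
	for _, ins := range insns {
		fmt.Println(ins)
	}
}

The 32-bit variants (Reg32/Imm32) emit the same operations in the ALU class instead of ALU64, zeroing the upper half of the destination register.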
diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go
new file mode 100644
index 000000000..35b406bf3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/alu_string.go
@@ -0,0 +1,117 @@
+// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidSource-65535]
+ _ = x[ImmSource-0]
+ _ = x[RegSource-8]
+}
+
+const (
+ _Source_name_0 = "ImmSource"
+ _Source_name_1 = "RegSource"
+ _Source_name_2 = "InvalidSource"
+)
+
+func (i Source) String() string {
+ switch {
+ case i == 0:
+ return _Source_name_0
+ case i == 8:
+ return _Source_name_1
+ case i == 65535:
+ return _Source_name_2
+ default:
+ return "Source(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidEndian-255]
+ _ = x[LE-0]
+ _ = x[BE-8]
+}
+
+const (
+ _Endianness_name_0 = "LE"
+ _Endianness_name_1 = "BE"
+ _Endianness_name_2 = "InvalidEndian"
+)
+
+func (i Endianness) String() string {
+ switch {
+ case i == 0:
+ return _Endianness_name_0
+ case i == 8:
+ return _Endianness_name_1
+ case i == 255:
+ return _Endianness_name_2
+ default:
+ return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidALUOp-65535]
+ _ = x[Add-0]
+ _ = x[Sub-16]
+ _ = x[Mul-32]
+ _ = x[Div-48]
+ _ = x[SDiv-304]
+ _ = x[Or-64]
+ _ = x[And-80]
+ _ = x[LSh-96]
+ _ = x[RSh-112]
+ _ = x[Neg-128]
+ _ = x[Mod-144]
+ _ = x[SMod-400]
+ _ = x[Xor-160]
+ _ = x[Mov-176]
+ _ = x[MovSX8-432]
+ _ = x[MovSX16-688]
+ _ = x[MovSX32-944]
+ _ = x[ArSh-192]
+ _ = x[Swap-208]
+}
+
+const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapSDivSModMovSX8MovSX16MovSX32InvalidALUOp"
+
+var _ALUOp_map = map[ALUOp]string{
+ 0: _ALUOp_name[0:3],
+ 16: _ALUOp_name[3:6],
+ 32: _ALUOp_name[6:9],
+ 48: _ALUOp_name[9:12],
+ 64: _ALUOp_name[12:14],
+ 80: _ALUOp_name[14:17],
+ 96: _ALUOp_name[17:20],
+ 112: _ALUOp_name[20:23],
+ 128: _ALUOp_name[23:26],
+ 144: _ALUOp_name[26:29],
+ 160: _ALUOp_name[29:32],
+ 176: _ALUOp_name[32:35],
+ 192: _ALUOp_name[35:39],
+ 208: _ALUOp_name[39:43],
+ 304: _ALUOp_name[43:47],
+ 400: _ALUOp_name[47:51],
+ 432: _ALUOp_name[51:57],
+ 688: _ALUOp_name[57:64],
+ 944: _ALUOp_name[64:71],
+ 65535: _ALUOp_name[71:83],
+}
+
+func (i ALUOp) String() string {
+ if str, ok := _ALUOp_map[i]; ok {
+ return str
+ }
+ return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/doc.go b/vendor/github.com/cilium/ebpf/asm/doc.go
new file mode 100644
index 000000000..7031bdc27
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/doc.go
@@ -0,0 +1,2 @@
+// Package asm is an assembler for eBPF bytecode.
+package asm
diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go
new file mode 100644
index 000000000..84a40b227
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/func.go
@@ -0,0 +1,250 @@
+package asm
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output func_string.go -type=BuiltinFunc
+
+// BuiltinFunc is a built-in eBPF function.
+type BuiltinFunc int32
+
+func (_ BuiltinFunc) Max() BuiltinFunc {
+ return maxBuiltinFunc - 1
+}
+
+// eBPF built-in functions
+//
+// You can regenerate this list using the following gawk script:
+//
+// /FN\(.+\),/ {
+// match($1, /\(([a-z_0-9]+),/, r)
+// split(r[1], p, "_")
+// printf "Fn"
+// for (i in p) {
+// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2)
+// }
+// print ""
+// }
+//
+// The script expects include/uapi/linux/bpf.h as its input.
+const (
+ FnUnspec BuiltinFunc = iota
+ FnMapLookupElem
+ FnMapUpdateElem
+ FnMapDeleteElem
+ FnProbeRead
+ FnKtimeGetNs
+ FnTracePrintk
+ FnGetPrandomU32
+ FnGetSmpProcessorId
+ FnSkbStoreBytes
+ FnL3CsumReplace
+ FnL4CsumReplace
+ FnTailCall
+ FnCloneRedirect
+ FnGetCurrentPidTgid
+ FnGetCurrentUidGid
+ FnGetCurrentComm
+ FnGetCgroupClassid
+ FnSkbVlanPush
+ FnSkbVlanPop
+ FnSkbGetTunnelKey
+ FnSkbSetTunnelKey
+ FnPerfEventRead
+ FnRedirect
+ FnGetRouteRealm
+ FnPerfEventOutput
+ FnSkbLoadBytes
+ FnGetStackid
+ FnCsumDiff
+ FnSkbGetTunnelOpt
+ FnSkbSetTunnelOpt
+ FnSkbChangeProto
+ FnSkbChangeType
+ FnSkbUnderCgroup
+ FnGetHashRecalc
+ FnGetCurrentTask
+ FnProbeWriteUser
+ FnCurrentTaskUnderCgroup
+ FnSkbChangeTail
+ FnSkbPullData
+ FnCsumUpdate
+ FnSetHashInvalid
+ FnGetNumaNodeId
+ FnSkbChangeHead
+ FnXdpAdjustHead
+ FnProbeReadStr
+ FnGetSocketCookie
+ FnGetSocketUid
+ FnSetHash
+ FnSetsockopt
+ FnSkbAdjustRoom
+ FnRedirectMap
+ FnSkRedirectMap
+ FnSockMapUpdate
+ FnXdpAdjustMeta
+ FnPerfEventReadValue
+ FnPerfProgReadValue
+ FnGetsockopt
+ FnOverrideReturn
+ FnSockOpsCbFlagsSet
+ FnMsgRedirectMap
+ FnMsgApplyBytes
+ FnMsgCorkBytes
+ FnMsgPullData
+ FnBind
+ FnXdpAdjustTail
+ FnSkbGetXfrmState
+ FnGetStack
+ FnSkbLoadBytesRelative
+ FnFibLookup
+ FnSockHashUpdate
+ FnMsgRedirectHash
+ FnSkRedirectHash
+ FnLwtPushEncap
+ FnLwtSeg6StoreBytes
+ FnLwtSeg6AdjustSrh
+ FnLwtSeg6Action
+ FnRcRepeat
+ FnRcKeydown
+ FnSkbCgroupId
+ FnGetCurrentCgroupId
+ FnGetLocalStorage
+ FnSkSelectReuseport
+ FnSkbAncestorCgroupId
+ FnSkLookupTcp
+ FnSkLookupUdp
+ FnSkRelease
+ FnMapPushElem
+ FnMapPopElem
+ FnMapPeekElem
+ FnMsgPushData
+ FnMsgPopData
+ FnRcPointerRel
+ FnSpinLock
+ FnSpinUnlock
+ FnSkFullsock
+ FnTcpSock
+ FnSkbEcnSetCe
+ FnGetListenerSock
+ FnSkcLookupTcp
+ FnTcpCheckSyncookie
+ FnSysctlGetName
+ FnSysctlGetCurrentValue
+ FnSysctlGetNewValue
+ FnSysctlSetNewValue
+ FnStrtol
+ FnStrtoul
+ FnSkStorageGet
+ FnSkStorageDelete
+ FnSendSignal
+ FnTcpGenSyncookie
+ FnSkbOutput
+ FnProbeReadUser
+ FnProbeReadKernel
+ FnProbeReadUserStr
+ FnProbeReadKernelStr
+ FnTcpSendAck
+ FnSendSignalThread
+ FnJiffies64
+ FnReadBranchRecords
+ FnGetNsCurrentPidTgid
+ FnXdpOutput
+ FnGetNetnsCookie
+ FnGetCurrentAncestorCgroupId
+ FnSkAssign
+ FnKtimeGetBootNs
+ FnSeqPrintf
+ FnSeqWrite
+ FnSkCgroupId
+ FnSkAncestorCgroupId
+ FnRingbufOutput
+ FnRingbufReserve
+ FnRingbufSubmit
+ FnRingbufDiscard
+ FnRingbufQuery
+ FnCsumLevel
+ FnSkcToTcp6Sock
+ FnSkcToTcpSock
+ FnSkcToTcpTimewaitSock
+ FnSkcToTcpRequestSock
+ FnSkcToUdp6Sock
+ FnGetTaskStack
+ FnLoadHdrOpt
+ FnStoreHdrOpt
+ FnReserveHdrOpt
+ FnInodeStorageGet
+ FnInodeStorageDelete
+ FnDPath
+ FnCopyFromUser
+ FnSnprintfBtf
+ FnSeqPrintfBtf
+ FnSkbCgroupClassid
+ FnRedirectNeigh
+ FnPerCpuPtr
+ FnThisCpuPtr
+ FnRedirectPeer
+ FnTaskStorageGet
+ FnTaskStorageDelete
+ FnGetCurrentTaskBtf
+ FnBprmOptsSet
+ FnKtimeGetCoarseNs
+ FnImaInodeHash
+ FnSockFromFile
+ FnCheckMtu
+ FnForEachMapElem
+ FnSnprintf
+ FnSysBpf
+ FnBtfFindByNameKind
+ FnSysClose
+ FnTimerInit
+ FnTimerSetCallback
+ FnTimerStart
+ FnTimerCancel
+ FnGetFuncIp
+ FnGetAttachCookie
+ FnTaskPtRegs
+ FnGetBranchSnapshot
+ FnTraceVprintk
+ FnSkcToUnixSock
+ FnKallsymsLookupName
+ FnFindVma
+ FnLoop
+ FnStrncmp
+ FnGetFuncArg
+ FnGetFuncRet
+ FnGetFuncArgCnt
+ FnGetRetval
+ FnSetRetval
+ FnXdpGetBuffLen
+ FnXdpLoadBytes
+ FnXdpStoreBytes
+ FnCopyFromUserTask
+ FnSkbSetTstamp
+ FnImaFileHash
+ FnKptrXchg
+ FnMapLookupPercpuElem
+ FnSkcToMptcpSock
+ FnDynptrFromMem
+ FnRingbufReserveDynptr
+ FnRingbufSubmitDynptr
+ FnRingbufDiscardDynptr
+ FnDynptrRead
+ FnDynptrWrite
+ FnDynptrData
+ FnTcpRawGenSyncookieIpv4
+ FnTcpRawGenSyncookieIpv6
+ FnTcpRawCheckSyncookieIpv4
+ FnTcpRawCheckSyncookieIpv6
+ FnKtimeGetTaiNs
+ FnUserRingbufDrain
+ FnCgrpStorageGet
+ FnCgrpStorageDelete
+
+ maxBuiltinFunc
+)
+
+// Call emits a function call.
+func (fn BuiltinFunc) Call() Instruction {
+ return Instruction{
+ OpCode: OpCode(JumpClass).SetJumpOp(Call),
+ Constant: int64(fn),
+ }
+}
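
Call returns an ordinary Instruction, so a helper invocation can be built and inspected like any other instruction. A short sketch that marshals one such call with Instruction.Marshal (defined in asm/instruction.go below); the choice of bpf_ktime_get_ns is arbitrary:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// FnKtimeGetNs is helper number 5; Call stores it in the Constant field.
	ins := asm.FnKtimeGetNs.Call()

	var buf bytes.Buffer
	if _, err := ins.Marshal(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}
	// A helper call is a single, non-dword instruction: exactly 8 bytes.
	fmt.Printf("% x\n", buf.Bytes())
	fmt.Println(ins)
}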
diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go
new file mode 100644
index 000000000..47150bc4f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/func_string.go
@@ -0,0 +1,235 @@
+// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[FnUnspec-0]
+ _ = x[FnMapLookupElem-1]
+ _ = x[FnMapUpdateElem-2]
+ _ = x[FnMapDeleteElem-3]
+ _ = x[FnProbeRead-4]
+ _ = x[FnKtimeGetNs-5]
+ _ = x[FnTracePrintk-6]
+ _ = x[FnGetPrandomU32-7]
+ _ = x[FnGetSmpProcessorId-8]
+ _ = x[FnSkbStoreBytes-9]
+ _ = x[FnL3CsumReplace-10]
+ _ = x[FnL4CsumReplace-11]
+ _ = x[FnTailCall-12]
+ _ = x[FnCloneRedirect-13]
+ _ = x[FnGetCurrentPidTgid-14]
+ _ = x[FnGetCurrentUidGid-15]
+ _ = x[FnGetCurrentComm-16]
+ _ = x[FnGetCgroupClassid-17]
+ _ = x[FnSkbVlanPush-18]
+ _ = x[FnSkbVlanPop-19]
+ _ = x[FnSkbGetTunnelKey-20]
+ _ = x[FnSkbSetTunnelKey-21]
+ _ = x[FnPerfEventRead-22]
+ _ = x[FnRedirect-23]
+ _ = x[FnGetRouteRealm-24]
+ _ = x[FnPerfEventOutput-25]
+ _ = x[FnSkbLoadBytes-26]
+ _ = x[FnGetStackid-27]
+ _ = x[FnCsumDiff-28]
+ _ = x[FnSkbGetTunnelOpt-29]
+ _ = x[FnSkbSetTunnelOpt-30]
+ _ = x[FnSkbChangeProto-31]
+ _ = x[FnSkbChangeType-32]
+ _ = x[FnSkbUnderCgroup-33]
+ _ = x[FnGetHashRecalc-34]
+ _ = x[FnGetCurrentTask-35]
+ _ = x[FnProbeWriteUser-36]
+ _ = x[FnCurrentTaskUnderCgroup-37]
+ _ = x[FnSkbChangeTail-38]
+ _ = x[FnSkbPullData-39]
+ _ = x[FnCsumUpdate-40]
+ _ = x[FnSetHashInvalid-41]
+ _ = x[FnGetNumaNodeId-42]
+ _ = x[FnSkbChangeHead-43]
+ _ = x[FnXdpAdjustHead-44]
+ _ = x[FnProbeReadStr-45]
+ _ = x[FnGetSocketCookie-46]
+ _ = x[FnGetSocketUid-47]
+ _ = x[FnSetHash-48]
+ _ = x[FnSetsockopt-49]
+ _ = x[FnSkbAdjustRoom-50]
+ _ = x[FnRedirectMap-51]
+ _ = x[FnSkRedirectMap-52]
+ _ = x[FnSockMapUpdate-53]
+ _ = x[FnXdpAdjustMeta-54]
+ _ = x[FnPerfEventReadValue-55]
+ _ = x[FnPerfProgReadValue-56]
+ _ = x[FnGetsockopt-57]
+ _ = x[FnOverrideReturn-58]
+ _ = x[FnSockOpsCbFlagsSet-59]
+ _ = x[FnMsgRedirectMap-60]
+ _ = x[FnMsgApplyBytes-61]
+ _ = x[FnMsgCorkBytes-62]
+ _ = x[FnMsgPullData-63]
+ _ = x[FnBind-64]
+ _ = x[FnXdpAdjustTail-65]
+ _ = x[FnSkbGetXfrmState-66]
+ _ = x[FnGetStack-67]
+ _ = x[FnSkbLoadBytesRelative-68]
+ _ = x[FnFibLookup-69]
+ _ = x[FnSockHashUpdate-70]
+ _ = x[FnMsgRedirectHash-71]
+ _ = x[FnSkRedirectHash-72]
+ _ = x[FnLwtPushEncap-73]
+ _ = x[FnLwtSeg6StoreBytes-74]
+ _ = x[FnLwtSeg6AdjustSrh-75]
+ _ = x[FnLwtSeg6Action-76]
+ _ = x[FnRcRepeat-77]
+ _ = x[FnRcKeydown-78]
+ _ = x[FnSkbCgroupId-79]
+ _ = x[FnGetCurrentCgroupId-80]
+ _ = x[FnGetLocalStorage-81]
+ _ = x[FnSkSelectReuseport-82]
+ _ = x[FnSkbAncestorCgroupId-83]
+ _ = x[FnSkLookupTcp-84]
+ _ = x[FnSkLookupUdp-85]
+ _ = x[FnSkRelease-86]
+ _ = x[FnMapPushElem-87]
+ _ = x[FnMapPopElem-88]
+ _ = x[FnMapPeekElem-89]
+ _ = x[FnMsgPushData-90]
+ _ = x[FnMsgPopData-91]
+ _ = x[FnRcPointerRel-92]
+ _ = x[FnSpinLock-93]
+ _ = x[FnSpinUnlock-94]
+ _ = x[FnSkFullsock-95]
+ _ = x[FnTcpSock-96]
+ _ = x[FnSkbEcnSetCe-97]
+ _ = x[FnGetListenerSock-98]
+ _ = x[FnSkcLookupTcp-99]
+ _ = x[FnTcpCheckSyncookie-100]
+ _ = x[FnSysctlGetName-101]
+ _ = x[FnSysctlGetCurrentValue-102]
+ _ = x[FnSysctlGetNewValue-103]
+ _ = x[FnSysctlSetNewValue-104]
+ _ = x[FnStrtol-105]
+ _ = x[FnStrtoul-106]
+ _ = x[FnSkStorageGet-107]
+ _ = x[FnSkStorageDelete-108]
+ _ = x[FnSendSignal-109]
+ _ = x[FnTcpGenSyncookie-110]
+ _ = x[FnSkbOutput-111]
+ _ = x[FnProbeReadUser-112]
+ _ = x[FnProbeReadKernel-113]
+ _ = x[FnProbeReadUserStr-114]
+ _ = x[FnProbeReadKernelStr-115]
+ _ = x[FnTcpSendAck-116]
+ _ = x[FnSendSignalThread-117]
+ _ = x[FnJiffies64-118]
+ _ = x[FnReadBranchRecords-119]
+ _ = x[FnGetNsCurrentPidTgid-120]
+ _ = x[FnXdpOutput-121]
+ _ = x[FnGetNetnsCookie-122]
+ _ = x[FnGetCurrentAncestorCgroupId-123]
+ _ = x[FnSkAssign-124]
+ _ = x[FnKtimeGetBootNs-125]
+ _ = x[FnSeqPrintf-126]
+ _ = x[FnSeqWrite-127]
+ _ = x[FnSkCgroupId-128]
+ _ = x[FnSkAncestorCgroupId-129]
+ _ = x[FnRingbufOutput-130]
+ _ = x[FnRingbufReserve-131]
+ _ = x[FnRingbufSubmit-132]
+ _ = x[FnRingbufDiscard-133]
+ _ = x[FnRingbufQuery-134]
+ _ = x[FnCsumLevel-135]
+ _ = x[FnSkcToTcp6Sock-136]
+ _ = x[FnSkcToTcpSock-137]
+ _ = x[FnSkcToTcpTimewaitSock-138]
+ _ = x[FnSkcToTcpRequestSock-139]
+ _ = x[FnSkcToUdp6Sock-140]
+ _ = x[FnGetTaskStack-141]
+ _ = x[FnLoadHdrOpt-142]
+ _ = x[FnStoreHdrOpt-143]
+ _ = x[FnReserveHdrOpt-144]
+ _ = x[FnInodeStorageGet-145]
+ _ = x[FnInodeStorageDelete-146]
+ _ = x[FnDPath-147]
+ _ = x[FnCopyFromUser-148]
+ _ = x[FnSnprintfBtf-149]
+ _ = x[FnSeqPrintfBtf-150]
+ _ = x[FnSkbCgroupClassid-151]
+ _ = x[FnRedirectNeigh-152]
+ _ = x[FnPerCpuPtr-153]
+ _ = x[FnThisCpuPtr-154]
+ _ = x[FnRedirectPeer-155]
+ _ = x[FnTaskStorageGet-156]
+ _ = x[FnTaskStorageDelete-157]
+ _ = x[FnGetCurrentTaskBtf-158]
+ _ = x[FnBprmOptsSet-159]
+ _ = x[FnKtimeGetCoarseNs-160]
+ _ = x[FnImaInodeHash-161]
+ _ = x[FnSockFromFile-162]
+ _ = x[FnCheckMtu-163]
+ _ = x[FnForEachMapElem-164]
+ _ = x[FnSnprintf-165]
+ _ = x[FnSysBpf-166]
+ _ = x[FnBtfFindByNameKind-167]
+ _ = x[FnSysClose-168]
+ _ = x[FnTimerInit-169]
+ _ = x[FnTimerSetCallback-170]
+ _ = x[FnTimerStart-171]
+ _ = x[FnTimerCancel-172]
+ _ = x[FnGetFuncIp-173]
+ _ = x[FnGetAttachCookie-174]
+ _ = x[FnTaskPtRegs-175]
+ _ = x[FnGetBranchSnapshot-176]
+ _ = x[FnTraceVprintk-177]
+ _ = x[FnSkcToUnixSock-178]
+ _ = x[FnKallsymsLookupName-179]
+ _ = x[FnFindVma-180]
+ _ = x[FnLoop-181]
+ _ = x[FnStrncmp-182]
+ _ = x[FnGetFuncArg-183]
+ _ = x[FnGetFuncRet-184]
+ _ = x[FnGetFuncArgCnt-185]
+ _ = x[FnGetRetval-186]
+ _ = x[FnSetRetval-187]
+ _ = x[FnXdpGetBuffLen-188]
+ _ = x[FnXdpLoadBytes-189]
+ _ = x[FnXdpStoreBytes-190]
+ _ = x[FnCopyFromUserTask-191]
+ _ = x[FnSkbSetTstamp-192]
+ _ = x[FnImaFileHash-193]
+ _ = x[FnKptrXchg-194]
+ _ = x[FnMapLookupPercpuElem-195]
+ _ = x[FnSkcToMptcpSock-196]
+ _ = x[FnDynptrFromMem-197]
+ _ = x[FnRingbufReserveDynptr-198]
+ _ = x[FnRingbufSubmitDynptr-199]
+ _ = x[FnRingbufDiscardDynptr-200]
+ _ = x[FnDynptrRead-201]
+ _ = x[FnDynptrWrite-202]
+ _ = x[FnDynptrData-203]
+ _ = x[FnTcpRawGenSyncookieIpv4-204]
+ _ = x[FnTcpRawGenSyncookieIpv6-205]
+ _ = x[FnTcpRawCheckSyncookieIpv4-206]
+ _ = x[FnTcpRawCheckSyncookieIpv6-207]
+ _ = x[FnKtimeGetTaiNs-208]
+ _ = x[FnUserRingbufDrain-209]
+ _ = x[FnCgrpStorageGet-210]
+ _ = x[FnCgrpStorageDelete-211]
+ _ = x[maxBuiltinFunc-212]
+}
+
+const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDeletemaxBuiltinFunc"
+
+var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165, 3179}
+
+func (i BuiltinFunc) String() string {
+ if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
+ return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go
new file mode 100644
index 000000000..67cd39d6f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/instruction.go
@@ -0,0 +1,954 @@
+package asm
+
+import (
+ "crypto/sha1"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// InstructionSize is the size of a BPF instruction in bytes
+const InstructionSize = 8
+
+// RawInstructionOffset is an offset in units of raw BPF instructions.
+type RawInstructionOffset uint64
+
+var ErrUnreferencedSymbol = errors.New("unreferenced symbol")
+var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference")
+var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference")
+
+// Bytes returns the offset of an instruction in bytes.
+func (rio RawInstructionOffset) Bytes() uint64 {
+ return uint64(rio) * InstructionSize
+}
+
+// Instruction is a single eBPF instruction.
+type Instruction struct {
+ OpCode OpCode
+ Dst Register
+ Src Register
+ Offset int16
+ Constant int64
+
+ // Metadata contains optional metadata about this instruction.
+ Metadata Metadata
+}
+
+// Unmarshal decodes a BPF instruction.
+func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) {
+ data := make([]byte, InstructionSize)
+ if _, err := io.ReadFull(r, data); err != nil {
+ return 0, err
+ }
+
+ ins.OpCode = OpCode(data[0])
+
+ regs := data[1]
+ switch bo {
+ case binary.LittleEndian:
+ ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4)
+ case binary.BigEndian:
+ ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf)
+ }
+
+ ins.Offset = int16(bo.Uint16(data[2:4]))
+
+ if ins.OpCode.Class().IsALU() {
+ switch ins.OpCode.ALUOp() {
+ case Div:
+ if ins.Offset == 1 {
+ ins.OpCode = ins.OpCode.SetALUOp(SDiv)
+ ins.Offset = 0
+ }
+ case Mod:
+ if ins.Offset == 1 {
+ ins.OpCode = ins.OpCode.SetALUOp(SMod)
+ ins.Offset = 0
+ }
+ case Mov:
+ switch ins.Offset {
+ case 8:
+ ins.OpCode = ins.OpCode.SetALUOp(MovSX8)
+ ins.Offset = 0
+ case 16:
+ ins.OpCode = ins.OpCode.SetALUOp(MovSX16)
+ ins.Offset = 0
+ case 32:
+ ins.OpCode = ins.OpCode.SetALUOp(MovSX32)
+ ins.Offset = 0
+ }
+ }
+ }
+
+ // Convert to int32 before widening to int64
+ // to ensure the signed bit is carried over.
+ ins.Constant = int64(int32(bo.Uint32(data[4:8])))
+
+ if !ins.OpCode.IsDWordLoad() {
+ return InstructionSize, nil
+ }
+
+ // Pull another instruction from the stream to retrieve the second
+ // half of the 64-bit immediate value.
+ if _, err := io.ReadFull(r, data); err != nil {
+ // No Wrap, to avoid io.EOF clash
+ return 0, errors.New("64bit immediate is missing second half")
+ }
+
+ // Require that all fields other than the value are zero.
+ if bo.Uint32(data[0:4]) != 0 {
+ return 0, errors.New("64bit immediate has non-zero fields")
+ }
+
+ cons1 := uint32(ins.Constant)
+ cons2 := int32(bo.Uint32(data[4:8]))
+ ins.Constant = int64(cons2)<<32 | int64(cons1)
+
+ return 2 * InstructionSize, nil
+}
+
+// Marshal encodes a BPF instruction.
+func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) {
+ if ins.OpCode == InvalidOpCode {
+ return 0, errors.New("invalid opcode")
+ }
+
+ isDWordLoad := ins.OpCode.IsDWordLoad()
+
+ cons := int32(ins.Constant)
+ if isDWordLoad {
+ // Encode least significant 32bit first for 64bit operations.
+ cons = int32(uint32(ins.Constant))
+ }
+
+ regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
+ if err != nil {
+ return 0, fmt.Errorf("can't marshal registers: %s", err)
+ }
+
+ if ins.OpCode.Class().IsALU() {
+ newOffset := int16(0)
+ switch ins.OpCode.ALUOp() {
+ case SDiv:
+ ins.OpCode = ins.OpCode.SetALUOp(Div)
+ newOffset = 1
+ case SMod:
+ ins.OpCode = ins.OpCode.SetALUOp(Mod)
+ newOffset = 1
+ case MovSX8:
+ ins.OpCode = ins.OpCode.SetALUOp(Mov)
+ newOffset = 8
+ case MovSX16:
+ ins.OpCode = ins.OpCode.SetALUOp(Mov)
+ newOffset = 16
+ case MovSX32:
+ ins.OpCode = ins.OpCode.SetALUOp(Mov)
+ newOffset = 32
+ }
+ if newOffset != 0 && ins.Offset != 0 {
+ return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins)
+ }
+ ins.Offset = newOffset
+ }
+
+ op, err := ins.OpCode.bpfOpCode()
+ if err != nil {
+ return 0, err
+ }
+
+ data := make([]byte, InstructionSize)
+ data[0] = op
+ data[1] = byte(regs)
+ bo.PutUint16(data[2:4], uint16(ins.Offset))
+ bo.PutUint32(data[4:8], uint32(cons))
+ if _, err := w.Write(data); err != nil {
+ return 0, err
+ }
+
+ if !isDWordLoad {
+ return InstructionSize, nil
+ }
+
+ // The first half of the second part of a double-wide instruction
+ // must be zero. The second half carries the value.
+ bo.PutUint32(data[0:4], 0)
+ bo.PutUint32(data[4:8], uint32(ins.Constant>>32))
+ if _, err := w.Write(data); err != nil {
+ return 0, err
+ }
+
+ return 2 * InstructionSize, nil
+}
+
+// AssociateMap associates a Map with this Instruction.
+//
+// Implicitly clears the Instruction's Reference field.
+//
+// Returns an error if the Instruction is not a map load.
+func (ins *Instruction) AssociateMap(m FDer) error {
+ if !ins.IsLoadFromMap() {
+ return errors.New("not a load from a map")
+ }
+
+ ins.Metadata.Set(referenceMeta{}, nil)
+ ins.Metadata.Set(mapMeta{}, m)
+
+ return nil
+}
+
+// RewriteMapPtr changes an instruction to use a new map fd.
+//
+// Returns an error if the instruction doesn't load a map.
+//
+// Deprecated: use AssociateMap instead. If you cannot provide a Map,
+// wrap an fd in a type implementing FDer.
+func (ins *Instruction) RewriteMapPtr(fd int) error {
+ if !ins.IsLoadFromMap() {
+ return errors.New("not a load from a map")
+ }
+
+ ins.encodeMapFD(fd)
+
+ return nil
+}
+
+func (ins *Instruction) encodeMapFD(fd int) {
+ // Preserve the offset value for direct map loads.
+ offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
+ rawFd := uint64(uint32(fd))
+ ins.Constant = int64(offset | rawFd)
+}
+
+// MapPtr returns the map fd for this instruction.
+//
+// The result is undefined if the instruction is not a load from a map,
+// see IsLoadFromMap.
+//
+// Deprecated: use Map() instead.
+func (ins *Instruction) MapPtr() int {
+ // If there is a map associated with the instruction, return its FD.
+ if fd := ins.Metadata.Get(mapMeta{}); fd != nil {
+ return fd.(FDer).FD()
+ }
+
+ // Fall back to the fd stored in the Constant field
+ return ins.mapFd()
+}
+
+// mapFd returns the map file descriptor stored in the 32 least significant
+// bits of ins' Constant field.
+func (ins *Instruction) mapFd() int {
+ return int(int32(ins.Constant))
+}
+
+// RewriteMapOffset changes the offset of a direct load from a map.
+//
+// Returns an error if the instruction is not a direct load.
+func (ins *Instruction) RewriteMapOffset(offset uint32) error {
+ if !ins.OpCode.IsDWordLoad() {
+ return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
+ }
+
+ if ins.Src != PseudoMapValue {
+ return errors.New("not a direct load from a map")
+ }
+
+ fd := uint64(ins.Constant) & math.MaxUint32
+ ins.Constant = int64(uint64(offset)<<32 | fd)
+ return nil
+}
+
+func (ins *Instruction) mapOffset() uint32 {
+ return uint32(uint64(ins.Constant) >> 32)
+}
+
+// IsLoadFromMap returns true if the instruction loads from a map.
+//
+// This covers both loading the map pointer and direct map value loads.
+func (ins *Instruction) IsLoadFromMap() bool {
+ return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
+}
+
+// IsFunctionCall returns true if the instruction calls another BPF function.
+//
+// This is not the same thing as a BPF helper call.
+func (ins *Instruction) IsFunctionCall() bool {
+ return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
+}
+
+// IsKfuncCall returns true if the instruction calls a kfunc.
+//
+// This is not the same thing as a BPF helper call.
+func (ins *Instruction) IsKfuncCall() bool {
+ return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall
+}
+
+// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer.
+func (ins *Instruction) IsLoadOfFunctionPointer() bool {
+ return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc
+}
+
+// IsFunctionReference returns true if the instruction references another BPF
+// function, either by invoking a Call jump operation or by loading a function
+// pointer.
+func (ins *Instruction) IsFunctionReference() bool {
+ return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer()
+}
+
+// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call.
+func (ins *Instruction) IsBuiltinCall() bool {
+ return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0
+}
+
+// IsConstantLoad returns true if the instruction loads a constant of the
+// given size.
+func (ins *Instruction) IsConstantLoad(size Size) bool {
+ return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0
+}
+
+// Format implements fmt.Formatter.
+func (ins Instruction) Format(f fmt.State, c rune) {
+ if c != 'v' {
+ fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c)
+ return
+ }
+
+ op := ins.OpCode
+
+ if op == InvalidOpCode {
+ fmt.Fprint(f, "INVALID")
+ return
+ }
+
+ // Omit trailing space for Exit
+ if op.JumpOp() == Exit {
+ fmt.Fprint(f, op)
+ return
+ }
+
+ if ins.IsLoadFromMap() {
+ fd := ins.mapFd()
+ m := ins.Map()
+ switch ins.Src {
+ case PseudoMapFD:
+ if m != nil {
+ fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m)
+ } else {
+ fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
+ }
+
+ case PseudoMapValue:
+ if m != nil {
+ fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset())
+ } else {
+ fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
+ }
+ }
+
+ goto ref
+ }
+
+ switch cls := op.Class(); {
+ case cls.isLoadOrStore():
+ fmt.Fprintf(f, "%v ", op)
+ switch op.Mode() {
+ case ImmMode:
+ fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant)
+ case AbsMode:
+ fmt.Fprintf(f, "imm: %d", ins.Constant)
+ case IndMode:
+ fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant)
+ case MemMode, MemSXMode:
+ fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant)
+ case XAddMode:
+ fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src)
+ }
+
+ case cls.IsALU():
+ fmt.Fprintf(f, "%v", op)
+ if op == Swap.Op(ImmSource) {
+ fmt.Fprintf(f, "%d", ins.Constant)
+ }
+
+ fmt.Fprintf(f, " dst: %s ", ins.Dst)
+ switch {
+ case op.ALUOp() == Swap:
+ break
+ case op.Source() == ImmSource:
+ fmt.Fprintf(f, "imm: %d", ins.Constant)
+ default:
+ fmt.Fprintf(f, "src: %s", ins.Src)
+ }
+
+ case cls.IsJump():
+ fmt.Fprintf(f, "%v ", op)
+ switch jop := op.JumpOp(); jop {
+ case Call:
+ switch ins.Src {
+ case PseudoCall:
+ // bpf-to-bpf call
+ fmt.Fprint(f, ins.Constant)
+ case PseudoKfuncCall:
+ // kfunc call
+ fmt.Fprintf(f, "Kfunc(%d)", ins.Constant)
+ default:
+ fmt.Fprint(f, BuiltinFunc(ins.Constant))
+ }
+
+ case Ja:
+ if ins.OpCode.Class() == Jump32Class {
+ fmt.Fprintf(f, "imm: %d", ins.Constant)
+ } else {
+ fmt.Fprintf(f, "off: %d", ins.Offset)
+ }
+
+ default:
+ fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset)
+ if op.Source() == ImmSource {
+ fmt.Fprintf(f, "imm: %d", ins.Constant)
+ } else {
+ fmt.Fprintf(f, "src: %s", ins.Src)
+ }
+ }
+ default:
+ fmt.Fprintf(f, "%v ", op)
+ }
+
+ref:
+ if ins.Reference() != "" {
+ fmt.Fprintf(f, " <%s>", ins.Reference())
+ }
+}
+
+func (ins Instruction) equal(other Instruction) bool {
+ return ins.OpCode == other.OpCode &&
+ ins.Dst == other.Dst &&
+ ins.Src == other.Src &&
+ ins.Offset == other.Offset &&
+ ins.Constant == other.Constant
+}
+
+// Size returns the amount of bytes ins would occupy in binary form.
+func (ins Instruction) Size() uint64 {
+ return uint64(InstructionSize * ins.OpCode.rawInstructions())
+}
+
+// WithMetadata sets the given Metadata on the Instruction, for example to copy
+// Metadata from another Instruction when replacing it.
+func (ins Instruction) WithMetadata(meta Metadata) Instruction {
+ ins.Metadata = meta
+ return ins
+}
+
+type symbolMeta struct{}
+
+// WithSymbol marks the Instruction as a Symbol, which other Instructions
+// can point to using corresponding calls to WithReference.
+func (ins Instruction) WithSymbol(name string) Instruction {
+ ins.Metadata.Set(symbolMeta{}, name)
+ return ins
+}
+
+// Sym creates a symbol.
+//
+// Deprecated: use WithSymbol instead.
+func (ins Instruction) Sym(name string) Instruction {
+ return ins.WithSymbol(name)
+}
+
+// Symbol returns the value ins has been marked with using WithSymbol,
+// otherwise returns an empty string. A symbol is often an Instruction
+// at the start of a function body.
+func (ins Instruction) Symbol() string {
+ sym, _ := ins.Metadata.Get(symbolMeta{}).(string)
+ return sym
+}
+
+type referenceMeta struct{}
+
+// WithReference makes ins reference another Symbol or map by name.
+func (ins Instruction) WithReference(ref string) Instruction {
+ ins.Metadata.Set(referenceMeta{}, ref)
+ return ins
+}
+
+// Reference returns the Symbol or map name referenced by ins, if any.
+func (ins Instruction) Reference() string {
+ ref, _ := ins.Metadata.Get(referenceMeta{}).(string)
+ return ref
+}
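// A minimal sketch of how WithSymbol and WithReference fit together
// (illustrative names; assumes the asm package import). The first instruction
// of a function body carries the symbol; call sites reference it by label
// until Marshal resolves the offsets.
//
//	insns := asm.Instructions{
//		asm.Call.Label("subprog"),
//		asm.Return(),
//		asm.LoadImm(asm.R0, 4, asm.DWord).WithSymbol("subprog"),
//		asm.Return(),
//	}
//	_ = insns[2].Symbol()    // "subprog"
//	_ = insns[0].Reference() // "subprog"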
+
+type mapMeta struct{}
+
+// Map returns the Map referenced by ins, if any.
+// An Instruction will contain a Map if e.g. it references an existing,
+// pinned map that was opened during ELF loading.
+func (ins Instruction) Map() FDer {
+ fd, _ := ins.Metadata.Get(mapMeta{}).(FDer)
+ return fd
+}
+
+type sourceMeta struct{}
+
+// WithSource adds source information about the Instruction.
+func (ins Instruction) WithSource(src fmt.Stringer) Instruction {
+ ins.Metadata.Set(sourceMeta{}, src)
+ return ins
+}
+
+// Source returns source information about the Instruction. The field is
+// present when the compiler emits BTF line info about the Instruction and
+// usually contains the line of source code responsible for it.
+func (ins Instruction) Source() fmt.Stringer {
+ str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer)
+ return str
+}
+
+// A Comment can be passed to Instruction.WithSource to add a comment
+// to an instruction.
+type Comment string
+
+func (s Comment) String() string {
+ return string(s)
+}
+
+// FDer represents a resource tied to an underlying file descriptor.
+// Used as a stand-in for e.g. ebpf.Map since that type cannot be
+// imported here and FD() is the only method we rely on.
+type FDer interface {
+ FD() int
+}
+
+// Instructions is an eBPF program.
+type Instructions []Instruction
+
+// Unmarshal unmarshals an Instructions from a binary instruction stream.
+// All instructions in insns are replaced by instructions decoded from r.
+func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error {
+ if len(*insns) > 0 {
+ *insns = nil
+ }
+
+ var offset uint64
+ for {
+ var ins Instruction
+ n, err := ins.Unmarshal(r, bo)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("offset %d: %w", offset, err)
+ }
+
+ *insns = append(*insns, ins)
+ offset += n
+ }
+
+ return nil
+}
+
+// Name returns the name of the function insns belongs to, if any.
+func (insns Instructions) Name() string {
+ if len(insns) == 0 {
+ return ""
+ }
+ return insns[0].Symbol()
+}
+
+func (insns Instructions) String() string {
+ return fmt.Sprint(insns)
+}
+
+// Size returns the amount of bytes insns would occupy in binary form.
+func (insns Instructions) Size() uint64 {
+ var sum uint64
+ for _, ins := range insns {
+ sum += ins.Size()
+ }
+ return sum
+}
+
+// AssociateMap updates all Instructions that Reference the given symbol
+// to point to an existing Map m instead.
+//
+// Returns ErrUnreferencedSymbol if no references to symbol are found
+// in insns. If symbol refers to anything other than the symbol name of a map
+// (e.g. a bpf2bpf subprogram), an error is returned.
+func (insns Instructions) AssociateMap(symbol string, m FDer) error {
+ if symbol == "" {
+ return errors.New("empty symbol")
+ }
+
+ var found bool
+ for i := range insns {
+ ins := &insns[i]
+ if ins.Reference() != symbol {
+ continue
+ }
+
+ if err := ins.AssociateMap(m); err != nil {
+ return err
+ }
+
+ found = true
+ }
+
+ if !found {
+ return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
+ }
+
+ return nil
+}
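// A minimal sketch of resolving a named map reference (the map name and the
// stub FDer are illustrative; in practice the argument is an *ebpf.Map):
//
//	type stubMap struct{ fd int }
//	func (m stubMap) FD() int { return m.fd }
//
//	insns := asm.Instructions{
//		asm.LoadMapPtr(asm.R1, 0).WithReference("counters"),
//		asm.Return(),
//	}
//	err := insns.AssociateMap("counters", stubMap{fd: 3})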
+
+// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
+//
+// Returns ErrUnreferencedSymbol if the symbol isn't used.
+//
+// Deprecated: use AssociateMap instead.
+func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
+ if symbol == "" {
+ return errors.New("empty symbol")
+ }
+
+ var found bool
+ for i := range insns {
+ ins := &insns[i]
+ if ins.Reference() != symbol {
+ continue
+ }
+
+ if !ins.IsLoadFromMap() {
+ return errors.New("not a load from a map")
+ }
+
+ ins.encodeMapFD(fd)
+
+ found = true
+ }
+
+ if !found {
+ return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
+ }
+
+ return nil
+}
+
+// SymbolOffsets returns the set of symbols and their offset in
+// the instructions.
+func (insns Instructions) SymbolOffsets() (map[string]int, error) {
+ offsets := make(map[string]int)
+
+ for i, ins := range insns {
+ if ins.Symbol() == "" {
+ continue
+ }
+
+ if _, ok := offsets[ins.Symbol()]; ok {
+ return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol())
+ }
+
+ offsets[ins.Symbol()] = i
+ }
+
+ return offsets, nil
+}
+
+// FunctionReferences returns a set of symbol names these Instructions make
+// bpf-to-bpf calls to.
+func (insns Instructions) FunctionReferences() []string {
+ calls := make(map[string]struct{})
+ for _, ins := range insns {
+ if ins.Constant != -1 {
+ // BPF-to-BPF calls have -1 constants.
+ continue
+ }
+
+ if ins.Reference() == "" {
+ continue
+ }
+
+ if !ins.IsFunctionReference() {
+ continue
+ }
+
+ calls[ins.Reference()] = struct{}{}
+ }
+
+ result := make([]string, 0, len(calls))
+ for call := range calls {
+ result = append(result, call)
+ }
+
+ sort.Strings(result)
+ return result
+}
+
+// ReferenceOffsets returns the set of references and their offset in
+// the instructions.
+func (insns Instructions) ReferenceOffsets() map[string][]int {
+ offsets := make(map[string][]int)
+
+ for i, ins := range insns {
+ if ins.Reference() == "" {
+ continue
+ }
+
+ offsets[ins.Reference()] = append(offsets[ins.Reference()], i)
+ }
+
+ return offsets
+}
+
+// Format implements fmt.Formatter.
+//
+// You can control indentation of symbols by
+// specifying a width. Setting a precision controls the indentation of
+// instructions.
+// The default character is a tab, which can be overridden by specifying
+// the ' ' space flag.
+func (insns Instructions) Format(f fmt.State, c rune) {
+ if c != 's' && c != 'v' {
+ fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c)
+ return
+ }
+
+ // Precision is better in this case, because it allows
+ // specifying 0 padding easily.
+ padding, ok := f.Precision()
+ if !ok {
+ padding = 1
+ }
+
+ indent := strings.Repeat("\t", padding)
+ if f.Flag(' ') {
+ indent = strings.Repeat(" ", padding)
+ }
+
+ symPadding, ok := f.Width()
+ if !ok {
+ symPadding = padding - 1
+ }
+ if symPadding < 0 {
+ symPadding = 0
+ }
+
+ symIndent := strings.Repeat("\t", symPadding)
+ if f.Flag(' ') {
+ symIndent = strings.Repeat(" ", symPadding)
+ }
+
+ // Guess how many digits we need at most, by assuming that all instructions
+ // are double wide.
+ highestOffset := len(insns) * 2
+ offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset))))
+
+ iter := insns.Iterate()
+ for iter.Next() {
+ if iter.Ins.Symbol() != "" {
+ fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol())
+ }
+ if src := iter.Ins.Source(); src != nil {
+ line := strings.TrimSpace(src.String())
+ if line != "" {
+ fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line)
+ }
+ }
+ fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
+ }
+}
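// A small illustration of the width/precision verbs described above, assuming
// a populated insns value and the fmt package:
//
//	fmt.Printf("%v\n", insns)     // default: symbols flush left, instructions one tab in
//	fmt.Printf("% 2.4v\n", insns) // spaces: symbols indented by 2, instructions by 4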
+
+// Marshal encodes a BPF program into the kernel format.
+//
+// insns may be modified if there are unresolved jumps or bpf2bpf calls.
+//
+// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
+// without a matching Symbol Instruction within insns.
+func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ if err := insns.encodeFunctionReferences(); err != nil {
+ return err
+ }
+
+ if err := insns.encodeMapPointers(); err != nil {
+ return err
+ }
+
+ for i, ins := range insns {
+ if _, err := ins.Marshal(w, bo); err != nil {
+ return fmt.Errorf("instruction %d: %w", i, err)
+ }
+ }
+ return nil
+}
+
+// Tag calculates the kernel tag for a series of instructions.
+//
+// It mirrors bpf_prog_calc_tag in the kernel and so can be compared
+// to ProgramInfo.Tag to figure out whether a loaded program matches
+// certain instructions.
+func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
+ h := sha1.New()
+ for i, ins := range insns {
+ if ins.IsLoadFromMap() {
+ ins.Constant = 0
+ }
+ _, err := ins.Marshal(h, bo)
+ if err != nil {
+ return "", fmt.Errorf("instruction %d: %w", i, err)
+ }
+ }
+ return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil
+}
+
+// encodeFunctionReferences populates the Offset (or Constant, depending on
+// the instruction type) field of instructions with a Reference field to point
+// to the offset of the corresponding instruction with a matching Symbol field.
+//
+// Only Reference Instructions that are either jumps or BPF function references
+// (calls or function pointer loads) are populated.
+//
+// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
+// without at least one corresponding Symbol Instruction within insns.
+func (insns Instructions) encodeFunctionReferences() error {
+ // Index the offsets of instructions tagged as a symbol.
+ symbolOffsets := make(map[string]RawInstructionOffset)
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+
+ if ins.Symbol() == "" {
+ continue
+ }
+
+ if _, ok := symbolOffsets[ins.Symbol()]; ok {
+ return fmt.Errorf("duplicate symbol %s", ins.Symbol())
+ }
+
+ symbolOffsets[ins.Symbol()] = iter.Offset
+ }
+
+ // Find all instructions tagged as references to other symbols.
+ // Depending on the instruction type, populate their constant or offset
+ // fields to point to the symbol they refer to within the insn stream.
+ iter = insns.Iterate()
+ for iter.Next() {
+ i := iter.Index
+ offset := iter.Offset
+ ins := iter.Ins
+
+ if ins.Reference() == "" {
+ continue
+ }
+
+ switch {
+ case ins.IsFunctionReference() && ins.Constant == -1,
+ ins.OpCode == Ja.opCode(Jump32Class, ImmSource) && ins.Constant == -1:
+ symOffset, ok := symbolOffsets[ins.Reference()]
+ if !ok {
+ return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
+ }
+
+ ins.Constant = int64(symOffset - offset - 1)
+
+ case ins.OpCode.Class().IsJump() && ins.Offset == -1:
+ symOffset, ok := symbolOffsets[ins.Reference()]
+ if !ok {
+ return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
+ }
+
+ ins.Offset = int16(symOffset - offset - 1)
+ }
+ }
+
+ return nil
+}
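// Worked example of the relative-offset arithmetic above: a jump at raw
// offset 10 whose target symbol sits at raw offset 14 is encoded as
// 14 - 10 - 1 = 3, because the program counter has already advanced past the
// jump when the offset is applied. The same value lands in Offset for
// ordinary jumps and in Constant for bpf-to-bpf calls and 32-bit Ja.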
+
+// encodeMapPointers finds all Map Instructions and encodes their FDs
+// into their Constant fields.
+func (insns Instructions) encodeMapPointers() error {
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+
+ if !ins.IsLoadFromMap() {
+ continue
+ }
+
+ m := ins.Map()
+ if m == nil {
+ continue
+ }
+
+ fd := m.FD()
+ if fd < 0 {
+ return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd)
+ }
+
+ ins.encodeMapFD(m.FD())
+ }
+
+ return nil
+}
+
+// Iterate allows iterating a BPF program while keeping track of
+// various offsets.
+//
+// Modifying the instruction slice will lead to undefined behaviour.
+func (insns Instructions) Iterate() *InstructionIterator {
+ return &InstructionIterator{insns: insns}
+}
+
+// InstructionIterator iterates over a BPF program.
+type InstructionIterator struct {
+ insns Instructions
+ // The instruction in question.
+ Ins *Instruction
+ // The index of the instruction in the original instruction slice.
+ Index int
+ // The offset of the instruction in raw BPF instructions. This accounts
+ // for double-wide instructions.
+ Offset RawInstructionOffset
+}
+
+// Next returns true as long as there are any instructions remaining.
+func (iter *InstructionIterator) Next() bool {
+ if len(iter.insns) == 0 {
+ return false
+ }
+
+ if iter.Ins != nil {
+ iter.Index++
+ iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions())
+ }
+ iter.Ins = &iter.insns[0]
+ iter.insns = iter.insns[1:]
+ return true
+}
+
+type bpfRegisters uint8
+
+func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
+ switch bo {
+ case binary.LittleEndian:
+ return bpfRegisters((src << 4) | (dst & 0xF)), nil
+ case binary.BigEndian:
+ return bpfRegisters((dst << 4) | (src & 0xF)), nil
+ default:
+ return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
+ }
+}
+
+// IsUnreferencedSymbol returns true if err was caused by
+// an unreferenced symbol.
+//
+// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol).
+func IsUnreferencedSymbol(err error) bool {
+ return errors.Is(err, ErrUnreferencedSymbol)
+}
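The following is a minimal, self-contained sketch of the Marshal/Unmarshal/Tag round trip defined above. It only assumes the vendored asm import path and little-endian byte order; nothing else is taken from outside this file.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.LoadImm(asm.R0, 0, asm.DWord),
		asm.Return(),
	}

	// Encode into the kernel's wire format: LoadImm DWord is double-wide,
	// so 3 raw instructions of 8 bytes each end up in the buffer.
	var buf bytes.Buffer
	if err := insns.Marshal(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}
	fmt.Println("encoded bytes:", buf.Len()) // 24

	// Decode the same stream back into Instructions.
	var decoded asm.Instructions
	if err := decoded.Unmarshal(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}

	// Tag mirrors the kernel's bpf_prog_calc_tag over the encoded program.
	tag, err := insns.Tag(binary.LittleEndian)
	if err != nil {
		panic(err)
	}
	fmt.Println("tag:", tag)
}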
diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go
new file mode 100644
index 000000000..2738d736b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/jump.go
@@ -0,0 +1,135 @@
+package asm
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output jump_string.go -type=JumpOp
+
+// JumpOp affects control flow.
+//
+// msb lsb
+// +----+-+---+
+// |OP |s|cls|
+// +----+-+---+
+type JumpOp uint8
+
+const jumpMask OpCode = 0xf0
+
+const (
+ // InvalidJumpOp is returned by getters when invoked
+ // on non branch OpCodes
+ InvalidJumpOp JumpOp = 0xff
+ // Ja jumps by offset unconditionally
+ Ja JumpOp = 0x00
+ // JEq jumps by offset if r == imm
+ JEq JumpOp = 0x10
+ // JGT jumps by offset if r > imm
+ JGT JumpOp = 0x20
+ // JGE jumps by offset if r >= imm
+ JGE JumpOp = 0x30
+ // JSet jumps by offset if r & imm
+ JSet JumpOp = 0x40
+ // JNE jumps by offset if r != imm
+ JNE JumpOp = 0x50
+ // JSGT jumps by offset if signed r > signed imm
+ JSGT JumpOp = 0x60
+ // JSGE jumps by offset if signed r >= signed imm
+ JSGE JumpOp = 0x70
+ // Call builtin or user defined function from imm
+ Call JumpOp = 0x80
+ // Exit ends execution, with value in r0
+ Exit JumpOp = 0x90
+ // JLT jumps by offset if r < imm
+ JLT JumpOp = 0xa0
+ // JLE jumps by offset if r <= imm
+ JLE JumpOp = 0xb0
+ // JSLT jumps by offset if signed r < signed imm
+ JSLT JumpOp = 0xc0
+ // JSLE jumps by offset if signed r <= signed imm
+ JSLE JumpOp = 0xd0
+)
+
+// Return emits an exit instruction.
+//
+// Requires a return value in R0.
+func Return() Instruction {
+ return Instruction{
+ OpCode: OpCode(JumpClass).SetJumpOp(Exit),
+ }
+}
+
+// Op returns the OpCode for a given jump source.
+func (op JumpOp) Op(source Source) OpCode {
+ return OpCode(JumpClass).SetJumpOp(op).SetSource(source)
+}
+
+// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled.
+func (op JumpOp) Imm(dst Register, value int32, label string) Instruction {
+ return Instruction{
+ OpCode: op.opCode(JumpClass, ImmSource),
+ Dst: dst,
+ Offset: -1,
+ Constant: int64(value),
+ }.WithReference(label)
+}
+
+// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled.
+// Requires kernel 5.1.
+func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction {
+ return Instruction{
+ OpCode: op.opCode(Jump32Class, ImmSource),
+ Dst: dst,
+ Offset: -1,
+ Constant: int64(value),
+ }.WithReference(label)
+}
+
+// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled.
+func (op JumpOp) Reg(dst, src Register, label string) Instruction {
+ return Instruction{
+ OpCode: op.opCode(JumpClass, RegSource),
+ Dst: dst,
+ Src: src,
+ Offset: -1,
+ }.WithReference(label)
+}
+
+// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled.
+// Requires kernel 5.1.
+func (op JumpOp) Reg32(dst, src Register, label string) Instruction {
+ return Instruction{
+ OpCode: op.opCode(Jump32Class, RegSource),
+ Dst: dst,
+ Src: src,
+ Offset: -1,
+ }.WithReference(label)
+}
+
+func (op JumpOp) opCode(class Class, source Source) OpCode {
+ if op == Exit || op == Call {
+ return InvalidOpCode
+ }
+
+ return OpCode(class).SetJumpOp(op).SetSource(source)
+}
+
+// LongJump returns an unconditional jump ("jump always") instruction with a range of [-2^31, 2^31 - 1].
+func LongJump(label string) Instruction {
+ return Instruction{
+ OpCode: Ja.opCode(Jump32Class, ImmSource),
+ Constant: -1,
+ }.WithReference(label)
+}
+
+// Label adjusts PC to the address of the label.
+func (op JumpOp) Label(label string) Instruction {
+ if op == Call {
+ return Instruction{
+ OpCode: OpCode(JumpClass).SetJumpOp(Call),
+ Src: PseudoCall,
+ Constant: -1,
+ }.WithReference(label)
+ }
+
+ return Instruction{
+ OpCode: OpCode(JumpClass).SetJumpOp(op),
+ Offset: -1,
+ }.WithReference(label)
+}
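A small sketch of a conditional jump built from the constructors above; the label name is illustrative and stays symbolic until Marshal resolves it to a PC-relative offset.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.LoadImm(asm.R0, 1, asm.DWord),
		// If R1 == 0, skip ahead to the instruction carrying the "exit" symbol.
		asm.JEq.Imm(asm.R1, 0, "exit"),
		asm.LoadImm(asm.R0, 2, asm.DWord),
		asm.Return().WithSymbol("exit"),
	}

	fmt.Println(insns[1].Reference()) // "exit"
}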
diff --git a/vendor/github.com/cilium/ebpf/asm/jump_string.go b/vendor/github.com/cilium/ebpf/asm/jump_string.go
new file mode 100644
index 000000000..85a4aaffa
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/jump_string.go
@@ -0,0 +1,53 @@
+// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidJumpOp-255]
+ _ = x[Ja-0]
+ _ = x[JEq-16]
+ _ = x[JGT-32]
+ _ = x[JGE-48]
+ _ = x[JSet-64]
+ _ = x[JNE-80]
+ _ = x[JSGT-96]
+ _ = x[JSGE-112]
+ _ = x[Call-128]
+ _ = x[Exit-144]
+ _ = x[JLT-160]
+ _ = x[JLE-176]
+ _ = x[JSLT-192]
+ _ = x[JSLE-208]
+}
+
+const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp"
+
+var _JumpOp_map = map[JumpOp]string{
+ 0: _JumpOp_name[0:2],
+ 16: _JumpOp_name[2:5],
+ 32: _JumpOp_name[5:8],
+ 48: _JumpOp_name[8:11],
+ 64: _JumpOp_name[11:15],
+ 80: _JumpOp_name[15:18],
+ 96: _JumpOp_name[18:22],
+ 112: _JumpOp_name[22:26],
+ 128: _JumpOp_name[26:30],
+ 144: _JumpOp_name[30:34],
+ 160: _JumpOp_name[34:37],
+ 176: _JumpOp_name[37:40],
+ 192: _JumpOp_name[40:44],
+ 208: _JumpOp_name[44:48],
+ 255: _JumpOp_name[48:61],
+}
+
+func (i JumpOp) String() string {
+ if str, ok := _JumpOp_map[i]; ok {
+ return str
+ }
+ return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go
new file mode 100644
index 000000000..cdb5c5cfa
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/load_store.go
@@ -0,0 +1,225 @@
+package asm
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output load_store_string.go -type=Mode,Size
+
+// Mode for load and store operations
+//
+// msb lsb
+// +---+--+---+
+// |MDE|sz|cls|
+// +---+--+---+
+type Mode uint8
+
+const modeMask OpCode = 0xe0
+
+const (
+ // InvalidMode is returned by getters when invoked
+ // on non load / store OpCodes
+ InvalidMode Mode = 0xff
+ // ImmMode - immediate value
+ ImmMode Mode = 0x00
+ // AbsMode - immediate value + offset
+ AbsMode Mode = 0x20
+ // IndMode - indirect (imm+src)
+ IndMode Mode = 0x40
+ // MemMode - load from memory
+ MemMode Mode = 0x60
+ // MemSXMode - load from memory, sign extension
+ MemSXMode Mode = 0x80
+ // XAddMode - add atomically across processors.
+ XAddMode Mode = 0xc0
+)
+
+// Size of load and store operations
+//
+// msb lsb
+// +---+--+---+
+// |mde|SZ|cls|
+// +---+--+---+
+type Size uint8
+
+const sizeMask OpCode = 0x18
+
+const (
+ // InvalidSize is returned by getters when invoked
+ // on non load / store OpCodes
+ InvalidSize Size = 0xff
+ // DWord - double word; 64 bits
+ DWord Size = 0x18
+ // Word - word; 32 bits
+ Word Size = 0x00
+ // Half - half-word; 16 bits
+ Half Size = 0x08
+ // Byte - byte; 8 bits
+ Byte Size = 0x10
+)
+
+// Sizeof returns the size in bytes.
+func (s Size) Sizeof() int {
+ switch s {
+ case DWord:
+ return 8
+ case Word:
+ return 4
+ case Half:
+ return 2
+ case Byte:
+ return 1
+ default:
+ return -1
+ }
+}
+
+// LoadMemOp returns the OpCode to load a value of given size from memory.
+func LoadMemOp(size Size) OpCode {
+ return OpCode(LdXClass).SetMode(MemMode).SetSize(size)
+}
+
+// LoadMemSXOp returns the OpCode to load a value of given size from memory sign extended.
+func LoadMemSXOp(size Size) OpCode {
+ return OpCode(LdXClass).SetMode(MemSXMode).SetSize(size)
+}
+
+// LoadMem emits `dst = *(size *)(src + offset)`.
+func LoadMem(dst, src Register, offset int16, size Size) Instruction {
+ return Instruction{
+ OpCode: LoadMemOp(size),
+ Dst: dst,
+ Src: src,
+ Offset: offset,
+ }
+}
+
+// LoadMemSX emits `dst = *(size *)(src + offset)` but sign extends dst.
+func LoadMemSX(dst, src Register, offset int16, size Size) Instruction {
+ if size == DWord {
+ return Instruction{OpCode: InvalidOpCode}
+ }
+
+ return Instruction{
+ OpCode: LoadMemSXOp(size),
+ Dst: dst,
+ Src: src,
+ Offset: offset,
+ }
+}
+
+// LoadImmOp returns the OpCode to load an immediate of given size.
+//
+// As of kernel 4.20, only DWord size is accepted.
+func LoadImmOp(size Size) OpCode {
+ return OpCode(LdClass).SetMode(ImmMode).SetSize(size)
+}
+
+// LoadImm emits `dst = (size)value`.
+//
+// As of kernel 4.20, only DWord size is accepted.
+func LoadImm(dst Register, value int64, size Size) Instruction {
+ return Instruction{
+ OpCode: LoadImmOp(size),
+ Dst: dst,
+ Constant: value,
+ }
+}
+
+// LoadMapPtr stores a pointer to a map in dst.
+func LoadMapPtr(dst Register, fd int) Instruction {
+ if fd < 0 {
+ return Instruction{OpCode: InvalidOpCode}
+ }
+
+ return Instruction{
+ OpCode: LoadImmOp(DWord),
+ Dst: dst,
+ Src: PseudoMapFD,
+ Constant: int64(uint32(fd)),
+ }
+}
+
+// LoadMapValue stores a pointer to the value at a certain offset of a map.
+func LoadMapValue(dst Register, fd int, offset uint32) Instruction {
+ if fd < 0 {
+ return Instruction{OpCode: InvalidOpCode}
+ }
+
+ fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd))
+ return Instruction{
+ OpCode: LoadImmOp(DWord),
+ Dst: dst,
+ Src: PseudoMapValue,
+ Constant: int64(fdAndOffset),
+ }
+}
+
+// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff.
+func LoadIndOp(size Size) OpCode {
+ return OpCode(LdClass).SetMode(IndMode).SetSize(size)
+}
+
+// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`.
+func LoadInd(dst, src Register, offset int32, size Size) Instruction {
+ return Instruction{
+ OpCode: LoadIndOp(size),
+ Dst: dst,
+ Src: src,
+ Constant: int64(offset),
+ }
+}
+
+// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff.
+func LoadAbsOp(size Size) OpCode {
+ return OpCode(LdClass).SetMode(AbsMode).SetSize(size)
+}
+
+// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`.
+func LoadAbs(offset int32, size Size) Instruction {
+ return Instruction{
+ OpCode: LoadAbsOp(size),
+ Dst: R0,
+ Constant: int64(offset),
+ }
+}
+
+// StoreMemOp returns the OpCode for storing a register of given size in memory.
+func StoreMemOp(size Size) OpCode {
+ return OpCode(StXClass).SetMode(MemMode).SetSize(size)
+}
+
+// StoreMem emits `*(size *)(dst + offset) = src`
+func StoreMem(dst Register, offset int16, src Register, size Size) Instruction {
+ return Instruction{
+ OpCode: StoreMemOp(size),
+ Dst: dst,
+ Src: src,
+ Offset: offset,
+ }
+}
+
+// StoreImmOp returns the OpCode for storing an immediate of given size in memory.
+func StoreImmOp(size Size) OpCode {
+ return OpCode(StClass).SetMode(MemMode).SetSize(size)
+}
+
+// StoreImm emits `*(size *)(dst + offset) = value`.
+func StoreImm(dst Register, offset int16, value int64, size Size) Instruction {
+ return Instruction{
+ OpCode: StoreImmOp(size),
+ Dst: dst,
+ Offset: offset,
+ Constant: value,
+ }
+}
+
+// StoreXAddOp returns the OpCode to atomically add a register to a value in memory.
+func StoreXAddOp(size Size) OpCode {
+ return OpCode(StXClass).SetMode(XAddMode).SetSize(size)
+}
+
+// StoreXAdd atomically adds src to *dst.
+func StoreXAdd(dst, src Register, size Size) Instruction {
+ return Instruction{
+ OpCode: StoreXAddOp(size),
+ Dst: dst,
+ Src: src,
+ }
+}
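A brief sketch of the store/load constructors above around the read-only frame pointer: spill an immediate to the stack, then load it back. Only the asm import is assumed.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		// *(u64 *)(rfp - 8) = 42
		asm.StoreImm(asm.RFP, -8, 42, asm.DWord),
		// r0 = *(u64 *)(rfp - 8)
		asm.LoadMem(asm.R0, asm.RFP, -8, asm.DWord),
		asm.Return(),
	}
	fmt.Printf("%v\n", insns)
}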
diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go
new file mode 100644
index 000000000..c48080327
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/load_store_string.go
@@ -0,0 +1,84 @@
+// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidMode-255]
+ _ = x[ImmMode-0]
+ _ = x[AbsMode-32]
+ _ = x[IndMode-64]
+ _ = x[MemMode-96]
+ _ = x[MemSXMode-128]
+ _ = x[XAddMode-192]
+}
+
+const (
+ _Mode_name_0 = "ImmMode"
+ _Mode_name_1 = "AbsMode"
+ _Mode_name_2 = "IndMode"
+ _Mode_name_3 = "MemMode"
+ _Mode_name_4 = "MemSXMode"
+ _Mode_name_5 = "XAddMode"
+ _Mode_name_6 = "InvalidMode"
+)
+
+func (i Mode) String() string {
+ switch {
+ case i == 0:
+ return _Mode_name_0
+ case i == 32:
+ return _Mode_name_1
+ case i == 64:
+ return _Mode_name_2
+ case i == 96:
+ return _Mode_name_3
+ case i == 128:
+ return _Mode_name_4
+ case i == 192:
+ return _Mode_name_5
+ case i == 255:
+ return _Mode_name_6
+ default:
+ return "Mode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidSize-255]
+ _ = x[DWord-24]
+ _ = x[Word-0]
+ _ = x[Half-8]
+ _ = x[Byte-16]
+}
+
+const (
+ _Size_name_0 = "Word"
+ _Size_name_1 = "Half"
+ _Size_name_2 = "Byte"
+ _Size_name_3 = "DWord"
+ _Size_name_4 = "InvalidSize"
+)
+
+func (i Size) String() string {
+ switch {
+ case i == 0:
+ return _Size_name_0
+ case i == 8:
+ return _Size_name_1
+ case i == 16:
+ return _Size_name_2
+ case i == 24:
+ return _Size_name_3
+ case i == 255:
+ return _Size_name_4
+ default:
+ return "Size(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/metadata.go b/vendor/github.com/cilium/ebpf/asm/metadata.go
new file mode 100644
index 000000000..dd368a936
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/metadata.go
@@ -0,0 +1,80 @@
+package asm
+
+// Metadata contains metadata about an instruction.
+type Metadata struct {
+ head *metaElement
+}
+
+type metaElement struct {
+ next *metaElement
+ key, value interface{}
+}
+
+// Find the element containing key.
+//
+// Returns nil if there is no such element.
+func (m *Metadata) find(key interface{}) *metaElement {
+ for e := m.head; e != nil; e = e.next {
+ if e.key == key {
+ return e
+ }
+ }
+ return nil
+}
+
+// Remove an element from the linked list.
+//
+// Copies as many elements of the list as necessary to remove r, but doesn't
+// perform a full copy.
+func (m *Metadata) remove(r *metaElement) {
+ current := &m.head
+ for e := m.head; e != nil; e = e.next {
+ if e == r {
+ // We've found the element we want to remove.
+ *current = e.next
+
+ // No need to copy the tail.
+ return
+ }
+
+ // There is another element in front of the one we want to remove.
+ // We have to copy it to be able to change metaElement.next.
+ cpy := &metaElement{key: e.key, value: e.value}
+ *current = cpy
+ current = &cpy.next
+ }
+}
+
+// Set a key to a value.
+//
+// If value is nil, the key is removed. Avoids modifying old metadata by
+// copying if necessary.
+func (m *Metadata) Set(key, value interface{}) {
+ if e := m.find(key); e != nil {
+ if e.value == value {
+ // Key is present and the value is the same. Nothing to do.
+ return
+ }
+
+ // Key is present with a different value. Create a copy of the list
+ // which doesn't have the element in it.
+ m.remove(e)
+ }
+
+ // m.head is now a linked list that doesn't contain key.
+ if value == nil {
+ return
+ }
+
+ m.head = &metaElement{key: key, value: value, next: m.head}
+}
+
+// Get the value of a key.
+//
+// Returns nil if no value with the given key is present.
+func (m *Metadata) Get(key interface{}) interface{} {
+ if e := m.find(key); e != nil {
+ return e.value
+ }
+ return nil
+}
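A short sketch of the copy-on-write behaviour described above, using the exported Instruction helpers that store their state in Metadata; the symbol names are illustrative.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	old := asm.LoadImm(asm.R0, 0, asm.DWord).WithSymbol("start")

	// Replace the instruction but keep its Metadata (symbol, source info, ...).
	repl := asm.LoadImm(asm.R0, 1, asm.DWord).WithMetadata(old.Metadata)
	fmt.Println(repl.Symbol()) // "start"

	// Setting the same key to a new value copies on write: old keeps its symbol.
	renamed := repl.WithSymbol("renamed")
	fmt.Println(old.Symbol(), renamed.Symbol()) // "start renamed"
}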
diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go
new file mode 100644
index 000000000..1dfd0b171
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/opcode.go
@@ -0,0 +1,303 @@
+package asm
+
+import (
+ "fmt"
+ "strings"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output opcode_string.go -type=Class
+
+// Class of operations
+//
+// msb lsb
+// +---+--+---+
+// | ?? |CLS|
+// +---+--+---+
+type Class uint8
+
+const classMask OpCode = 0x07
+
+const (
+ // LdClass loads immediate values into registers.
+ // Also used for non-standard load operations from cBPF.
+ LdClass Class = 0x00
+ // LdXClass loads memory into registers.
+ LdXClass Class = 0x01
+ // StClass stores immediate values to memory.
+ StClass Class = 0x02
+ // StXClass stores registers to memory.
+ StXClass Class = 0x03
+ // ALUClass describes arithmetic operators.
+ ALUClass Class = 0x04
+ // JumpClass describes jump operators.
+ JumpClass Class = 0x05
+ // Jump32Class describes jump operators with 32-bit comparisons.
+ // Requires kernel 5.1.
+ Jump32Class Class = 0x06
+ // ALU64Class describes arithmetic operators in 64-bit mode.
+ ALU64Class Class = 0x07
+)
+
+// IsLoad checks if this is either LdClass or LdXClass.
+func (cls Class) IsLoad() bool {
+ return cls == LdClass || cls == LdXClass
+}
+
+// IsStore checks if this is either StClass or StXClass.
+func (cls Class) IsStore() bool {
+ return cls == StClass || cls == StXClass
+}
+
+func (cls Class) isLoadOrStore() bool {
+ return cls.IsLoad() || cls.IsStore()
+}
+
+// IsALU checks if this is either ALUClass or ALU64Class.
+func (cls Class) IsALU() bool {
+ return cls == ALUClass || cls == ALU64Class
+}
+
+// IsJump checks if this is either JumpClass or Jump32Class.
+func (cls Class) IsJump() bool {
+ return cls == JumpClass || cls == Jump32Class
+}
+
+func (cls Class) isJumpOrALU() bool {
+ return cls.IsJump() || cls.IsALU()
+}
+
+// OpCode represents a single operation.
+// It is not a 1:1 mapping to real eBPF opcodes.
+//
+// The encoding varies based on a 3-bit Class:
+//
+// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+// ??? | CLS
+//
+// For ALUClass and ALU64Class:
+//
+// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+// OPC |S| CLS
+//
+// For LdClass, LdXClass, StClass and StXClass:
+//
+// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+// 0 | MDE |SIZ| CLS
+//
+// For JumpClass, Jump32Class:
+//
+// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+// 0 | OPC |S| CLS
+type OpCode uint16
+
+// InvalidOpCode is returned by setters on OpCode
+const InvalidOpCode OpCode = 0xffff
+
+// bpfOpCode returns the actual BPF opcode.
+func (op OpCode) bpfOpCode() (byte, error) {
+ const opCodeMask = 0xff
+
+ if !valid(op, opCodeMask) {
+ return 0, fmt.Errorf("invalid opcode %x", op)
+ }
+
+ return byte(op & opCodeMask), nil
+}
+
+// rawInstructions returns the number of BPF instructions required
+// to encode this opcode.
+func (op OpCode) rawInstructions() int {
+ if op.IsDWordLoad() {
+ return 2
+ }
+ return 1
+}
+
+func (op OpCode) IsDWordLoad() bool {
+ return op == LoadImmOp(DWord)
+}
+
+// Class returns the class of operation.
+func (op OpCode) Class() Class {
+ return Class(op & classMask)
+}
+
+// Mode returns the mode for load and store operations.
+func (op OpCode) Mode() Mode {
+ if !op.Class().isLoadOrStore() {
+ return InvalidMode
+ }
+ return Mode(op & modeMask)
+}
+
+// Size returns the size for load and store operations.
+func (op OpCode) Size() Size {
+ if !op.Class().isLoadOrStore() {
+ return InvalidSize
+ }
+ return Size(op & sizeMask)
+}
+
+// Source returns the source for branch and ALU operations.
+func (op OpCode) Source() Source {
+ if !op.Class().isJumpOrALU() || op.ALUOp() == Swap {
+ return InvalidSource
+ }
+ return Source(op & sourceMask)
+}
+
+// ALUOp returns the ALUOp.
+func (op OpCode) ALUOp() ALUOp {
+ if !op.Class().IsALU() {
+ return InvalidALUOp
+ }
+ return ALUOp(op & aluMask)
+}
+
+// Endianness returns the Endianness for a byte swap instruction.
+func (op OpCode) Endianness() Endianness {
+ if op.ALUOp() != Swap {
+ return InvalidEndian
+ }
+ return Endianness(op & endianMask)
+}
+
+// JumpOp returns the JumpOp.
+// Returns InvalidJumpOp if it doesn't encode a jump.
+func (op OpCode) JumpOp() JumpOp {
+ if !op.Class().IsJump() {
+ return InvalidJumpOp
+ }
+
+ jumpOp := JumpOp(op & jumpMask)
+
+ // Some JumpOps are only supported by JumpClass, not Jump32Class.
+ if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call) {
+ return InvalidJumpOp
+ }
+
+ return jumpOp
+}
+
+// SetMode sets the mode on load and store operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetMode(mode Mode) OpCode {
+ if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) {
+ return InvalidOpCode
+ }
+ return (op & ^modeMask) | OpCode(mode)
+}
+
+// SetSize sets the size on load and store operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetSize(size Size) OpCode {
+ if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) {
+ return InvalidOpCode
+ }
+ return (op & ^sizeMask) | OpCode(size)
+}
+
+// SetSource sets the source on jump and ALU operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetSource(source Source) OpCode {
+ if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) {
+ return InvalidOpCode
+ }
+ return (op & ^sourceMask) | OpCode(source)
+}
+
+// SetALUOp sets the ALUOp on ALU operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetALUOp(alu ALUOp) OpCode {
+ if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) {
+ return InvalidOpCode
+ }
+ return (op & ^aluMask) | OpCode(alu)
+}
+
+// SetJumpOp sets the JumpOp on jump operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetJumpOp(jump JumpOp) OpCode {
+ if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) {
+ return InvalidOpCode
+ }
+
+ newOp := (op & ^jumpMask) | OpCode(jump)
+
+ // Check newOp is legal.
+ if newOp.JumpOp() == InvalidJumpOp {
+ return InvalidOpCode
+ }
+
+ return newOp
+}
+
+func (op OpCode) String() string {
+ var f strings.Builder
+
+ switch class := op.Class(); {
+ case class.isLoadOrStore():
+ f.WriteString(strings.TrimSuffix(class.String(), "Class"))
+
+ mode := op.Mode()
+ f.WriteString(strings.TrimSuffix(mode.String(), "Mode"))
+
+ switch op.Size() {
+ case DWord:
+ f.WriteString("DW")
+ case Word:
+ f.WriteString("W")
+ case Half:
+ f.WriteString("H")
+ case Byte:
+ f.WriteString("B")
+ }
+
+ case class.IsALU():
+ if op.ALUOp() == Swap && op.Class() == ALU64Class {
+ // B to make BSwap, unconditional byte swap
+ f.WriteString("B")
+ }
+
+ f.WriteString(op.ALUOp().String())
+
+ if op.ALUOp() == Swap {
+ if op.Class() == ALUClass {
+ // Width for Endian is controlled by Constant
+ f.WriteString(op.Endianness().String())
+ }
+ } else {
+ f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
+
+ if class == ALUClass {
+ f.WriteString("32")
+ }
+ }
+
+ case class.IsJump():
+ f.WriteString(op.JumpOp().String())
+
+ if class == Jump32Class {
+ f.WriteString("32")
+ }
+
+ if jop := op.JumpOp(); jop != Exit && jop != Call && jop != Ja {
+ f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
+ }
+
+ default:
+ fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
+ }
+
+ return f.String()
+}
+
+// valid returns true if all bits in value are covered by mask.
+func valid(value, mask OpCode) bool {
+ return value & ^mask == 0
+}
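A brief sketch of composing an OpCode with the setters above; the comparison with LoadMemOp shows the two spellings produce the same encoding.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Build a "load word from memory into a register" opcode with the setters.
	op := asm.OpCode(asm.LdXClass).SetMode(asm.MemMode).SetSize(asm.Word)

	fmt.Println(op == asm.LoadMemOp(asm.Word)) // true
	fmt.Println(op.Class(), op.Mode(), op.Size())

	// Setters return InvalidOpCode when applied to the wrong class.
	fmt.Println(asm.OpCode(asm.JumpClass).SetSize(asm.Word) == asm.InvalidOpCode) // true
}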
diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go
new file mode 100644
index 000000000..58bc3e7e7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[LdClass-0]
+ _ = x[LdXClass-1]
+ _ = x[StClass-2]
+ _ = x[StXClass-3]
+ _ = x[ALUClass-4]
+ _ = x[JumpClass-5]
+ _ = x[Jump32Class-6]
+ _ = x[ALU64Class-7]
+}
+
+const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class"
+
+var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68}
+
+func (i Class) String() string {
+ if i >= Class(len(_Class_index)-1) {
+ return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Class_name[_Class_index[i]:_Class_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go
new file mode 100644
index 000000000..457a3b8a8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/register.go
@@ -0,0 +1,51 @@
+package asm
+
+import (
+ "fmt"
+)
+
+// Register is the source or destination of most operations.
+type Register uint8
+
+// R0 contains return values.
+const R0 Register = 0
+
+// Registers for function arguments.
+const (
+ R1 Register = R0 + 1 + iota
+ R2
+ R3
+ R4
+ R5
+)
+
+// Callee saved registers preserved by function calls.
+const (
+ R6 Register = R5 + 1 + iota
+ R7
+ R8
+ R9
+)
+
+// Read-only frame pointer to access stack.
+const (
+ R10 Register = R9 + 1
+ RFP = R10
+)
+
+// Pseudo registers used by 64bit loads and jumps
+const (
+ PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD
+ PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
+ PseudoCall = R1 // BPF_PSEUDO_CALL
+ PseudoFunc = R4 // BPF_PSEUDO_FUNC
+ PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL
+)
+
+func (r Register) String() string {
+ v := uint8(r)
+ if v == 10 {
+ return "rfp"
+ }
+ return fmt.Sprintf("r%d", v)
+}
diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go
new file mode 100644
index 000000000..bece896bb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/attachtype_string.go
@@ -0,0 +1,79 @@
+// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT.
+
+package ebpf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[AttachNone-0]
+ _ = x[AttachCGroupInetIngress-0]
+ _ = x[AttachCGroupInetEgress-1]
+ _ = x[AttachCGroupInetSockCreate-2]
+ _ = x[AttachCGroupSockOps-3]
+ _ = x[AttachSkSKBStreamParser-4]
+ _ = x[AttachSkSKBStreamVerdict-5]
+ _ = x[AttachCGroupDevice-6]
+ _ = x[AttachSkMsgVerdict-7]
+ _ = x[AttachCGroupInet4Bind-8]
+ _ = x[AttachCGroupInet6Bind-9]
+ _ = x[AttachCGroupInet4Connect-10]
+ _ = x[AttachCGroupInet6Connect-11]
+ _ = x[AttachCGroupInet4PostBind-12]
+ _ = x[AttachCGroupInet6PostBind-13]
+ _ = x[AttachCGroupUDP4Sendmsg-14]
+ _ = x[AttachCGroupUDP6Sendmsg-15]
+ _ = x[AttachLircMode2-16]
+ _ = x[AttachFlowDissector-17]
+ _ = x[AttachCGroupSysctl-18]
+ _ = x[AttachCGroupUDP4Recvmsg-19]
+ _ = x[AttachCGroupUDP6Recvmsg-20]
+ _ = x[AttachCGroupGetsockopt-21]
+ _ = x[AttachCGroupSetsockopt-22]
+ _ = x[AttachTraceRawTp-23]
+ _ = x[AttachTraceFEntry-24]
+ _ = x[AttachTraceFExit-25]
+ _ = x[AttachModifyReturn-26]
+ _ = x[AttachLSMMac-27]
+ _ = x[AttachTraceIter-28]
+ _ = x[AttachCgroupInet4GetPeername-29]
+ _ = x[AttachCgroupInet6GetPeername-30]
+ _ = x[AttachCgroupInet4GetSockname-31]
+ _ = x[AttachCgroupInet6GetSockname-32]
+ _ = x[AttachXDPDevMap-33]
+ _ = x[AttachCgroupInetSockRelease-34]
+ _ = x[AttachXDPCPUMap-35]
+ _ = x[AttachSkLookup-36]
+ _ = x[AttachXDP-37]
+ _ = x[AttachSkSKBVerdict-38]
+ _ = x[AttachSkReuseportSelect-39]
+ _ = x[AttachSkReuseportSelectOrMigrate-40]
+ _ = x[AttachPerfEvent-41]
+ _ = x[AttachTraceKprobeMulti-42]
+ _ = x[AttachLSMCgroup-43]
+ _ = x[AttachStructOps-44]
+ _ = x[AttachNetfilter-45]
+ _ = x[AttachTCXIngress-46]
+ _ = x[AttachTCXEgress-47]
+ _ = x[AttachTraceUprobeMulti-48]
+ _ = x[AttachCgroupUnixConnect-49]
+ _ = x[AttachCgroupUnixSendmsg-50]
+ _ = x[AttachCgroupUnixRecvmsg-51]
+ _ = x[AttachCgroupUnixGetpeername-52]
+ _ = x[AttachCgroupUnixGetsockname-53]
+ _ = x[AttachNetkitPrimary-54]
+ _ = x[AttachNetkitPeer-55]
+}
+
+const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeer"
+
+var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804}
+
+func (i AttachType) String() string {
+ if i >= AttachType(len(_AttachType_index)-1) {
+ return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go
new file mode 100644
index 000000000..671f680b2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf.go
@@ -0,0 +1,699 @@
+package btf
+
+import (
+ "bufio"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "reflect"
+ "sync"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+const btfMagic = 0xeB9F
+
+// Errors returned by BTF functions.
+var (
+ ErrNotSupported = internal.ErrNotSupported
+ ErrNotFound = errors.New("not found")
+ ErrNoExtendedInfo = errors.New("no extended info")
+ ErrMultipleMatches = errors.New("multiple matching types")
+)
+
+// ID represents the unique ID of a BTF object.
+type ID = sys.BTFID
+
+// immutableTypes is a set of types which mustn't be changed.
+type immutableTypes struct {
+ // All types contained by the spec, not including types from the base in
+ // case the spec was parsed from split BTF.
+ types []Type
+
+ // Type IDs indexed by type.
+ typeIDs map[Type]TypeID
+
+ // The ID of the first type in types.
+ firstTypeID TypeID
+
+ // Types indexed by essential name.
+ // Includes all struct flavors and types with the same name.
+ namedTypes map[essentialName][]TypeID
+
+ // Byte order of the types. This affects things like struct member order
+ // when using bitfields.
+ byteOrder binary.ByteOrder
+}
+
+func (s *immutableTypes) typeByID(id TypeID) (Type, bool) {
+ if id < s.firstTypeID {
+ return nil, false
+ }
+
+ index := int(id - s.firstTypeID)
+ if index >= len(s.types) {
+ return nil, false
+ }
+
+ return s.types[index], true
+}
+
+// mutableTypes is a set of types which may be changed.
+type mutableTypes struct {
+ imm immutableTypes
+ mu sync.RWMutex // protects copies below
+ copies map[Type]Type // map[orig]copy
+ copiedTypeIDs map[Type]TypeID // map[copy]origID
+}
+
+// add a type to the set of mutable types.
+//
+// Copies type and all of its children once. Repeated calls with the same type
+// do not copy again.
+func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type {
+ mt.mu.RLock()
+ cpy, ok := mt.copies[typ]
+ mt.mu.RUnlock()
+
+ if ok {
+ // Fast path: the type has been copied before.
+ return cpy
+ }
+
+ // modifyGraphPreorder copies the type graph node by node, so we can't drop
+ // the lock in between.
+ mt.mu.Lock()
+ defer mt.mu.Unlock()
+
+ return copyType(typ, typeIDs, mt.copies, mt.copiedTypeIDs)
+}
+
+// copy a set of mutable types.
+func (mt *mutableTypes) copy() *mutableTypes {
+ if mt == nil {
+ return nil
+ }
+
+ mtCopy := &mutableTypes{
+ mt.imm,
+ sync.RWMutex{},
+ make(map[Type]Type, len(mt.copies)),
+ make(map[Type]TypeID, len(mt.copiedTypeIDs)),
+ }
+
+ // Prevent concurrent modification of mt.copiedTypeIDs.
+ mt.mu.RLock()
+ defer mt.mu.RUnlock()
+
+ copiesOfCopies := make(map[Type]Type, len(mt.copies))
+ for orig, copy := range mt.copies {
+ // NB: We make a copy of copy, not orig, so that changes to mutable types
+ // are preserved.
+ copyOfCopy := copyType(copy, mt.copiedTypeIDs, copiesOfCopies, mtCopy.copiedTypeIDs)
+ mtCopy.copies[orig] = copyOfCopy
+ }
+
+ return mtCopy
+}
+
+func (mt *mutableTypes) typeID(typ Type) (TypeID, error) {
+ if _, ok := typ.(*Void); ok {
+ // Equality is weird for void, since it is a zero sized type.
+ return 0, nil
+ }
+
+ mt.mu.RLock()
+ defer mt.mu.RUnlock()
+
+ id, ok := mt.copiedTypeIDs[typ]
+ if !ok {
+ return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+ }
+
+ return id, nil
+}
+
+func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) {
+ immT, ok := mt.imm.typeByID(id)
+ if !ok {
+ return nil, false
+ }
+
+ return mt.add(immT, mt.imm.typeIDs), true
+}
+
+func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) {
+ immTypes := mt.imm.namedTypes[newEssentialName(name)]
+ if len(immTypes) == 0 {
+ return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+ }
+
+ // Return a copy to prevent changes to namedTypes.
+ result := make([]Type, 0, len(immTypes))
+ for _, id := range immTypes {
+ immT, ok := mt.imm.typeByID(id)
+ if !ok {
+ return nil, fmt.Errorf("no type with ID %d", id)
+ }
+
+ // Match against the full name, not just the essential one
+ // in case the type being looked up is a struct flavor.
+ if immT.TypeName() == name {
+ result = append(result, mt.add(immT, mt.imm.typeIDs))
+ }
+ }
+ return result, nil
+}
+
+// Spec allows querying a set of Types and loading the set into the
+// kernel.
+type Spec struct {
+ *mutableTypes
+
+ // String table from ELF.
+ strings *stringTable
+}
+
+// LoadSpec opens file and calls LoadSpecFromReader on it.
+func LoadSpec(file string) (*Spec, error) {
+ fh, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ return LoadSpecFromReader(fh)
+}
+
+// LoadSpecFromReader reads from an ELF or a raw BTF blob.
+//
+// Returns ErrNotFound if reading from an ELF which contains no BTF.
+func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ if bo := guessRawBTFByteOrder(rd); bo != nil {
+ return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil)
+ }
+
+ return nil, err
+ }
+
+ return loadSpecFromELF(file)
+}
+
+// LoadSpecAndExtInfosFromReader reads from an ELF.
+//
+// ExtInfos may be nil if the ELF doesn't contain section metadata.
+// Returns ErrNotFound if the ELF contains no BTF.
+func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ spec, err := loadSpecFromELF(file)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ extInfos, err := loadExtInfosFromELF(file, spec)
+ if err != nil && !errors.Is(err, ErrNotFound) {
+ return nil, nil, err
+ }
+
+ return spec, extInfos, nil
+}
+
+// symbolOffsets extracts all symbols offsets from an ELF and indexes them by
+// section and variable name.
+//
+// References to variables in BTF data sections carry unsigned 32-bit offsets.
+// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
+// beyond this range. Since these symbols cannot be described by BTF info,
+// ignore them here.
+func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) {
+ symbols, err := file.Symbols()
+ if err != nil {
+ return nil, fmt.Errorf("can't read symbols: %v", err)
+ }
+
+ offsets := make(map[symbol]uint32)
+ for _, sym := range symbols {
+ if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
+ // Ignore things like SHN_ABS
+ continue
+ }
+
+ if sym.Value > math.MaxUint32 {
+ // VarSecinfo offset is u32, cannot reference symbols in higher regions.
+ continue
+ }
+
+ if int(sym.Section) >= len(file.Sections) {
+ return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section)
+ }
+
+ secName := file.Sections[sym.Section].Name
+ offsets[symbol{secName, sym.Name}] = uint32(sym.Value)
+ }
+
+ return offsets, nil
+}
+
+func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
+ var (
+ btfSection *elf.Section
+ sectionSizes = make(map[string]uint32)
+ )
+
+ for _, sec := range file.Sections {
+ switch sec.Name {
+ case ".BTF":
+ btfSection = sec
+ default:
+ if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
+ break
+ }
+
+ if sec.Size > math.MaxUint32 {
+ return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
+ }
+
+ sectionSizes[sec.Name] = uint32(sec.Size)
+ }
+ }
+
+ if btfSection == nil {
+ return nil, fmt.Errorf("btf: %w", ErrNotFound)
+ }
+
+ offsets, err := symbolOffsets(file)
+ if err != nil {
+ return nil, err
+ }
+
+ if btfSection.ReaderAt == nil {
+ return nil, fmt.Errorf("compressed BTF is not supported")
+ }
+
+ spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fixupDatasec(spec.imm.types, sectionSizes, offsets)
+ if err != nil {
+ return nil, err
+ }
+
+ return spec, nil
+}
+
+func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) {
+ var (
+ baseStrings *stringTable
+ firstTypeID TypeID
+ err error
+ )
+
+ if base != nil {
+ if base.imm.firstTypeID != 0 {
+ return nil, fmt.Errorf("can't use split BTF as base")
+ }
+
+ baseStrings = base.strings
+
+ firstTypeID, err = base.nextTypeID()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ types, rawStrings, err := parseBTF(btf, bo, baseStrings, base)
+ if err != nil {
+ return nil, err
+ }
+
+ typeIDs, typesByName := indexTypes(types, firstTypeID)
+
+ return &Spec{
+ &mutableTypes{
+ immutableTypes{
+ types,
+ typeIDs,
+ firstTypeID,
+ typesByName,
+ bo,
+ },
+ sync.RWMutex{},
+ make(map[Type]Type),
+ make(map[Type]TypeID),
+ },
+ rawStrings,
+ }, nil
+}
+
+func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) {
+ namedTypes := 0
+ for _, typ := range types {
+ if typ.TypeName() != "" {
+ // Do a pre-pass to figure out how big types by name has to be.
+ // Most types have unique names, so it's OK to ignore essentialName
+ // here.
+ namedTypes++
+ }
+ }
+
+ typeIDs := make(map[Type]TypeID, len(types))
+ typesByName := make(map[essentialName][]TypeID, namedTypes)
+
+ for i, typ := range types {
+ id := firstTypeID + TypeID(i)
+ typeIDs[typ] = id
+
+ if name := newEssentialName(typ.TypeName()); name != "" {
+ typesByName[name] = append(typesByName[name], id)
+ }
+ }
+
+ return typeIDs, typesByName
+}
+
+func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
+ buf := new(bufio.Reader)
+ for _, bo := range []binary.ByteOrder{
+ binary.LittleEndian,
+ binary.BigEndian,
+ } {
+ buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64))
+ if _, err := parseBTFHeader(buf, bo); err == nil {
+ return bo
+ }
+ }
+
+ return nil
+}
+
+// parseBTF reads a .BTF section into memory and parses it into a list of
+// raw types and a string table.
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable, base *Spec) ([]Type, *stringTable, error) {
+ buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64)
+ header, err := parseBTFHeader(buf, bo)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parsing .BTF header: %v", err)
+ }
+
+ rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)),
+ baseStrings)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't read type names: %w", err)
+ }
+
+ buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen)))
+ types, err := readAndInflateTypes(buf, bo, header.TypeLen, rawStrings, base)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return types, rawStrings, nil
+}
+
+type symbol struct {
+ section string
+ name string
+}
+
+// fixupDatasec attempts to patch up missing info in Datasecs and its members by
+// supplementing them with information from the ELF headers and symbol table.
+func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error {
+ for _, typ := range types {
+ ds, ok := typ.(*Datasec)
+ if !ok {
+ continue
+ }
+
+ name := ds.Name
+
+ // Some Datasecs are virtual and don't have corresponding ELF sections.
+ switch name {
+ case ".ksyms":
+ // .ksyms describes forward declarations of kfunc signatures.
+ // Nothing to fix up, all sizes and offsets are 0.
+ for _, vsi := range ds.Vars {
+ _, ok := vsi.Type.(*Func)
+ if !ok {
+ // Only Funcs are supported in the .ksyms Datasec.
+ return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported)
+ }
+ }
+
+ continue
+ case ".kconfig":
+ // .kconfig has a size of 0 and has all members' offsets set to 0.
+ // Fix up all offsets and set the Datasec's size.
+ if err := fixupDatasecLayout(ds); err != nil {
+ return err
+ }
+
+ // Fix up extern to global linkage to avoid a BTF verifier error.
+ for _, vsi := range ds.Vars {
+ vsi.Type.(*Var).Linkage = GlobalVar
+ }
+
+ continue
+ }
+
+ if ds.Size != 0 {
+ continue
+ }
+
+ ds.Size, ok = sectionSizes[name]
+ if !ok {
+ return fmt.Errorf("data section %s: missing size", name)
+ }
+
+ for i := range ds.Vars {
+ symName := ds.Vars[i].Type.TypeName()
+ ds.Vars[i].Offset, ok = offsets[symbol{name, symName}]
+ if !ok {
+ return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName)
+ }
+ }
+ }
+
+ return nil
+}
+
+// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and
+// alignment. It also calculates and sets ds.Size.
+func fixupDatasecLayout(ds *Datasec) error {
+ var off uint32
+
+ for i, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type)
+ }
+
+ size, err := Sizeof(v.Type)
+ if err != nil {
+ return fmt.Errorf("variable %s: getting size: %w", v.Name, err)
+ }
+ align, err := alignof(v.Type)
+ if err != nil {
+ return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err)
+ }
+
+ // Align the current member based on the offset of the end of the previous
+ // member and the alignment of the current member.
+ off = internal.Align(off, uint32(align))
+
+ ds.Vars[i].Offset = off
+
+ off += uint32(size)
+ }
+
+ ds.Size = off
+
+ return nil
+}
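+// Illustrative example (not part of the upstream source): for a .kconfig
+// Datasec holding a 1-byte variable followed by a 4-byte variable, the loop
+// above yields offsets 0 and 4 (Align(1, 4) == 4) and a final ds.Size of 8.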
+
+// Copy creates a copy of Spec.
+func (s *Spec) Copy() *Spec {
+ if s == nil {
+ return nil
+ }
+
+ return &Spec{
+ s.mutableTypes.copy(),
+ s.strings,
+ }
+}
+
+type sliceWriter []byte
+
+func (sw sliceWriter) Write(p []byte) (int, error) {
+ if len(p) != len(sw) {
+ return 0, errors.New("size doesn't match")
+ }
+
+ return copy(sw, p), nil
+}
+
+// nextTypeID returns the next unallocated type ID or an error if there are no
+// more type IDs.
+func (s *Spec) nextTypeID() (TypeID, error) {
+ id := s.imm.firstTypeID + TypeID(len(s.imm.types))
+ if id < s.imm.firstTypeID {
+ return 0, fmt.Errorf("no more type IDs")
+ }
+ return id, nil
+}
+
+// TypeByID returns the BTF Type with the given type ID.
+//
+// Returns an error wrapping ErrNotFound if a Type with the given ID
+// does not exist in the Spec.
+func (s *Spec) TypeByID(id TypeID) (Type, error) {
+ typ, ok := s.typeByID(id)
+ if !ok {
+ return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound)
+ }
+
+ return typ, nil
+}
+
+// TypeID returns the ID for a given Type.
+//
+// Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec.
+func (s *Spec) TypeID(typ Type) (TypeID, error) {
+ return s.mutableTypes.typeID(typ)
+}
+
+// AnyTypesByName returns a list of BTF Types with the given name.
+//
+// If the BTF blob describes multiple compilation units like vmlinux, multiple
+// Types with the same name and kind can exist, but might not describe the same
+// data structure.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
+ return s.mutableTypes.anyTypesByName(name)
+}
+
+// AnyTypeByName returns a Type with the given name.
+//
+// Returns an error if multiple types of that name exist.
+func (s *Spec) AnyTypeByName(name string) (Type, error) {
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(types) > 1 {
+ return nil, fmt.Errorf("found multiple types: %v", types)
+ }
+
+ return types[0], nil
+}
+
+// TypeByName searches for a Type with a specific name. Since multiple Types
+// with the same name can exist, the parameter typ is taken to narrow down the
+// search in case of a clash.
+//
+// typ must be a non-nil pointer to an implementation of a Type. On success, the
+// address of the found Type will be copied to typ.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+// Returns an error wrapping ErrMultipleTypes if multiple candidates are found.
+func (s *Spec) TypeByName(name string, typ interface{}) error {
+ typeInterface := reflect.TypeOf((*Type)(nil)).Elem()
+
+ // typ may be **T or *Type
+ typValue := reflect.ValueOf(typ)
+ if typValue.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", typ)
+ }
+
+ typPtr := typValue.Elem()
+ if !typPtr.CanSet() {
+ return fmt.Errorf("%T cannot be set", typ)
+ }
+
+ wanted := typPtr.Type()
+ if wanted == typeInterface {
+ // This is *Type. Unwrap the value's type.
+ wanted = typPtr.Elem().Type()
+ }
+
+ if !wanted.AssignableTo(typeInterface) {
+ return fmt.Errorf("%T does not satisfy Type interface", typ)
+ }
+
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return err
+ }
+
+ var candidate Type
+ for _, typ := range types {
+ if reflect.TypeOf(typ) != wanted {
+ continue
+ }
+
+ if candidate != nil {
+ return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches)
+ }
+
+ candidate = typ
+ }
+
+ if candidate == nil {
+ return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound)
+ }
+
+ typPtr.Set(reflect.ValueOf(candidate))
+
+ return nil
+}
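+// Illustrative usage sketch (not part of the upstream source; assumes a *Spec
+// named spec obtained elsewhere, for example from kernel BTF). Passing a
+// pointer to a concrete type narrows the search to that kind:
+//
+//	var task *Struct
+//	if err := spec.TypeByName("task_struct", &task); err != nil {
+//		// handle ErrNotFound or ErrMultipleMatches
+//	}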
+
+// LoadSplitSpecFromReader loads split BTF from a reader.
+//
+// Types from base are used to resolve references in the split BTF.
+// The returned Spec only contains types from the split BTF, not from the base.
+func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
+ return loadRawSpec(r, internal.NativeEndian, base)
+}
+
+// TypesIterator iterates over types of a given spec.
+type TypesIterator struct {
+ spec *Spec
+ id TypeID
+ done bool
+ // The last visited type in the spec.
+ Type Type
+}
+
+// Iterate returns the types iterator.
+func (s *Spec) Iterate() *TypesIterator {
+ return &TypesIterator{spec: s, id: s.imm.firstTypeID}
+}
+
+// Next returns true as long as there are any remaining types.
+func (iter *TypesIterator) Next() bool {
+ if iter.done {
+ return false
+ }
+
+ var ok bool
+ iter.Type, ok = iter.spec.typeByID(iter.id)
+ iter.id++
+ iter.done = !ok
+ return !iter.done
+}
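+// Illustrative usage sketch (not part of the upstream source): walking every
+// type in a Spec with the iterator above:
+//
+//	iter := spec.Iterate()
+//	for iter.Next() {
+//		fmt.Println(iter.Type.TypeName())
+//	}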
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go
new file mode 100644
index 000000000..f0e327abc
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go
@@ -0,0 +1,519 @@
+package btf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind
+
+// btfKind describes a Type.
+type btfKind uint8
+
+// Equivalents of the BTF_KIND_* constants.
+const (
+ kindUnknown btfKind = iota // Unknown
+ kindInt // Int
+ kindPointer // Pointer
+ kindArray // Array
+ kindStruct // Struct
+ kindUnion // Union
+ kindEnum // Enum
+ kindForward // Forward
+ kindTypedef // Typedef
+ kindVolatile // Volatile
+ kindConst // Const
+ kindRestrict // Restrict
+ // Added ~4.20
+ kindFunc // Func
+ kindFuncProto // FuncProto
+ // Added ~5.1
+ kindVar // Var
+ kindDatasec // Datasec
+ // Added ~5.13
+ kindFloat // Float
+ // Added 5.16
+ kindDeclTag // DeclTag
+ kindTypeTag // TypeTag
+ // Added 6.0
+ kindEnum64 // Enum64
+)
+
+// FuncLinkage describes BTF function linkage metadata.
+type FuncLinkage int
+
+// Equivalent of enum btf_func_linkage.
+const (
+ StaticFunc FuncLinkage = iota // static
+ GlobalFunc // global
+ ExternFunc // extern
+)
+
+// VarLinkage describes BTF variable linkage metadata.
+type VarLinkage int
+
+const (
+ StaticVar VarLinkage = iota // static
+ GlobalVar // global
+ ExternVar // extern
+)
+
+const (
+ btfTypeKindShift = 24
+ btfTypeKindLen = 5
+ btfTypeVlenShift = 0
+ btfTypeVlenMask = 16
+ btfTypeKindFlagShift = 31
+ btfTypeKindFlagMask = 1
+)
+
+var btfHeaderLen = binary.Size(&btfHeader{})
+
+type btfHeader struct {
+ Magic uint16
+ Version uint8
+ Flags uint8
+ HdrLen uint32
+
+ TypeOff uint32
+ TypeLen uint32
+ StringOff uint32
+ StringLen uint32
+}
+
+// typeStart returns the offset from the beginning of the .BTF section
+// to the start of its type entries.
+func (h *btfHeader) typeStart() int64 {
+ return int64(h.HdrLen + h.TypeOff)
+}
+
+// stringStart returns the offset from the beginning of the .BTF section
+// to the start of its string table.
+func (h *btfHeader) stringStart() int64 {
+ return int64(h.HdrLen + h.StringOff)
+}
+
+// parseBTFHeader parses the header of the .BTF section.
+func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) {
+ var header btfHeader
+ if err := binary.Read(r, bo, &header); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ if header.Magic != btfMagic {
+ return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+ }
+
+ if header.Version != 1 {
+ return nil, fmt.Errorf("unexpected version %v", header.Version)
+ }
+
+ if header.Flags != 0 {
+ return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+ }
+
+ remainder := int64(header.HdrLen) - int64(binary.Size(&header))
+ if remainder < 0 {
+ return nil, errors.New("header length shorter than btfHeader size")
+ }
+
+ if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil {
+ return nil, fmt.Errorf("header padding: %v", err)
+ }
+
+ return &header, nil
+}
+
+var btfTypeLen = binary.Size(btfType{})
+
+// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
+type btfType struct {
+ NameOff uint32
+ /* "info" bits arrangement
+ * bits 0-15: vlen (e.g. # of struct's members), linkage
+ * bits 16-23: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
+ */
+ Info uint32
+ /* "size" is used by INT, ENUM, STRUCT and UNION.
+ * "size" tells the size of the type it is describing.
+ *
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
+ * "type" is a type_id referring to another type.
+ */
+ SizeType uint32
+}
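+// Illustrative example (not part of the upstream source): an Info value of
+// 0x0d000002 decodes to kind 13 (FuncProto) from bits 24-28, a vlen of 2 from
+// the low 16 bits, and a cleared kind_flag in bit 31.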
+
+var btfTypeSize = int(unsafe.Sizeof(btfType{}))
+
+func unmarshalBtfType(bt *btfType, b []byte, bo binary.ByteOrder) (int, error) {
+ if len(b) < btfTypeSize {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfType")
+ }
+
+ bt.NameOff = bo.Uint32(b[0:])
+ bt.Info = bo.Uint32(b[4:])
+ bt.SizeType = bo.Uint32(b[8:])
+ return btfTypeSize, nil
+}
+
+func mask(len uint32) uint32 {
+ return (1 << len) - 1
+}
+
+func readBits(value, len, shift uint32) uint32 {
+ return (value >> shift) & mask(len)
+}
+
+func writeBits(value, len, shift, new uint32) uint32 {
+ value &^= mask(len) << shift
+ value |= (new & mask(len)) << shift
+ return value
+}
+
+func (bt *btfType) info(len, shift uint32) uint32 {
+ return readBits(bt.Info, len, shift)
+}
+
+func (bt *btfType) setInfo(value, len, shift uint32) {
+ bt.Info = writeBits(bt.Info, len, shift, value)
+}
+
+func (bt *btfType) Kind() btfKind {
+ return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
+}
+
+func (bt *btfType) SetKind(kind btfKind) {
+ bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
+}
+
+func (bt *btfType) Vlen() int {
+ return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
+}
+
+func (bt *btfType) SetVlen(vlen int) {
+ bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
+}
+
+func (bt *btfType) kindFlagBool() bool {
+ return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
+}
+
+func (bt *btfType) setKindFlagBool(set bool) {
+ var value uint32
+ if set {
+ value = 1
+ }
+ bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift)
+}
+
+// Bitfield returns true if the struct or union contains a bitfield.
+func (bt *btfType) Bitfield() bool {
+ return bt.kindFlagBool()
+}
+
+func (bt *btfType) SetBitfield(isBitfield bool) {
+ bt.setKindFlagBool(isBitfield)
+}
+
+func (bt *btfType) FwdKind() FwdKind {
+ return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift))
+}
+
+func (bt *btfType) SetFwdKind(kind FwdKind) {
+ bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift)
+}
+
+func (bt *btfType) Signed() bool {
+ return bt.kindFlagBool()
+}
+
+func (bt *btfType) SetSigned(signed bool) {
+ bt.setKindFlagBool(signed)
+}
+
+func (bt *btfType) Linkage() FuncLinkage {
+ return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
+}
+
+func (bt *btfType) SetLinkage(linkage FuncLinkage) {
+ bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
+}
+
+func (bt *btfType) Type() TypeID {
+ // TODO: Panic here if wrong kind?
+ return TypeID(bt.SizeType)
+}
+
+func (bt *btfType) SetType(id TypeID) {
+ bt.SizeType = uint32(id)
+}
+
+func (bt *btfType) Size() uint32 {
+ // TODO: Panic here if wrong kind?
+ return bt.SizeType
+}
+
+func (bt *btfType) SetSize(size uint32) {
+ bt.SizeType = size
+}
+
+func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ buf := make([]byte, unsafe.Sizeof(*bt))
+ bo.PutUint32(buf[0:], bt.NameOff)
+ bo.PutUint32(buf[4:], bt.Info)
+ bo.PutUint32(buf[8:], bt.SizeType)
+ _, err := w.Write(buf)
+ return err
+}
+
+type rawType struct {
+ btfType
+ data interface{}
+}
+
+func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ if err := rt.btfType.Marshal(w, bo); err != nil {
+ return err
+ }
+
+ if rt.data == nil {
+ return nil
+ }
+
+ return binary.Write(w, bo, rt.data)
+}
+
+// btfInt encodes additional data for integers.
+//
+// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
+// ? = undefined
+// e = encoding
+// o = offset (bitfields?)
+// b = bits (bitfields)
+type btfInt struct {
+ Raw uint32
+}
+
+const (
+ btfIntEncodingLen = 4
+ btfIntEncodingShift = 24
+ btfIntOffsetLen = 8
+ btfIntOffsetShift = 16
+ btfIntBitsLen = 8
+ btfIntBitsShift = 0
+)
+
+var btfIntLen = int(unsafe.Sizeof(btfInt{}))
+
+func unmarshalBtfInt(bi *btfInt, b []byte, bo binary.ByteOrder) (int, error) {
+ if len(b) < btfIntLen {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfInt")
+ }
+
+ bi.Raw = bo.Uint32(b[0:])
+ return btfIntLen, nil
+}
+
+func (bi btfInt) Encoding() IntEncoding {
+ return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift))
+}
+
+func (bi *btfInt) SetEncoding(e IntEncoding) {
+ bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e))
+}
+
+func (bi btfInt) Offset() Bits {
+ return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift))
+}
+
+func (bi *btfInt) SetOffset(offset uint32) {
+ bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset)
+}
+
+func (bi btfInt) Bits() Bits {
+ return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift))
+}
+
+func (bi *btfInt) SetBits(bits byte) {
+ bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits))
+}
+
+type btfArray struct {
+ Type TypeID
+ IndexType TypeID
+ Nelems uint32
+}
+
+var btfArrayLen = int(unsafe.Sizeof(btfArray{}))
+
+func unmarshalBtfArray(ba *btfArray, b []byte, bo binary.ByteOrder) (int, error) {
+ if len(b) < btfArrayLen {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfArray")
+ }
+
+ ba.Type = TypeID(bo.Uint32(b[0:]))
+ ba.IndexType = TypeID(bo.Uint32(b[4:]))
+ ba.Nelems = bo.Uint32(b[8:])
+ return btfArrayLen, nil
+}
+
+type btfMember struct {
+ NameOff uint32
+ Type TypeID
+ Offset uint32
+}
+
+var btfMemberLen = int(unsafe.Sizeof(btfMember{}))
+
+func unmarshalBtfMembers(members []btfMember, b []byte, bo binary.ByteOrder) (int, error) {
+ off := 0
+ for i := range members {
+ if off+btfMemberLen > len(b) {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfMember %d", i)
+ }
+
+ members[i].NameOff = bo.Uint32(b[off+0:])
+ members[i].Type = TypeID(bo.Uint32(b[off+4:]))
+ members[i].Offset = bo.Uint32(b[off+8:])
+
+ off += btfMemberLen
+ }
+
+ return off, nil
+}
+
+type btfVarSecinfo struct {
+ Type TypeID
+ Offset uint32
+ Size uint32
+}
+
+var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{}))
+
+func unmarshalBtfVarSecInfos(secinfos []btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) {
+ off := 0
+ for i := range secinfos {
+ if off+btfVarSecinfoLen > len(b) {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo %d", i)
+ }
+
+ secinfos[i].Type = TypeID(bo.Uint32(b[off+0:]))
+ secinfos[i].Offset = bo.Uint32(b[off+4:])
+ secinfos[i].Size = bo.Uint32(b[off+8:])
+
+ off += btfVarSecinfoLen
+ }
+
+ return off, nil
+}
+
+type btfVariable struct {
+ Linkage uint32
+}
+
+var btfVariableLen = int(unsafe.Sizeof(btfVariable{}))
+
+func unmarshalBtfVariable(bv *btfVariable, b []byte, bo binary.ByteOrder) (int, error) {
+ if len(b) < btfVariableLen {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfVariable")
+ }
+
+ bv.Linkage = bo.Uint32(b[0:])
+ return btfVariableLen, nil
+}
+
+type btfEnum struct {
+ NameOff uint32
+ Val uint32
+}
+
+var btfEnumLen = int(unsafe.Sizeof(btfEnum{}))
+
+func unmarshalBtfEnums(enums []btfEnum, b []byte, bo binary.ByteOrder) (int, error) {
+ off := 0
+ for i := range enums {
+ if off+btfEnumLen > len(b) {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum %d", i)
+ }
+
+ enums[i].NameOff = bo.Uint32(b[off+0:])
+ enums[i].Val = bo.Uint32(b[off+4:])
+
+ off += btfEnumLen
+ }
+
+ return off, nil
+}
+
+type btfEnum64 struct {
+ NameOff uint32
+ ValLo32 uint32
+ ValHi32 uint32
+}
+
+var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{}))
+
+func unmarshalBtfEnums64(enums []btfEnum64, b []byte, bo binary.ByteOrder) (int, error) {
+ off := 0
+ for i := range enums {
+ if off+btfEnum64Len > len(b) {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64 %d", i)
+ }
+
+ enums[i].NameOff = bo.Uint32(b[off+0:])
+ enums[i].ValLo32 = bo.Uint32(b[off+4:])
+ enums[i].ValHi32 = bo.Uint32(b[off+8:])
+
+ off += btfEnum64Len
+ }
+
+ return off, nil
+}
+
+type btfParam struct {
+ NameOff uint32
+ Type TypeID
+}
+
+var btfParamLen = int(unsafe.Sizeof(btfParam{}))
+
+func unmarshalBtfParams(params []btfParam, b []byte, bo binary.ByteOrder) (int, error) {
+ off := 0
+ for i := range params {
+ if off+btfParamLen > len(b) {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfParam %d", i)
+ }
+
+ params[i].NameOff = bo.Uint32(b[off+0:])
+ params[i].Type = TypeID(bo.Uint32(b[off+4:]))
+
+ off += btfParamLen
+ }
+
+ return off, nil
+}
+
+type btfDeclTag struct {
+ ComponentIdx uint32
+}
+
+var btfDeclTagLen = int(unsafe.Sizeof(btfDeclTag{}))
+
+func unmarshalBtfDeclTag(bdt *btfDeclTag, b []byte, bo binary.ByteOrder) (int, error) {
+ if len(b) < btfDeclTagLen {
+ return 0, fmt.Errorf("not enough bytes to unmarshal btfDeclTag")
+ }
+
+ bdt.ComponentIdx = bo.Uint32(b[0:])
+ return btfDeclTagLen, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
new file mode 100644
index 000000000..b7a1b80d1
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
@@ -0,0 +1,80 @@
+// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT.
+
+package btf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StaticFunc-0]
+ _ = x[GlobalFunc-1]
+ _ = x[ExternFunc-2]
+}
+
+const _FuncLinkage_name = "staticglobalextern"
+
+var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}
+
+func (i FuncLinkage) String() string {
+ if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
+ return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StaticVar-0]
+ _ = x[GlobalVar-1]
+ _ = x[ExternVar-2]
+}
+
+const _VarLinkage_name = "staticglobalextern"
+
+var _VarLinkage_index = [...]uint8{0, 6, 12, 18}
+
+func (i VarLinkage) String() string {
+ if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
+ return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[kindUnknown-0]
+ _ = x[kindInt-1]
+ _ = x[kindPointer-2]
+ _ = x[kindArray-3]
+ _ = x[kindStruct-4]
+ _ = x[kindUnion-5]
+ _ = x[kindEnum-6]
+ _ = x[kindForward-7]
+ _ = x[kindTypedef-8]
+ _ = x[kindVolatile-9]
+ _ = x[kindConst-10]
+ _ = x[kindRestrict-11]
+ _ = x[kindFunc-12]
+ _ = x[kindFuncProto-13]
+ _ = x[kindVar-14]
+ _ = x[kindDatasec-15]
+ _ = x[kindFloat-16]
+ _ = x[kindDeclTag-17]
+ _ = x[kindTypeTag-18]
+ _ = x[kindEnum64-19]
+}
+
+const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64"
+
+var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120}
+
+func (i btfKind) String() string {
+ if i >= btfKind(len(_btfKind_index)-1) {
+ return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _btfKind_name[_btfKind_index[i]:_btfKind_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
new file mode 100644
index 000000000..ee89f9833
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -0,0 +1,1261 @@
+package btf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "slices"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+)
+
+// Code in this file is derived from libbpf, which is available under a BSD
+// 2-Clause license.
+
+// A constant used when CO-RE relocation has to remove instructions.
+//
+// Taken from libbpf.
+const COREBadRelocationSentinel = 0xbad2310
+
+// COREFixup is the result of computing a CO-RE relocation for a target.
+type COREFixup struct {
+ kind coreKind
+ local uint64
+ target uint64
+ // True if there is no valid fixup. The instruction is replaced with an
+ // invalid dummy.
+ poison bool
+ // True if the validation of the local value should be skipped. Used by
+ // some kinds of bitfield relocations.
+ skipLocalValidation bool
+}
+
+func (f *COREFixup) equal(other COREFixup) bool {
+ return f.local == other.local && f.target == other.target
+}
+
+func (f *COREFixup) String() string {
+ if f.poison {
+ return fmt.Sprintf("%s=poison", f.kind)
+ }
+ return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
+}
+
+func (f *COREFixup) Apply(ins *asm.Instruction) error {
+ if f.poison {
+ // Relocation is poisoned, replace the instruction with an invalid one.
+ if ins.OpCode.IsDWordLoad() {
+ // Replace a dword load with an invalid dword load to preserve instruction size.
+ *ins = asm.LoadImm(asm.R10, COREBadRelocationSentinel, asm.DWord)
+ } else {
+ // Replace any single-size instruction with an invalid call instruction.
+ *ins = asm.BuiltinFunc(COREBadRelocationSentinel).Call()
+ }
+
+ // Add context to the kernel verifier output.
+ if source := ins.Source(); source != nil {
+ *ins = ins.WithSource(asm.Comment(fmt.Sprintf("instruction poisoned by CO-RE: %s", source)))
+ } else {
+ *ins = ins.WithSource(asm.Comment("instruction poisoned by CO-RE"))
+ }
+
+ return nil
+ }
+
+ switch class := ins.OpCode.Class(); class {
+ case asm.LdXClass, asm.StClass, asm.StXClass:
+ if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
+ return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
+ }
+
+ if f.target > math.MaxInt16 {
+ return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
+ }
+
+ ins.Offset = int16(f.target)
+
+ case asm.LdClass:
+ if !ins.IsConstantLoad(asm.DWord) {
+ return fmt.Errorf("not a dword-sized immediate load")
+ }
+
+ if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
+ return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
+ }
+
+ ins.Constant = int64(f.target)
+
+ case asm.ALUClass:
+ if ins.OpCode.ALUOp() == asm.Swap {
+ return fmt.Errorf("relocation against swap")
+ }
+
+ fallthrough
+
+ case asm.ALU64Class:
+ if src := ins.OpCode.Source(); src != asm.ImmSource {
+ return fmt.Errorf("invalid source %s", src)
+ }
+
+ if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
+ return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
+ }
+
+ if f.target > math.MaxInt32 {
+ return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
+ }
+
+ ins.Constant = int64(f.target)
+
+ default:
+ return fmt.Errorf("invalid class %s", class)
+ }
+
+ return nil
+}
+
+func (f COREFixup) isNonExistant() bool {
+ return f.kind.checksForExistence() && f.target == 0
+}
+
+// coreKind is the type of CO-RE relocation as specified in BPF source code.
+type coreKind uint32
+
+const (
+ reloFieldByteOffset coreKind = iota /* field byte offset */
+ reloFieldByteSize /* field size in bytes */
+ reloFieldExists /* field existence in target kernel */
+ reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
+ reloFieldLShiftU64 /* bitfield-specific left bitshift */
+ reloFieldRShiftU64 /* bitfield-specific right bitshift */
+ reloTypeIDLocal /* type ID in local BPF object */
+ reloTypeIDTarget /* type ID in target kernel */
+ reloTypeExists /* type existence in target kernel */
+ reloTypeSize /* type size in bytes */
+ reloEnumvalExists /* enum value existence in target kernel */
+ reloEnumvalValue /* enum value integer value */
+ reloTypeMatches /* type matches kernel type */
+)
+
+func (k coreKind) checksForExistence() bool {
+ return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists || k == reloTypeMatches
+}
+
+func (k coreKind) String() string {
+ switch k {
+ case reloFieldByteOffset:
+ return "byte_off"
+ case reloFieldByteSize:
+ return "byte_sz"
+ case reloFieldExists:
+ return "field_exists"
+ case reloFieldSigned:
+ return "signed"
+ case reloFieldLShiftU64:
+ return "lshift_u64"
+ case reloFieldRShiftU64:
+ return "rshift_u64"
+ case reloTypeIDLocal:
+ return "local_type_id"
+ case reloTypeIDTarget:
+ return "target_type_id"
+ case reloTypeExists:
+ return "type_exists"
+ case reloTypeSize:
+ return "type_size"
+ case reloEnumvalExists:
+ return "enumval_exists"
+ case reloEnumvalValue:
+ return "enumval_value"
+ case reloTypeMatches:
+ return "type_matches"
+ default:
+ return fmt.Sprintf("unknown (%d)", k)
+ }
+}
+
+// CORERelocate calculates changes needed to adjust eBPF instructions for differences
+// in types.
+//
+// targets forms the set of types to relocate against. The first element has to be
+// BTF for vmlinux, the following must be types for kernel modules.
+//
+// resolveLocalTypeID is called for each local type which requires a stable TypeID.
+// Calling the function with the same type multiple times must produce the same
+// result. It is the caller's responsibility to ensure that the relocated instructions
+// are loaded with matching BTF.
+//
+// Returns a list of fixups which can be applied to instructions to make them
+// match the target type(s).
+//
+// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
+// for relos[i].
+func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
+ if len(targets) == 0 {
+ // Explicitly check for nil here since the argument used to be optional.
+ return nil, fmt.Errorf("targets must be provided")
+ }
+
+ // We can't encode type IDs that aren't for vmlinux into instructions at the
+ // moment.
+ resolveTargetTypeID := targets[0].TypeID
+
+ for _, target := range targets {
+ if bo != target.imm.byteOrder {
+ return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
+ }
+ }
+
+ type reloGroup struct {
+ relos []*CORERelocation
+ // Position of each relocation in relos.
+ indices []int
+ }
+
+ // Split relocations into per Type lists.
+ relosByType := make(map[Type]*reloGroup)
+ result := make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind == reloTypeIDLocal {
+ // Filtering out reloTypeIDLocal here makes our lives a lot easier
+ // down the line, since it doesn't have a target at all.
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+ }
+
+ id, err := resolveLocalTypeID(relo.typ)
+ if err != nil {
+ return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err)
+ }
+
+ result[i] = COREFixup{
+ kind: relo.kind,
+ local: uint64(relo.id),
+ target: uint64(id),
+ }
+ continue
+ }
+
+ group, ok := relosByType[relo.typ]
+ if !ok {
+ group = &reloGroup{}
+ relosByType[relo.typ] = group
+ }
+ group.relos = append(group.relos, relo)
+ group.indices = append(group.indices, i)
+ }
+
+ for localType, group := range relosByType {
+ localTypeName := localType.TypeName()
+ if localTypeName == "" {
+ return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
+ }
+
+ essentialName := newEssentialName(localTypeName)
+
+ var targetTypes []Type
+ for _, target := range targets {
+ namedTypeIDs := target.imm.namedTypes[essentialName]
+ targetTypes = slices.Grow(targetTypes, len(namedTypeIDs))
+ for _, id := range namedTypeIDs {
+ typ, err := target.TypeByID(id)
+ if err != nil {
+ return nil, err
+ }
+
+ targetTypes = append(targetTypes, typ)
+ }
+ }
+
+ fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID)
+ if err != nil {
+ return nil, fmt.Errorf("relocate %s: %w", localType, err)
+ }
+
+ for j, index := range group.indices {
+ result[index] = fixups[j]
+ }
+ }
+
+ return result, nil
+}
+
+var errAmbiguousRelocation = errors.New("ambiguous relocation")
+var errImpossibleRelocation = errors.New("impossible relocation")
+var errIncompatibleTypes = errors.New("incompatible types")
+
+// coreCalculateFixups finds the target type that best matches all relocations.
+//
+// All relos must target the same type.
+//
+// The best target is determined by scoring: the less poisoning we have to do
+// the better the target is.
+func coreCalculateFixups(relos []*CORERelocation, targets []Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
+ bestScore := len(relos)
+ var bestFixups []COREFixup
+ for _, target := range targets {
+ score := 0 // lower is better
+ fixups := make([]COREFixup, 0, len(relos))
+ for _, relo := range relos {
+ fixup, err := coreCalculateFixup(relo, target, bo, resolveTargetTypeID)
+ if err != nil {
+ return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err)
+ }
+ if fixup.poison || fixup.isNonExistant() {
+ score++
+ }
+ fixups = append(fixups, fixup)
+ }
+
+ if score > bestScore {
+ // We have a better target already, ignore this one.
+ continue
+ }
+
+ if score < bestScore {
+ // This is the best target yet, use it.
+ bestScore = score
+ bestFixups = fixups
+ continue
+ }
+
+ // Some other target has the same score as the current one. Make sure
+ // the fixups agree with each other.
+ for i, fixup := range bestFixups {
+ if !fixup.equal(fixups[i]) {
+ return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
+ }
+ }
+ }
+
+ if bestFixups == nil {
+ // Nothing at all matched, probably because there are no suitable
+ // targets at all.
+ //
+ // Poison everything except checksForExistence.
+ bestFixups = make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind.checksForExistence() {
+ bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
+ } else {
+ bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
+ }
+ }
+ }
+
+ return bestFixups, nil
+}
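+// Illustrative example (not part of the upstream source): given two candidate
+// targets where one resolves every relocation (score 0) and the other needs a
+// poisoned fixup (score 1), the fixups computed against the score-0 target are
+// returned; equal scores are only accepted if both sets of fixups agree.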
+
+var errNoSignedness = errors.New("no signedness")
+
+// coreCalculateFixup calculates the fixup given a relocation and a target type.
+func coreCalculateFixup(relo *CORERelocation, target Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) (COREFixup, error) {
+ fixup := func(local, target uint64) (COREFixup, error) {
+ return COREFixup{kind: relo.kind, local: local, target: target}, nil
+ }
+ fixupWithoutValidation := func(local, target uint64) (COREFixup, error) {
+ return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
+ }
+ poison := func() (COREFixup, error) {
+ if relo.kind.checksForExistence() {
+ return fixup(1, 0)
+ }
+ return COREFixup{kind: relo.kind, poison: true}, nil
+ }
+ zero := COREFixup{}
+
+ local := relo.typ
+
+ switch relo.kind {
+ case reloTypeMatches:
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
+ }
+
+ err := coreTypesMatch(local, target, nil)
+ if errors.Is(err, errIncompatibleTypes) {
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+
+ return fixup(1, 1)
+
+ case reloTypeIDTarget, reloTypeSize, reloTypeExists:
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
+ }
+
+ err := CheckTypeCompatibility(local, target)
+ if errors.Is(err, errIncompatibleTypes) {
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+
+ switch relo.kind {
+ case reloTypeExists:
+ return fixup(1, 1)
+
+ case reloTypeIDTarget:
+ targetID, err := resolveTargetTypeID(target)
+ if errors.Is(err, ErrNotFound) {
+ // Probably a relocation trying to get the ID
+ // of a type from a kmod.
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+ return fixup(uint64(relo.id), uint64(targetID))
+
+ case reloTypeSize:
+ localSize, err := Sizeof(local)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(target)
+ if err != nil {
+ return zero, err
+ }
+
+ return fixup(uint64(localSize), uint64(targetSize))
+ }
+
+ case reloEnumvalValue, reloEnumvalExists:
+ localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+
+ switch relo.kind {
+ case reloEnumvalExists:
+ return fixup(1, 1)
+
+ case reloEnumvalValue:
+ return fixup(localValue.Value, targetValue.Value)
+ }
+
+ case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned:
+ if _, ok := As[*Fwd](target); ok {
+ // We can't relocate fields using a forward declaration, so
+ // skip it. If a non-forward declaration is present in the BTF
+ // we'll find it in one of the other iterations.
+ return poison()
+ }
+
+ localField, targetField, err := coreFindField(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+
+ maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
+ f.skipLocalValidation = localField.bitfieldSize > 0
+ return f, err
+ }
+
+ switch relo.kind {
+ case reloFieldExists:
+ return fixup(1, 1)
+
+ case reloFieldByteOffset:
+ return maybeSkipValidation(fixup(uint64(localField.offset), uint64(targetField.offset)))
+
+ case reloFieldByteSize:
+ localSize, err := Sizeof(localField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+ return maybeSkipValidation(fixup(uint64(localSize), uint64(targetSize)))
+
+ case reloFieldLShiftU64:
+ var target uint64
+ if bo == binary.LittleEndian {
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint64(64 - targetField.bitfieldOffset - targetSize)
+ } else {
+ loadWidth, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint64(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
+ }
+ return fixupWithoutValidation(0, target)
+
+ case reloFieldRShiftU64:
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ return fixupWithoutValidation(0, uint64(64-targetSize))
+
+ case reloFieldSigned:
+ switch local := UnderlyingType(localField.Type).(type) {
+ case *Enum:
+ target, ok := As[*Enum](targetField.Type)
+ if !ok {
+ return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
+ }
+
+ return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed))
+ case *Int:
+ target, ok := As[*Int](targetField.Type)
+ if !ok {
+ return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
+ }
+
+ return fixup(
+ uint64(local.Encoding&Signed),
+ uint64(target.Encoding&Signed),
+ )
+ default:
+ return zero, fmt.Errorf("type %T: %w", local, errNoSignedness)
+ }
+ }
+ }
+
+ return zero, ErrNotSupported
+}
+
+func boolToUint64(val bool) uint64 {
+ if val {
+ return 1
+ }
+ return 0
+}
+
+/* coreAccessor contains a path through a struct. It contains at least one index.
+ *
+ * The interpretation depends on the kind of the relocation. The following is
+ * taken from struct bpf_core_relo in libbpf_internal.h:
+ *
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ * - for type-based relocations, strings is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ *
+ * Example to provide a better feel.
+ *
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample s = ...;
+ * int x = &s->a; // encoded as "0:0" (a is field #0)
+ * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ */
+type coreAccessor []int
+
+func parseCOREAccessor(accessor string) (coreAccessor, error) {
+ if accessor == "" {
+ return nil, fmt.Errorf("empty accessor")
+ }
+
+ parts := strings.Split(accessor, ":")
+ result := make(coreAccessor, 0, len(parts))
+ for _, part := range parts {
+ // 31 bits to avoid overflowing int on 32 bit platforms.
+ index, err := strconv.ParseUint(part, 10, 31)
+ if err != nil {
+ return nil, fmt.Errorf("accessor index %q: %s", part, err)
+ }
+
+ result = append(result, int(index))
+ }
+
+ return result, nil
+}
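+// Illustrative example (not part of the upstream source): the accessor string
+// "0:1:0:5" from the coreAccessor comment below parses to coreAccessor{0, 1, 0, 5}.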
+
+func (ca coreAccessor) String() string {
+ strs := make([]string, 0, len(ca))
+ for _, i := range ca {
+ strs = append(strs, strconv.Itoa(i))
+ }
+ return strings.Join(strs, ":")
+}
+
+func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
+ e, ok := As[*Enum](t)
+ if !ok {
+ return nil, fmt.Errorf("not an enum: %s", t)
+ }
+
+ if len(ca) > 1 {
+ return nil, fmt.Errorf("invalid accessor %s for enum", ca)
+ }
+
+ i := ca[0]
+ if i >= len(e.Values) {
+ return nil, fmt.Errorf("invalid index %d for %s", i, e)
+ }
+
+ return &e.Values[i], nil
+}
+
+// coreField represents the position of a "child" of a composite type from the
+// start of that type.
+//
+// /- start of composite
+// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
+// \- start of field end of field -/
+type coreField struct {
+ Type Type
+
+ // The position of the field from the start of the composite type in bytes.
+ offset uint32
+
+ // The offset of the bitfield in bits from the start of the field.
+ bitfieldOffset Bits
+
+ // The size of the bitfield in bits.
+ //
+ // Zero if the field is not a bitfield.
+ bitfieldSize Bits
+}
+
+func (cf *coreField) adjustOffsetToNthElement(n int) error {
+ if n == 0 {
+ return nil
+ }
+
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ cf.offset += uint32(n) * uint32(size)
+ return nil
+}
+
+func (cf *coreField) adjustOffsetBits(offset Bits) error {
+ align, err := alignof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ // We can compute the load offset by:
+ // 1) converting the bit offset to bytes with a flooring division.
+ // 2) dividing and multiplying that offset by the alignment, yielding the
+ // load size aligned offset.
+ offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
+
+ // The number of bits remaining is the bit offset less the number of bits
+ // we can "skip" with the aligned offset.
+ cf.bitfieldOffset = offset - Bits(offsetBytes*8)
+
+ // We know that cf.offset is aligned to at least align since we get it
+ // from the compiler via BTF. Adding an aligned offsetBytes preserves the
+ // alignment.
+ cf.offset += offsetBytes
+ return nil
+}
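+// Illustrative example (not part of the upstream source): for a bitfield at
+// bit offset 70 inside a field with 4-byte alignment, offsetBytes is
+// (70/8)/4*4 = 8, so the load starts 8 bytes in and bitfieldOffset becomes
+// 70 - 64 = 6 bits.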
+
+func (cf *coreField) sizeBits() (Bits, error) {
+ if cf.bitfieldSize > 0 {
+ return cf.bitfieldSize, nil
+ }
+
+ // Someone is trying to access a non-bitfield via a bit shift relocation.
+ // This happens when a field changes from a bitfield to a regular field
+ // between kernel versions. Synthesise the size to make the shifts work.
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return 0, err
+ }
+ return Bits(size * 8), nil
+}
+
+// coreFindField descends into the local type using the accessor and tries to
+// find an equivalent field in target at each step.
+//
+// Returns the matched local and target fields, each including its offset from
+// the start of the respective root type.
+func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
+ local := coreField{Type: localT}
+ target := coreField{Type: targetT}
+
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
+ }
+
+ // The first index is used to offset a pointer of the base type like
+ // when accessing an array.
+ if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ var localMaybeFlex, targetMaybeFlex bool
+ for i, acc := range localAcc[1:] {
+ switch localType := UnderlyingType(local.Type).(type) {
+ case composite:
+ // For composite types acc is used to find the field in the local type,
+ // and then we try to find a field in target with the same name.
+ localMembers := localType.members()
+ if acc >= len(localMembers) {
+ return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
+ }
+
+ localMember := localMembers[acc]
+ if localMember.Name == "" {
+ localMemberType, ok := As[composite](localMember.Type)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
+ }
+
+ // This is an anonymous struct or union, ignore it.
+ local = coreField{
+ Type: localMemberType,
+ offset: local.offset + localMember.Offset.Bytes(),
+ }
+ localMaybeFlex = false
+ continue
+ }
+
+ targetType, ok := As[composite](target.Type)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
+ }
+
+ targetMember, last, err := coreFindMember(targetType, localMember.Name)
+ if err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ local = coreField{
+ Type: localMember.Type,
+ offset: local.offset,
+ bitfieldSize: localMember.BitfieldSize,
+ }
+ localMaybeFlex = acc == len(localMembers)-1
+
+ target = coreField{
+ Type: targetMember.Type,
+ offset: target.offset,
+ bitfieldSize: targetMember.BitfieldSize,
+ }
+ targetMaybeFlex = last
+
+ if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
+ local.offset += localMember.Offset.Bytes()
+ target.offset += targetMember.Offset.Bytes()
+ break
+ }
+
+ // Either of the members is a bitfield. Make sure we're at the
+ // end of the accessor.
+ if next := i + 1; next < len(localAcc[1:]) {
+ return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
+ }
+
+ if err := local.adjustOffsetBits(localMember.Offset); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ case *Array:
+ // For arrays, acc is the index in the target.
+ targetType, ok := As[*Array](target.Type)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
+ }
+
+ if localType.Nelems == 0 && !localMaybeFlex {
+ return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
+ }
+ if targetType.Nelems == 0 && !targetMaybeFlex {
+ return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
+ }
+
+ if localType.Nelems > 0 && acc >= int(localType.Nelems) {
+ return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
+ }
+ if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
+ return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
+ }
+
+ local = coreField{
+ Type: localType.Type,
+ offset: local.offset,
+ }
+ localMaybeFlex = false
+
+ if err := local.adjustOffsetToNthElement(acc); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ target = coreField{
+ Type: targetType.Type,
+ offset: target.offset,
+ }
+ targetMaybeFlex = false
+
+ if err := target.adjustOffsetToNthElement(acc); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ default:
+ return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
+ }
+
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, err
+ }
+ }
+
+ return local, target, nil
+}
+
+// coreFindMember finds a member in a composite type while handling anonymous
+// structs and unions.
+func coreFindMember(typ composite, name string) (Member, bool, error) {
+ if name == "" {
+ return Member{}, false, errors.New("can't search for anonymous member")
+ }
+
+ type offsetTarget struct {
+ composite
+ offset Bits
+ }
+
+ targets := []offsetTarget{{typ, 0}}
+ visited := make(map[composite]bool)
+
+ for i := 0; i < len(targets); i++ {
+ target := targets[i]
+
+ // Only visit targets once to prevent infinite recursion.
+ if visited[target] {
+ continue
+ }
+ if len(visited) >= maxResolveDepth {
+ // This check is different than libbpf, which restricts the entire
+ // path to BPF_CORE_SPEC_MAX_LEN items.
+ return Member{}, false, fmt.Errorf("type is nested too deep")
+ }
+ visited[target] = true
+
+ members := target.members()
+ for j, member := range members {
+ if member.Name == name {
+ // NB: This is safe because member is a copy.
+ member.Offset += target.offset
+ return member, j == len(members)-1, nil
+ }
+
+ // The names don't match, but this member could be an anonymous struct
+ // or union.
+ if member.Name != "" {
+ continue
+ }
+
+ comp, ok := As[composite](member.Type)
+ if !ok {
+ return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
+ }
+
+ targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
+ }
+ }
+
+ return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
+}
+
+// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
+ localValue, err := localAcc.enumValue(local)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ targetEnum, ok := As[*Enum](target)
+ if !ok {
+ return nil, nil, errImpossibleRelocation
+ }
+
+ localName := newEssentialName(localValue.Name)
+ for i, targetValue := range targetEnum.Values {
+ if newEssentialName(targetValue.Name) != localName {
+ continue
+ }
+
+ return localValue, &targetEnum.Values[i], nil
+ }
+
+ return nil, nil, errImpossibleRelocation
+}
+
+// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules.
+//
+// Only layout compatibility is checked, ignoring names of the root type.
+func CheckTypeCompatibility(localType Type, targetType Type) error {
+ return coreAreTypesCompatible(localType, targetType, nil)
+}
+
+type pair struct {
+ A, B Type
+}
+
+/* The comment below is from bpf_core_types_are_compat in libbpf.c:
+ *
+ * Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follow slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ * kind should match for local and target types (i.e., STRUCT is not
+ * compatible with UNION);
+ * - for ENUMs, the size is ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
+ * - FUNC_PROTOs are compatible if they have compatible signature: same
+ * number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errIncompatibleTypes if types are not compatible.
+ */
+func coreAreTypesCompatible(localType Type, targetType Type, visited map[pair]struct{}) error {
+ localType = UnderlyingType(localType)
+ targetType = UnderlyingType(targetType)
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ if _, ok := visited[pair{localType, targetType}]; ok {
+ return nil
+ }
+ if visited == nil {
+ visited = make(map[pair]struct{})
+ }
+ visited[pair{localType, targetType}] = struct{}{}
+
+ switch lv := localType.(type) {
+ case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
+ return nil
+
+ case *Pointer:
+ tv := targetType.(*Pointer)
+ return coreAreTypesCompatible(lv.Target, tv.Target, visited)
+
+ case *Array:
+ tv := targetType.(*Array)
+ if err := coreAreTypesCompatible(lv.Index, tv.Index, visited); err != nil {
+ return err
+ }
+
+ return coreAreTypesCompatible(lv.Type, tv.Type, visited)
+
+ case *FuncProto:
+ tv := targetType.(*FuncProto)
+ if err := coreAreTypesCompatible(lv.Return, tv.Return, visited); err != nil {
+ return err
+ }
+
+ if len(lv.Params) != len(tv.Params) {
+ return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
+ }
+
+ for i, localParam := range lv.Params {
+ targetParam := tv.Params[i]
+ if err := coreAreTypesCompatible(localParam.Type, targetParam.Type, visited); err != nil {
+ return err
+ }
+ }
+
+ return nil
+
+ default:
+ return fmt.Errorf("unsupported type %T", localType)
+ }
+}
+
+/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
+ *
+ * The comment below is from bpf_core_fields_are_compat in libbpf.c:
+ *
+ * Check two types for compatibility for the purpose of field access
+ * relocation. const/volatile/restrict and typedefs are skipped to ensure we
+ * are relocating semantically compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible, if their names match (modulo flavor suffix);
+ * - any two PTRs are always compatible;
+ * - for ENUMs, names should be the same (ignoring flavor suffix) or at
+ * least one of enums should be anonymous;
+ * - for ENUMs, check sizes, names are ignored;
+ * - for INT, size and signedness are ignored;
+ * - any two FLOATs are always compatible;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * [ NB: coreAreMembersCompatible doesn't recurse, this check is done
+ * by coreFindField. ]
+ * - everything else shouldn't be ever a target of relocation.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if the members are not compatible.
+ */
+func coreAreMembersCompatible(localType Type, targetType Type) error {
+ localType = UnderlyingType(localType)
+ targetType = UnderlyingType(targetType)
+
+ _, lok := localType.(composite)
+ _, tok := targetType.(composite)
+ if lok && tok {
+ return nil
+ }
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+ }
+
+ switch lv := localType.(type) {
+ case *Array, *Pointer, *Float, *Int:
+ return nil
+
+ case *Enum:
+ tv := targetType.(*Enum)
+ if !coreEssentialNamesMatch(lv.Name, tv.Name) {
+ return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation)
+ }
+
+ return nil
+
+ case *Fwd:
+ tv := targetType.(*Fwd)
+ if !coreEssentialNamesMatch(lv.Name, tv.Name) {
+ return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation)
+ }
+
+ return nil
+
+ default:
+ return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
+ }
+}
+
+// coreEssentialNamesMatch compares two names while ignoring their flavour suffix.
+//
+// This should only be used on names which are in the global scope, like struct
+// names, typedefs or enum values.
+func coreEssentialNamesMatch(a, b string) bool {
+ if a == "" || b == "" {
+ // allow an anonymous and a named type to match
+ return true
+ }
+
+ return newEssentialName(a) == newEssentialName(b)
+}
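+// Illustrative example (not part of the upstream source): flavoured names such
+// as "task_struct___v2" and "task_struct" compare equal here, since
+// newEssentialName strips the "___flavour" suffix.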
+
+/* The comment below is from __bpf_core_types_match in relo_core.c:
+ *
+ * Check that two types "match". This function assumes that root types were
+ * already checked for name match.
+ *
+ * The matching relation is defined as follows:
+ * - modifiers and typedefs are stripped (and, hence, effectively ignored)
+ * - generally speaking types need to be of same kind (struct vs. struct, union
+ * vs. union, etc.)
+ * - exceptions are struct/union behind a pointer which could also match a
+ * forward declaration of a struct or union, respectively, and enum vs.
+ * enum64 (see below)
+ * Then, depending on type:
+ * - integers:
+ * - match if size and signedness match
+ * - arrays & pointers:
+ * - target types are recursively matched
+ * - structs & unions:
+ * - local members need to exist in target with the same name
+ * - for each member we recursively check match unless it is already behind a
+ * pointer, in which case we only check matching names and compatible kind
+ * - enums:
+ * - local variants have to have a match in target by symbolic name (but not
+ * numeric value)
+ * - size has to match (but enum may match enum64 and vice versa)
+ * - function pointers:
+ * - number and position of arguments in local type has to match target
+ * - for each argument and the return value we recursively check match
+ */
+func coreTypesMatch(localType Type, targetType Type, visited map[pair]struct{}) error {
+ localType = UnderlyingType(localType)
+ targetType = UnderlyingType(targetType)
+
+ if !coreEssentialNamesMatch(localType.TypeName(), targetType.TypeName()) {
+ return fmt.Errorf("type name %q don't match %q: %w", localType.TypeName(), targetType.TypeName(), errIncompatibleTypes)
+ }
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ if _, ok := visited[pair{localType, targetType}]; ok {
+ return nil
+ }
+ if visited == nil {
+ visited = make(map[pair]struct{})
+ }
+ visited[pair{localType, targetType}] = struct{}{}
+
+ switch lv := (localType).(type) {
+ case *Void:
+
+ case *Fwd:
+ if targetType.(*Fwd).Kind != lv.Kind {
+ return fmt.Errorf("fwd kind mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ case *Enum:
+ return coreEnumsMatch(lv, targetType.(*Enum))
+
+ case composite:
+ tv := targetType.(composite)
+
+ if len(lv.members()) > len(tv.members()) {
+ return errIncompatibleTypes
+ }
+
+ localMembers := lv.members()
+ targetMembers := map[string]Member{}
+ for _, member := range tv.members() {
+ targetMembers[member.Name] = member
+ }
+
+ for _, localMember := range localMembers {
+ targetMember, found := targetMembers[localMember.Name]
+ if !found {
+ return fmt.Errorf("no field %q in %v: %w", localMember.Name, targetType, errIncompatibleTypes)
+ }
+
+ err := coreTypesMatch(localMember.Type, targetMember.Type, visited)
+ if err != nil {
+ return err
+ }
+ }
+
+ case *Int:
+ if !coreEncodingMatches(lv, targetType.(*Int)) {
+ return fmt.Errorf("int mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ case *Pointer:
+ tv := targetType.(*Pointer)
+
+ // Allow a pointer to a forward declaration to match a struct
+ // or union.
+ if fwd, ok := As[*Fwd](lv.Target); ok && fwd.matches(tv.Target) {
+ return nil
+ }
+
+ if fwd, ok := As[*Fwd](tv.Target); ok && fwd.matches(lv.Target) {
+ return nil
+ }
+
+ return coreTypesMatch(lv.Target, tv.Target, visited)
+
+ case *Array:
+ tv := targetType.(*Array)
+
+ if lv.Nelems != tv.Nelems {
+ return fmt.Errorf("array mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ return coreTypesMatch(lv.Type, tv.Type, visited)
+
+ case *FuncProto:
+ tv := targetType.(*FuncProto)
+
+ if len(lv.Params) != len(tv.Params) {
+ return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
+ }
+
+ for i, lparam := range lv.Params {
+ if err := coreTypesMatch(lparam.Type, tv.Params[i].Type, visited); err != nil {
+ return err
+ }
+ }
+
+ return coreTypesMatch(lv.Return, tv.Return, visited)
+
+ default:
+ return fmt.Errorf("unsupported type %T", localType)
+ }
+
+ return nil
+}
+
+// coreEncodingMatches returns true if both ints have the same size and signedness.
+// All encodings other than `Signed` are considered unsigned.
+func coreEncodingMatches(local, target *Int) bool {
+ return local.Size == target.Size && (local.Encoding == Signed) == (target.Encoding == Signed)
+}
+
+// coreEnumsMatch checks two enums match, which is considered to be the case if the following is true:
+// - size has to match (but enum may match enum64 and vice versa)
+// - local variants have to have a match in target by symbolic name (but not numeric value)
+func coreEnumsMatch(local *Enum, target *Enum) error {
+ if local.Size != target.Size {
+ return fmt.Errorf("size mismatch between %v and %v: %w", local, target, errIncompatibleTypes)
+ }
+
+ // If there are more values in the local than the target, there must be at least one value in the local
+	// that isn't in the target, and therefore the types are incompatible.
+ if len(local.Values) > len(target.Values) {
+ return fmt.Errorf("local has more values than target: %w", errIncompatibleTypes)
+ }
+
+outer:
+ for _, lv := range local.Values {
+ for _, rv := range target.Values {
+ if coreEssentialNamesMatch(lv.Name, rv.Name) {
+ continue outer
+ }
+ }
+
+ return fmt.Errorf("no match for %v in %v: %w", lv, target, errIncompatibleTypes)
+ }
+
+ return nil
+}
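+
+// As an illustration of the rules above (not part of the upstream source),
+// the following local/target pair would match, because every local value has
+// a same-named counterpart in the target and the sizes agree; the differing
+// numeric values are irrelevant:
+//
+//	local:  enum { OPT_A = 0,  OPT_B = 1 }              // size 4
+//	target: enum { OPT_A = 10, OPT_B = 20, OPT_C = 30 } // size 4
+//
+// A local value missing from the target, or a size mismatch, makes the enums
+// incompatible.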
diff --git a/vendor/github.com/cilium/ebpf/btf/doc.go b/vendor/github.com/cilium/ebpf/btf/doc.go
new file mode 100644
index 000000000..b1f4b1fc3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/doc.go
@@ -0,0 +1,5 @@
+// Package btf handles data encoded according to the BPF Type Format.
+//
+// The canonical documentation lives in the Linux kernel repository and is
+// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
+package btf
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
new file mode 100644
index 000000000..eb9044bad
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -0,0 +1,835 @@
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+)
+
+// ExtInfos contains ELF section metadata.
+type ExtInfos struct {
+ // The slices are sorted by offset in ascending order.
+ funcInfos map[string]FuncInfos
+ lineInfos map[string]LineInfos
+ relocationInfos map[string]CORERelocationInfos
+}
+
+// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
+//
+// Returns an error wrapping ErrNotFound if no ext infos are present.
+func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) {
+ section := file.Section(".BTF.ext")
+ if section == nil {
+ return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
+ }
+
+ if section.ReaderAt == nil {
+ return nil, fmt.Errorf("compressed ext_info is not supported")
+ }
+
+ return loadExtInfos(section.ReaderAt, file.ByteOrder, spec)
+}
+
+// loadExtInfos parses bare ext infos.
+func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, error) {
+ // Open unbuffered section reader. binary.Read() calls io.ReadFull on
+ // the header structs, resulting in one syscall per header.
+ headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
+ extHeader, err := parseBTFExtHeader(headerRd, bo)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF extension header: %w", err)
+ }
+
+ coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err)
+ }
+
+ buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen))
+ btfFuncInfos, err := parseFuncInfos(buf, bo, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF function info: %w", err)
+ }
+
+ funcInfos := make(map[string]FuncInfos, len(btfFuncInfos))
+ for section, bfis := range btfFuncInfos {
+ funcInfos[section], err = newFuncInfos(bfis, spec)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: func infos: %w", section, err)
+ }
+ }
+
+ buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen))
+ btfLineInfos, err := parseLineInfos(buf, bo, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF line info: %w", err)
+ }
+
+ lineInfos := make(map[string]LineInfos, len(btfLineInfos))
+ for section, blis := range btfLineInfos {
+ lineInfos[section], err = newLineInfos(blis, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: line infos: %w", section, err)
+ }
+ }
+
+ if coreHeader == nil || coreHeader.COREReloLen == 0 {
+ return &ExtInfos{funcInfos, lineInfos, nil}, nil
+ }
+
+ var btfCORERelos map[string][]bpfCORERelo
+ buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen))
+ btfCORERelos, err = parseCORERelos(buf, bo, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err)
+ }
+
+ coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos))
+ for section, brs := range btfCORERelos {
+ coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
+ }
+ }
+
+ return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil
+}
+
+type funcInfoMeta struct{}
+type coreRelocationMeta struct{}
+
+// Assign per-section metadata from BTF to a section's instructions.
+func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
+ funcInfos := ei.funcInfos[section]
+ lineInfos := ei.lineInfos[section]
+ reloInfos := ei.relocationInfos[section]
+
+ AssignMetadataToInstructions(insns, funcInfos, lineInfos, reloInfos)
+}
+
+// Assign per-instruction metadata to the instructions in insns.
+func AssignMetadataToInstructions(
+ insns asm.Instructions,
+ funcInfos FuncInfos,
+ lineInfos LineInfos,
+ reloInfos CORERelocationInfos,
+) {
+ iter := insns.Iterate()
+ for iter.Next() {
+ if len(funcInfos.infos) > 0 && funcInfos.infos[0].offset == iter.Offset {
+ *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos.infos[0].fn)
+ funcInfos.infos = funcInfos.infos[1:]
+ }
+
+ if len(lineInfos.infos) > 0 && lineInfos.infos[0].offset == iter.Offset {
+ *iter.Ins = iter.Ins.WithSource(lineInfos.infos[0].line)
+ lineInfos.infos = lineInfos.infos[1:]
+ }
+
+ if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset {
+ iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo)
+ reloInfos.infos = reloInfos.infos[1:]
+ }
+ }
+}
+
+// MarshalExtInfos encodes function and line info embedded in insns into kernel
+// wire format.
+//
+// If an instruction has an [asm.Comment], it will be synthesized into a mostly
+// empty line info.
+func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) {
+ iter := insns.Iterate()
+ for iter.Next() {
+ if iter.Ins.Source() != nil || FuncMetadata(iter.Ins) != nil {
+ goto marshal
+ }
+ }
+
+ return nil, nil, nil
+
+marshal:
+ var fiBuf, liBuf bytes.Buffer
+ for {
+ if fn := FuncMetadata(iter.Ins); fn != nil {
+ fi := &funcInfo{
+ fn: fn,
+ offset: iter.Offset,
+ }
+ if err := fi.marshal(&fiBuf, b); err != nil {
+ return nil, nil, fmt.Errorf("write func info: %w", err)
+ }
+ }
+
+ if source := iter.Ins.Source(); source != nil {
+ var line *Line
+ if l, ok := source.(*Line); ok {
+ line = l
+ } else {
+ line = &Line{
+ line: source.String(),
+ }
+ }
+
+ li := &lineInfo{
+ line: line,
+ offset: iter.Offset,
+ }
+ if err := li.marshal(&liBuf, b); err != nil {
+ return nil, nil, fmt.Errorf("write line info: %w", err)
+ }
+ }
+
+ if !iter.Next() {
+ break
+ }
+ }
+
+ return fiBuf.Bytes(), liBuf.Bytes(), nil
+}
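+
+// Illustrative usage sketch (not part of the upstream source): marshal ext
+// infos for instructions that already carry Func and Line metadata, for
+// example after ExtInfos.Assign:
+//
+//	var b Builder
+//	funcInfo, lineInfo, err := MarshalExtInfos(insns, &b)
+//	if err != nil {
+//		// handle error
+//	}
+//	// funcInfo and lineInfo are in kernel wire format; the matching type
+//	// information is obtained by marshaling b.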
+
+// btfExtHeader is found at the start of the .BTF.ext section.
+type btfExtHeader struct {
+ Magic uint16
+ Version uint8
+ Flags uint8
+
+ // HdrLen is larger than the size of struct btfExtHeader when it is
+ // immediately followed by a btfExtCOREHeader.
+ HdrLen uint32
+
+ FuncInfoOff uint32
+ FuncInfoLen uint32
+ LineInfoOff uint32
+ LineInfoLen uint32
+}
+
+// parseBTFExtHeader parses the header of the .BTF.ext section.
+func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
+ var header btfExtHeader
+ if err := binary.Read(r, bo, &header); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ if header.Magic != btfMagic {
+ return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+ }
+
+ if header.Version != 1 {
+ return nil, fmt.Errorf("unexpected version %v", header.Version)
+ }
+
+ if header.Flags != 0 {
+ return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+ }
+
+ if int64(header.HdrLen) < int64(binary.Size(&header)) {
+ return nil, fmt.Errorf("header length shorter than btfExtHeader size")
+ }
+
+ return &header, nil
+}
+
+// funcInfoStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its func_info entries.
+func (h *btfExtHeader) funcInfoStart() int64 {
+ return int64(h.HdrLen + h.FuncInfoOff)
+}
+
+// lineInfoStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its line_info entries.
+func (h *btfExtHeader) lineInfoStart() int64 {
+ return int64(h.HdrLen + h.LineInfoOff)
+}
+
+// coreReloStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its CO-RE relocation entries.
+func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
+ return int64(h.HdrLen + ch.COREReloOff)
+}
+
+// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
+// field is larger than its size.
+type btfExtCOREHeader struct {
+ COREReloOff uint32
+ COREReloLen uint32
+}
+
+// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
+// header bytes are present, extHeader.HdrLen will be larger than the struct,
+// indicating the presence of a CO-RE extension header.
+func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
+ extHdrSize := int64(binary.Size(&extHeader))
+ remainder := int64(extHeader.HdrLen) - extHdrSize
+
+ if remainder == 0 {
+ return nil, nil
+ }
+
+ var coreHeader btfExtCOREHeader
+ if err := binary.Read(r, bo, &coreHeader); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ return &coreHeader, nil
+}
+
+type btfExtInfoSec struct {
+ SecNameOff uint32
+ NumInfo uint32
+}
+
+// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
+// appearing within func_info and line_info sub-sections.
+// These headers appear once for each program section in the ELF and are
+// followed by one or more func/line_info records for the section.
+func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
+ var infoHeader btfExtInfoSec
+ if err := binary.Read(r, bo, &infoHeader); err != nil {
+ return "", nil, fmt.Errorf("read ext info header: %w", err)
+ }
+
+ secName, err := strings.Lookup(infoHeader.SecNameOff)
+ if err != nil {
+ return "", nil, fmt.Errorf("get section name: %w", err)
+ }
+ if secName == "" {
+ return "", nil, fmt.Errorf("extinfo header refers to empty section name")
+ }
+
+ if infoHeader.NumInfo == 0 {
+ return "", nil, fmt.Errorf("section %s has zero records", secName)
+ }
+
+ return secName, &infoHeader, nil
+}
+
+// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
+// or line_infos segment that describes the length of all extInfoRecords in
+// that segment.
+func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
+ const maxRecordSize = 256
+
+ var recordSize uint32
+ if err := binary.Read(r, bo, &recordSize); err != nil {
+ return 0, fmt.Errorf("can't read record size: %v", err)
+ }
+
+ if recordSize < 4 {
+ // Need at least InsnOff worth of bytes per record.
+ return 0, errors.New("record size too short")
+ }
+ if recordSize > maxRecordSize {
+ return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+ }
+
+ return recordSize, nil
+}
+
+// FuncInfos contains a sorted list of func infos.
+type FuncInfos struct {
+ infos []funcInfo
+}
+
+// The size of a FuncInfo in BTF wire format.
+var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))
+
+type funcInfo struct {
+ fn *Func
+ offset asm.RawInstructionOffset
+}
+
+type bpfFuncInfo struct {
+ // Instruction offset of the function within an ELF section.
+ InsnOff uint32
+ TypeID TypeID
+}
+
+func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) {
+ typ, err := spec.TypeByID(fi.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ fn, ok := typ.(*Func)
+ if !ok {
+ return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
+ }
+
+ // C doesn't have anonymous functions, but check just in case.
+ if fn.Name == "" {
+ return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
+ }
+
+ return &funcInfo{
+ fn,
+ asm.RawInstructionOffset(fi.InsnOff),
+ }, nil
+}
+
+func newFuncInfos(bfis []bpfFuncInfo, spec *Spec) (FuncInfos, error) {
+ fis := FuncInfos{
+ infos: make([]funcInfo, 0, len(bfis)),
+ }
+ for _, bfi := range bfis {
+ fi, err := newFuncInfo(bfi, spec)
+ if err != nil {
+ return FuncInfos{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
+ }
+ fis.infos = append(fis.infos, *fi)
+ }
+ sort.Slice(fis.infos, func(i, j int) bool {
+ return fis.infos[i].offset <= fis.infos[j].offset
+ })
+ return fis, nil
+}
+
+// LoadFuncInfos parses BTF func info in kernel wire format.
+func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncInfos, error) {
+ fis, err := parseFuncInfoRecords(
+ reader,
+ bo,
+ FuncInfoSize,
+ recordNum,
+ false,
+ )
+ if err != nil {
+ return FuncInfos{}, fmt.Errorf("parsing BTF func info: %w", err)
+ }
+
+ return newFuncInfos(fis, spec)
+}
+
+// marshal into the BTF wire format.
+func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error {
+ id, err := b.Add(fi.fn)
+ if err != nil {
+ return err
+ }
+ bfi := bpfFuncInfo{
+ InsnOff: uint32(fi.offset),
+ TypeID: id,
+ }
+ buf := make([]byte, FuncInfoSize)
+ internal.NativeEndian.PutUint32(buf, bfi.InsnOff)
+ internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
+ _, err = w.Write(buf)
+ return err
+}
+
+// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
+// func infos indexed by section name.
+func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]bpfFuncInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseFuncInfoRecords parses a stream of func_infos into a funcInfos.
+// These records appear after a btf_ext_info_sec header in the func_info
+// sub-section of .BTF.ext.
+func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfFuncInfo, error) {
+ var out []bpfFuncInfo
+ var fi bpfFuncInfo
+
+ if exp, got := FuncInfoSize, recordSize; exp != got {
+		// The BTF blob's record size differs from the one we know how to parse.
+ return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
+ }
+
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &fi); err != nil {
+ return nil, fmt.Errorf("can't read function info: %v", err)
+ }
+
+ if offsetInBytes {
+ if fi.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ fi.InsnOff /= asm.InstructionSize
+ }
+
+ out = append(out, fi)
+ }
+
+ return out, nil
+}
+
+// The size of a LineInfo in BTF wire format.
+var LineInfoSize = uint32(binary.Size(bpfLineInfo{}))
+
+// Line represents the location and contents of a single line of source
+// code a BPF ELF was compiled from.
+type Line struct {
+ fileName string
+ line string
+ lineNumber uint32
+ lineColumn uint32
+}
+
+func (li *Line) FileName() string {
+ return li.fileName
+}
+
+func (li *Line) Line() string {
+ return li.line
+}
+
+func (li *Line) LineNumber() uint32 {
+ return li.lineNumber
+}
+
+func (li *Line) LineColumn() uint32 {
+ return li.lineColumn
+}
+
+func (li *Line) String() string {
+ return li.line
+}
+
+// LineInfos contains a sorted list of line infos.
+type LineInfos struct {
+ infos []lineInfo
+}
+
+type lineInfo struct {
+ line *Line
+ offset asm.RawInstructionOffset
+}
+
+// Constants for the format of bpfLineInfo.LineCol.
+const (
+ bpfLineShift = 10
+ bpfLineMax = (1 << (32 - bpfLineShift)) - 1
+ bpfColumnMax = (1 << bpfLineShift) - 1
+)
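+
+// LineCol packs the line number into the upper 22 bits and the column into
+// the lower 10 bits, mirroring the marshaling code below (illustration only):
+//
+//	LineCol    = (lineNumber << bpfLineShift) | (lineColumn & bpfColumnMax)
+//	lineNumber = LineCol >> bpfLineShift
+//	lineColumn = LineCol & bpfColumnMax
+//
+// For example, line 42, column 7 is stored as 42<<10 | 7 = 43015.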
+
+type bpfLineInfo struct {
+ // Instruction offset of the line within the whole instruction stream, in instructions.
+ InsnOff uint32
+ FileNameOff uint32
+ LineOff uint32
+ LineCol uint32
+}
+
+// LoadLineInfos parses BTF line info in kernel wire format.
+func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineInfos, error) {
+ lis, err := parseLineInfoRecords(
+ reader,
+ bo,
+ LineInfoSize,
+ recordNum,
+ false,
+ )
+ if err != nil {
+ return LineInfos{}, fmt.Errorf("parsing BTF line info: %w", err)
+ }
+
+ return newLineInfos(lis, spec.strings)
+}
+
+func newLineInfo(li bpfLineInfo, strings *stringTable) (lineInfo, error) {
+ line, err := strings.Lookup(li.LineOff)
+ if err != nil {
+ return lineInfo{}, fmt.Errorf("lookup of line: %w", err)
+ }
+
+ fileName, err := strings.Lookup(li.FileNameOff)
+ if err != nil {
+ return lineInfo{}, fmt.Errorf("lookup of filename: %w", err)
+ }
+
+ lineNumber := li.LineCol >> bpfLineShift
+ lineColumn := li.LineCol & bpfColumnMax
+
+ return lineInfo{
+ &Line{
+ fileName,
+ line,
+ lineNumber,
+ lineColumn,
+ },
+ asm.RawInstructionOffset(li.InsnOff),
+ }, nil
+}
+
+func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineInfos, error) {
+ lis := LineInfos{
+ infos: make([]lineInfo, 0, len(blis)),
+ }
+ for _, bli := range blis {
+ li, err := newLineInfo(bli, strings)
+ if err != nil {
+ return LineInfos{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
+ }
+ lis.infos = append(lis.infos, li)
+ }
+ sort.Slice(lis.infos, func(i, j int) bool {
+ return lis.infos[i].offset <= lis.infos[j].offset
+ })
+ return lis, nil
+}
+
+// marshal writes the binary representation of the LineInfo to w.
+func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error {
+ line := li.line
+ if line.lineNumber > bpfLineMax {
+ return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
+ }
+
+ if line.lineColumn > bpfColumnMax {
+ return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
+ }
+
+ fileNameOff, err := b.addString(line.fileName)
+ if err != nil {
+ return fmt.Errorf("file name %q: %w", line.fileName, err)
+ }
+
+ lineOff, err := b.addString(line.line)
+ if err != nil {
+ return fmt.Errorf("line %q: %w", line.line, err)
+ }
+
+ bli := bpfLineInfo{
+ uint32(li.offset),
+ fileNameOff,
+ lineOff,
+ (line.lineNumber << bpfLineShift) | line.lineColumn,
+ }
+
+ buf := make([]byte, LineInfoSize)
+ internal.NativeEndian.PutUint32(buf, bli.InsnOff)
+ internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
+ internal.NativeEndian.PutUint32(buf[8:], bli.LineOff)
+ internal.NativeEndian.PutUint32(buf[12:], bli.LineCol)
+ _, err = w.Write(buf)
+ return err
+}
+
+// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
+// line infos indexed by section name.
+func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]bpfLineInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseLineInfoRecords parses a stream of line_infos into a lineInfos.
+// These records appear after a btf_ext_info_sec header in the line_info
+// sub-section of .BTF.ext.
+func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) {
+ var li bpfLineInfo
+
+ if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
+		// The BTF blob's record size differs from the one we know how to parse.
+ return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
+ }
+
+ out := make([]bpfLineInfo, 0, recordNum)
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &li); err != nil {
+ return nil, fmt.Errorf("can't read line info: %v", err)
+ }
+
+ if offsetInBytes {
+ if li.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ li.InsnOff /= asm.InstructionSize
+ }
+
+ out = append(out, li)
+ }
+
+ return out, nil
+}
+
+// bpfCORERelo matches the kernel's struct bpf_core_relo.
+type bpfCORERelo struct {
+ InsnOff uint32
+ TypeID TypeID
+ AccessStrOff uint32
+ Kind coreKind
+}
+
+// CORERelocation describes a single CO-RE relocation, as parsed from the
+// core_relos sub-section of .BTF.ext.
+type CORERelocation struct {
+ // The local type of the relocation, stripped of typedefs and qualifiers.
+ typ Type
+ accessor coreAccessor
+ kind coreKind
+ // The ID of the local type in the source BTF.
+ id TypeID
+}
+
+func (cr *CORERelocation) String() string {
+ return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id)
+}
+
+// CORERelocationMetadata returns the CO-RE relocation attached to an
+// instruction's metadata, or nil if there is none.
+func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
+ relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
+ return relo
+}
+
+// CORERelocationInfos contains a sorted list of CO-RE relocation infos.
+type CORERelocationInfos struct {
+ infos []coreRelocationInfo
+}
+
+type coreRelocationInfo struct {
+ relo *CORERelocation
+ offset asm.RawInstructionOffset
+}
+
+func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) {
+ typ, err := spec.TypeByID(relo.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ accessorStr, err := strings.Lookup(relo.AccessStrOff)
+ if err != nil {
+ return nil, err
+ }
+
+ accessor, err := parseCOREAccessor(accessorStr)
+ if err != nil {
+ return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+ }
+
+ return &coreRelocationInfo{
+ &CORERelocation{
+ typ,
+ accessor,
+ relo.Kind,
+ relo.TypeID,
+ },
+ asm.RawInstructionOffset(relo.InsnOff),
+ }, nil
+}
+
+func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) {
+ rs := CORERelocationInfos{
+ infos: make([]coreRelocationInfo, 0, len(brs)),
+ }
+ for _, br := range brs {
+ relo, err := newRelocationInfo(br, spec, strings)
+ if err != nil {
+ return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err)
+ }
+ rs.infos = append(rs.infos, *relo)
+ }
+ sort.Slice(rs.infos, func(i, j int) bool {
+ return rs.infos[i].offset < rs.infos[j].offset
+ })
+ return rs, nil
+}
+
+var extInfoReloSize = binary.Size(bpfCORERelo{})
+
+// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
+// CO-RE relocations indexed by section name.
+func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ if recordSize != uint32(extInfoReloSize) {
+ return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
+ }
+
+ result := make(map[string][]bpfCORERelo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
+// coreRelos. These records appear after a btf_ext_info_sec header in the
+// core_relos sub-section of .BTF.ext.
+func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) {
+ var out []bpfCORERelo
+
+ var relo bpfCORERelo
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &relo); err != nil {
+ return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
+ }
+
+ if relo.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ relo.InsnOff /= asm.InstructionSize
+
+ out = append(out, relo)
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/feature.go b/vendor/github.com/cilium/ebpf/btf/feature.go
new file mode 100644
index 000000000..6feb08dfb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/feature.go
@@ -0,0 +1,123 @@
+package btf
+
+import (
+ "errors"
+ "math"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// haveBTF attempts to load a BTF blob containing an Int. It should pass on any
+// kernel that supports BPF_BTF_LOAD.
+var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error {
+ // 0-length anonymous integer
+ err := probeBTF(&Int{})
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is
+// used as a proxy for .bss, .data and .rodata map support, which generally
+// come with a Var and Datasec. These were introduced in Linux 5.2.
+var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error {
+ if err := haveBTF(); err != nil {
+ return err
+ }
+
+ v := &Var{
+ Name: "a",
+ Type: &Pointer{(*Void)(nil)},
+ }
+
+ err := probeBTF(v)
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ // Treat both EINVAL and EPERM as not supported: creating the map may still
+ // succeed without Btf* attrs.
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It
+// is used as a proxy for ext_info (func_info) support, which depends on
+// Func(Proto) by definition.
+var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error {
+ if err := haveBTF(); err != nil {
+ return err
+ }
+
+ fn := &Func{
+ Name: "a",
+ Type: &FuncProto{Return: (*Void)(nil)},
+ }
+
+ err := probeBTF(fn)
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error {
+ if err := haveProgBTF(); err != nil {
+ return err
+ }
+
+ fn := &Func{
+ Name: "a",
+ Type: &FuncProto{Return: (*Void)(nil)},
+ Linkage: GlobalFunc,
+ }
+
+ err := probeBTF(fn)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+var haveEnum64 = internal.NewFeatureTest("ENUM64", "6.0", func() error {
+ if err := haveBTF(); err != nil {
+ return err
+ }
+
+ enum := &Enum{
+ Size: 8,
+ Values: []EnumValue{
+ {"TEST", math.MaxUint32 + 1},
+ },
+ }
+
+ err := probeBTF(enum)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+func probeBTF(typ Type) error {
+ b, err := NewBuilder([]Type{typ})
+ if err != nil {
+ return err
+ }
+
+ buf, err := b.Marshal(nil, nil)
+ if err != nil {
+ return err
+ }
+
+ fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(buf),
+ BtfSize: uint32(len(buf)),
+ })
+
+ if err == nil {
+ fd.Close()
+ }
+
+ return err
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go
new file mode 100644
index 000000000..5e581b4a8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/format.go
@@ -0,0 +1,350 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+var errNestedTooDeep = errors.New("nested too deep")
+
+// GoFormatter converts a Type to Go syntax.
+//
+// A zero GoFormatter is valid to use.
+type GoFormatter struct {
+ w strings.Builder
+
+ // Types present in this map are referred to using the given name if they
+ // are encountered when outputting another type.
+ Names map[Type]string
+
+ // Identifier is called for each field of struct-like types. By default the
+ // field name is used as is.
+ Identifier func(string) string
+
+ // EnumIdentifier is called for each element of an enum. By default the
+ // name of the enum type is concatenated with Identifier(element).
+ EnumIdentifier func(name, element string) string
+}
+
+// TypeDeclaration generates a Go type declaration for a BTF type.
+func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) {
+ gf.w.Reset()
+ if err := gf.writeTypeDecl(name, typ); err != nil {
+ return "", err
+ }
+ return gf.w.String(), nil
+}
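+
+// Illustrative usage sketch (not part of the upstream source):
+//
+//	var gf GoFormatter
+//	decl, err := gf.TypeDeclaration("myCounter", &Int{Size: 4, Encoding: Unsigned})
+//	if err != nil {
+//		// handle error
+//	}
+//	// decl is now "type myCounter uint32".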
+
+func (gf *GoFormatter) identifier(s string) string {
+ if gf.Identifier != nil {
+ return gf.Identifier(s)
+ }
+
+ return s
+}
+
+func (gf *GoFormatter) enumIdentifier(name, element string) string {
+ if gf.EnumIdentifier != nil {
+ return gf.EnumIdentifier(name, element)
+ }
+
+ return name + gf.identifier(element)
+}
+
+// writeTypeDecl outputs a declaration of the given type.
+//
+// It encodes https://golang.org/ref/spec#Type_declarations:
+//
+// type foo struct { bar uint32; }
+// type bar int32
+func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
+ if name == "" {
+ return fmt.Errorf("need a name for type %s", typ)
+ }
+
+ typ = skipQualifiers(typ)
+ fmt.Fprintf(&gf.w, "type %s ", name)
+ if err := gf.writeTypeLit(typ, 0); err != nil {
+ return err
+ }
+
+ e, ok := typ.(*Enum)
+ if !ok || len(e.Values) == 0 {
+ return nil
+ }
+
+ gf.w.WriteString("; const ( ")
+ for _, ev := range e.Values {
+ id := gf.enumIdentifier(name, ev.Name)
+ var value any
+ if e.Signed {
+ value = int64(ev.Value)
+ } else {
+ value = ev.Value
+ }
+ fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, value)
+ }
+ gf.w.WriteString(")")
+
+ return nil
+}
+
+// writeType outputs the name of a named type or a literal describing the type.
+//
+// It encodes https://golang.org/ref/spec#Types.
+//
+// foo (if foo is a named type)
+// uint32
+func (gf *GoFormatter) writeType(typ Type, depth int) error {
+ typ = skipQualifiers(typ)
+
+ name := gf.Names[typ]
+ if name != "" {
+ gf.w.WriteString(name)
+ return nil
+ }
+
+ return gf.writeTypeLit(typ, depth)
+}
+
+// writeTypeLit outputs a literal describing the type.
+//
+// The function ignores named types.
+//
+// It encodes https://golang.org/ref/spec#TypeLit.
+//
+// struct { bar uint32; }
+// uint32
+func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
+ depth++
+ if depth > maxResolveDepth {
+ return errNestedTooDeep
+ }
+
+ var err error
+ switch v := skipQualifiers(typ).(type) {
+ case *Int:
+ err = gf.writeIntLit(v)
+
+ case *Enum:
+ if !v.Signed {
+ gf.w.WriteRune('u')
+ }
+ switch v.Size {
+ case 1:
+ gf.w.WriteString("int8")
+ case 2:
+ gf.w.WriteString("int16")
+ case 4:
+ gf.w.WriteString("int32")
+ case 8:
+ gf.w.WriteString("int64")
+ default:
+ err = fmt.Errorf("invalid enum size %d", v.Size)
+ }
+
+ case *Typedef:
+ err = gf.writeType(v.Type, depth)
+
+ case *Array:
+ fmt.Fprintf(&gf.w, "[%d]", v.Nelems)
+ err = gf.writeType(v.Type, depth)
+
+ case *Struct:
+ err = gf.writeStructLit(v.Size, v.Members, depth)
+
+ case *Union:
+ // Always choose the first member to represent the union in Go.
+ err = gf.writeStructLit(v.Size, v.Members[:1], depth)
+
+ case *Datasec:
+ err = gf.writeDatasecLit(v, depth)
+
+ default:
+ return fmt.Errorf("type %T: %w", v, ErrNotSupported)
+ }
+
+ if err != nil {
+ return fmt.Errorf("%s: %w", typ, err)
+ }
+
+ return nil
+}
+
+func (gf *GoFormatter) writeIntLit(i *Int) error {
+ bits := i.Size * 8
+ switch i.Encoding {
+ case Bool:
+ if i.Size != 1 {
+ return fmt.Errorf("bool with size %d", i.Size)
+ }
+ gf.w.WriteString("bool")
+ case Char:
+ if i.Size != 1 {
+ return fmt.Errorf("char with size %d", i.Size)
+ }
+ // BTF doesn't have a way to specify the signedness of a char. Assume
+ // we are dealing with unsigned, since this works nicely with []byte
+ // in Go code.
+ fallthrough
+ case Unsigned, Signed:
+ stem := "uint"
+ if i.Encoding == Signed {
+ stem = "int"
+ }
+ if i.Size > 8 {
+ fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8)
+ } else {
+ fmt.Fprintf(&gf.w, "%s%d", stem, bits)
+ }
+ default:
+ return fmt.Errorf("can't encode %s", i.Encoding)
+ }
+ return nil
+}
+
+func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error {
+ gf.w.WriteString("struct { ")
+
+ prevOffset := uint32(0)
+ skippedBitfield := false
+ for i, m := range members {
+ if m.BitfieldSize > 0 {
+ skippedBitfield = true
+ continue
+ }
+
+ offset := m.Offset.Bytes()
+ if n := offset - prevOffset; skippedBitfield && n > 0 {
+ fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n)
+ } else {
+ gf.writePadding(n)
+ }
+
+ fieldSize, err := Sizeof(m.Type)
+ if err != nil {
+ return fmt.Errorf("field %d: %w", i, err)
+ }
+
+ prevOffset = offset + uint32(fieldSize)
+ if prevOffset > size {
+ return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size)
+ }
+
+ if err := gf.writeStructField(m, depth); err != nil {
+ return fmt.Errorf("field %d: %w", i, err)
+ }
+ }
+
+ gf.writePadding(size - prevOffset)
+ gf.w.WriteString("}")
+ return nil
+}
+
+func (gf *GoFormatter) writeStructField(m Member, depth int) error {
+ if m.BitfieldSize > 0 {
+ return fmt.Errorf("bitfields are not supported")
+ }
+ if m.Offset%8 != 0 {
+ return fmt.Errorf("unsupported offset %d", m.Offset)
+ }
+
+ if m.Name == "" {
+ // Special case a nested anonymous union like
+ // struct foo { union { int bar; int baz }; }
+ // by replacing the whole union with its first member.
+ union, ok := m.Type.(*Union)
+ if !ok {
+ return fmt.Errorf("anonymous fields are not supported")
+
+ }
+
+ if len(union.Members) == 0 {
+ return errors.New("empty anonymous union")
+ }
+
+ depth++
+ if depth > maxResolveDepth {
+ return errNestedTooDeep
+ }
+
+ m := union.Members[0]
+ size, err := Sizeof(m.Type)
+ if err != nil {
+ return err
+ }
+
+ if err := gf.writeStructField(m, depth); err != nil {
+ return err
+ }
+
+ gf.writePadding(union.Size - uint32(size))
+ return nil
+
+ }
+
+ fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name))
+
+ if err := gf.writeType(m.Type, depth); err != nil {
+ return err
+ }
+
+ gf.w.WriteString("; ")
+ return nil
+}
+
+func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
+ gf.w.WriteString("struct { ")
+	extHdrSize := int64(binary.Size(extHeader))
+ prevOffset := uint32(0)
+ for i, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ return fmt.Errorf("can't format %s as part of data section", vsi.Type)
+ }
+
+ if v.Linkage != GlobalVar {
+ // Ignore static, extern, etc. for now.
+ continue
+ }
+
+ if v.Name == "" {
+ return fmt.Errorf("variable %d: empty name", i)
+ }
+
+ gf.writePadding(vsi.Offset - prevOffset)
+ prevOffset = vsi.Offset + vsi.Size
+
+ fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name))
+
+ if err := gf.writeType(v.Type, depth); err != nil {
+ return fmt.Errorf("variable %d: %w", i, err)
+ }
+
+ gf.w.WriteString("; ")
+ }
+
+ gf.writePadding(ds.Size - prevOffset)
+ gf.w.WriteString("}")
+ return nil
+}
+
+func (gf *GoFormatter) writePadding(bytes uint32) {
+ if bytes > 0 {
+ fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes)
+ }
+}
+
+func skipQualifiers(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxResolveDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ default:
+ return result
+ }
+ }
+ return &cycle{typ}
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go
new file mode 100644
index 000000000..adfa6fed4
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/handle.go
@@ -0,0 +1,317 @@
+package btf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Handle is a reference to BTF loaded into the kernel.
+type Handle struct {
+ fd *sys.FD
+
+ // Size of the raw BTF in bytes.
+ size uint32
+
+ needsKernelBase bool
+}
+
+// NewHandle loads the contents of a [Builder] into the kernel.
+//
+// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
+func NewHandle(b *Builder) (*Handle, error) {
+ small := getByteSlice()
+ defer putByteSlice(small)
+
+ buf, err := b.Marshal(*small, KernelMarshalOptions())
+ if err != nil {
+ return nil, fmt.Errorf("marshal BTF: %w", err)
+ }
+
+ return NewHandleFromRawBTF(buf)
+}
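+
+// Illustrative usage sketch (not part of the upstream source):
+//
+//	b, err := NewBuilder([]Type{&Int{Name: "u32", Size: 4}})
+//	if err != nil {
+//		// handle error
+//	}
+//	handle, err := NewHandle(b)
+//	if errors.Is(err, ErrNotSupported) {
+//		// the kernel does not support BTF
+//	}
+//	defer handle.Close()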
+
+// NewHandleFromRawBTF loads raw BTF into the kernel.
+//
+// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
+func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
+ const minLogSize = 64 * 1024
+
+ if uint64(len(btf)) > math.MaxUint32 {
+ return nil, errors.New("BTF exceeds the maximum size")
+ }
+
+ attr := &sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ }
+
+ var (
+ logBuf []byte
+ err error
+ )
+ for {
+ var fd *sys.FD
+ fd, err = sys.BtfLoad(attr)
+ if err == nil {
+ return &Handle{fd, attr.BtfSize, false}, nil
+ }
+
+ if attr.BtfLogTrueSize != 0 && attr.BtfLogSize >= attr.BtfLogTrueSize {
+ // The log buffer already has the correct size.
+ break
+ }
+
+ if attr.BtfLogSize != 0 && !errors.Is(err, unix.ENOSPC) {
+ // Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
+ // if there are other verification errors. ENOSPC is only returned when
+ // the BTF blob is correct, a log was requested, and the provided buffer
+ // is too small. We're therefore not sure whether we got the full
+ // log or not.
+ break
+ }
+
+ // Make an educated guess how large the buffer should be. Start
+ // at a reasonable minimum and then double the size.
+ logSize := uint32(max(len(logBuf)*2, minLogSize))
+ if int(logSize) < len(logBuf) {
+ return nil, errors.New("overflow while probing log buffer size")
+ }
+
+ if attr.BtfLogTrueSize != 0 {
+ // The kernel has given us a hint how large the log buffer has to be.
+ logSize = attr.BtfLogTrueSize
+ }
+
+ logBuf = make([]byte, logSize)
+ attr.BtfLogSize = logSize
+ attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
+ attr.BtfLogLevel = 1
+ }
+
+ if err := haveBTF(); err != nil {
+ return nil, err
+ }
+
+ return nil, internal.ErrorWithLog("load btf", err, logBuf)
+}
+
+// NewHandleFromID returns the BTF handle for a given id.
+//
+// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
+//
+// Returns ErrNotExist, if there is no BTF with the given id.
+//
+// Requires CAP_SYS_ADMIN.
+func NewHandleFromID(id ID) (*Handle, error) {
+ fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
+ }
+
+ info, err := newHandleInfoFromFD(fd)
+ if err != nil {
+ _ = fd.Close()
+ return nil, err
+ }
+
+ return &Handle{fd, info.size, info.IsModule()}, nil
+}
+
+// Spec parses the kernel BTF into Go types.
+//
+// base must contain type information for vmlinux if the handle is for
+// a kernel module. It may be nil otherwise.
+func (h *Handle) Spec(base *Spec) (*Spec, error) {
+ var btfInfo sys.BtfInfo
+ btfBuffer := make([]byte, h.size)
+ btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
+
+ if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ if h.needsKernelBase && base == nil {
+ return nil, fmt.Errorf("missing base types")
+ }
+
+ return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base)
+}
+
+// Close destroys the handle.
+//
+// Subsequent calls to FD will return an invalid value.
+func (h *Handle) Close() error {
+ if h == nil {
+ return nil
+ }
+
+ return h.fd.Close()
+}
+
+// FD returns the file descriptor for the handle.
+func (h *Handle) FD() int {
+ return h.fd.Int()
+}
+
+// Info returns metadata about the handle.
+func (h *Handle) Info() (*HandleInfo, error) {
+ return newHandleInfoFromFD(h.fd)
+}
+
+// HandleInfo describes a Handle.
+type HandleInfo struct {
+ // ID of this handle in the kernel. The ID is only valid as long as the
+ // associated handle is kept alive.
+ ID ID
+
+ // Name is an identifying name for the BTF, currently only used by the
+ // kernel.
+ Name string
+
+ // IsKernel is true if the BTF originated with the kernel and not
+ // userspace.
+ IsKernel bool
+
+ // Size of the raw BTF in bytes.
+ size uint32
+}
+
+func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
+	// We invoke the syscall once with empty BTF and name buffers to get size
+ // information to allocate buffers. Then we invoke it a second time with
+ // buffers to receive the data.
+ var btfInfo sys.BtfInfo
+ if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+ return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
+ }
+
+ if btfInfo.NameLen > 0 {
+ // NameLen doesn't account for the terminating NUL.
+ btfInfo.NameLen++
+ }
+
+ // Don't pull raw BTF by default, since it may be quite large.
+ btfSize := btfInfo.BtfSize
+ btfInfo.BtfSize = 0
+
+ nameBuffer := make([]byte, btfInfo.NameLen)
+ btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer)
+ if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ return &HandleInfo{
+ ID: ID(btfInfo.Id),
+ Name: unix.ByteSliceToString(nameBuffer),
+ IsKernel: btfInfo.KernelBtf != 0,
+ size: btfSize,
+ }, nil
+}
+
+// IsVmlinux returns true if the BTF is for the kernel itself.
+func (i *HandleInfo) IsVmlinux() bool {
+ return i.IsKernel && i.Name == "vmlinux"
+}
+
+// IsModule returns true if the BTF is for a kernel module.
+func (i *HandleInfo) IsModule() bool {
+ return i.IsKernel && i.Name != "vmlinux"
+}
+
+// HandleIterator allows enumerating BTF blobs loaded into the kernel.
+type HandleIterator struct {
+ // The ID of the current handle. Only valid after a call to Next.
+ ID ID
+ // The current Handle. Only valid until a call to Next.
+ // See Take if you want to retain the handle.
+ Handle *Handle
+ err error
+}
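+
+// Illustrative iteration sketch (not part of the upstream source):
+//
+//	it := new(HandleIterator)
+//	defer it.Handle.Close()
+//
+//	for it.Next() {
+//		fmt.Println("found BTF object with ID", it.ID)
+//	}
+//	if err := it.Err(); err != nil {
+//		// handle error
+//	}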
+
+// Next retrieves a handle for the next BTF object.
+//
+// Returns true if another BTF object was found. Call [HandleIterator.Err] after
+// the function returns false.
+func (it *HandleIterator) Next() bool {
+ id := it.ID
+ for {
+ attr := &sys.BtfGetNextIdAttr{Id: id}
+ err := sys.BtfGetNextId(attr)
+ if errors.Is(err, os.ErrNotExist) {
+ // There are no more BTF objects.
+ break
+ } else if err != nil {
+ it.err = fmt.Errorf("get next BTF ID: %w", err)
+ break
+ }
+
+ id = attr.NextId
+ handle, err := NewHandleFromID(id)
+ if errors.Is(err, os.ErrNotExist) {
+ // Try again with the next ID.
+ continue
+ } else if err != nil {
+ it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
+ break
+ }
+
+ it.Handle.Close()
+ it.ID, it.Handle = id, handle
+ return true
+ }
+
+ // No more handles or we encountered an error.
+ it.Handle.Close()
+ it.Handle = nil
+ return false
+}
+
+// Take the ownership of the current handle.
+//
+// It's the caller's responsibility to close the handle.
+func (it *HandleIterator) Take() *Handle {
+ handle := it.Handle
+ it.Handle = nil
+ return handle
+}
+
+// Err returns an error if iteration failed for some reason.
+func (it *HandleIterator) Err() error {
+ return it.err
+}
+
+// FindHandle returns the first handle for which predicate returns true.
+//
+// Requires CAP_SYS_ADMIN.
+//
+// Returns an error wrapping ErrNotFound if predicate never returns true or if
+// there is no BTF loaded into the kernel.
+func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) {
+ it := new(HandleIterator)
+ defer it.Handle.Close()
+
+ for it.Next() {
+ info, err := it.Handle.Info()
+ if err != nil {
+ return nil, fmt.Errorf("info for ID %d: %w", it.ID, err)
+ }
+
+ if predicate(info) {
+ return it.Take(), nil
+ }
+ }
+ if err := it.Err(); err != nil {
+ return nil, fmt.Errorf("iterate handles: %w", err)
+ }
+
+ return nil, fmt.Errorf("find handle: %w", ErrNotFound)
+}
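+
+// Illustrative usage sketch (not part of the upstream source): find the BTF
+// handle of a loaded kernel module by name.
+//
+//	handle, err := FindHandle(func(info *HandleInfo) bool {
+//		return info.IsModule() && info.Name == "nf_conntrack"
+//	})
+//	if err != nil {
+//		// wraps ErrNotFound if nothing matched
+//	}
+//	defer handle.Close()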
diff --git a/vendor/github.com/cilium/ebpf/btf/kernel.go b/vendor/github.com/cilium/ebpf/btf/kernel.go
new file mode 100644
index 000000000..8584ebcb9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/kernel.go
@@ -0,0 +1,159 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/kallsyms"
+)
+
+var kernelBTF = struct {
+ sync.RWMutex
+ kernel *Spec
+ modules map[string]*Spec
+}{
+ modules: make(map[string]*Spec),
+}
+
+// FlushKernelSpec removes any cached kernel type information.
+func FlushKernelSpec() {
+ kallsyms.FlushKernelModuleCache()
+
+ kernelBTF.Lock()
+ defer kernelBTF.Unlock()
+
+ kernelBTF.kernel = nil
+ kernelBTF.modules = make(map[string]*Spec)
+}
+
+// LoadKernelSpec returns the current kernel's BTF information.
+//
+// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
+// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
+func LoadKernelSpec() (*Spec, error) {
+ kernelBTF.RLock()
+ spec := kernelBTF.kernel
+ kernelBTF.RUnlock()
+
+ if spec == nil {
+ kernelBTF.Lock()
+ defer kernelBTF.Unlock()
+
+ spec = kernelBTF.kernel
+ }
+
+ if spec != nil {
+ return spec.Copy(), nil
+ }
+
+ spec, _, err := loadKernelSpec()
+ if err != nil {
+ return nil, err
+ }
+
+ kernelBTF.kernel = spec
+ return spec.Copy(), nil
+}
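+
+// Illustrative usage sketch (not part of the upstream source); TypeByName is
+// assumed to be available on *Spec as in the rest of the package:
+//
+//	spec, err := LoadKernelSpec()
+//	if err != nil {
+//		// wraps ErrNotSupported if the kernel has no BTF
+//	}
+//	var task *Struct
+//	if err := spec.TypeByName("task_struct", &task); err != nil {
+//		// type not found in the kernel BTF
+//	}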
+
+// LoadKernelModuleSpec returns the BTF information for the named kernel module.
+//
+// Defaults to /sys/kernel/btf/.
+// Returns an error wrapping ErrNotSupported if BTF is not enabled.
+// Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist.
+func LoadKernelModuleSpec(module string) (*Spec, error) {
+ kernelBTF.RLock()
+ spec := kernelBTF.modules[module]
+ kernelBTF.RUnlock()
+
+ if spec != nil {
+ return spec.Copy(), nil
+ }
+
+ base, err := LoadKernelSpec()
+ if err != nil {
+ return nil, fmt.Errorf("load kernel spec: %w", err)
+ }
+
+ kernelBTF.Lock()
+ defer kernelBTF.Unlock()
+
+ if spec = kernelBTF.modules[module]; spec != nil {
+ return spec.Copy(), nil
+ }
+
+ spec, err = loadKernelModuleSpec(module, base)
+ if err != nil {
+ return nil, err
+ }
+
+ kernelBTF.modules[module] = spec
+ return spec.Copy(), nil
+}
+
+func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
+ fh, err := os.Open("/sys/kernel/btf/vmlinux")
+ if err == nil {
+ defer fh.Close()
+
+ spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
+ return spec, false, err
+ }
+
+ file, err := findVMLinux()
+ if err != nil {
+ return nil, false, err
+ }
+ defer file.Close()
+
+ spec, err := LoadSpecFromReader(file)
+ return spec, true, err
+}
+
+func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) {
+ dir, file := filepath.Split(module)
+ if dir != "" || filepath.Ext(file) != "" {
+ return nil, fmt.Errorf("invalid module name %q", module)
+ }
+
+ fh, err := os.Open(filepath.Join("/sys/kernel/btf", module))
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ return loadRawSpec(fh, internal.NativeEndian, base)
+}
+
+// findVMLinux scans multiple well-known paths for vmlinux kernel images.
+func findVMLinux() (*os.File, error) {
+ release, err := internal.KernelRelease()
+ if err != nil {
+ return nil, err
+ }
+
+ // use same list of locations as libbpf
+ // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
+ locations := []string{
+ "/boot/vmlinux-%s",
+ "/lib/modules/%s/vmlinux-%[1]s",
+ "/lib/modules/%s/build/vmlinux",
+ "/usr/lib/modules/%s/kernel/vmlinux",
+ "/usr/lib/debug/boot/vmlinux-%s",
+ "/usr/lib/debug/boot/vmlinux-%s.debug",
+ "/usr/lib/debug/lib/modules/%s/vmlinux",
+ }
+
+ for _, loc := range locations {
+ file, err := os.Open(fmt.Sprintf(loc, release))
+ if errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ return file, err
+ }
+
+ return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go
new file mode 100644
index 000000000..f14cfa6e9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/marshal.go
@@ -0,0 +1,611 @@
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "maps"
+ "math"
+ "slices"
+ "sync"
+
+ "github.com/cilium/ebpf/internal"
+)
+
+// MarshalOptions control how types are marshaled into BTF wire format.
+type MarshalOptions struct {
+ // Target byte order. Defaults to the system's native endianness.
+ Order binary.ByteOrder
+ // Remove function linkage information for compatibility with <5.6 kernels.
+ StripFuncLinkage bool
+ // Replace Enum64 with a placeholder for compatibility with <6.0 kernels.
+ ReplaceEnum64 bool
+ // Prevent the "No type found" error when loading BTF without any types.
+ PreventNoTypeFound bool
+}
+
+// KernelMarshalOptions will generate BTF suitable for the current kernel.
+func KernelMarshalOptions() *MarshalOptions {
+ return &MarshalOptions{
+ Order: internal.NativeEndian,
+ StripFuncLinkage: haveFuncLinkage() != nil,
+ ReplaceEnum64: haveEnum64() != nil,
+ PreventNoTypeFound: true, // All current kernels require this.
+ }
+}
+
+// encoder turns Types into raw BTF.
+type encoder struct {
+ MarshalOptions
+
+ pending internal.Deque[Type]
+ buf *bytes.Buffer
+ strings *stringTableBuilder
+ ids map[Type]TypeID
+ visited map[Type]struct{}
+ lastID TypeID
+}
+
+var bufferPool = sync.Pool{
+ New: func() any {
+ buf := make([]byte, btfHeaderLen+128)
+ return &buf
+ },
+}
+
+func getByteSlice() *[]byte {
+ return bufferPool.Get().(*[]byte)
+}
+
+func putByteSlice(buf *[]byte) {
+ *buf = (*buf)[:0]
+ bufferPool.Put(buf)
+}
+
+// Builder turns Types into raw BTF.
+//
+// The default value may be used and represents an empty BTF blob. Void is
+// added implicitly if necessary.
+type Builder struct {
+ // Explicitly added types.
+ types []Type
+ // IDs for all added types which the user knows about.
+ stableIDs map[Type]TypeID
+ // Explicitly added strings.
+ strings *stringTableBuilder
+}
+
+// NewBuilder creates a Builder from a list of types.
+//
+// It is more efficient than calling [Add] individually.
+//
+// Returns an error if adding any of the types fails.
+func NewBuilder(types []Type) (*Builder, error) {
+ b := &Builder{
+ make([]Type, 0, len(types)),
+ make(map[Type]TypeID, len(types)),
+ nil,
+ }
+
+ for _, typ := range types {
+ _, err := b.Add(typ)
+ if err != nil {
+ return nil, fmt.Errorf("add %s: %w", typ, err)
+ }
+ }
+
+ return b, nil
+}
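+
+// Illustrative usage sketch (not part of the upstream source): build a small
+// BTF blob suitable for the running kernel.
+//
+//	u32 := &Int{Name: "u32", Size: 4}
+//	b, err := NewBuilder([]Type{u32, &Typedef{Name: "my_u32", Type: u32}})
+//	if err != nil {
+//		// handle error
+//	}
+//	raw, err := b.Marshal(nil, KernelMarshalOptions())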
+
+// Empty returns true if neither types nor strings have been added.
+func (b *Builder) Empty() bool {
+ return len(b.types) == 0 && (b.strings == nil || b.strings.Length() == 0)
+}
+
+// Add a Type and allocate a stable ID for it.
+//
+// Adding the identical Type multiple times is valid and will return the same ID.
+//
+// See [Type] for details on identity.
+func (b *Builder) Add(typ Type) (TypeID, error) {
+ if b.stableIDs == nil {
+ b.stableIDs = make(map[Type]TypeID)
+ }
+
+ if _, ok := typ.(*Void); ok {
+ // Equality is weird for void, since it is a zero sized type.
+ return 0, nil
+ }
+
+ if ds, ok := typ.(*Datasec); ok {
+ if err := datasecResolveWorkaround(b, ds); err != nil {
+ return 0, err
+ }
+ }
+
+ id, ok := b.stableIDs[typ]
+ if ok {
+ return id, nil
+ }
+
+ b.types = append(b.types, typ)
+
+ id = TypeID(len(b.types))
+ if int(id) != len(b.types) {
+ return 0, fmt.Errorf("no more type IDs")
+ }
+
+ b.stableIDs[typ] = id
+ return id, nil
+}
+
+// Marshal encodes all types in the Marshaler into BTF wire format.
+//
+// opts may be nil.
+func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) {
+ stb := b.strings
+ if stb == nil {
+ // Assume that most types are named. This makes encoding large BTF like
+ // vmlinux a lot cheaper.
+ stb = newStringTableBuilder(len(b.types))
+ } else {
+ // Avoid modifying the Builder's string table.
+ stb = b.strings.Copy()
+ }
+
+ if opts == nil {
+ opts = &MarshalOptions{Order: internal.NativeEndian}
+ }
+
+ // Reserve space for the BTF header.
+ buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen]
+
+ w := internal.NewBuffer(buf)
+ defer internal.PutBuffer(w)
+
+ e := encoder{
+ MarshalOptions: *opts,
+ buf: w,
+ strings: stb,
+ lastID: TypeID(len(b.types)),
+ visited: make(map[Type]struct{}, len(b.types)),
+ ids: maps.Clone(b.stableIDs),
+ }
+
+ if e.ids == nil {
+ e.ids = make(map[Type]TypeID)
+ }
+
+ types := b.types
+ if len(types) == 0 && stb.Length() > 0 && opts.PreventNoTypeFound {
+ // We have strings that need to be written out,
+ // but no types (besides the implicit Void).
+ // Kernels as recent as v6.7 refuse to load such BTF
+ // with a "No type found" error in the log.
+ // Fix this by adding a dummy type.
+ types = []Type{&Int{Size: 0}}
+ }
+
+ // Ensure that types are marshaled in the exact order they were Add()ed.
+ // Otherwise the ID returned from Add() won't match.
+ e.pending.Grow(len(types))
+ for _, typ := range types {
+ e.pending.Push(typ)
+ }
+
+ if err := e.deflatePending(); err != nil {
+ return nil, err
+ }
+
+ length := e.buf.Len()
+ typeLen := uint32(length - btfHeaderLen)
+
+ stringLen := e.strings.Length()
+ buf = e.strings.AppendEncoded(e.buf.Bytes())
+
+ // Fill out the header, and write it out.
+ header := &btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ Flags: 0,
+ HdrLen: uint32(btfHeaderLen),
+ TypeOff: 0,
+ TypeLen: typeLen,
+ StringOff: typeLen,
+ StringLen: uint32(stringLen),
+ }
+
+ err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header)
+ if err != nil {
+ return nil, fmt.Errorf("write header: %v", err)
+ }
+
+ return buf, nil
+}
+
+// addString adds a string to the resulting BTF.
+//
+// Adding the same string multiple times will return the same result.
+//
+// Returns an identifier into the string table or an error if the string
+// contains invalid characters.
+func (b *Builder) addString(str string) (uint32, error) {
+ if b.strings == nil {
+ b.strings = newStringTableBuilder(0)
+ }
+
+ return b.strings.Add(str)
+}
+
+func (e *encoder) allocateIDs(root Type) (err error) {
+ visitInPostorder(root, e.visited, func(typ Type) bool {
+ if _, ok := typ.(*Void); ok {
+ return true
+ }
+
+ if _, ok := e.ids[typ]; ok {
+ return true
+ }
+
+ id := e.lastID + 1
+ if id < e.lastID {
+ err = errors.New("type ID overflow")
+ return false
+ }
+
+ e.pending.Push(typ)
+ e.ids[typ] = id
+ e.lastID = id
+ return true
+ })
+ return
+}
+
+// id returns the ID for the given type or panics with an error.
+func (e *encoder) id(typ Type) TypeID {
+ if _, ok := typ.(*Void); ok {
+ return 0
+ }
+
+ id, ok := e.ids[typ]
+ if !ok {
+ panic(fmt.Errorf("no ID for type %v", typ))
+ }
+
+ return id
+}
+
+func (e *encoder) deflatePending() error {
+ // Declare root outside of the loop to avoid repeated heap allocations.
+ var root Type
+
+ for !e.pending.Empty() {
+ root = e.pending.Shift()
+
+ // Allocate IDs for all children of typ, including transitive dependencies.
+ if err := e.allocateIDs(root); err != nil {
+ return err
+ }
+
+ if err := e.deflateType(root); err != nil {
+ id := e.ids[root]
+ return fmt.Errorf("deflate %v with ID %d: %w", root, id, err)
+ }
+ }
+
+ return nil
+}
+
+func (e *encoder) deflateType(typ Type) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ err, ok = r.(error)
+ if !ok {
+ panic(r)
+ }
+ }
+ }()
+
+ var raw rawType
+ raw.NameOff, err = e.strings.Add(typ.TypeName())
+ if err != nil {
+ return err
+ }
+
+ switch v := typ.(type) {
+ case *Void:
+ return errors.New("Void is implicit in BTF wire format")
+
+ case *Int:
+ raw.SetKind(kindInt)
+ raw.SetSize(v.Size)
+
+ var bi btfInt
+ bi.SetEncoding(v.Encoding)
+ // We need to set bits in addition to size, since btf_type_int_is_regular
+ // otherwise flags this as a bitfield.
+ bi.SetBits(byte(v.Size) * 8)
+ raw.data = bi
+
+ case *Pointer:
+ raw.SetKind(kindPointer)
+ raw.SetType(e.id(v.Target))
+
+ case *Array:
+ raw.SetKind(kindArray)
+ raw.data = &btfArray{
+ e.id(v.Type),
+ e.id(v.Index),
+ v.Nelems,
+ }
+
+ case *Struct:
+ raw.SetKind(kindStruct)
+ raw.SetSize(v.Size)
+ raw.data, err = e.convertMembers(&raw.btfType, v.Members)
+
+ case *Union:
+ err = e.deflateUnion(&raw, v)
+
+ case *Enum:
+ if v.Size == 8 {
+ err = e.deflateEnum64(&raw, v)
+ } else {
+ err = e.deflateEnum(&raw, v)
+ }
+
+ case *Fwd:
+ raw.SetKind(kindForward)
+ raw.SetFwdKind(v.Kind)
+
+ case *Typedef:
+ raw.SetKind(kindTypedef)
+ raw.SetType(e.id(v.Type))
+
+ case *Volatile:
+ raw.SetKind(kindVolatile)
+ raw.SetType(e.id(v.Type))
+
+ case *Const:
+ raw.SetKind(kindConst)
+ raw.SetType(e.id(v.Type))
+
+ case *Restrict:
+ raw.SetKind(kindRestrict)
+ raw.SetType(e.id(v.Type))
+
+ case *Func:
+ raw.SetKind(kindFunc)
+ raw.SetType(e.id(v.Type))
+ if !e.StripFuncLinkage {
+ raw.SetLinkage(v.Linkage)
+ }
+
+ case *FuncProto:
+ raw.SetKind(kindFuncProto)
+ raw.SetType(e.id(v.Return))
+ raw.SetVlen(len(v.Params))
+ raw.data, err = e.deflateFuncParams(v.Params)
+
+ case *Var:
+ raw.SetKind(kindVar)
+ raw.SetType(e.id(v.Type))
+ raw.data = btfVariable{uint32(v.Linkage)}
+
+ case *Datasec:
+ raw.SetKind(kindDatasec)
+ raw.SetSize(v.Size)
+ raw.SetVlen(len(v.Vars))
+ raw.data = e.deflateVarSecinfos(v.Vars)
+
+ case *Float:
+ raw.SetKind(kindFloat)
+ raw.SetSize(v.Size)
+
+ case *declTag:
+ raw.SetKind(kindDeclTag)
+ raw.SetType(e.id(v.Type))
+ raw.data = &btfDeclTag{uint32(v.Index)}
+ raw.NameOff, err = e.strings.Add(v.Value)
+
+ case *typeTag:
+ raw.SetKind(kindTypeTag)
+ raw.SetType(e.id(v.Type))
+ raw.NameOff, err = e.strings.Add(v.Value)
+
+ default:
+ return fmt.Errorf("don't know how to deflate %T", v)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ return raw.Marshal(e.buf, e.Order)
+}
+
+func (e *encoder) deflateUnion(raw *rawType, union *Union) (err error) {
+ raw.SetKind(kindUnion)
+ raw.SetSize(union.Size)
+ raw.data, err = e.convertMembers(&raw.btfType, union.Members)
+ return
+}
+
+func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) {
+ bms := make([]btfMember, 0, len(members))
+ isBitfield := false
+ for _, member := range members {
+ isBitfield = isBitfield || member.BitfieldSize > 0
+
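+		// New-style bitfields pack the bitfield size into the top 8 bits of
+		// the offset field and keep the bit offset in the lower 24 bits.
+		// This mirrors the decoding done in readAndInflateTypes.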
+ offset := member.Offset
+ if isBitfield {
+ offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff)
+ }
+
+ nameOff, err := e.strings.Add(member.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bms = append(bms, btfMember{
+ nameOff,
+ e.id(member.Type),
+ uint32(offset),
+ })
+ }
+
+ header.SetVlen(len(members))
+ header.SetBitfield(isBitfield)
+ return bms, nil
+}
+
+func (e *encoder) deflateEnum(raw *rawType, enum *Enum) (err error) {
+ raw.SetKind(kindEnum)
+ raw.SetSize(enum.Size)
+ raw.SetVlen(len(enum.Values))
+ // Signedness appeared together with ENUM64 support.
+ raw.SetSigned(enum.Signed && !e.ReplaceEnum64)
+ raw.data, err = e.deflateEnumValues(enum)
+ return
+}
+
+func (e *encoder) deflateEnumValues(enum *Enum) ([]btfEnum, error) {
+ bes := make([]btfEnum, 0, len(enum.Values))
+ for _, value := range enum.Values {
+ nameOff, err := e.strings.Add(value.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ if enum.Signed {
+ if signedValue := int64(value.Value); signedValue < math.MinInt32 || signedValue > math.MaxInt32 {
+ return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", signedValue, value.Name)
+ }
+ } else {
+ if value.Value > math.MaxUint32 {
+ return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", value.Value, value.Name)
+ }
+ }
+
+ bes = append(bes, btfEnum{
+ nameOff,
+ uint32(value.Value),
+ })
+ }
+
+ return bes, nil
+}
+
+func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) {
+ if e.ReplaceEnum64 {
+ // Replace the ENUM64 with a union of fields with the correct size.
+ // This matches libbpf behaviour on purpose.
+ placeholder := &Int{
+ "enum64_placeholder",
+ enum.Size,
+ Unsigned,
+ }
+ if enum.Signed {
+ placeholder.Encoding = Signed
+ }
+ if err := e.allocateIDs(placeholder); err != nil {
+ return fmt.Errorf("add enum64 placeholder: %w", err)
+ }
+
+ members := make([]Member, 0, len(enum.Values))
+ for _, v := range enum.Values {
+ members = append(members, Member{
+ Name: v.Name,
+ Type: placeholder,
+ })
+ }
+
+ return e.deflateUnion(raw, &Union{enum.Name, enum.Size, members})
+ }
+
+ raw.SetKind(kindEnum64)
+ raw.SetSize(enum.Size)
+ raw.SetVlen(len(enum.Values))
+ raw.SetSigned(enum.Signed)
+ raw.data, err = e.deflateEnum64Values(enum.Values)
+ return
+}
+
+func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) {
+ bes := make([]btfEnum64, 0, len(values))
+ for _, value := range values {
+ nameOff, err := e.strings.Add(value.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bes = append(bes, btfEnum64{
+ nameOff,
+ uint32(value.Value),
+ uint32(value.Value >> 32),
+ })
+ }
+
+ return bes, nil
+}
+
+func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) {
+ bps := make([]btfParam, 0, len(params))
+ for _, param := range params {
+ nameOff, err := e.strings.Add(param.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bps = append(bps, btfParam{
+ nameOff,
+ e.id(param.Type),
+ })
+ }
+ return bps, nil
+}
+
+func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo {
+ vsis := make([]btfVarSecinfo, 0, len(vars))
+ for _, v := range vars {
+ vsis = append(vsis, btfVarSecinfo{
+ e.id(v.Type),
+ v.Offset,
+ v.Size,
+ })
+ }
+ return vsis
+}
+
+// MarshalMapKV creates a BTF object containing a map key and value.
+//
+// The function is intended for the use of the ebpf package and may be removed
+// at any point in time.
+func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) {
+ var b Builder
+
+ if key != nil {
+ keyID, err = b.Add(key)
+ if err != nil {
+ return nil, 0, 0, fmt.Errorf("add key type: %w", err)
+ }
+ }
+
+ if value != nil {
+ valueID, err = b.Add(value)
+ if err != nil {
+ return nil, 0, 0, fmt.Errorf("add value type: %w", err)
+ }
+ }
+
+ handle, err := NewHandle(&b)
+ if err != nil {
+ // Check for 'full' map BTF support, since kernels between 4.18 and 5.2
+ // already support BTF blobs for maps without Var or Datasec just fine.
+ if err := haveMapBTF(); err != nil {
+ return nil, 0, 0, err
+ }
+ }
+ return handle, keyID, valueID, err
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go
new file mode 100644
index 000000000..7c31461c3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/strings.go
@@ -0,0 +1,198 @@
+package btf
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "strings"
+)
+
+type stringTable struct {
+ base *stringTable
+ offsets []uint32
+ prevIdx int
+ strings []string
+}
+
+// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc.
+type sizedReader interface {
+ io.Reader
+ Size() int64
+}
+
+func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) {
+ // When parsing split BTF's string table, the first entry offset is derived
+ // from the last entry offset of the base BTF.
+ firstStringOffset := uint32(0)
+ if base != nil {
+ idx := len(base.offsets) - 1
+ firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1
+ }
+
+ // Derived from vmlinux BTF.
+ const averageStringLength = 16
+
+ n := int(r.Size() / averageStringLength)
+ offsets := make([]uint32, 0, n)
+ strings := make([]string, 0, n)
+
+ offset := firstStringOffset
+ scanner := bufio.NewScanner(r)
+ scanner.Split(splitNull)
+ for scanner.Scan() {
+ str := scanner.Text()
+ offsets = append(offsets, offset)
+ strings = append(strings, str)
+ offset += uint32(len(str)) + 1
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ if len(strings) == 0 {
+ return nil, errors.New("string table is empty")
+ }
+
+ if firstStringOffset == 0 && strings[0] != "" {
+ return nil, errors.New("first item in string table is non-empty")
+ }
+
+ return &stringTable{base, offsets, 0, strings}, nil
+}
+
+func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ i := bytes.IndexByte(data, 0)
+ if i == -1 {
+ if atEOF && len(data) > 0 {
+ return 0, nil, errors.New("string table isn't null terminated")
+ }
+ return 0, nil, nil
+ }
+
+ return i + 1, data[:i], nil
+}
+
+func (st *stringTable) Lookup(offset uint32) (string, error) {
+ if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] {
+ return st.base.lookup(offset)
+ }
+ return st.lookup(offset)
+}
+
+func (st *stringTable) lookup(offset uint32) (string, error) {
+ // Fast path: zero offset is the empty string, looked up frequently.
+ if offset == 0 && st.base == nil {
+ return "", nil
+ }
+
+ // Accesses tend to be globally increasing, so check if the next string is
+ // the one we want. This skips the binary search in about 50% of cases.
+ if st.prevIdx+1 < len(st.offsets) && st.offsets[st.prevIdx+1] == offset {
+ st.prevIdx++
+ return st.strings[st.prevIdx], nil
+ }
+
+ i, found := slices.BinarySearch(st.offsets, offset)
+ if !found {
+ return "", fmt.Errorf("offset %d isn't start of a string", offset)
+ }
+
+	// Set the new increment index, but only if it's greater than the current.
+ if i > st.prevIdx+1 {
+ st.prevIdx = i
+ }
+
+ return st.strings[i], nil
+}
+
+// Num returns the number of strings in the table.
+func (st *stringTable) Num() int {
+ return len(st.strings)
+}
+
+// stringTableBuilder builds BTF string tables.
+type stringTableBuilder struct {
+ length uint32
+ strings map[string]uint32
+}
+
+// newStringTableBuilder creates a builder with the given capacity.
+//
+// capacity may be zero.
+func newStringTableBuilder(capacity int) *stringTableBuilder {
+ var stb stringTableBuilder
+
+ if capacity == 0 {
+ // Use the runtime's small default size.
+ stb.strings = make(map[string]uint32)
+ } else {
+ stb.strings = make(map[string]uint32, capacity)
+ }
+
+ // Ensure that the empty string is at index 0.
+ stb.append("")
+ return &stb
+}
+
+// Add a string to the table.
+//
+// Adding the same string multiple times will only store it once.
+func (stb *stringTableBuilder) Add(str string) (uint32, error) {
+ if strings.IndexByte(str, 0) != -1 {
+ return 0, fmt.Errorf("string contains null: %q", str)
+ }
+
+ offset, ok := stb.strings[str]
+ if ok {
+ return offset, nil
+ }
+
+ return stb.append(str), nil
+}
+
+func (stb *stringTableBuilder) append(str string) uint32 {
+ offset := stb.length
+ stb.length += uint32(len(str)) + 1
+ stb.strings[str] = offset
+ return offset
+}
+
+// Lookup finds the offset of a string in the table.
+//
+// Returns an error if str hasn't been added yet.
+func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
+ offset, ok := stb.strings[str]
+ if !ok {
+ return 0, fmt.Errorf("string %q is not in table", str)
+ }
+
+ return offset, nil
+}
+
+// Length returns the length in bytes.
+func (stb *stringTableBuilder) Length() int {
+ return int(stb.length)
+}
+
+// AppendEncoded appends the string table to the end of the provided buffer.
+func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte {
+ n := len(buf)
+ buf = append(buf, make([]byte, stb.Length())...)
+ strings := buf[n:]
+ for str, offset := range stb.strings {
+ copy(strings[offset:], str)
+ }
+ return buf
+}
+
+// Copy the string table builder.
+func (stb *stringTableBuilder) Copy() *stringTableBuilder {
+ return &stringTableBuilder{
+ stb.length,
+ maps.Clone(stb.strings),
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go
new file mode 100644
index 000000000..c39dc66e4
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/traversal.go
@@ -0,0 +1,123 @@
+package btf
+
+import (
+ "fmt"
+)
+
+// Functions to traverse a cyclic graph of types. The below was very useful:
+// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order
+
+// Visit all types reachable from root in postorder.
+//
+// Traversal stops if yield returns false.
+//
+// Returns false if traversal was aborted.
+func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool {
+ if _, ok := visited[root]; ok {
+ return true
+ }
+ if visited == nil {
+ visited = make(map[Type]struct{})
+ }
+ visited[root] = struct{}{}
+
+ cont := children(root, func(child *Type) bool {
+ return visitInPostorder(*child, visited, yield)
+ })
+ if !cont {
+ return false
+ }
+
+ return yield(root)
+}
+
+// children calls yield on each child of typ.
+//
+// Traversal stops if yield returns false.
+//
+// Returns false if traversal was aborted.
+func children(typ Type, yield func(child *Type) bool) bool {
+ // Explicitly type switch on the most common types to allow the inliner to
+ // do its work. This avoids allocating intermediate slices from walk() on
+ // the heap.
+ switch v := typ.(type) {
+ case *Void, *Int, *Enum, *Fwd, *Float:
+ // No children to traverse.
+ case *Pointer:
+ if !yield(&v.Target) {
+ return false
+ }
+ case *Array:
+ if !yield(&v.Index) {
+ return false
+ }
+ if !yield(&v.Type) {
+ return false
+ }
+ case *Struct:
+ for i := range v.Members {
+ if !yield(&v.Members[i].Type) {
+ return false
+ }
+ }
+ case *Union:
+ for i := range v.Members {
+ if !yield(&v.Members[i].Type) {
+ return false
+ }
+ }
+ case *Typedef:
+ if !yield(&v.Type) {
+ return false
+ }
+ case *Volatile:
+ if !yield(&v.Type) {
+ return false
+ }
+ case *Const:
+ if !yield(&v.Type) {
+ return false
+ }
+ case *Restrict:
+ if !yield(&v.Type) {
+ return false
+ }
+ case *Func:
+ if !yield(&v.Type) {
+ return false
+ }
+ case *FuncProto:
+ if !yield(&v.Return) {
+ return false
+ }
+ for i := range v.Params {
+ if !yield(&v.Params[i].Type) {
+ return false
+ }
+ }
+ case *Var:
+ if !yield(&v.Type) {
+ return false
+ }
+ case *Datasec:
+ for i := range v.Vars {
+ if !yield(&v.Vars[i].Type) {
+ return false
+ }
+ }
+ case *declTag:
+ if !yield(&v.Type) {
+ return false
+ }
+ case *typeTag:
+ if !yield(&v.Type) {
+ return false
+ }
+ case *cycle:
+ // cycle has children, but we ignore them deliberately.
+ default:
+ panic(fmt.Sprintf("don't know how to walk Type %T", v))
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go
new file mode 100644
index 000000000..a3397460b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/types.go
@@ -0,0 +1,1319 @@
+package btf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+// Mirrors MAX_RESOLVE_DEPTH in libbpf.
+// https://github.com/libbpf/libbpf/blob/e26b84dc330c9644c07428c271ab491b0f01f4e1/src/btf.c#L761
+const maxResolveDepth = 32
+
+// TypeID identifies a type in a BTF section.
+type TypeID = sys.TypeID
+
+// Type represents a type described by BTF.
+//
+// Identity of Type follows the [Go specification]: two Types are considered
+// equal if they have the same concrete type and the same dynamic value, that is,
+// they point at the same location in memory. This means that the following
+// Types are considered distinct even though they have the same "shape".
+//
+// a := &Int{Size: 1}
+// b := &Int{Size: 1}
+// a != b
+//
+// [Go specification]: https://go.dev/ref/spec#Comparison_operators
+type Type interface {
+ // Type can be formatted using the %s and %v verbs. %s outputs only the
+ // identity of the type, without any detail. %v outputs additional detail.
+ //
+ // Use the '+' flag to include the address of the type.
+ //
+ // Use the width to specify how many levels of detail to output, for example
+ // %1v will output detail for the root type and a short description of its
+ // children. %2v would output details of the root type and its children
+ // as well as a short description of the grandchildren.
+ fmt.Formatter
+
+ // Name of the type, empty for anonymous types and types that cannot
+ // carry a name, like Void and Pointer.
+ TypeName() string
+
+ // Make a copy of the type, without copying Type members.
+ copy() Type
+
+	// New implementations must update the children function in traversal.go.
+}
+
+var (
+ _ Type = (*Int)(nil)
+ _ Type = (*Struct)(nil)
+ _ Type = (*Union)(nil)
+ _ Type = (*Enum)(nil)
+ _ Type = (*Fwd)(nil)
+ _ Type = (*Func)(nil)
+ _ Type = (*Typedef)(nil)
+ _ Type = (*Var)(nil)
+ _ Type = (*Datasec)(nil)
+ _ Type = (*Float)(nil)
+ _ Type = (*declTag)(nil)
+ _ Type = (*typeTag)(nil)
+ _ Type = (*cycle)(nil)
+)
+
+// Void is the unit type of BTF.
+type Void struct{}
+
+func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) }
+func (v *Void) TypeName() string { return "" }
+func (v *Void) size() uint32 { return 0 }
+func (v *Void) copy() Type { return (*Void)(nil) }
+
+type IntEncoding byte
+
+// Valid IntEncodings.
+//
+// These may look like they are flags, but they aren't.
+const (
+ Unsigned IntEncoding = 0
+ Signed IntEncoding = 1
+ Char IntEncoding = 2
+ Bool IntEncoding = 4
+)
+
+func (ie IntEncoding) String() string {
+ switch ie {
+ case Char:
+ // NB: There is no way to determine signedness for char.
+ return "char"
+ case Bool:
+ return "bool"
+ case Signed:
+ return "signed"
+ case Unsigned:
+ return "unsigned"
+ default:
+ return fmt.Sprintf("IntEncoding(%d)", byte(ie))
+ }
+}
+
+// Int is an integer of a given length.
+//
+// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
+type Int struct {
+ Name string
+
+ // The size of the integer in bytes.
+ Size uint32
+ Encoding IntEncoding
+}
+
+func (i *Int) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, i, i.Encoding, "size=", i.Size)
+}
+
+func (i *Int) TypeName() string { return i.Name }
+func (i *Int) size() uint32 { return i.Size }
+func (i *Int) copy() Type {
+ cpy := *i
+ return &cpy
+}
+
+// Pointer is a pointer to another type.
+type Pointer struct {
+ Target Type
+}
+
+func (p *Pointer) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, p, "target=", p.Target)
+}
+
+func (p *Pointer) TypeName() string { return "" }
+func (p *Pointer) size() uint32 { return 8 }
+func (p *Pointer) copy() Type {
+ cpy := *p
+ return &cpy
+}
+
+// Array is an array with a fixed number of elements.
+type Array struct {
+ Index Type
+ Type Type
+ Nelems uint32
+}
+
+func (arr *Array) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems)
+}
+
+func (arr *Array) TypeName() string { return "" }
+
+func (arr *Array) copy() Type {
+ cpy := *arr
+ return &cpy
+}
+
+// Struct is a compound type of consecutive members.
+type Struct struct {
+ Name string
+	// The size of the struct including padding, in bytes.
+ Size uint32
+ Members []Member
+}
+
+func (s *Struct) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, s, "fields=", len(s.Members))
+}
+
+func (s *Struct) TypeName() string { return s.Name }
+
+func (s *Struct) size() uint32 { return s.Size }
+
+func (s *Struct) copy() Type {
+ cpy := *s
+ cpy.Members = copyMembers(s.Members)
+ return &cpy
+}
+
+func (s *Struct) members() []Member {
+ return s.Members
+}
+
+// Union is a compound type where members occupy the same memory.
+type Union struct {
+ Name string
+ // The size of the union including padding, in bytes.
+ Size uint32
+ Members []Member
+}
+
+func (u *Union) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, u, "fields=", len(u.Members))
+}
+
+func (u *Union) TypeName() string { return u.Name }
+
+func (u *Union) size() uint32 { return u.Size }
+
+func (u *Union) copy() Type {
+ cpy := *u
+ cpy.Members = copyMembers(u.Members)
+ return &cpy
+}
+
+func (u *Union) members() []Member {
+ return u.Members
+}
+
+func copyMembers(orig []Member) []Member {
+ cpy := make([]Member, len(orig))
+ copy(cpy, orig)
+ return cpy
+}
+
+type composite interface {
+ Type
+ members() []Member
+}
+
+var (
+ _ composite = (*Struct)(nil)
+ _ composite = (*Union)(nil)
+)
+
+// A value in bits.
+type Bits uint32
+
+// Bytes converts a bit value into bytes.
+func (b Bits) Bytes() uint32 {
+ return uint32(b / 8)
+}
+
+// Member is part of a Struct or Union.
+//
+// It is not a valid Type.
+type Member struct {
+ Name string
+ Type Type
+ Offset Bits
+ BitfieldSize Bits
+}
+
+// Enum lists possible values.
+type Enum struct {
+ Name string
+ // Size of the enum value in bytes.
+ Size uint32
+ // True if the values should be interpreted as signed integers.
+ Signed bool
+ Values []EnumValue
+}
+
+func (e *Enum) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values))
+}
+
+func (e *Enum) TypeName() string { return e.Name }
+
+// EnumValue is part of an Enum.
+//
+// It is not a valid Type.
+type EnumValue struct {
+ Name string
+ Value uint64
+}
+
+func (e *Enum) size() uint32 { return e.Size }
+func (e *Enum) copy() Type {
+ cpy := *e
+ cpy.Values = make([]EnumValue, len(e.Values))
+ copy(cpy.Values, e.Values)
+ return &cpy
+}
+
+// FwdKind is the type of forward declaration.
+type FwdKind int
+
+// Valid types of forward declaration.
+const (
+ FwdStruct FwdKind = iota
+ FwdUnion
+)
+
+func (fk FwdKind) String() string {
+ switch fk {
+ case FwdStruct:
+ return "struct"
+ case FwdUnion:
+ return "union"
+ default:
+ return fmt.Sprintf("%T(%d)", fk, int(fk))
+ }
+}
+
+// Fwd is a forward declaration of a Type.
+type Fwd struct {
+ Name string
+ Kind FwdKind
+}
+
+func (f *Fwd) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Kind)
+}
+
+func (f *Fwd) TypeName() string { return f.Name }
+
+func (f *Fwd) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+func (f *Fwd) matches(typ Type) bool {
+ if _, ok := As[*Struct](typ); ok && f.Kind == FwdStruct {
+ return true
+ }
+
+ if _, ok := As[*Union](typ); ok && f.Kind == FwdUnion {
+ return true
+ }
+
+ return false
+}
+
+// Typedef is an alias of a Type.
+type Typedef struct {
+ Name string
+ Type Type
+}
+
+func (td *Typedef) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, td, td.Type)
+}
+
+func (td *Typedef) TypeName() string { return td.Name }
+
+func (td *Typedef) copy() Type {
+ cpy := *td
+ return &cpy
+}
+
+// Volatile is a qualifier.
+type Volatile struct {
+ Type Type
+}
+
+func (v *Volatile) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, v, v.Type)
+}
+
+func (v *Volatile) TypeName() string { return "" }
+
+func (v *Volatile) qualify() Type { return v.Type }
+func (v *Volatile) copy() Type {
+ cpy := *v
+ return &cpy
+}
+
+// Const is a qualifier.
+type Const struct {
+ Type Type
+}
+
+func (c *Const) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, c, c.Type)
+}
+
+func (c *Const) TypeName() string { return "" }
+
+func (c *Const) qualify() Type { return c.Type }
+func (c *Const) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+// Restrict is a qualifier.
+type Restrict struct {
+ Type Type
+}
+
+func (r *Restrict) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, r, r.Type)
+}
+
+func (r *Restrict) TypeName() string { return "" }
+
+func (r *Restrict) qualify() Type { return r.Type }
+func (r *Restrict) copy() Type {
+ cpy := *r
+ return &cpy
+}
+
+// Func is a function definition.
+type Func struct {
+ Name string
+ Type Type
+ Linkage FuncLinkage
+}
+
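+// FuncMetadata returns the Func metadata attached to an instruction, or nil
+// if the instruction carries none.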
+func FuncMetadata(ins *asm.Instruction) *Func {
+ fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func)
+ return fn
+}
+
+// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction.
+func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction {
+ ins.Metadata.Set(funcInfoMeta{}, fn)
+ return ins
+}
+
+func (f *Func) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
+}
+
+func (f *Func) TypeName() string { return f.Name }
+
+func (f *Func) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// FuncProto is a function declaration.
+type FuncProto struct {
+ Return Type
+ Params []FuncParam
+}
+
+func (fp *FuncProto) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return)
+}
+
+func (fp *FuncProto) TypeName() string { return "" }
+
+func (fp *FuncProto) copy() Type {
+ cpy := *fp
+ cpy.Params = make([]FuncParam, len(fp.Params))
+ copy(cpy.Params, fp.Params)
+ return &cpy
+}
+
+type FuncParam struct {
+ Name string
+ Type Type
+}
+
+// Var is a global variable.
+type Var struct {
+ Name string
+ Type Type
+ Linkage VarLinkage
+}
+
+func (v *Var) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, v, v.Linkage)
+}
+
+func (v *Var) TypeName() string { return v.Name }
+
+func (v *Var) copy() Type {
+ cpy := *v
+ return &cpy
+}
+
+// Datasec is a global program section containing data.
+type Datasec struct {
+ Name string
+ Size uint32
+ Vars []VarSecinfo
+}
+
+func (ds *Datasec) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, ds)
+}
+
+func (ds *Datasec) TypeName() string { return ds.Name }
+
+func (ds *Datasec) size() uint32 { return ds.Size }
+
+func (ds *Datasec) copy() Type {
+ cpy := *ds
+ cpy.Vars = make([]VarSecinfo, len(ds.Vars))
+ copy(cpy.Vars, ds.Vars)
+ return &cpy
+}
+
+// VarSecinfo describes a variable in a Datasec.
+//
+// It is not a valid Type.
+type VarSecinfo struct {
+ // Var or Func.
+ Type Type
+ Offset uint32
+ Size uint32
+}
+
+// Float is a float of a given length.
+type Float struct {
+ Name string
+
+ // The size of the float in bytes.
+ Size uint32
+}
+
+func (f *Float) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, "size=", f.Size*8)
+}
+
+func (f *Float) TypeName() string { return f.Name }
+func (f *Float) size() uint32 { return f.Size }
+func (f *Float) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// declTag associates metadata with a declaration.
+type declTag struct {
+ Type Type
+ Value string
+ // The index this tag refers to in the target type. For composite types,
+ // a value of -1 indicates that the tag refers to the whole type. Otherwise
+ // it indicates which member or argument the tag applies to.
+ Index int
+}
+
+func (dt *declTag) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index)
+}
+
+func (dt *declTag) TypeName() string { return "" }
+func (dt *declTag) copy() Type {
+ cpy := *dt
+ return &cpy
+}
+
+// typeTag associates metadata with a type.
+type typeTag struct {
+ Type Type
+ Value string
+}
+
+func (tt *typeTag) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value)
+}
+
+func (tt *typeTag) TypeName() string { return "" }
+func (tt *typeTag) qualify() Type { return tt.Type }
+func (tt *typeTag) copy() Type {
+ cpy := *tt
+ return &cpy
+}
+
+// cycle is a type which had to be elided since it exceeded maxResolveDepth.
+type cycle struct {
+ root Type
+}
+
+func (c *cycle) ID() TypeID { return math.MaxUint32 }
+func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }
+func (c *cycle) TypeName() string { return "" }
+func (c *cycle) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+type sizer interface {
+ size() uint32
+}
+
+var (
+ _ sizer = (*Int)(nil)
+ _ sizer = (*Pointer)(nil)
+ _ sizer = (*Struct)(nil)
+ _ sizer = (*Union)(nil)
+ _ sizer = (*Enum)(nil)
+ _ sizer = (*Datasec)(nil)
+)
+
+type qualifier interface {
+ qualify() Type
+}
+
+var (
+ _ qualifier = (*Const)(nil)
+ _ qualifier = (*Restrict)(nil)
+ _ qualifier = (*Volatile)(nil)
+ _ qualifier = (*typeTag)(nil)
+)
+
+var errUnsizedType = errors.New("type is unsized")
+
+// Sizeof returns the size of a type in bytes.
+//
+// Returns an error if the size can't be computed.
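+//
+// For example, an Array with Nelems 4 whose element type is an Int of Size 4
+// has a size of 16 bytes.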
+func Sizeof(typ Type) (int, error) {
+ var (
+ n = int64(1)
+ elem int64
+ )
+
+ for i := 0; i < maxResolveDepth; i++ {
+ switch v := typ.(type) {
+ case *Array:
+ if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ // Arrays may be of zero length, which allows
+ // n to be zero as well.
+ n *= int64(v.Nelems)
+ typ = v.Type
+ continue
+
+ case sizer:
+ elem = int64(v.size())
+
+ case *Typedef:
+ typ = v.Type
+ continue
+
+ case qualifier:
+ typ = v.qualify()
+ continue
+
+ default:
+ return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType)
+ }
+
+ if n > 0 && elem > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ size := n * elem
+ if int64(int(size)) != size {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ return int(size), nil
+ }
+
+ return 0, fmt.Errorf("type %s: exceeded type depth", typ)
+}
+
+// alignof returns the alignment of a type.
+//
+// Returns an error if the Type can't be aligned, like an integer with an uneven
+// size. Currently only supports the subset of types necessary for bitfield
+// relocations.
+func alignof(typ Type) (int, error) {
+ var n int
+
+ switch t := UnderlyingType(typ).(type) {
+ case *Enum:
+ n = int(t.size())
+ case *Int:
+ n = int(t.Size)
+ case *Array:
+ return alignof(t.Type)
+ default:
+ return 0, fmt.Errorf("can't calculate alignment of %T", t)
+ }
+
+ if !internal.IsPow(n) {
+ return 0, fmt.Errorf("alignment value %d is not a power of two", n)
+ }
+
+ return n, nil
+}
+
+// Copy a Type recursively.
+//
+// typ may form a cycle.
+func Copy(typ Type) Type {
+ return copyType(typ, nil, make(map[Type]Type), nil)
+}
+
+func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map[Type]TypeID) Type {
+ if typ == nil {
+ return nil
+ }
+
+ cpy, ok := copies[typ]
+ if ok {
+ // This has been copied previously, no need to continue.
+ return cpy
+ }
+
+ cpy = typ.copy()
+ copies[typ] = cpy
+
+ if id, ok := ids[typ]; ok {
+ copiedIDs[cpy] = id
+ }
+
+ children(cpy, func(child *Type) bool {
+ *child = copyType(*child, ids, copies, copiedIDs)
+ return true
+ })
+
+ return cpy
+}
+
+type typeDeque = internal.Deque[*Type]
+
+// readAndInflateTypes reads the raw btf type info and turns it into a graph
+// of Types connected via pointers.
+//
+// If base is provided, then the types are considered to be of a split BTF
+// (e.g., a kernel module).
+//
+// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
+// units, multiple types may share the same name. A Type may form a cyclic graph
+// by pointing at itself.
+func readAndInflateTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32, rawStrings *stringTable, base *Spec) ([]Type, error) {
+	// Because of the interleaving between types and struct members it is
+	// difficult to precompute the number of raw types this will parse.
+	// This "guess" is a good first estimate.
+ sizeOfbtfType := uintptr(btfTypeLen)
+ tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
+ types := make([]Type, 0, tyMaxCount)
+
+ // Void is defined to always be type ID 0, and is thus omitted from BTF.
+ types = append(types, (*Void)(nil))
+
+ firstTypeID := TypeID(0)
+ if base != nil {
+ var err error
+ firstTypeID, err = base.nextTypeID()
+ if err != nil {
+ return nil, err
+ }
+
+ // Split BTF doesn't contain Void.
+ types = types[:0]
+ }
+
+ type fixupDef struct {
+ id TypeID
+ typ *Type
+ }
+
+ var fixups []fixupDef
+ fixup := func(id TypeID, typ *Type) {
+ if id < firstTypeID {
+ if baseType, err := base.TypeByID(id); err == nil {
+ *typ = baseType
+ return
+ }
+ }
+
+ idx := int(id - firstTypeID)
+ if idx < len(types) {
+ // We've already inflated this type, fix it up immediately.
+ *typ = types[idx]
+ return
+ }
+
+ fixups = append(fixups, fixupDef{id, typ})
+ }
+
+ type bitfieldFixupDef struct {
+ id TypeID
+ m *Member
+ }
+
+ var (
+ legacyBitfields = make(map[TypeID][2]Bits) // offset, size
+ bitfieldFixups []bitfieldFixupDef
+ )
+ convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
+ // NB: The fixup below relies on pre-allocating this array to
+ // work, since otherwise append might re-allocate members.
+ members := make([]Member, 0, len(raw))
+ for i, btfMember := range raw {
+ name, err := rawStrings.Lookup(btfMember.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
+ }
+
+ members = append(members, Member{
+ Name: name,
+ Offset: Bits(btfMember.Offset),
+ })
+
+ m := &members[i]
+ fixup(raw[i].Type, &m.Type)
+
+ if kindFlag {
+ m.BitfieldSize = Bits(btfMember.Offset >> 24)
+ m.Offset &= 0xffffff
+ // We ignore legacy bitfield definitions if the current composite
+ // is a new-style bitfield. This is kind of safe since offset and
+				// size on the type of the member must be zero if kindFlag is set
+ // according to spec.
+ continue
+ }
+
+ // This may be a legacy bitfield, try to fix it up.
+ data, ok := legacyBitfields[raw[i].Type]
+ if ok {
+ // Bingo!
+ m.Offset += data[0]
+ m.BitfieldSize = data[1]
+ continue
+ }
+
+ if m.Type != nil {
+ // We couldn't find a legacy bitfield, but we know that the member's
+ // type has already been inflated. Hence we know that it can't be
+ // a legacy bitfield and there is nothing left to do.
+ continue
+ }
+
+ // We don't have fixup data, and the type we're pointing
+ // at hasn't been inflated yet. No choice but to defer
+ // the fixup.
+ bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{
+ raw[i].Type,
+ m,
+ })
+ }
+ return members, nil
+ }
+
+ var (
+ buf = make([]byte, 1024)
+ header btfType
+ bInt btfInt
+ bArr btfArray
+ bMembers []btfMember
+ bEnums []btfEnum
+ bParams []btfParam
+ bVariable btfVariable
+ bSecInfos []btfVarSecinfo
+ bDeclTag btfDeclTag
+ bEnums64 []btfEnum64
+ )
+
+ var declTags []*declTag
+ for {
+ var (
+ id = firstTypeID + TypeID(len(types))
+ typ Type
+ )
+
+ if _, err := io.ReadFull(r, buf[:btfTypeLen]); err == io.EOF {
+ break
+ } else if err != nil {
+ return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
+ }
+
+ if _, err := unmarshalBtfType(&header, buf[:btfTypeLen], bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err)
+ }
+
+ if id < firstTypeID {
+ return nil, fmt.Errorf("no more type IDs")
+ }
+
+ name, err := rawStrings.Lookup(header.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for type id %d: %w", id, err)
+ }
+
+ switch header.Kind() {
+ case kindInt:
+ size := header.Size()
+ buf = buf[:btfIntLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfInt, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfInt(&bInt, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err)
+ }
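+			// Remember the offset and bit size of legacy (pre-kind_flag)
+			// bitfield Ints so that members referencing this type can be
+			// fixed up later.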
+ if bInt.Offset() > 0 || bInt.Bits().Bytes() != size {
+ legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()}
+ }
+ typ = &Int{name, header.Size(), bInt.Encoding()}
+
+ case kindPointer:
+ ptr := &Pointer{nil}
+ fixup(header.Type(), &ptr.Target)
+ typ = ptr
+
+ case kindArray:
+ buf = buf[:btfArrayLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfArray, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfArray(&bArr, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err)
+ }
+
+ arr := &Array{nil, nil, bArr.Nelems}
+ fixup(bArr.IndexType, &arr.Index)
+ fixup(bArr.Type, &arr.Type)
+ typ = arr
+
+ case kindStruct:
+ vlen := header.Vlen()
+ bMembers = slices.Grow(bMembers[:0], vlen)[:vlen]
+ buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err)
+ }
+
+ members, err := convertMembers(bMembers, header.Bitfield())
+ if err != nil {
+ return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
+ }
+ typ = &Struct{name, header.Size(), members}
+
+ case kindUnion:
+ vlen := header.Vlen()
+ bMembers = slices.Grow(bMembers[:0], vlen)[:vlen]
+ buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err)
+ }
+
+ members, err := convertMembers(bMembers, header.Bitfield())
+ if err != nil {
+ return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
+ }
+ typ = &Union{name, header.Size(), members}
+
+ case kindEnum:
+ vlen := header.Vlen()
+ bEnums = slices.Grow(bEnums[:0], vlen)[:vlen]
+ buf = slices.Grow(buf[:0], vlen*btfEnumLen)[:vlen*btfEnumLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfEnums, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfEnums(bEnums, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal btfEnums, id: %d: %w", id, err)
+ }
+
+ vals := make([]EnumValue, 0, vlen)
+ signed := header.Signed()
+ for i, btfVal := range bEnums {
+ name, err := rawStrings.Lookup(btfVal.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for enum value %d: %s", i, err)
+ }
+ value := uint64(btfVal.Val)
+ if signed {
+ // Sign extend values to 64 bit.
+ value = uint64(int32(btfVal.Val))
+ }
+ vals = append(vals, EnumValue{name, value})
+ }
+ typ = &Enum{name, header.Size(), signed, vals}
+
+ case kindForward:
+ typ = &Fwd{name, header.FwdKind()}
+
+ case kindTypedef:
+ typedef := &Typedef{name, nil}
+ fixup(header.Type(), &typedef.Type)
+ typ = typedef
+
+ case kindVolatile:
+ volatile := &Volatile{nil}
+ fixup(header.Type(), &volatile.Type)
+ typ = volatile
+
+ case kindConst:
+ cnst := &Const{nil}
+ fixup(header.Type(), &cnst.Type)
+ typ = cnst
+
+ case kindRestrict:
+ restrict := &Restrict{nil}
+ fixup(header.Type(), &restrict.Type)
+ typ = restrict
+
+ case kindFunc:
+ fn := &Func{name, nil, header.Linkage()}
+ fixup(header.Type(), &fn.Type)
+ typ = fn
+
+ case kindFuncProto:
+ vlen := header.Vlen()
+ bParams = slices.Grow(bParams[:0], vlen)[:vlen]
+ buf = slices.Grow(buf[:0], vlen*btfParamLen)[:vlen*btfParamLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfParams, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfParams(bParams, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal btfParams, id: %d: %w", id, err)
+ }
+
+ params := make([]FuncParam, 0, vlen)
+ for i, param := range bParams {
+ name, err := rawStrings.Lookup(param.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
+ }
+ params = append(params, FuncParam{
+ Name: name,
+ })
+ }
+ for i := range params {
+ fixup(bParams[i].Type, ¶ms[i].Type)
+ }
+
+ fp := &FuncProto{nil, params}
+ fixup(header.Type(), &fp.Return)
+ typ = fp
+
+ case kindVar:
+ buf = buf[:btfVariableLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfVariable(&bVariable, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err)
+ }
+
+ v := &Var{name, nil, VarLinkage(bVariable.Linkage)}
+ fixup(header.Type(), &v.Type)
+ typ = v
+
+ case kindDatasec:
+ vlen := header.Vlen()
+ bSecInfos = slices.Grow(bSecInfos[:0], vlen)[:vlen]
+ buf = slices.Grow(buf[:0], vlen*btfVarSecinfoLen)[:vlen*btfVarSecinfoLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfVarSecInfos, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfVarSecInfos(bSecInfos, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal btfVarSecInfos, id: %d: %w", id, err)
+ }
+
+ vars := make([]VarSecinfo, 0, vlen)
+ for _, btfVar := range bSecInfos {
+ vars = append(vars, VarSecinfo{
+ Offset: btfVar.Offset,
+ Size: btfVar.Size,
+ })
+ }
+ for i := range vars {
+ fixup(bSecInfos[i].Type, &vars[i].Type)
+ }
+ typ = &Datasec{name, header.Size(), vars}
+
+ case kindFloat:
+ typ = &Float{name, header.Size()}
+
+ case kindDeclTag:
+ buf = buf[:btfDeclTagLen]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfDeclTag(&bDeclTag, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err)
+ }
+
+ btfIndex := bDeclTag.ComponentIdx
+ if uint64(btfIndex) > math.MaxInt {
+ return nil, fmt.Errorf("type id %d: index exceeds int", id)
+ }
+
+ dt := &declTag{nil, name, int(int32(btfIndex))}
+ fixup(header.Type(), &dt.Type)
+ typ = dt
+
+ declTags = append(declTags, dt)
+
+ case kindTypeTag:
+ tt := &typeTag{nil, name}
+ fixup(header.Type(), &tt.Type)
+ typ = tt
+
+ case kindEnum64:
+ vlen := header.Vlen()
+ bEnums64 = slices.Grow(bEnums64[:0], vlen)[:vlen]
+ buf = slices.Grow(buf[:0], vlen*btfEnum64Len)[:vlen*btfEnum64Len]
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("can't read btfEnum64s, id: %d: %w", id, err)
+ }
+ if _, err := unmarshalBtfEnums64(bEnums64, buf, bo); err != nil {
+ return nil, fmt.Errorf("can't unmarshal btfEnum64s, id: %d: %w", id, err)
+ }
+
+ vals := make([]EnumValue, 0, vlen)
+ for i, btfVal := range bEnums64 {
+ name, err := rawStrings.Lookup(btfVal.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err)
+ }
+ value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32)
+ vals = append(vals, EnumValue{name, value})
+ }
+ typ = &Enum{name, header.Size(), header.Signed(), vals}
+
+ default:
+ return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind())
+ }
+
+ types = append(types, typ)
+ }
+
+ for _, fixup := range fixups {
+ if fixup.id < firstTypeID {
+ return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id)
+ }
+
+ idx := int(fixup.id - firstTypeID)
+ if idx >= len(types) {
+ return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
+ }
+
+ *fixup.typ = types[idx]
+ }
+
+ for _, bitfieldFixup := range bitfieldFixups {
+ if bitfieldFixup.id < firstTypeID {
+ return nil, fmt.Errorf("bitfield fixup from split to base types is not expected")
+ }
+
+ data, ok := legacyBitfields[bitfieldFixup.id]
+ if ok {
+ // This is indeed a legacy bitfield, fix it up.
+ bitfieldFixup.m.Offset += data[0]
+ bitfieldFixup.m.BitfieldSize = data[1]
+ }
+ }
+
+ for _, dt := range declTags {
+ switch t := dt.Type.(type) {
+ case *Var, *Typedef:
+ if dt.Index != -1 {
+ return nil, fmt.Errorf("type %s: index %d is not -1", dt, dt.Index)
+ }
+
+ case composite:
+ if dt.Index >= len(t.members()) {
+ return nil, fmt.Errorf("type %s: index %d exceeds members of %s", dt, dt.Index, t)
+ }
+
+ case *Func:
+ fp, ok := t.Type.(*FuncProto)
+ if !ok {
+ return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type)
+ }
+
+ if dt.Index >= len(fp.Params) {
+ return nil, fmt.Errorf("type %s: index %d exceeds params of %s", dt, dt.Index, t)
+ }
+
+ default:
+ return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t)
+ }
+ }
+
+ return types, nil
+}
+
+// essentialName represents the name of a BTF type stripped of any flavor
+// suffixes after a ___ delimiter.
+type essentialName string
+
+// newEssentialName returns name without a ___ suffix.
+//
+// CO-RE has the concept of 'struct flavors', which are used to deal with
+// changes in kernel data structures. Anything after three underscores
+// in a type name is ignored for the purpose of finding a candidate type
+// in the kernel's BTF.
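+//
+// For example, a flavored name like "task_struct___v2" has the essential
+// name "task_struct".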
+func newEssentialName(name string) essentialName {
+ if name == "" {
+ return ""
+ }
+ lastIdx := strings.LastIndex(name, "___")
+ if lastIdx > 0 {
+ return essentialName(name[:lastIdx])
+ }
+ return essentialName(name)
+}
+
+// UnderlyingType skips qualifiers and Typedefs.
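+//
+// For example, the underlying type of a Typedef of a Const of an Int is that
+// Int. If the chain is nested deeper than maxResolveDepth, a cycle is
+// returned instead.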
+func UnderlyingType(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxResolveDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ case *Typedef:
+ result = v.Type
+ default:
+ return result
+ }
+ }
+ return &cycle{typ}
+}
+
+// As returns typ if it is of type T. Otherwise it peels qualifiers and Typedefs
+// until it finds a T.
+//
+// Returns the zero value and false if there is no T or if the type is nested
+// too deeply.
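+//
+// For example, As[*Struct](typ) reports whether typ ultimately refers to a
+// Struct, as used by Fwd.matches above.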
+func As[T Type](typ Type) (T, bool) {
+ // NB: We can't make this function return (*T) since then
+ // we can't assert that a type matches an interface which
+	// embeds Type: As[composite](typ).
+ for depth := 0; depth <= maxResolveDepth; depth++ {
+ switch v := (typ).(type) {
+ case T:
+ return v, true
+ case qualifier:
+ typ = v.qualify()
+ case *Typedef:
+ typ = v.Type
+ default:
+ goto notFound
+ }
+ }
+notFound:
+ var zero T
+ return zero, false
+}
+
+type formatState struct {
+ fmt.State
+ depth int
+}
+
+// formattableType is a subset of Type, to ease unit testing of formatType.
+type formattableType interface {
+ fmt.Formatter
+ TypeName() string
+}
+
+// formatType formats a type in a canonical form.
+//
+// Handles cyclical types by only printing cycles up to a certain depth. Elements
+// in extra are separated by spaces unless the preceding element is a string
+// ending in '='.
+func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) {
+ if verb != 'v' && verb != 's' {
+ fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb)
+ return
+ }
+
+ _, _ = io.WriteString(f, internal.GoTypeName(t))
+
+ if name := t.TypeName(); name != "" {
+ // Output BTF type name if present.
+ fmt.Fprintf(f, ":%q", name)
+ }
+
+ if f.Flag('+') {
+ // Output address if requested.
+ fmt.Fprintf(f, ":%#p", t)
+ }
+
+ if verb == 's' {
+ // %s omits details.
+ return
+ }
+
+ var depth int
+ if ps, ok := f.(*formatState); ok {
+ depth = ps.depth
+ f = ps.State
+ }
+
+ maxDepth, ok := f.Width()
+ if !ok {
+ maxDepth = 0
+ }
+
+ if depth > maxDepth {
+ // We've reached the maximum depth. This avoids infinite recursion even
+ // for cyclical types.
+ return
+ }
+
+ if len(extra) == 0 {
+ return
+ }
+
+ wantSpace := false
+ _, _ = io.WriteString(f, "[")
+ for _, arg := range extra {
+ if wantSpace {
+ _, _ = io.WriteString(f, " ")
+ }
+
+ switch v := arg.(type) {
+ case string:
+ _, _ = io.WriteString(f, v)
+ wantSpace = len(v) > 0 && v[len(v)-1] != '='
+ continue
+
+ case formattableType:
+ v.Format(&formatState{f, depth + 1}, verb)
+
+ default:
+ fmt.Fprint(f, arg)
+ }
+
+ wantSpace = true
+ }
+ _, _ = io.WriteString(f, "]")
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/workarounds.go b/vendor/github.com/cilium/ebpf/btf/workarounds.go
new file mode 100644
index 000000000..12a89b87e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/workarounds.go
@@ -0,0 +1,26 @@
+package btf
+
+// datasecResolveWorkaround ensures that certain vars in a Datasec are added
+// to a Spec before the Datasec. This avoids a bug in kernel BTF validation.
+//
+// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/
+func datasecResolveWorkaround(b *Builder, ds *Datasec) error {
+ for _, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ continue
+ }
+
+ switch v.Type.(type) {
+ case *Typedef, *Volatile, *Const, *Restrict, *typeTag:
+ // NB: We must never call Add on a Datasec, otherwise we risk
+ // infinite recursion.
+ _, err := b.Add(v.Type)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go
new file mode 100644
index 000000000..b2cb214ad
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/collection.go
@@ -0,0 +1,925 @@
+package ebpf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/kconfig"
+ "github.com/cilium/ebpf/internal/sysenc"
+)
+
+// CollectionOptions control loading a collection into the kernel.
+//
+// Maps and Programs are passed to NewMapWithOptions and NewProgramWithOptions.
+type CollectionOptions struct {
+ Maps MapOptions
+ Programs ProgramOptions
+
+ // MapReplacements takes a set of Maps that will be used instead of
+ // creating new ones when loading the CollectionSpec.
+ //
+ // For each given Map, there must be a corresponding MapSpec in
+ // CollectionSpec.Maps, and its type, key/value size, max entries and flags
+ // must match the values of the MapSpec.
+ //
+ // The given Maps are Clone()d before being used in the Collection, so the
+ // caller can Close() them freely when they are no longer needed.
+ MapReplacements map[string]*Map
+}
+
+// CollectionSpec describes a collection.
+type CollectionSpec struct {
+ Maps map[string]*MapSpec
+ Programs map[string]*ProgramSpec
+
+ // Types holds type information about Maps and Programs.
+ // Modifications to Types are currently undefined behaviour.
+ Types *btf.Spec
+
+ // ByteOrder specifies whether the ELF was compiled for
+ // big-endian or little-endian architectures.
+ ByteOrder binary.ByteOrder
+}
+
+// Copy returns a recursive copy of the spec.
+func (cs *CollectionSpec) Copy() *CollectionSpec {
+ if cs == nil {
+ return nil
+ }
+
+ cpy := CollectionSpec{
+ Maps: make(map[string]*MapSpec, len(cs.Maps)),
+ Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
+ ByteOrder: cs.ByteOrder,
+ Types: cs.Types.Copy(),
+ }
+
+ for name, spec := range cs.Maps {
+ cpy.Maps[name] = spec.Copy()
+ }
+
+ for name, spec := range cs.Programs {
+ cpy.Programs[name] = spec.Copy()
+ }
+
+ return &cpy
+}
+
+// RewriteMaps replaces all references to specific maps.
+//
+// Use this function to use pre-existing maps instead of creating new ones
+// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps.
+//
+// Returns an error if a named map isn't used in at least one program.
+//
+// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection
+// instead.
+func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
+ for symbol, m := range maps {
+		// Have we seen a program that uses this symbol / map?
+ seen := false
+ for progName, progSpec := range cs.Programs {
+ err := progSpec.Instructions.AssociateMap(symbol, m)
+
+ switch {
+ case err == nil:
+ seen = true
+
+ case errors.Is(err, asm.ErrUnreferencedSymbol):
+ // Not all programs need to use the map
+
+ default:
+ return fmt.Errorf("program %s: %w", progName, err)
+ }
+ }
+
+ if !seen {
+ return fmt.Errorf("map %s not referenced by any programs", symbol)
+ }
+
+ // Prevent NewCollection from creating rewritten maps
+ delete(cs.Maps, symbol)
+ }
+
+ return nil
+}
+
+// MissingConstantsError is returned by [CollectionSpec.RewriteConstants].
+type MissingConstantsError struct {
+ // The constants missing from .rodata.
+ Constants []string
+}
+
+func (m *MissingConstantsError) Error() string {
+ return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", "))
+}
+
+// RewriteConstants replaces the value of multiple constants.
+//
+// The constant must be defined like so in the C program:
+//
+// volatile const type foobar;
+// volatile const type foobar = default;
+//
+// Replacement values must be of the same length as the C sizeof(type).
+// If necessary, they are marshalled according to the same rules as
+// map values.
+//
+// From Linux 5.5 the verifier will use constants to eliminate dead code.
+//
+// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist.
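+//
+// A hypothetical call for the example above might look like:
+//
+//	err := spec.RewriteConstants(map[string]interface{}{"foobar": uint32(1)})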
+func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
+ replaced := make(map[string]bool)
+
+ for name, spec := range cs.Maps {
+ if !strings.HasPrefix(name, ".rodata") {
+ continue
+ }
+
+ b, ds, err := spec.dataSection()
+ if errors.Is(err, errMapNoBTFValue) {
+ // Data sections without a BTF Datasec are valid, but don't support
+ // constant replacements.
+ continue
+ }
+ if err != nil {
+ return fmt.Errorf("map %s: %w", name, err)
+ }
+
+ // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice
+ // to avoid any changes affecting other copies of the MapSpec.
+ cpy := make([]byte, len(b))
+ copy(cpy, b)
+
+ for _, v := range ds.Vars {
+ vname := v.Type.TypeName()
+ replacement, ok := consts[vname]
+ if !ok {
+ continue
+ }
+
+ if _, ok := v.Type.(*btf.Var); !ok {
+ return fmt.Errorf("section %s: unexpected type %T for variable %s", name, v.Type, vname)
+ }
+
+ if replaced[vname] {
+ return fmt.Errorf("section %s: duplicate variable %s", name, vname)
+ }
+
+ if int(v.Offset+v.Size) > len(cpy) {
+ return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname)
+ }
+
+ b, err := sysenc.Marshal(replacement, int(v.Size))
+ if err != nil {
+ return fmt.Errorf("marshaling constant replacement %s: %w", vname, err)
+ }
+
+ b.CopyTo(cpy[v.Offset : v.Offset+v.Size])
+
+ replaced[vname] = true
+ }
+
+ spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy}
+ }
+
+ var missing []string
+ for c := range consts {
+ if !replaced[c] {
+ missing = append(missing, c)
+ }
+ }
+
+ if len(missing) != 0 {
+ return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing})
+ }
+
+ return nil
+}
+
+// Assign the contents of a CollectionSpec to a struct.
+//
+// This function is a shortcut to manually checking the presence
+// of maps and programs in a CollectionSpec. Consider using bpf2go
+// if this sounds useful.
+//
+// 'to' must be a pointer to a struct. A field of the
+// struct is updated with values from Programs or Maps if it
+// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec.
+// The tag's value specifies the name of the program or map as
+// found in the CollectionSpec.
+//
+// struct {
+// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
+// Bar *ebpf.MapSpec `ebpf:"bar_map"`
+// Ignored int
+// }
+//
+// Returns an error if any of the eBPF objects can't be found, or
+// if the same MapSpec or ProgramSpec is assigned multiple times.
+func (cs *CollectionSpec) Assign(to interface{}) error {
+ // Assign() only supports assigning ProgramSpecs and MapSpecs,
+ // so doesn't load any resources into the kernel.
+ getValue := func(typ reflect.Type, name string) (interface{}, error) {
+ switch typ {
+
+ case reflect.TypeOf((*ProgramSpec)(nil)):
+ if p := cs.Programs[name]; p != nil {
+ return p, nil
+ }
+ return nil, fmt.Errorf("missing program %q", name)
+
+ case reflect.TypeOf((*MapSpec)(nil)):
+ if m := cs.Maps[name]; m != nil {
+ return m, nil
+ }
+ return nil, fmt.Errorf("missing map %q", name)
+
+ default:
+ return nil, fmt.Errorf("unsupported type %s", typ)
+ }
+ }
+
+ return assignValues(to, getValue)
+}
+
+// LoadAndAssign loads Maps and Programs into the kernel and assigns them
+// to a struct.
+//
+// Omitting Map/Program.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+//
+// This function is a shortcut to manually checking the presence
+// of maps and programs in a CollectionSpec. Consider using bpf2go
+// if this sounds useful.
+//
+// 'to' must be a pointer to a struct. A field of the struct is updated with
+// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map.
+// The tag's value specifies the name of the program or map as found in the
+// CollectionSpec. Before updating the struct, the requested objects and their
+// dependent resources are loaded into the kernel and populated with values if
+// specified.
+//
+// struct {
+// Foo *ebpf.Program `ebpf:"xdp_foo"`
+// Bar *ebpf.Map `ebpf:"bar_map"`
+// Ignored int
+// }
+//
+// opts may be nil.
+//
+// Returns an error if any of the fields can't be found, or
+// if the same Map or Program is assigned multiple times.
+func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error {
+ loader, err := newCollectionLoader(cs, opts)
+ if err != nil {
+ return err
+ }
+ defer loader.close()
+
+ // Support assigning Programs and Maps, lazy-loading the required objects.
+ assignedMaps := make(map[string]bool)
+ assignedProgs := make(map[string]bool)
+
+ getValue := func(typ reflect.Type, name string) (interface{}, error) {
+ switch typ {
+
+ case reflect.TypeOf((*Program)(nil)):
+ assignedProgs[name] = true
+ return loader.loadProgram(name)
+
+ case reflect.TypeOf((*Map)(nil)):
+ assignedMaps[name] = true
+ return loader.loadMap(name)
+
+ default:
+ return nil, fmt.Errorf("unsupported type %s", typ)
+ }
+ }
+
+ // Load the Maps and Programs requested by the annotated struct.
+ if err := assignValues(to, getValue); err != nil {
+ return err
+ }
+
+ // Populate the requested maps. Has a chance of lazy-loading other dependent maps.
+ if err := loader.populateDeferredMaps(); err != nil {
+ return err
+ }
+
+ // Evaluate the loader's objects after all (lazy)loading has taken place.
+ for n, m := range loader.maps {
+ switch m.typ {
+ case ProgramArray:
+ // Require all lazy-loaded ProgramArrays to be assigned to the given object.
+ // The kernel empties a ProgramArray once the last user space reference
+ // to it closes, which leads to failed tail calls. Combined with the library
+ // closing map fds via GC finalizers this can lead to surprising behaviour.
+ // Only allow unassigned ProgramArrays when the library hasn't pre-populated
+ // any entries from static value declarations. At this point, we know the map
+ // is empty and there's no way for the caller to interact with the map going
+ // forward.
+ if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 {
+ return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n)
+ }
+ }
+ }
+
+ // Prevent loader.cleanup() from closing assigned Maps and Programs.
+ for m := range assignedMaps {
+ delete(loader.maps, m)
+ }
+ for p := range assignedProgs {
+ delete(loader.programs, p)
+ }
+
+ return nil
+}
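+
+// A minimal usage sketch for LoadAndAssign (here `spec` is a previously
+// loaded *CollectionSpec; the struct and the tag names are hypothetical,
+// not part of this package):
+//
+//	var objs struct {
+//		Prog *ebpf.Program `ebpf:"xdp_prog"`
+//		Map  *ebpf.Map     `ebpf:"my_map"`
+//	}
+//	if err := spec.LoadAndAssign(&objs, nil); err != nil {
+//		// handle error
+//	}
+//	defer objs.Prog.Close()
+//	defer objs.Map.Close()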
+
+// Collection is a collection of Programs and Maps associated
+// with their symbols.
+type Collection struct {
+ Programs map[string]*Program
+ Maps map[string]*Map
+}
+
+// NewCollection creates a Collection from the given spec, creating and
+// loading its declared resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+func NewCollection(spec *CollectionSpec) (*Collection, error) {
+ return NewCollectionWithOptions(spec, CollectionOptions{})
+}
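+
+// A minimal usage sketch (the object file name is hypothetical):
+//
+//	spec, err := ebpf.LoadCollectionSpec("bpf_bytecode.o")
+//	if err != nil {
+//		// handle error
+//	}
+//	coll, err := ebpf.NewCollection(spec)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer coll.Close()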
+
+// NewCollectionWithOptions creates a Collection from the given spec using
+// options, creating and loading its declared resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
+ loader, err := newCollectionLoader(spec, &opts)
+ if err != nil {
+ return nil, err
+ }
+ defer loader.close()
+
+ // Create maps first, as their fds need to be linked into programs.
+ for mapName := range spec.Maps {
+ if _, err := loader.loadMap(mapName); err != nil {
+ return nil, err
+ }
+ }
+
+ for progName, prog := range spec.Programs {
+ if prog.Type == UnspecifiedProgram {
+ continue
+ }
+
+ if _, err := loader.loadProgram(progName); err != nil {
+ return nil, err
+ }
+ }
+
+ // Maps can contain Program and Map stubs, so populate them after
+ // all Maps and Programs have been successfully loaded.
+ if err := loader.populateDeferredMaps(); err != nil {
+ return nil, err
+ }
+
+ // Prevent loader.cleanup from closing maps and programs.
+ maps, progs := loader.maps, loader.programs
+ loader.maps, loader.programs = nil, nil
+
+ return &Collection{
+ progs,
+ maps,
+ }, nil
+}
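+
+// A minimal sketch of reusing an already created Map via MapReplacements
+// (here `spec` is a previously loaded *CollectionSpec; the "events" name and
+// the existingMap variable are hypothetical):
+//
+//	coll, err := ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
+//		MapReplacements: map[string]*ebpf.Map{"events": existingMap},
+//	})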
+
+type collectionLoader struct {
+ coll *CollectionSpec
+ opts *CollectionOptions
+ maps map[string]*Map
+ programs map[string]*Program
+}
+
+func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) {
+ if opts == nil {
+ opts = &CollectionOptions{}
+ }
+
+ // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps.
+ for name, m := range opts.MapReplacements {
+ spec, ok := coll.Maps[name]
+ if !ok {
+ return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name)
+ }
+
+ if err := spec.Compatible(m); err != nil {
+ return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err)
+ }
+ }
+
+ return &collectionLoader{
+ coll,
+ opts,
+ make(map[string]*Map),
+ make(map[string]*Program),
+ }, nil
+}
+
+// close all resources left over in the collectionLoader.
+func (cl *collectionLoader) close() {
+ for _, m := range cl.maps {
+ m.Close()
+ }
+ for _, p := range cl.programs {
+ p.Close()
+ }
+}
+
+func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
+ if m := cl.maps[mapName]; m != nil {
+ return m, nil
+ }
+
+ mapSpec := cl.coll.Maps[mapName]
+ if mapSpec == nil {
+ return nil, fmt.Errorf("missing map %s", mapName)
+ }
+
+ if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok {
+ // Clone the map to avoid closing user's map later on.
+ m, err := replaceMap.Clone()
+ if err != nil {
+ return nil, err
+ }
+
+ cl.maps[mapName] = m
+ return m, nil
+ }
+
+ m, err := newMapWithOptions(mapSpec, cl.opts.Maps)
+ if err != nil {
+ return nil, fmt.Errorf("map %s: %w", mapName, err)
+ }
+
+ // Finalize 'scalar' maps that don't refer to any other eBPF resources
+ // potentially pending creation. This is needed for frozen maps like .rodata
+ // that need to be finalized before invoking the verifier.
+ if !mapSpec.Type.canStoreMapOrProgram() {
+ if err := m.finalize(mapSpec); err != nil {
+ return nil, fmt.Errorf("finalizing map %s: %w", mapName, err)
+ }
+ }
+
+ cl.maps[mapName] = m
+ return m, nil
+}
+
+func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
+ if prog := cl.programs[progName]; prog != nil {
+ return prog, nil
+ }
+
+ progSpec := cl.coll.Programs[progName]
+ if progSpec == nil {
+ return nil, fmt.Errorf("unknown program %s", progName)
+ }
+
+ // Bail out early if we know the kernel is going to reject the program.
+ // This skips loading map dependencies, saving some cleanup work later.
+ if progSpec.Type == UnspecifiedProgram {
+ return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName)
+ }
+
+ progSpec = progSpec.Copy()
+
+ // Rewrite any reference to a valid map in the program's instructions,
+ // which includes all of its dependencies.
+ for i := range progSpec.Instructions {
+ ins := &progSpec.Instructions[i]
+
+ if !ins.IsLoadFromMap() || ins.Reference() == "" {
+ continue
+ }
+
+ // Don't overwrite map loads containing non-zero map fd's,
+ // they can be manually included by the caller.
+ // Map FDs/IDs are placed in the lower 32 bits of Constant.
+ if int32(ins.Constant) > 0 {
+ continue
+ }
+
+ m, err := cl.loadMap(ins.Reference())
+ if err != nil {
+ return nil, fmt.Errorf("program %s: %w", progName, err)
+ }
+
+ if err := ins.AssociateMap(m); err != nil {
+ return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err)
+ }
+ }
+
+ prog, err := newProgramWithOptions(progSpec, cl.opts.Programs)
+ if err != nil {
+ return nil, fmt.Errorf("program %s: %w", progName, err)
+ }
+
+ cl.programs[progName] = prog
+ return prog, nil
+}
+
+// populateDeferredMaps iterates maps holding programs or other maps and loads
+// any dependencies. Populates all maps in cl and freezes them if specified.
+func (cl *collectionLoader) populateDeferredMaps() error {
+ for mapName, m := range cl.maps {
+ mapSpec, ok := cl.coll.Maps[mapName]
+ if !ok {
+ return fmt.Errorf("missing map spec %s", mapName)
+ }
+
+ // Scalar maps without Map or Program references are finalized during
+ // creation. Don't finalize them again.
+ if !mapSpec.Type.canStoreMapOrProgram() {
+ continue
+ }
+
+ mapSpec = mapSpec.Copy()
+
+ // MapSpecs that refer to inner maps or programs within the same
+ // CollectionSpec do so using strings. These strings are used as the key
+ // to look up the respective object in the Maps or Programs fields.
+ // Resolve those references to actual Map or Program resources that
+ // have been loaded into the kernel.
+ for i, kv := range mapSpec.Contents {
+ objName, ok := kv.Value.(string)
+ if !ok {
+ continue
+ }
+
+ switch t := mapSpec.Type; {
+ case t.canStoreProgram():
+ // loadProgram is idempotent and could return an existing Program.
+ prog, err := cl.loadProgram(objName)
+ if err != nil {
+ return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err)
+ }
+ mapSpec.Contents[i] = MapKV{kv.Key, prog}
+
+ case t.canStoreMap():
+ // loadMap is idempotent and could return an existing Map.
+ innerMap, err := cl.loadMap(objName)
+ if err != nil {
+ return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err)
+ }
+ mapSpec.Contents[i] = MapKV{kv.Key, innerMap}
+ }
+ }
+
+ // Populate and freeze the map if specified.
+ if err := m.finalize(mapSpec); err != nil {
+ return fmt.Errorf("populating map %s: %w", mapName, err)
+ }
+ }
+
+ return nil
+}
+
+// resolveKconfig resolves all variables declared in .kconfig and populates
+// m.Contents. Does nothing if the given m.Contents is non-empty.
+func resolveKconfig(m *MapSpec) error {
+ ds, ok := m.Value.(*btf.Datasec)
+ if !ok {
+ return errors.New("map value is not a Datasec")
+ }
+
+ type configInfo struct {
+ offset uint32
+ typ btf.Type
+ }
+
+ configs := make(map[string]configInfo)
+
+ data := make([]byte, ds.Size)
+ for _, vsi := range ds.Vars {
+ v := vsi.Type.(*btf.Var)
+ n := v.TypeName()
+
+ switch n {
+ case "LINUX_KERNEL_VERSION":
+ if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 {
+ return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type)
+ }
+
+ kv, err := internal.KernelVersion()
+ if err != nil {
+ return fmt.Errorf("getting kernel version: %w", err)
+ }
+ internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel())
+
+ case "LINUX_HAS_SYSCALL_WRAPPER":
+ integer, ok := v.Type.(*btf.Int)
+ if !ok {
+ return fmt.Errorf("variable %s must be an integer, got %s", n, v.Type)
+ }
+ var value uint64 = 1
+ if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) {
+ value = 0
+ } else if err != nil {
+ return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err)
+ }
+
+ if err := kconfig.PutInteger(data[vsi.Offset:], integer, value); err != nil {
+ return fmt.Errorf("set LINUX_HAS_SYSCALL_WRAPPER: %w", err)
+ }
+
+ default: // Catch CONFIG_*.
+ configs[n] = configInfo{
+ offset: vsi.Offset,
+ typ: v.Type,
+ }
+ }
+ }
+
+ // We only parse the kconfig file if a CONFIG_* variable was found.
+ if len(configs) > 0 {
+ f, err := kconfig.Find()
+ if err != nil {
+ return fmt.Errorf("cannot find a kconfig file: %w", err)
+ }
+ defer f.Close()
+
+ filter := make(map[string]struct{}, len(configs))
+ for config := range configs {
+ filter[config] = struct{}{}
+ }
+
+ kernelConfig, err := kconfig.Parse(f, filter)
+ if err != nil {
+ return fmt.Errorf("cannot parse kconfig file: %w", err)
+ }
+
+ for n, info := range configs {
+ value, ok := kernelConfig[n]
+ if !ok {
+ return fmt.Errorf("config option %q does not exists for this kernel", n)
+ }
+
+ err := kconfig.PutValue(data[info.offset:], info.typ, value)
+ if err != nil {
+ return fmt.Errorf("problem adding value for %s: %w", n, err)
+ }
+ }
+ }
+
+ m.Contents = []MapKV{{uint32(0), data}}
+
+ return nil
+}
+
+// LoadCollection reads an object file and creates and loads its declared
+// resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+func LoadCollection(file string) (*Collection, error) {
+ spec, err := LoadCollectionSpec(file)
+ if err != nil {
+ return nil, err
+ }
+ return NewCollection(spec)
+}
+
+// Assign the contents of a Collection to a struct.
+//
+// This function bridges functionality between bpf2go generated
+// code and any functionality better implemented in Collection.
+//
+// 'to' must be a pointer to a struct. A field of the
+// struct is updated with values from Programs or Maps if it
+// has an `ebpf` tag and its type is *Program or *Map.
+// The tag's value specifies the name of the program or map as
+// found in the CollectionSpec.
+//
+// struct {
+// Foo *ebpf.Program `ebpf:"xdp_foo"`
+// Bar *ebpf.Map `ebpf:"bar_map"`
+// Ignored int
+// }
+//
+// Returns an error if any of the eBPF objects can't be found, or
+// if the same Map or Program is assigned multiple times.
+//
+// Ownership and Close()ing responsibility is transferred to `to`
+// for any successful assigns. On error `to` is left in an undefined state.
+func (coll *Collection) Assign(to interface{}) error {
+ assignedMaps := make(map[string]bool)
+ assignedProgs := make(map[string]bool)
+
+ // Assign() only transfers already-loaded Maps and Programs. No extra
+ // loading is done.
+ getValue := func(typ reflect.Type, name string) (interface{}, error) {
+ switch typ {
+
+ case reflect.TypeOf((*Program)(nil)):
+ if p := coll.Programs[name]; p != nil {
+ assignedProgs[name] = true
+ return p, nil
+ }
+ return nil, fmt.Errorf("missing program %q", name)
+
+ case reflect.TypeOf((*Map)(nil)):
+ if m := coll.Maps[name]; m != nil {
+ assignedMaps[name] = true
+ return m, nil
+ }
+ return nil, fmt.Errorf("missing map %q", name)
+
+ default:
+ return nil, fmt.Errorf("unsupported type %s", typ)
+ }
+ }
+
+ if err := assignValues(to, getValue); err != nil {
+ return err
+ }
+
+ // Finalize ownership transfer
+ for p := range assignedProgs {
+ delete(coll.Programs, p)
+ }
+ for m := range assignedMaps {
+ delete(coll.Maps, m)
+ }
+
+ return nil
+}
+
+// Close frees all maps and programs associated with the collection.
+//
+// The collection mustn't be used afterwards.
+func (coll *Collection) Close() {
+ for _, prog := range coll.Programs {
+ prog.Close()
+ }
+ for _, m := range coll.Maps {
+ m.Close()
+ }
+}
+
+// DetachMap removes the named map from the Collection.
+//
+// This means that a later call to Close() will not affect this map.
+//
+// Returns nil if no map of that name exists.
+func (coll *Collection) DetachMap(name string) *Map {
+ m := coll.Maps[name]
+ delete(coll.Maps, name)
+ return m
+}
+
+// DetachProgram removes the named program from the Collection.
+//
+// This means that a later call to Close() will not affect this program.
+//
+// Returns nil if no program of that name exists.
+func (coll *Collection) DetachProgram(name string) *Program {
+ p := coll.Programs[name]
+ delete(coll.Programs, name)
+ return p
+}
+
+// structField represents a struct field containing the ebpf struct tag.
+type structField struct {
+ reflect.StructField
+ value reflect.Value
+}
+
+// ebpfFields extracts field names tagged with 'ebpf' from a struct type.
+// Keep track of visited types to avoid infinite recursion.
+func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) {
+ if visited == nil {
+ visited = make(map[reflect.Type]bool)
+ }
+
+ structType := structVal.Type()
+ if structType.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("%s is not a struct", structType)
+ }
+
+ if visited[structType] {
+ return nil, fmt.Errorf("recursion on type %s", structType)
+ }
+
+ fields := make([]structField, 0, structType.NumField())
+ for i := 0; i < structType.NumField(); i++ {
+ field := structField{structType.Field(i), structVal.Field(i)}
+
+ // If the field is tagged, gather it and move on.
+ name := field.Tag.Get("ebpf")
+ if name != "" {
+ fields = append(fields, field)
+ continue
+ }
+
+ // If the field does not have an ebpf tag, but is a struct or a pointer
+ // to a struct, attempt to gather its fields as well.
+ var v reflect.Value
+ switch field.Type.Kind() {
+ case reflect.Ptr:
+ if field.Type.Elem().Kind() != reflect.Struct {
+ continue
+ }
+
+ if field.value.IsNil() {
+ return nil, fmt.Errorf("nil pointer to %s", structType)
+ }
+
+ // Obtain the destination type of the pointer.
+ v = field.value.Elem()
+
+ case reflect.Struct:
+ // Reference the value's type directly.
+ v = field.value
+
+ default:
+ continue
+ }
+
+ inner, err := ebpfFields(v, visited)
+ if err != nil {
+ return nil, fmt.Errorf("field %s: %w", field.Name, err)
+ }
+
+ fields = append(fields, inner...)
+ }
+
+ return fields, nil
+}
+
+// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'.
+//
+// getValue is called for every tagged field of 'to' and must return the value
+// to be assigned to the field with the given typ and name.
+func assignValues(to interface{},
+ getValue func(typ reflect.Type, name string) (interface{}, error)) error {
+
+ toValue := reflect.ValueOf(to)
+ if toValue.Type().Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer to struct", to)
+ }
+
+ if toValue.IsNil() {
+ return fmt.Errorf("nil pointer to %T", to)
+ }
+
+ fields, err := ebpfFields(toValue.Elem(), nil)
+ if err != nil {
+ return err
+ }
+
+ type elem struct {
+ // Either *Map or *Program
+ typ reflect.Type
+ name string
+ }
+
+ assigned := make(map[elem]string)
+ for _, field := range fields {
+ // Get string value the field is tagged with.
+ tag := field.Tag.Get("ebpf")
+ if strings.Contains(tag, ",") {
+ return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name)
+ }
+
+ // Check if the eBPF object with the requested
+ // type and tag was already assigned elsewhere.
+ e := elem{field.Type, tag}
+ if af := assigned[e]; af != "" {
+ return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af)
+ }
+
+ // Get the eBPF object referred to by the tag.
+ value, err := getValue(field.Type, tag)
+ if err != nil {
+ return fmt.Errorf("field %s: %w", field.Name, err)
+ }
+
+ if !field.value.CanSet() {
+ return fmt.Errorf("field %s: can't set value", field.Name)
+ }
+ field.value.Set(reflect.ValueOf(value))
+
+ assigned[e] = field.Name
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/cpu.go b/vendor/github.com/cilium/ebpf/cpu.go
new file mode 100644
index 000000000..07e959efd
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/cpu.go
@@ -0,0 +1,66 @@
+package ebpf
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+)
+
+var possibleCPU = sync.OnceValues(func() (int, error) {
+ return parseCPUsFromFile("/sys/devices/system/cpu/possible")
+})
+
+// PossibleCPU returns the max number of CPUs a system may possibly have.
+// Logical CPU numbers must be of the form 0-n.
+func PossibleCPU() (int, error) {
+ return possibleCPU()
+}
+
+// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if
+// the error is non-nil.
+func MustPossibleCPU() int {
+ cpus, err := PossibleCPU()
+ if err != nil {
+ panic(err)
+ }
+ return cpus
+}
+
+func parseCPUsFromFile(path string) (int, error) {
+ spec, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+
+ n, err := parseCPUs(string(spec))
+ if err != nil {
+ return 0, fmt.Errorf("can't parse %s: %v", path, err)
+ }
+
+ return n, nil
+}
+
+// parseCPUs parses the number of cpus from a string produced
+// by bitmap_list_string() in the Linux kernel.
+// Multiple ranges are rejected, since they can't be unified
+// into a single number.
+// This is the format of /sys/devices/system/cpu/possible; it
+// is not suitable for /sys/devices/system/cpu/online, etc.
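+// For example, "0-7" yields 8 possible CPUs, and "0" yields 1.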
+func parseCPUs(spec string) (int, error) {
+ if strings.Trim(spec, "\n") == "0" {
+ return 1, nil
+ }
+
+ var low, high int
+ n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
+ if n != 2 || err != nil {
+ return 0, fmt.Errorf("invalid format: %s", spec)
+ }
+ if low != 0 {
+ return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
+ }
+
+ // cpus is 0 indexed
+ return high + 1, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go
new file mode 100644
index 000000000..396b3394d
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/doc.go
@@ -0,0 +1,25 @@
+// Package ebpf is a toolkit for working with eBPF programs.
+//
+// eBPF programs are small snippets of code which are executed directly
+// in a VM in the Linux kernel, which makes them very fast and flexible.
+// Many Linux subsystems now accept eBPF programs. This makes it possible
+// to implement highly application specific logic inside the kernel,
+// without having to modify the actual kernel itself.
+//
+// This package is designed for long-running processes which
+// want to use eBPF to implement part of their application logic. It has no
+// run-time dependencies outside of the library and the Linux kernel itself.
+// eBPF code should be compiled ahead of time using clang, and shipped with
+// your application as any other resource.
+//
+// Use the link subpackage to attach a loaded program to a hook in the kernel.
+//
+// Note that losing all references to Map and Program resources will cause
+// their underlying file descriptors to be closed, potentially removing those
+// objects from the kernel. Always retain a reference by e.g. deferring a
+// Close() of a Collection or LoadAndAssign object until application exit.
+//
+// Special care needs to be taken when handling maps of type ProgramArray,
+// as the kernel erases its contents when the last userspace or bpffs
+// reference disappears, regardless of the map being in active use.
+package ebpf
diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go
new file mode 100644
index 000000000..620037d80
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/elf_reader.go
@@ -0,0 +1,1337 @@
+package ebpf
+
+import (
+ "bufio"
+ "bytes"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+type kconfigMetaKey struct{}
+
+type kconfigMeta struct {
+ Map *MapSpec
+ Offset uint32
+}
+
+type kfuncMetaKey struct{}
+
+type kfuncMeta struct {
+ Binding elf.SymBind
+ Func *btf.Func
+}
+
+// elfCode is a convenience to reduce the amount of arguments that have to
+// be passed around explicitly. You should treat its contents as immutable.
+type elfCode struct {
+ *internal.SafeELFFile
+ sections map[elf.SectionIndex]*elfSection
+ license string
+ version uint32
+ btf *btf.Spec
+ extInfo *btf.ExtInfos
+ maps map[string]*MapSpec
+ kfuncs map[string]*btf.Func
+ kconfig *MapSpec
+}
+
+// LoadCollectionSpec parses an ELF file into a CollectionSpec.
+func LoadCollectionSpec(file string) (*CollectionSpec, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ spec, err := LoadCollectionSpecFromReader(f)
+ if err != nil {
+ return nil, fmt.Errorf("file %s: %w", file, err)
+ }
+ return spec, nil
+}
+
+// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec.
+func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
+ f, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Checks if the ELF file is for BPF data.
+ // Old LLVM versions set e_machine to EM_NONE.
+ if f.File.Machine != unix.EM_NONE && f.File.Machine != elf.EM_BPF {
+ return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.File.Machine)
+ }
+
+ var (
+ licenseSection *elf.Section
+ versionSection *elf.Section
+ sections = make(map[elf.SectionIndex]*elfSection)
+ relSections = make(map[elf.SectionIndex]*elf.Section)
+ )
+
+ // This is the target of relocations generated by inline assembly.
+ sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection)
+
+ // Collect all the sections we're interested in. This includes relocations
+ // which we parse later.
+ //
+ // Keep the documentation at docs/ebpf/loading/elf-sections.md up-to-date.
+ for i, sec := range f.Sections {
+ idx := elf.SectionIndex(i)
+
+ switch {
+ case strings.HasPrefix(sec.Name, "license"):
+ licenseSection = sec
+ case strings.HasPrefix(sec.Name, "version"):
+ versionSection = sec
+ case strings.HasPrefix(sec.Name, "maps"):
+ sections[idx] = newElfSection(sec, mapSection)
+ case sec.Name == ".maps":
+ sections[idx] = newElfSection(sec, btfMapSection)
+ case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"):
+ sections[idx] = newElfSection(sec, dataSection)
+ case sec.Type == elf.SHT_REL:
+ // Store relocations under the section index of the target
+ relSections[elf.SectionIndex(sec.Info)] = sec
+ case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
+ sections[idx] = newElfSection(sec, programSection)
+ }
+ }
+
+ license, err := loadLicense(licenseSection)
+ if err != nil {
+ return nil, fmt.Errorf("load license: %w", err)
+ }
+
+ version, err := loadVersion(versionSection, f.ByteOrder)
+ if err != nil {
+ return nil, fmt.Errorf("load version: %w", err)
+ }
+
+ btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd)
+ if err != nil && !errors.Is(err, btf.ErrNotFound) {
+ return nil, fmt.Errorf("load BTF: %w", err)
+ }
+
+ ec := &elfCode{
+ SafeELFFile: f,
+ sections: sections,
+ license: license,
+ version: version,
+ btf: btfSpec,
+ extInfo: btfExtInfo,
+ maps: make(map[string]*MapSpec),
+ kfuncs: make(map[string]*btf.Func),
+ }
+
+ symbols, err := f.Symbols()
+ if err != nil {
+ return nil, fmt.Errorf("load symbols: %v", err)
+ }
+
+ ec.assignSymbols(symbols)
+
+ if err := ec.loadRelocations(relSections, symbols); err != nil {
+ return nil, fmt.Errorf("load relocations: %w", err)
+ }
+
+ if err := ec.loadMaps(); err != nil {
+ return nil, fmt.Errorf("load maps: %w", err)
+ }
+
+ if err := ec.loadBTFMaps(); err != nil {
+ return nil, fmt.Errorf("load BTF maps: %w", err)
+ }
+
+ if err := ec.loadDataSections(); err != nil {
+ return nil, fmt.Errorf("load data sections: %w", err)
+ }
+
+ if err := ec.loadKconfigSection(); err != nil {
+ return nil, fmt.Errorf("load virtual .kconfig section: %w", err)
+ }
+
+ if err := ec.loadKsymsSection(); err != nil {
+ return nil, fmt.Errorf("load virtual .ksyms section: %w", err)
+ }
+
+ // Finally, collect programs and link them.
+ progs, err := ec.loadProgramSections()
+ if err != nil {
+ return nil, fmt.Errorf("load programs: %w", err)
+ }
+
+ return &CollectionSpec{ec.maps, progs, btfSpec, ec.ByteOrder}, nil
+}
+
+func loadLicense(sec *elf.Section) (string, error) {
+ if sec == nil {
+ return "", nil
+ }
+
+ data, err := sec.Data()
+ if err != nil {
+ return "", fmt.Errorf("section %s: %v", sec.Name, err)
+ }
+ return string(bytes.TrimRight(data, "\000")), nil
+}
+
+func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {
+ if sec == nil {
+ return 0, nil
+ }
+
+ var version uint32
+ if err := binary.Read(sec.Open(), bo, &version); err != nil {
+ return 0, fmt.Errorf("section %s: %v", sec.Name, err)
+ }
+ return version, nil
+}
+
+type elfSectionKind int
+
+const (
+ undefSection elfSectionKind = iota
+ mapSection
+ btfMapSection
+ programSection
+ dataSection
+)
+
+type elfSection struct {
+ *elf.Section
+ kind elfSectionKind
+ // Offset from the start of the section to a symbol
+ symbols map[uint64]elf.Symbol
+ // Offset from the start of the section to a relocation, which points at
+ // a symbol in another section.
+ relocations map[uint64]elf.Symbol
+ // The number of relocations pointing at this section.
+ references int
+}
+
+func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection {
+ return &elfSection{
+ section,
+ kind,
+ make(map[uint64]elf.Symbol),
+ make(map[uint64]elf.Symbol),
+ 0,
+ }
+}
+
+// assignSymbols takes a list of symbols and assigns them to their
+// respective sections, indexed by name.
+func (ec *elfCode) assignSymbols(symbols []elf.Symbol) {
+ for _, symbol := range symbols {
+ symType := elf.ST_TYPE(symbol.Info)
+ symSection := ec.sections[symbol.Section]
+ if symSection == nil {
+ continue
+ }
+
+ // Anonymous symbols only occur in debug sections which we don't process
+ // relocations for. Anonymous symbols are not referenced from other sections.
+ if symbol.Name == "" {
+ continue
+ }
+
+ // Older versions of LLVM don't tag symbols correctly, so keep
+ // all NOTYPE ones.
+ switch symSection.kind {
+ case mapSection, btfMapSection, dataSection:
+ if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT {
+ continue
+ }
+ case programSection:
+ if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC {
+ continue
+ }
+ // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump
+ // targets within sections, but BPF has no use for them.
+ if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL &&
+ strings.HasPrefix(symbol.Name, "LBB") {
+ continue
+ }
+ // Only collect symbols that occur in program/maps/data sections.
+ default:
+ continue
+ }
+
+ symSection.symbols[symbol.Value] = symbol
+ }
+}
+
+// loadRelocations iterates .rel* sections and extracts relocation entries for
+// sections of interest. Makes sure relocations point at valid sections.
+func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error {
+ for idx, relSection := range relSections {
+ section := ec.sections[idx]
+ if section == nil {
+ continue
+ }
+
+ rels, err := ec.loadSectionRelocations(relSection, symbols)
+ if err != nil {
+ return fmt.Errorf("relocation for section %q: %w", section.Name, err)
+ }
+
+ for _, rel := range rels {
+ target := ec.sections[rel.Section]
+ if target == nil {
+ return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
+ }
+
+ target.references++
+ }
+
+ section.relocations = rels
+ }
+
+ return nil
+}
+
+// loadProgramSections iterates ec's sections and emits a ProgramSpec
+// for each function it finds.
+//
+// The resulting map is indexed by function name.
+func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) {
+
+ progs := make(map[string]*ProgramSpec)
+
+ // Generate a ProgramSpec for each function found in each program section.
+ var export []string
+ for _, sec := range ec.sections {
+ if sec.kind != programSection {
+ continue
+ }
+
+ if len(sec.symbols) == 0 {
+ return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
+ }
+
+ funcs, err := ec.loadFunctions(sec)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", sec.Name, err)
+ }
+
+ progType, attachType, progFlags, attachTo := getProgType(sec.Name)
+
+ for name, insns := range funcs {
+ spec := &ProgramSpec{
+ Name: name,
+ Type: progType,
+ Flags: progFlags,
+ AttachType: attachType,
+ AttachTo: attachTo,
+ SectionName: sec.Name,
+ License: ec.license,
+ KernelVersion: ec.version,
+ Instructions: insns,
+ ByteOrder: ec.ByteOrder,
+ }
+
+ // Function names must be unique within a single ELF blob.
+ if progs[name] != nil {
+ return nil, fmt.Errorf("duplicate program name %s", name)
+ }
+ progs[name] = spec
+
+ if spec.SectionName != ".text" {
+ export = append(export, name)
+ }
+ }
+ }
+
+ flattenPrograms(progs, export)
+
+ // Hide programs (e.g. library functions) that were not explicitly emitted
+ // to an ELF section. These could be exposed in a separate CollectionSpec
+ // field later to allow them to be modified.
+ for n, p := range progs {
+ if p.SectionName == ".text" {
+ delete(progs, n)
+ }
+ }
+
+ return progs, nil
+}
+
+// loadFunctions extracts instruction streams from the given program section
+// starting at each symbol in the section. The section's symbols must already
+// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC.
+//
+// The resulting map is indexed by function name.
+func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) {
+ r := bufio.NewReader(section.Open())
+
+ // Decode the section's instruction stream.
+ insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize)
+ if err := insns.Unmarshal(r, ec.ByteOrder); err != nil {
+ return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err)
+ }
+ if len(insns) == 0 {
+ return nil, fmt.Errorf("no instructions found in section %s", section.Name)
+ }
+
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+ offset := iter.Offset.Bytes()
+
+ // Tag Symbol Instructions.
+ if sym, ok := section.symbols[offset]; ok {
+ *ins = ins.WithSymbol(sym.Name)
+ }
+
+ // Apply any relocations for the current instruction.
+ // If no relocation is present, resolve any section-relative function calls.
+ if rel, ok := section.relocations[offset]; ok {
+ if err := ec.relocateInstruction(ins, rel); err != nil {
+ return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err)
+ }
+ } else {
+ if err := referenceRelativeJump(ins, offset, section.symbols); err != nil {
+ return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err)
+ }
+ }
+ }
+
+ if ec.extInfo != nil {
+ ec.extInfo.Assign(insns, section.Name)
+ }
+
+ return splitSymbols(insns)
+}
+
+// referenceRelativeJump turns a relative jump to another bpf subprogram within
+// the same ELF section into a Reference Instruction.
+//
+// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes
+// encoded using relative jumps instead of relocation entries. These jumps go
+// out of bounds of the current program, so their targets must be memoized
+// before the section's instruction stream is split.
+//
+// The relative jump Constant is blinded to -1 and the target Symbol is set as
+// the Instruction's Reference so it can be resolved by the linker.
+func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error {
+ if !ins.IsFunctionReference() || ins.Constant == -1 {
+ return nil
+ }
+
+ tgt := jumpTarget(offset, *ins)
+ sym := symbols[tgt].Name
+ if sym == "" {
+ return fmt.Errorf("no jump target found at offset %d", tgt)
+ }
+
+ *ins = ins.WithReference(sym)
+ ins.Constant = -1
+
+ return nil
+}
+
+// jumpTarget takes ins' offset within an instruction stream (in bytes)
+// and returns its absolute jump destination (in bytes) within the
+// instruction stream.
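+//
+// For example, with asm.InstructionSize of 8 bytes, a jump whose Constant is 3
+// and which sits at byte offset 16 targets byte offset 3*8 + (16+8) = 48.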
+func jumpTarget(offset uint64, ins asm.Instruction) uint64 {
+ // A relative jump instruction describes the number of raw BPF instructions
+ // to jump; convert the offset into bytes.
+ dest := ins.Constant * asm.InstructionSize
+
+ // The starting point of the jump is the end of the current instruction.
+ dest += int64(offset + asm.InstructionSize)
+
+ if dest < 0 {
+ return 0
+ }
+
+ return uint64(dest)
+}
+
+var errUnsupportedBinding = errors.New("unsupported binding")
+
+func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
+ var (
+ typ = elf.ST_TYPE(rel.Info)
+ bind = elf.ST_BIND(rel.Info)
+ name = rel.Name
+ )
+
+ target := ec.sections[rel.Section]
+
+ switch target.kind {
+ case mapSection, btfMapSection:
+ if bind == elf.STB_LOCAL {
+ return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name)
+ }
+
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE {
+ // STT_NOTYPE is generated on clang < 8 which doesn't tag
+ // relocations appropriately.
+ return fmt.Errorf("map load: incorrect relocation type %v", typ)
+ }
+
+ ins.Src = asm.PseudoMapFD
+
+ case dataSection:
+ var offset uint32
+ switch typ {
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ // This is really a reference to a static symbol, which clang doesn't
+ // emit a symbol table entry for. Instead it encodes the offset in
+ // the instruction itself.
+ offset = uint32(uint64(ins.Constant))
+
+ case elf.STT_OBJECT:
+ // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants.
+ if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL {
+ return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ offset = uint32(rel.Value)
+
+ case elf.STT_NOTYPE:
+ // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants.
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ offset = uint32(rel.Value)
+
+ default:
+ return fmt.Errorf("incorrect relocation type %v for direct map load", typ)
+ }
+
+ // We rely on using the name of the data section as the reference. It
+ // would be nicer to keep the real name in case of an STT_OBJECT, but
+ // it's not clear how to encode that into Instruction.
+ name = target.Name
+
+ // The kernel expects the offset in the second basic BPF instruction.
+ ins.Constant = int64(uint64(offset) << 32)
+ ins.Src = asm.PseudoMapValue
+
+ case programSection:
+ switch opCode := ins.OpCode; {
+ case opCode.JumpOp() == asm.Call:
+ if ins.Src != asm.PseudoCall {
+ return fmt.Errorf("call: %s: incorrect source register", name)
+ }
+
+ switch typ {
+ case elf.STT_NOTYPE, elf.STT_FUNC:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ // The function we want to call is in the indicated section,
+ // at the offset encoded in the instruction itself. Reverse
+ // the calculation to find the real function we're looking for.
+ // A value of -1 references the first instruction in the section.
+ offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
+ sym, ok := target.symbols[uint64(offset)]
+ if !ok {
+ return fmt.Errorf("call: no symbol at offset %d", offset)
+ }
+
+ name = sym.Name
+ ins.Constant = -1
+
+ default:
+ return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
+ }
+ case opCode.IsDWordLoad():
+ switch typ {
+ case elf.STT_FUNC:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ // ins.Constant already contains the offset in bytes from the
+ // start of the section. This is different than a call to a
+ // static function.
+
+ default:
+ return fmt.Errorf("load: %s: invalid symbol type %s", name, typ)
+ }
+
+ sym, ok := target.symbols[uint64(ins.Constant)]
+ if !ok {
+ return fmt.Errorf("load: no symbol at offset %d", ins.Constant)
+ }
+
+ name = sym.Name
+ ins.Constant = -1
+ ins.Src = asm.PseudoFunc
+
+ default:
+ return fmt.Errorf("neither a call nor a load instruction: %v", ins)
+ }
+
+ // The Undefined section is used for 'virtual' symbols that aren't backed by
+ // an ELF section. This includes symbol references from inline asm, forward
+ // function declarations, as well as extern kfunc declarations using __ksym
+ // and extern kconfig variables declared using __kconfig.
+ case undefSection:
+ if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK {
+ return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ if typ != elf.STT_NOTYPE {
+ return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ)
+ }
+
+ kf := ec.kfuncs[name]
+ switch {
+ // If a Call / DWordLoad instruction is found and the datasec has a btf.Func with a Name
+ // that matches the symbol name, we mark the instruction as referencing a kfunc.
+ case kf != nil && ins.OpCode.JumpOp() == asm.Call:
+ ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{
+ Func: kf,
+ Binding: bind,
+ })
+
+ ins.Src = asm.PseudoKfuncCall
+ ins.Constant = -1
+
+ case kf != nil && ins.OpCode.IsDWordLoad():
+ ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{
+ Func: kf,
+ Binding: bind,
+ })
+
+ ins.Constant = 0
+
+ // If no kconfig map is found, this must be a symbol reference from inline
+ // asm (see testdata/loader.c:asm_relocation()) or a call to a forward
+ // function declaration (see testdata/fwd_decl.c). Don't interfere; these
+ // remain standard symbol references.
+ // extern __kconfig reads are represented as dword loads that need to be
+ // rewritten to pseudo map loads from .kconfig. If the map is present,
+ // require it to contain the symbol to disambiguate between inline asm
+ // relos and kconfigs.
+ case ec.kconfig != nil && ins.OpCode.IsDWordLoad():
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars {
+ if vsi.Type.(*btf.Var).Name != rel.Name {
+ continue
+ }
+
+ ins.Src = asm.PseudoMapValue
+ ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset})
+ return nil
+ }
+
+ return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name)
+ }
+
+ default:
+ return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
+ }
+
+ *ins = ins.WithReference(name)
+ return nil
+}
+
+func (ec *elfCode) loadMaps() error {
+ for _, sec := range ec.sections {
+ if sec.kind != mapSection {
+ continue
+ }
+
+ nSym := len(sec.symbols)
+ if nSym == 0 {
+ return fmt.Errorf("section %v: no symbols", sec.Name)
+ }
+
+ if sec.Size%uint64(nSym) != 0 {
+ return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name)
+ }
+
+ var (
+ r = bufio.NewReader(sec.Open())
+ size = sec.Size / uint64(nSym)
+ )
+ for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size {
+ mapSym, ok := sec.symbols[offset]
+ if !ok {
+ return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
+ }
+
+ mapName := mapSym.Name
+ if ec.maps[mapName] != nil {
+ return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
+ }
+
+ lr := io.LimitReader(r, int64(size))
+
+ spec := MapSpec{
+ Name: SanitizeName(mapName, -1),
+ }
+ switch {
+ case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
+ return fmt.Errorf("map %s: missing type", mapName)
+ case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
+ return fmt.Errorf("map %s: missing key size", mapName)
+ case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
+ return fmt.Errorf("map %s: missing value size", mapName)
+ case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
+ return fmt.Errorf("map %s: missing max entries", mapName)
+ case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
+ return fmt.Errorf("map %s: missing flags", mapName)
+ }
+
+ extra, err := io.ReadAll(lr)
+ if err != nil {
+ return fmt.Errorf("map %s: reading map tail: %w", mapName, err)
+ }
+ if len(extra) > 0 {
+ spec.Extra = bytes.NewReader(extra)
+ }
+
+ ec.maps[mapName] = &spec
+ }
+ }
+
+ return nil
+}
+
+// loadBTFMaps iterates over all ELF sections marked as BTF map sections
+// (like .maps) and parses them into MapSpecs. Dump the .maps section and
+// any relocations with `readelf -x .maps -r <elf_file>`.
+func (ec *elfCode) loadBTFMaps() error {
+ for _, sec := range ec.sections {
+ if sec.kind != btfMapSection {
+ continue
+ }
+
+ if ec.btf == nil {
+ return fmt.Errorf("missing BTF")
+ }
+
+ // Each section must appear as a DataSec in the ELF's BTF blob.
+ var ds *btf.Datasec
+ if err := ec.btf.TypeByName(sec.Name, &ds); err != nil {
+ return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err)
+ }
+
+ // Open a Reader to the ELF's raw section bytes so we can assert that all
+ // of them are zero on a per-map (per-Var) basis. For now, the section's
+ // sole purpose is to receive relocations, so all must be zero.
+ rs := sec.Open()
+
+ for _, vs := range ds.Vars {
+ // BPF maps are declared as and assigned to global variables,
+ // so iterate over each Var in the DataSec and validate their types.
+ v, ok := vs.Type.(*btf.Var)
+ if !ok {
+ return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type)
+ }
+ name := string(v.Name)
+
+ // The BTF metadata for each Var contains the full length of the map
+ // declaration, so read the corresponding amount of bytes from the ELF.
+ // This way, we can pinpoint which map declaration contains unexpected
+ // (and therefore unsupported) data.
+ _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size)))
+ if err != nil {
+ return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported)
+ }
+
+ if ec.maps[name] != nil {
+ return fmt.Errorf("section %v: map %s already exists", sec.Name, name)
+ }
+
+ // Each Var representing a BTF map definition contains a Struct.
+ mapStruct, ok := btf.UnderlyingType(v.Type).(*btf.Struct)
+ if !ok {
+ return fmt.Errorf("expected struct, got %s", v.Type)
+ }
+
+ mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false)
+ if err != nil {
+ return fmt.Errorf("map %v: %w", name, err)
+ }
+
+ ec.maps[name] = mapSpec
+ }
+
+ // Drain the ELF section reader to make sure all bytes are accounted for
+ // with BTF metadata.
+ i, err := io.Copy(io.Discard, rs)
+ if err != nil {
+ return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err)
+ }
+ if i > 0 {
+ return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i)
+ }
+ }
+
+ return nil
+}
+
+// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing
+// a BTF map definition. The name and spec arguments will be copied to the
+// resulting MapSpec, and inner must be true on any recursive invocations.
+func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) {
+ var (
+ key, value btf.Type
+ keySize, valueSize uint32
+ mapType MapType
+ flags, maxEntries uint32
+ pinType PinType
+ innerMapSpec *MapSpec
+ contents []MapKV
+ err error
+ )
+
+ for i, member := range def.Members {
+ switch member.Name {
+ case "type":
+ mt, err := uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get type: %w", err)
+ }
+ mapType = MapType(mt)
+
+ case "map_flags":
+ flags, err = uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get BTF map flags: %w", err)
+ }
+
+ case "max_entries":
+ maxEntries, err = uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get BTF map max entries: %w", err)
+ }
+
+ case "key":
+ if keySize != 0 {
+ return nil, errors.New("both key and key_size given")
+ }
+
+ pk, ok := member.Type.(*btf.Pointer)
+ if !ok {
+ return nil, fmt.Errorf("key type is not a pointer: %T", member.Type)
+ }
+
+ key = pk.Target
+
+ size, err := btf.Sizeof(pk.Target)
+ if err != nil {
+ return nil, fmt.Errorf("can't get size of BTF key: %w", err)
+ }
+
+ keySize = uint32(size)
+
+ case "value":
+ if valueSize != 0 {
+ return nil, errors.New("both value and value_size given")
+ }
+
+ vk, ok := member.Type.(*btf.Pointer)
+ if !ok {
+ return nil, fmt.Errorf("value type is not a pointer: %T", member.Type)
+ }
+
+ value = vk.Target
+
+ size, err := btf.Sizeof(vk.Target)
+ if err != nil {
+ return nil, fmt.Errorf("can't get size of BTF value: %w", err)
+ }
+
+ valueSize = uint32(size)
+
+ case "key_size":
+ // Key needs to be nil and keySize needs to be 0 for key_size to be
+ // considered a valid member.
+ if key != nil || keySize != 0 {
+ return nil, errors.New("both key and key_size given")
+ }
+
+ keySize, err = uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get BTF key size: %w", err)
+ }
+
+ case "value_size":
+ // Value needs to be nil and valueSize needs to be 0 for value_size to be
+ // considered a valid member.
+ if value != nil || valueSize != 0 {
+ return nil, errors.New("both value and value_size given")
+ }
+
+ valueSize, err = uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get BTF value size: %w", err)
+ }
+
+ case "pinning":
+ if inner {
+ return nil, errors.New("inner maps can't be pinned")
+ }
+
+ pinning, err := uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get pinning: %w", err)
+ }
+
+ pinType = PinType(pinning)
+
+ case "values":
+ // The 'values' field in BTF map definitions is used for declaring map
+ // value types that are references to other BPF objects, like other maps
+ // or programs. It is always expected to be an array of pointers.
+ if i != len(def.Members)-1 {
+ return nil, errors.New("'values' must be the last member in a BTF map definition")
+ }
+
+ if valueSize != 0 && valueSize != 4 {
+ return nil, errors.New("value_size must be 0 or 4")
+ }
+ valueSize = 4
+
+ valueType, err := resolveBTFArrayMacro(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't resolve type of member 'values': %w", err)
+ }
+
+ switch t := valueType.(type) {
+ case *btf.Struct:
+ // The values member pointing to an array of structs means we're expecting
+ // a map-in-map declaration.
+ if mapType != ArrayOfMaps && mapType != HashOfMaps {
+ return nil, errors.New("outer map needs to be an array or a hash of maps")
+ }
+ if inner {
+ return nil, fmt.Errorf("nested inner maps are not supported")
+ }
+
+ // This inner map spec is used as a map template, but it needs to be
+ // created as a traditional map before it can be used to do so.
+ // libbpf names the inner map template '.inner', but we
+ // opted for _inner to simplify validation logic. (dots only supported
+ // on kernels 5.2 and up)
+ // Pass the BTF spec from the parent object, since both parent and
+ // child must be created from the same BTF blob (on kernels that support BTF).
+ innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err)
+ }
+
+ case *btf.FuncProto:
+ // The values member contains an array of function pointers, meaning an
+ // autopopulated PROG_ARRAY.
+ if mapType != ProgramArray {
+ return nil, errors.New("map needs to be a program array")
+ }
+
+ default:
+ return nil, fmt.Errorf("unsupported value type %q in 'values' field", t)
+ }
+
+ contents, err = resolveBTFValuesContents(es, vs, member)
+ if err != nil {
+ return nil, fmt.Errorf("resolving values contents: %w", err)
+ }
+
+ case "map_extra":
+ return nil, fmt.Errorf("BTF map definition: field %s: %w", member.Name, ErrNotSupported)
+
+ default:
+ return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
+ }
+ }
+
+ return &MapSpec{
+ Name: SanitizeName(name, -1),
+ Type: MapType(mapType),
+ KeySize: keySize,
+ ValueSize: valueSize,
+ MaxEntries: maxEntries,
+ Flags: flags,
+ Key: key,
+ Value: value,
+ Pinning: pinType,
+ InnerMap: innerMapSpec,
+ Contents: contents,
+ }, nil
+}
+
+// uintFromBTF resolves the __uint macro, which is a pointer to a sized
+// array, e.g. for int (*foo)[10], this function will return 10.
+func uintFromBTF(typ btf.Type) (uint32, error) {
+ ptr, ok := typ.(*btf.Pointer)
+ if !ok {
+ return 0, fmt.Errorf("not a pointer: %v", typ)
+ }
+
+ arr, ok := ptr.Target.(*btf.Array)
+ if !ok {
+ return 0, fmt.Errorf("not a pointer to array: %v", typ)
+ }
+
+ return arr.Nelems, nil
+}
+
+// resolveBTFArrayMacro resolves the __array macro, which declares an array
+// of pointers to a given type. This function returns the target Type of
+// the pointers in the array.
+func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) {
+ arr, ok := typ.(*btf.Array)
+ if !ok {
+ return nil, fmt.Errorf("not an array: %v", typ)
+ }
+
+ ptr, ok := arr.Type.(*btf.Pointer)
+ if !ok {
+ return nil, fmt.Errorf("not an array of pointers: %v", typ)
+ }
+
+ return ptr.Target, nil
+}
+
+// resolveBTFValuesContents resolves relocations into ELF sections belonging
+// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map
+// definitions to extract static declarations of map contents.
+func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) {
+ // The elements of a .values pointer array are not encoded in BTF.
+ // Instead, relocations are generated into each array index.
+ // However, it's possible to leave certain array indices empty, so all
+ // indices' offsets need to be checked for emitted relocations.
+
+ // The offset of the 'values' member within the _struct_ (in bits)
+ // is the starting point of the array. Convert to bytes. Add VarSecinfo
+ // offset to get the absolute position in the ELF blob.
+ start := member.Offset.Bytes() + vs.Offset
+ // 'values' is encoded in BTF as a zero (variable) length struct
+ // member, and its contents run until the end of the VarSecinfo.
+ // Add VarSecinfo offset to get the absolute position in the ELF blob.
+ end := vs.Size + vs.Offset
+ // The size of an address in this section. This determines the width of
+ // an index in the array.
+ align := uint32(es.SectionHeader.Addralign)
+
+ // Check if variable-length section is aligned.
+ if (end-start)%align != 0 {
+ return nil, errors.New("unaligned static values section")
+ }
+ elems := (end - start) / align
+
+ if elems == 0 {
+ return nil, nil
+ }
+
+ contents := make([]MapKV, 0, elems)
+
+ // k is the array index, off is its corresponding ELF section offset.
+ for k, off := uint32(0), start; k < elems; k, off = k+1, off+align {
+ r, ok := es.relocations[uint64(off)]
+ if !ok {
+ continue
+ }
+
+ // Relocation exists for the current offset in the ELF section.
+ // Emit a value stub based on the type of relocation to be replaced by
+ // a real fd later in the pipeline before populating the map.
+ // Map keys are encoded in MapKV entries, so empty array indices are
+ // skipped here.
+ switch t := elf.ST_TYPE(r.Info); t {
+ case elf.STT_FUNC:
+ contents = append(contents, MapKV{uint32(k), r.Name})
+ case elf.STT_OBJECT:
+ contents = append(contents, MapKV{uint32(k), r.Name})
+ default:
+ return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name)
+ }
+ }
+
+ return contents, nil
+}
+
+func (ec *elfCode) loadDataSections() error {
+ for _, sec := range ec.sections {
+ if sec.kind != dataSection {
+ continue
+ }
+
+ if sec.references == 0 {
+ // Prune data sections which are not referenced by any
+ // instructions.
+ continue
+ }
+
+ mapSpec := &MapSpec{
+ Name: SanitizeName(sec.Name, -1),
+ Type: Array,
+ KeySize: 4,
+ ValueSize: uint32(sec.Size),
+ MaxEntries: 1,
+ }
+
+ switch sec.Type {
+ // Only open the section if we know there's actual data to be read.
+ case elf.SHT_PROGBITS:
+ data, err := sec.Data()
+ if err != nil {
+ return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
+ }
+
+ if uint64(len(data)) > math.MaxUint32 {
+ return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
+ }
+ mapSpec.Contents = []MapKV{{uint32(0), data}}
+
+ case elf.SHT_NOBITS:
+ // NOBITS sections like .bss contain only zeroes, and since data sections
+ // are Arrays, the kernel already preallocates them. Skip reading zeroes
+ // from the ELF.
+ default:
+ return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type)
+ }
+
+ // It is possible for a data section to exist without a corresponding BTF Datasec
+ // if it only contains anonymous values like macro-defined arrays.
+ if ec.btf != nil {
+ var ds *btf.Datasec
+ if ec.btf.TypeByName(sec.Name, &ds) == nil {
+ // Assign the spec's key and BTF only if the Datasec lookup was successful.
+ mapSpec.Key = &btf.Void{}
+ mapSpec.Value = ds
+ }
+ }
+
+ if strings.HasPrefix(sec.Name, ".rodata") {
+ mapSpec.Flags = unix.BPF_F_RDONLY_PROG
+ mapSpec.Freeze = true
+ }
+
+ ec.maps[sec.Name] = mapSpec
+ }
+
+ return nil
+}
+
+// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKconfigSection() error {
+ if ec.btf == nil {
+ return nil
+ }
+
+ var ds *btf.Datasec
+ err := ec.btf.TypeByName(".kconfig", &ds)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if ds.Size == 0 {
+ return errors.New("zero-length .kconfig")
+ }
+
+ ec.kconfig = &MapSpec{
+ Name: ".kconfig",
+ Type: Array,
+ KeySize: uint32(4),
+ ValueSize: ds.Size,
+ MaxEntries: 1,
+ Flags: unix.BPF_F_RDONLY_PROG,
+ Freeze: true,
+ Key: &btf.Int{Size: 4},
+ Value: ds,
+ }
+
+ return nil
+}
+
+// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKsymsSection() error {
+ if ec.btf == nil {
+ return nil
+ }
+
+ var ds *btf.Datasec
+ err := ec.btf.TypeByName(".ksyms", &ds)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ for _, v := range ds.Vars {
+ // We have already checked that the .ksyms Datasec only contains Func Vars.
+ ec.kfuncs[v.Type.TypeName()] = v.Type.(*btf.Func)
+ }
+
+ return nil
+}
+
+type libbpfElfSectionDef struct {
+ pattern string
+ programType sys.ProgType
+ attachType sys.AttachType
+ flags libbpfElfSectionFlag
+}
+
+type libbpfElfSectionFlag uint32
+
+// The values correspond to enum sec_def_flags in libbpf.
+const (
+ _SEC_NONE libbpfElfSectionFlag = 0
+
+ _SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1)
+ _SEC_ATTACHABLE
+ _SEC_ATTACH_BTF
+ _SEC_SLEEPABLE
+ _SEC_XDP_FRAGS
+ _SEC_USDT
+
+ // Ignore any present extra in order to preserve backwards compatibility
+ // with earlier versions of the library.
+ ignoreExtra
+
+ _SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT
+)
+
+func init() {
+ // Compatibility with older versions of the library.
+ // We prepend libbpf definitions since they contain a prefix match
+ // for "xdp".
+ elfSectionDefs = append([]libbpfElfSectionDef{
+ {"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra},
+ {"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
+ {"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0},
+ {"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
+ {"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0},
+ // This has been in the library since the beginning of time. Not sure
+ // where it came from.
+ {"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
+ }, elfSectionDefs...)
+}
+
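+// getProgType matches an ELF section name against the known section
+// definitions and returns the corresponding program type, attach type, load
+// flags and attach-to string. For example, a program in section
+// "kprobe/sys_open" gets the kprobe program type with "sys_open" as the
+// attach-to value.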
+func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
+ // Skip optional program marking for now.
+ sectionName = strings.TrimPrefix(sectionName, "?")
+
+ for _, t := range elfSectionDefs {
+ extra, ok := matchSectionName(sectionName, t.pattern)
+ if !ok {
+ continue
+ }
+
+ programType := ProgramType(t.programType)
+ attachType := AttachType(t.attachType)
+
+ var flags uint32
+ if t.flags&_SEC_SLEEPABLE > 0 {
+ flags |= unix.BPF_F_SLEEPABLE
+ }
+ if t.flags&_SEC_XDP_FRAGS > 0 {
+ flags |= unix.BPF_F_XDP_HAS_FRAGS
+ }
+ if t.flags&_SEC_EXP_ATTACH_OPT > 0 {
+ if programType == XDP {
+ // The library doesn't yet have code to fallback to not specifying
+ // attach type. Only do this for XDP since we've enforced correct
+ // attach type for all other program types.
+ attachType = AttachNone
+ }
+ }
+ if t.flags&ignoreExtra > 0 {
+ extra = ""
+ }
+
+ return programType, attachType, flags, extra
+ }
+
+ return UnspecifiedProgram, AttachNone, 0, ""
+}
+
+// matchSectionName checks a section name against a pattern.
+//
+// Its behaviour mirrors that of libbpf's sec_def_matches.
+func matchSectionName(sectionName, pattern string) (extra string, found bool) {
+ have, extra, found := strings.Cut(sectionName, "/")
+ want := strings.TrimRight(pattern, "+/")
+
+ if strings.HasSuffix(pattern, "/") {
+ // Section name must have a slash and extra may be empty.
+ return extra, have == want && found
+ } else if strings.HasSuffix(pattern, "+") {
+ // Section name may have a slash and extra may be empty.
+ return extra, have == want
+ }
+
+ // Section name must have a prefix. extra is ignored.
+ return "", strings.HasPrefix(sectionName, pattern)
+}
+
+func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
+ rels := make(map[uint64]elf.Symbol)
+
+ if sec.Entsize < 16 {
+ return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
+ }
+
+ r := bufio.NewReader(sec.Open())
+ for off := uint64(0); off < sec.Size; off += sec.Entsize {
+ ent := io.LimitReader(r, int64(sec.Entsize))
+
+ var rel elf.Rel64
+ if binary.Read(ent, ec.ByteOrder, &rel) != nil {
+ return nil, fmt.Errorf("can't parse relocation at offset %v", off)
+ }
+
+ symNo := int(elf.R_SYM64(rel.Info) - 1)
+ if symNo >= len(symbols) {
+ return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo)
+ }
+
+ symbol := symbols[symNo]
+ rels[rel.Off] = symbol
+ }
+
+ return rels, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/elf_sections.go b/vendor/github.com/cilium/ebpf/elf_sections.go
new file mode 100644
index 000000000..4b58251d9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/elf_sections.go
@@ -0,0 +1,109 @@
+// Code generated by internal/cmd/gensections.awk; DO NOT EDIT.
+
+package ebpf
+
+// Code in this file is derived from libbpf, available under BSD-2-Clause.
+
+import "github.com/cilium/ebpf/internal/sys"
+
+var elfSectionDefs = []libbpfElfSectionDef{
+ {"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
+ {"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE},
+ {"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE},
+ {"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+ {"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+ {"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE},
+ {"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+ {"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+ {"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE},
+ {"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE},
+ {"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE},
+ {"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE},
+ {"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE},
+ {"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE},
+ {"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE},
+ {"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+ {"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+ {"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT},
+ {"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE},
+ {"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE},
+ {"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE},
+ {"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE},
+ {"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE},
+ {"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE},
+ {"classifier", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE},
+ {"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE},
+ {"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE},
+ {"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE},
+ {"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE},
+ {"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE},
+ {"raw_tracepoint+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE},
+ {"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE},
+ {"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE},
+ {"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE},
+ {"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF},
+ {"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF},
+ {"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF},
+ {"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF},
+ {"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+ {"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+ {"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+ {"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF},
+ {"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF},
+ {"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+ {"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF},
+ {"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF},
+ {"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+ {"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE},
+ {"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
+ {"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE},
+ {"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
+ {"xdp/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE},
+ {"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS},
+ {"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT},
+ {"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE},
+ {"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE},
+ {"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE},
+ {"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE},
+ {"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE},
+ {"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT},
+ {"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT},
+ {"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT},
+ {"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE},
+ {"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT},
+ {"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT},
+ {"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT},
+ {"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT},
+ {"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT},
+ {"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE},
+ {"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE},
+ {"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE},
+ {"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT},
+ {"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, _SEC_ATTACHABLE},
+ {"cgroup/post_bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE},
+ {"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE},
+ {"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE},
+ {"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE},
+ {"cgroup/connect6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE},
+ {"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE},
+ {"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE},
+ {"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE},
+ {"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE},
+ {"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE},
+ {"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE},
+ {"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE},
+ {"cgroup/getpeername4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE},
+ {"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE},
+ {"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE},
+ {"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE},
+ {"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE},
+ {"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE},
+ {"cgroup/sysctl", sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE},
+ {"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE},
+ {"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE},
+ {"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT},
+ {"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE},
+ {"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE},
+ {"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE},
+ {"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE},
+}
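The table above is consulted by getProgType in elf_reader.go, which strips an optional leading "?" marker and tries each pattern in order via matchSectionName. A minimal sketch of how a SEC() name resolves; the calls use unexported helpers, so this assumes code inside package ebpf:

func resolveSectionExample() {
	pt, at, flags, extra := getProgType("kprobe/sys_execve")
	// pt == Kprobe, at == AttachNone, flags == 0, extra == "sys_execve"

	pt, at, flags, _ = getProgType("xdp.frags")
	// pt == XDP, at == AttachXDP, flags has unix.BPF_F_XDP_HAS_FRAGS set
	_, _, _, _ = pt, at, flags, extra
}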
diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go
new file mode 100644
index 000000000..04c60c64b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/info.go
@@ -0,0 +1,499 @@
+package ebpf
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// The *Info structs expose metadata about a program or map. Most
+// fields are exposed via a getter:
+//
+// func (*MapInfo) ID() (MapID, bool)
+//
+// This is because the metadata available changes based on kernel version.
+// The second boolean return value indicates whether a particular field is
+// available on the current kernel.
+//
+// Always add new metadata as such a getter, unless you can somehow get the
+// value of the field on all supported kernels. Also document which version
+// a particular field first appeared in.
+//
+// Some metadata is a buffer which needs additional parsing. In this case,
+// store the undecoded data in the Info struct and provide a getter which
+// decodes it when necessary. See ProgramInfo.Instructions for an example.
+
+// MapInfo describes a map.
+type MapInfo struct {
+ Type MapType
+ id MapID
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+ Flags uint32
+ // Name as supplied by user space at load time. Available from 4.15.
+ Name string
+
+ btf btf.ID
+}
+
+func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
+ var info sys.MapInfo
+ err := sys.ObjInfo(fd, &info)
+ if errors.Is(err, syscall.EINVAL) {
+ return newMapInfoFromProc(fd)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &MapInfo{
+ MapType(info.Type),
+ MapID(info.Id),
+ info.KeySize,
+ info.ValueSize,
+ info.MaxEntries,
+ uint32(info.MapFlags),
+ unix.ByteSliceToString(info.Name[:]),
+ btf.ID(info.BtfId),
+ }, nil
+}
+
+func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) {
+ var mi MapInfo
+ err := scanFdInfo(fd, map[string]interface{}{
+ "map_type": &mi.Type,
+ "key_size": &mi.KeySize,
+ "value_size": &mi.ValueSize,
+ "max_entries": &mi.MaxEntries,
+ "map_flags": &mi.Flags,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &mi, nil
+}
+
+// ID returns the map ID.
+//
+// Available from 4.13.
+//
+// The bool return value indicates whether this optional field is available.
+func (mi *MapInfo) ID() (MapID, bool) {
+ return mi.id, mi.id > 0
+}
+
+// BTFID returns the BTF ID associated with the Map.
+//
+// The ID is only valid as long as the associated Map is kept alive.
+// Available from 4.18.
+//
+// The bool return value indicates whether this optional field is available and
+// populated. (The field may be available but not populated if the kernel
+// supports the field but the Map was loaded without BTF information.)
+func (mi *MapInfo) BTFID() (btf.ID, bool) {
+ return mi.btf, mi.btf > 0
+}
+
+// programStats holds statistics of a program.
+type programStats struct {
+	// Total accumulated runtime of the program in ns.
+ runtime time.Duration
+ // Total number of times the program was called.
+ runCount uint64
+	// Total number of times the program was NOT called.
+ // Added in commit 9ed9e9ba2337 ("bpf: Count the number of times recursion was prevented").
+ recursionMisses uint64
+}
+
+// ProgramInfo describes a program.
+type ProgramInfo struct {
+ Type ProgramType
+ id ProgramID
+ // Truncated hash of the BPF bytecode. Available from 4.13.
+ Tag string
+ // Name as supplied by user space at load time. Available from 4.15.
+ Name string
+
+ createdByUID uint32
+ haveCreatedByUID bool
+ btf btf.ID
+ stats *programStats
+
+ maps []MapID
+ insns []byte
+
+ lineInfos []byte
+ numLineInfos uint32
+ funcInfos []byte
+ numFuncInfos uint32
+}
+
+func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
+ var info sys.ProgInfo
+ err := sys.ObjInfo(fd, &info)
+ if errors.Is(err, syscall.EINVAL) {
+ return newProgramInfoFromProc(fd)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ pi := ProgramInfo{
+ Type: ProgramType(info.Type),
+ id: ProgramID(info.Id),
+ Tag: hex.EncodeToString(info.Tag[:]),
+ Name: unix.ByteSliceToString(info.Name[:]),
+ btf: btf.ID(info.BtfId),
+ stats: &programStats{
+ runtime: time.Duration(info.RunTimeNs),
+ runCount: info.RunCnt,
+ recursionMisses: info.RecursionMisses,
+ },
+ }
+
+ // Start with a clean struct for the second call, otherwise we may get EFAULT.
+ var info2 sys.ProgInfo
+
+ makeSecondCall := false
+
+ if info.NrMapIds > 0 {
+ pi.maps = make([]MapID, info.NrMapIds)
+ info2.NrMapIds = info.NrMapIds
+ info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0]))
+ makeSecondCall = true
+ } else if haveProgramInfoMapIDs() == nil {
+ // This program really has no associated maps.
+ pi.maps = make([]MapID, 0)
+ } else {
+ // The kernel doesn't report associated maps.
+ pi.maps = nil
+ }
+
+ // createdByUID and NrMapIds were introduced in the same kernel version.
+ if pi.maps != nil {
+ pi.createdByUID = info.CreatedByUid
+ pi.haveCreatedByUID = true
+ }
+
+ if info.XlatedProgLen > 0 {
+ pi.insns = make([]byte, info.XlatedProgLen)
+ info2.XlatedProgLen = info.XlatedProgLen
+ info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns)
+ makeSecondCall = true
+ }
+
+ if info.NrLineInfo > 0 {
+ pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo)
+ info2.LineInfo = sys.NewSlicePointer(pi.lineInfos)
+ info2.LineInfoRecSize = btf.LineInfoSize
+ info2.NrLineInfo = info.NrLineInfo
+ pi.numLineInfos = info.NrLineInfo
+ makeSecondCall = true
+ }
+
+ if info.NrFuncInfo > 0 {
+ pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo)
+ info2.FuncInfo = sys.NewSlicePointer(pi.funcInfos)
+ info2.FuncInfoRecSize = btf.FuncInfoSize
+ info2.NrFuncInfo = info.NrFuncInfo
+ pi.numFuncInfos = info.NrFuncInfo
+ makeSecondCall = true
+ }
+
+ if makeSecondCall {
+ if err := sys.ObjInfo(fd, &info2); err != nil {
+ return nil, err
+ }
+ }
+
+ return &pi, nil
+}
+
+func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) {
+ var info ProgramInfo
+ err := scanFdInfo(fd, map[string]interface{}{
+ "prog_type": &info.Type,
+ "prog_tag": &info.Tag,
+ })
+ if errors.Is(err, errMissingFields) {
+ return nil, &internal.UnsupportedFeatureError{
+ Name: "reading program info from /proc/self/fdinfo",
+ MinimumVersion: internal.Version{4, 10, 0},
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &info, nil
+}
+
+// ID returns the program ID.
+//
+// Available from 4.13.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) ID() (ProgramID, bool) {
+ return pi.id, pi.id > 0
+}
+
+// CreatedByUID returns the UID that created the program.
+//
+// Available from 4.15.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) CreatedByUID() (uint32, bool) {
+ return pi.createdByUID, pi.haveCreatedByUID
+}
+
+// BTFID returns the BTF ID associated with the program.
+//
+// The ID is only valid as long as the associated program is kept alive.
+// Available from 5.0.
+//
+// The bool return value indicates whether this optional field is available and
+// populated. (The field may be available but not populated if the kernel
+// supports the field but the program was loaded without BTF information.)
+func (pi *ProgramInfo) BTFID() (btf.ID, bool) {
+ return pi.btf, pi.btf > 0
+}
+
+// RunCount returns the total number of times the program was called.
+//
+// Can return 0 if the collection of statistics is not enabled. See EnableStats().
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) RunCount() (uint64, bool) {
+ if pi.stats != nil {
+ return pi.stats.runCount, true
+ }
+ return 0, false
+}
+
+// Runtime returns the total accumulated runtime of the program.
+//
+// Can return 0 if the collection of statistics is not enabled. See EnableStats().
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
+ if pi.stats != nil {
+ return pi.stats.runtime, true
+ }
+ return time.Duration(0), false
+}
+
+// RecursionMisses returns the total number of times the program was NOT called.
+// This can happen when another bpf program is already running on the cpu, which
+// is likely to happen for example when you interrupt bpf program execution.
+func (pi *ProgramInfo) RecursionMisses() (uint64, bool) {
+ if pi.stats != nil {
+ return pi.stats.recursionMisses, true
+ }
+ return 0, false
+}
+
+// Instructions returns the 'xlated' instruction stream of the program
+// after it has been verified and rewritten by the kernel. These instructions
+// cannot be loaded back into the kernel as-is; this is mainly used for
+// inspecting loaded programs for troubleshooting, dumping, etc.
+//
+// For example, map accesses are made to reference their kernel map IDs,
+// not the FDs they had when the program was inserted. Note that before
+// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated
+// instructions were not sanitized, making the output even less reusable
+// and less likely to round-trip or evaluate to the same program Tag.
+//
+// The first instruction is marked as a symbol using the Program's name.
+//
+// If available, the instructions will be annotated with metadata from the
+// BTF. This includes line information and function information. Reading
+// this metadata requires CAP_SYS_ADMIN or equivalent. If capability is
+// unavailable, the instructions will be returned without metadata.
+//
+// Available from 4.13. Requires CAP_BPF or equivalent for plain instructions.
+// Requires CAP_SYS_ADMIN for instructions with metadata.
+func (pi *ProgramInfo) Instructions() (asm.Instructions, error) {
+ // If the calling process is not BPF-capable or if the kernel doesn't
+ // support getting xlated instructions, the field will be zero.
+ if len(pi.insns) == 0 {
+ return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported)
+ }
+
+ r := bytes.NewReader(pi.insns)
+ var insns asm.Instructions
+ if err := insns.Unmarshal(r, internal.NativeEndian); err != nil {
+ return nil, fmt.Errorf("unmarshaling instructions: %w", err)
+ }
+
+ if pi.btf != 0 {
+ btfh, err := btf.NewHandleFromID(pi.btf)
+ if err != nil {
+ // Getting a BTF handle requires CAP_SYS_ADMIN, if not available we get an -EPERM.
+ // Ignore it and fall back to instructions without metadata.
+ if !errors.Is(err, unix.EPERM) {
+ return nil, fmt.Errorf("unable to get BTF handle: %w", err)
+ }
+ }
+
+ // If we have a BTF handle, we can use it to assign metadata to the instructions.
+ if btfh != nil {
+ defer btfh.Close()
+
+ spec, err := btfh.Spec(nil)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get BTF spec: %w", err)
+ }
+
+ lineInfos, err := btf.LoadLineInfos(
+ bytes.NewReader(pi.lineInfos),
+ internal.NativeEndian,
+ pi.numLineInfos,
+ spec,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("parse line info: %w", err)
+ }
+
+ funcInfos, err := btf.LoadFuncInfos(
+ bytes.NewReader(pi.funcInfos),
+ internal.NativeEndian,
+ pi.numFuncInfos,
+ spec,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("parse func info: %w", err)
+ }
+
+ btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{})
+ }
+ }
+
+ fn := btf.FuncMetadata(&insns[0])
+ name := pi.Name
+ if fn != nil {
+ name = fn.Name
+ }
+ insns[0] = insns[0].WithSymbol(name)
+
+ return insns, nil
+}
+
+// MapIDs returns the maps related to the program.
+//
+// Available from 4.15.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) MapIDs() ([]MapID, bool) {
+ return pi.maps, pi.maps != nil
+}
+
+func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error {
+ fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int()))
+ if err != nil {
+ return err
+ }
+ defer fh.Close()
+
+ if err := scanFdInfoReader(fh, fields); err != nil {
+ return fmt.Errorf("%s: %w", fh.Name(), err)
+ }
+ return nil
+}
+
+var errMissingFields = errors.New("missing fields")
+
+func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
+ var (
+ scanner = bufio.NewScanner(r)
+ scanned int
+ )
+
+ for scanner.Scan() {
+ parts := strings.SplitN(scanner.Text(), "\t", 2)
+ if len(parts) != 2 {
+ continue
+ }
+
+ name := strings.TrimSuffix(parts[0], ":")
+ field, ok := fields[string(name)]
+ if !ok {
+ continue
+ }
+
+ if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 {
+ return fmt.Errorf("can't parse field %s: %v", name, err)
+ }
+
+ scanned++
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ if len(fields) > 0 && scanned == 0 {
+ return ErrNotSupported
+ }
+
+ if scanned != len(fields) {
+ return errMissingFields
+ }
+
+ return nil
+}
+
+// EnableStats starts the measuring of the runtime
+// and run counts of eBPF programs.
+//
+// Collecting statistics can have an impact on the performance.
+//
+// Requires at least 5.8.
+func EnableStats(which uint32) (io.Closer, error) {
+ fd, err := sys.EnableStats(&sys.EnableStatsAttr{
+ Type: which,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return fd, nil
+}
+
+var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", "4.15", func() error {
+ prog, err := progLoad(asm.Instructions{
+ asm.LoadImm(asm.R0, 0, asm.DWord),
+ asm.Return(),
+ }, SocketFilter, "MIT")
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ err = sys.ObjInfo(prog, &sys.ProgInfo{
+ // NB: Don't need to allocate MapIds since the program isn't using
+ // any maps.
+ NrMapIds: 1,
+ })
+ if errors.Is(err, unix.EINVAL) {
+ // Most likely the syscall doesn't exist.
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.E2BIG) {
+ // We've hit check_uarg_tail_zero on older kernels.
+ return internal.ErrNotSupported
+ }
+
+ return err
+})
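For context, this metadata is normally obtained through the exported Program.Info method. A minimal sketch of reading the optional fields, assuming prog is an already loaded *ebpf.Program and the fmt package is imported:

func dumpProgramInfo(prog *ebpf.Program) error {
	info, err := prog.Info()
	if err != nil {
		return err
	}
	if id, ok := info.ID(); ok {
		fmt.Println("program ID:", id) // kernel 4.13+
	}
	if cnt, ok := info.RunCount(); ok {
		fmt.Println("run count:", cnt) // non-zero only when statistics are enabled, see EnableStats
	}
	if insns, err := info.Instructions(); err == nil {
		fmt.Println("xlated instructions:", len(insns))
	}
	return nil
}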
diff --git a/vendor/github.com/cilium/ebpf/internal/auxv.go b/vendor/github.com/cilium/ebpf/internal/auxv.go
new file mode 100644
index 000000000..45fd0d37f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/auxv.go
@@ -0,0 +1,60 @@
+package internal
+
+import (
+ "errors"
+ "io"
+ _ "unsafe"
+)
+
+type auxvPairReader interface {
+ Close() error
+ ReadAuxvPair() (uint64, uint64, error)
+}
+
+// See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h
+const (
+ _AT_NULL = 0 // End of vector
+ _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+type auxvRuntimeReader struct {
+ data []uintptr
+ index int
+}
+
+func (r *auxvRuntimeReader) Close() error {
+ return nil
+}
+
+func (r *auxvRuntimeReader) ReadAuxvPair() (uint64, uint64, error) {
+ if r.index >= len(r.data)+2 {
+ return 0, 0, io.EOF
+ }
+
+	// We manually add the (_AT_NULL, _AT_NULL) pair at the end,
+	// which is not provided by the Go runtime.
+ var tag, value uintptr
+ if r.index+1 < len(r.data) {
+ tag, value = r.data[r.index], r.data[r.index+1]
+ } else {
+ tag, value = _AT_NULL, _AT_NULL
+ }
+ r.index += 2
+ return uint64(tag), uint64(value), nil
+}
+
+func newAuxvRuntimeReader() (auxvPairReader, error) {
+ data := runtime_getAuxv()
+
+ if len(data)%2 != 0 {
+ return nil, errors.New("malformed auxv passed from runtime")
+ }
+
+ return &auxvRuntimeReader{
+ data: data,
+ index: 0,
+ }, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/buffer.go b/vendor/github.com/cilium/ebpf/internal/buffer.go
new file mode 100644
index 000000000..81c654433
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/buffer.go
@@ -0,0 +1,31 @@
+package internal
+
+import (
+ "bytes"
+ "sync"
+)
+
+var bytesBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+// NewBuffer retrieves a [bytes.Buffer] from a pool and re-initialises it.
+//
+// The returned buffer should be passed to [PutBuffer].
+func NewBuffer(buf []byte) *bytes.Buffer {
+ wr := bytesBufferPool.Get().(*bytes.Buffer)
+ // Reinitialize the Buffer with a new backing slice since it is returned to
+ // the caller by wr.Bytes() below. Pooling is faster despite calling
+ // NewBuffer. The pooled alloc is still reused, it only needs to be zeroed.
+ *wr = *bytes.NewBuffer(buf)
+ return wr
+}
+
+// PutBuffer releases a buffer to the pool.
+func PutBuffer(buf *bytes.Buffer) {
+ // Release reference to the backing buffer.
+ *buf = *bytes.NewBuffer(nil)
+ bytesBufferPool.Put(buf)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/deque.go b/vendor/github.com/cilium/ebpf/internal/deque.go
new file mode 100644
index 000000000..e3a305021
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/deque.go
@@ -0,0 +1,91 @@
+package internal
+
+import "math/bits"
+
+// Deque implements a double ended queue.
+type Deque[T any] struct {
+ elems []T
+ read, write uint64
+ mask uint64
+}
+
+// Reset clears the contents of the deque while retaining the backing buffer.
+func (dq *Deque[T]) Reset() {
+ var zero T
+
+ for i := dq.read; i < dq.write; i++ {
+ dq.elems[i&dq.mask] = zero
+ }
+
+ dq.read, dq.write = 0, 0
+}
+
+func (dq *Deque[T]) Empty() bool {
+ return dq.read == dq.write
+}
+
+// Push adds an element to the end.
+func (dq *Deque[T]) Push(e T) {
+ dq.Grow(1)
+ dq.elems[dq.write&dq.mask] = e
+ dq.write++
+}
+
+// Shift returns the first element or the zero value.
+func (dq *Deque[T]) Shift() T {
+ var zero T
+
+ if dq.Empty() {
+ return zero
+ }
+
+ index := dq.read & dq.mask
+ t := dq.elems[index]
+ dq.elems[index] = zero
+ dq.read++
+ return t
+}
+
+// Pop returns the last element or the zero value.
+func (dq *Deque[T]) Pop() T {
+ var zero T
+
+ if dq.Empty() {
+ return zero
+ }
+
+ dq.write--
+ index := dq.write & dq.mask
+ t := dq.elems[index]
+ dq.elems[index] = zero
+ return t
+}
+
+// Grow the deque's capacity, if necessary, to guarantee space for another n
+// elements.
+func (dq *Deque[T]) Grow(n int) {
+ have := dq.write - dq.read
+ need := have + uint64(n)
+ if need < have {
+ panic("overflow")
+ }
+ if uint64(len(dq.elems)) >= need {
+ return
+ }
+
+ // Round up to the new power of two which is at least 8.
+ // See https://jameshfisher.com/2018/03/30/round-up-power-2/
+ capacity := 1 << (64 - bits.LeadingZeros64(need-1))
+ if capacity < 8 {
+ capacity = 8
+ }
+
+ elems := make([]T, have, capacity)
+ pivot := dq.read & dq.mask
+ copied := copy(elems, dq.elems[pivot:])
+ copy(elems[copied:], dq.elems[:pivot])
+
+ dq.elems = elems[:capacity]
+ dq.mask = uint64(capacity) - 1
+ dq.read, dq.write = 0, have
+}
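A short usage sketch based on the code above: Push combined with Shift gives queue (FIFO) behaviour, Push combined with Pop gives stack (LIFO) behaviour, and the backing slice grows in powers of two:

func dequeExample() {
	var dq internal.Deque[int]
	dq.Push(1)
	dq.Push(2)
	dq.Push(3)
	first := dq.Shift() // 1: the oldest element (FIFO)
	last := dq.Pop()    // 3: the newest element (LIFO)
	_, _ = first, last
}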
diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go
new file mode 100644
index 000000000..011581938
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/elf.go
@@ -0,0 +1,102 @@
+package internal
+
+import (
+ "debug/elf"
+ "fmt"
+ "io"
+)
+
+type SafeELFFile struct {
+ *elf.File
+}
+
+// NewSafeELFFile reads an ELF safely.
+//
+// Any panic during parsing is turned into an error. This is necessary since
+// there are a bunch of unfixed bugs in debug/elf.
+//
+// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
+func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ safe = nil
+ err = fmt.Errorf("reading ELF file panicked: %s", r)
+ }()
+
+ file, err := elf.NewFile(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SafeELFFile{file}, nil
+}
+
+// OpenSafeELFFile reads an ELF from a file.
+//
+// It works like NewSafeELFFile, with the exception that safe.Close will
+// close the underlying file.
+func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ safe = nil
+ err = fmt.Errorf("reading ELF file panicked: %s", r)
+ }()
+
+ file, err := elf.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SafeELFFile{file}, nil
+}
+
+// Symbols is the safe version of elf.File.Symbols.
+func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ syms = nil
+ err = fmt.Errorf("reading ELF symbols panicked: %s", r)
+ }()
+
+ syms, err = se.File.Symbols()
+ return
+}
+
+// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
+func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ syms = nil
+ err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
+ }()
+
+ syms, err = se.File.DynamicSymbols()
+ return
+}
+
+// SectionsByType returns all sections in the file with the specified section type.
+func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section {
+ sections := make([]*elf.Section, 0, 1)
+ for _, section := range se.Sections {
+ if section.Type == typ {
+ sections = append(sections, section)
+ }
+ }
+ return sections
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go
new file mode 100644
index 000000000..a37777f21
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go
@@ -0,0 +1,9 @@
+//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
+
+package internal
+
+import "encoding/binary"
+
+// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
+// depending on the host's endianness.
+var NativeEndian = binary.BigEndian
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go
new file mode 100644
index 000000000..6dcd916d5
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go
@@ -0,0 +1,9 @@
+//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64
+
+package internal
+
+import "encoding/binary"
+
+// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
+// depending on the host's endianness.
+var NativeEndian = binary.LittleEndian
diff --git a/vendor/github.com/cilium/ebpf/internal/epoll/poller.go b/vendor/github.com/cilium/ebpf/internal/epoll/poller.go
new file mode 100644
index 000000000..ed1c3a3c8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/epoll/poller.go
@@ -0,0 +1,278 @@
+package epoll
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "runtime"
+ "slices"
+ "sync"
+ "time"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var ErrFlushed = errors.New("data was flushed")
+
+// Poller waits for readiness notifications from multiple file descriptors.
+//
+// The wait can be interrupted by calling Close.
+type Poller struct {
+ // mutexes protect the fields declared below them. If you need to
+ // acquire both at once you must lock epollMu before eventMu.
+ epollMu sync.Mutex
+ epollFd int
+
+ eventMu sync.Mutex
+ closeEvent *eventFd
+ flushEvent *eventFd
+}
+
+func New() (_ *Poller, err error) {
+ closeFDOnError := func(fd int) {
+ if err != nil {
+ unix.Close(fd)
+ }
+ }
+ closeEventFDOnError := func(e *eventFd) {
+ if err != nil {
+ e.close()
+ }
+ }
+
+ epollFd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+ if err != nil {
+ return nil, fmt.Errorf("create epoll fd: %v", err)
+ }
+ defer closeFDOnError(epollFd)
+
+ p := &Poller{epollFd: epollFd}
+ p.closeEvent, err = newEventFd()
+ if err != nil {
+ return nil, err
+ }
+ defer closeEventFDOnError(p.closeEvent)
+
+ p.flushEvent, err = newEventFd()
+ if err != nil {
+ return nil, err
+ }
+ defer closeEventFDOnError(p.flushEvent)
+
+ if err := p.Add(p.closeEvent.raw, 0); err != nil {
+ return nil, fmt.Errorf("add close eventfd: %w", err)
+ }
+
+ if err := p.Add(p.flushEvent.raw, 0); err != nil {
+ return nil, fmt.Errorf("add flush eventfd: %w", err)
+ }
+
+ runtime.SetFinalizer(p, (*Poller).Close)
+ return p, nil
+}
+
+// Close the poller.
+//
+// Interrupts any calls to Wait. Multiple calls to Close are valid, but subsequent
+// calls will return os.ErrClosed.
+func (p *Poller) Close() error {
+ runtime.SetFinalizer(p, nil)
+
+ // Interrupt Wait() via the closeEvent fd if it's currently blocked.
+ if err := p.wakeWaitForClose(); err != nil {
+ return err
+ }
+
+ // Acquire the lock. This ensures that Wait isn't running.
+ p.epollMu.Lock()
+ defer p.epollMu.Unlock()
+
+ // Prevent other calls to Close().
+ p.eventMu.Lock()
+ defer p.eventMu.Unlock()
+
+ if p.epollFd != -1 {
+ unix.Close(p.epollFd)
+ p.epollFd = -1
+ }
+
+ if p.closeEvent != nil {
+ p.closeEvent.close()
+ p.closeEvent = nil
+ }
+
+ if p.flushEvent != nil {
+ p.flushEvent.close()
+ p.flushEvent = nil
+ }
+
+ return nil
+}
+
+// Add an fd to the poller.
+//
+// id is returned by Wait in the unix.EpollEvent.Pad field and may be zero. It
+// must not exceed math.MaxInt32.
+//
+// Add is blocked by Wait.
+func (p *Poller) Add(fd int, id int) error {
+ if int64(id) > math.MaxInt32 {
+ return fmt.Errorf("unsupported id: %d", id)
+ }
+
+ p.epollMu.Lock()
+ defer p.epollMu.Unlock()
+
+ if p.epollFd == -1 {
+ return fmt.Errorf("epoll add: %w", os.ErrClosed)
+ }
+
+ // The representation of EpollEvent isn't entirely accurate.
+ // Pad is fully usable, not just padding. Hence we stuff the
+ // id in there, which allows us to identify the event later (e.g.,
+ // in case of perf events, which CPU sent it).
+ event := unix.EpollEvent{
+ Events: unix.EPOLLIN,
+ Fd: int32(fd),
+ Pad: int32(id),
+ }
+
+ if err := unix.EpollCtl(p.epollFd, unix.EPOLL_CTL_ADD, fd, &event); err != nil {
+ return fmt.Errorf("add fd to epoll: %v", err)
+ }
+
+ return nil
+}
+
+// Wait for events.
+//
+// Returns the number of pending events and any errors.
+//
+// - [os.ErrClosed] if interrupted by [Close].
+// - [ErrFlushed] if interrupted by [Flush].
+// - [os.ErrDeadlineExceeded] if deadline is reached.
+func (p *Poller) Wait(events []unix.EpollEvent, deadline time.Time) (int, error) {
+ p.epollMu.Lock()
+ defer p.epollMu.Unlock()
+
+ if p.epollFd == -1 {
+ return 0, fmt.Errorf("epoll wait: %w", os.ErrClosed)
+ }
+
+ for {
+ timeout := int(-1)
+ if !deadline.IsZero() {
+ msec := time.Until(deadline).Milliseconds()
+ // Deadline is in the past, don't block.
+ msec = max(msec, 0)
+ // Deadline is too far in the future.
+ msec = min(msec, math.MaxInt)
+
+ timeout = int(msec)
+ }
+
+ n, err := unix.EpollWait(p.epollFd, events, timeout)
+ if temp, ok := err.(temporaryError); ok && temp.Temporary() {
+ // Retry the syscall if we were interrupted, see https://github.com/golang/go/issues/20400
+ continue
+ }
+
+ if err != nil {
+ return 0, err
+ }
+
+ if n == 0 {
+ return 0, fmt.Errorf("epoll wait: %w", os.ErrDeadlineExceeded)
+ }
+
+ for i := 0; i < n; {
+ event := events[i]
+ if int(event.Fd) == p.closeEvent.raw {
+ // Since we don't read p.closeEvent the event is never cleared and
+ // we'll keep getting this wakeup until Close() acquires the
+ // lock and sets p.epollFd = -1.
+ return 0, fmt.Errorf("epoll wait: %w", os.ErrClosed)
+ }
+ if int(event.Fd) == p.flushEvent.raw {
+ // read event to prevent it from continuing to wake
+ p.flushEvent.read()
+ err = ErrFlushed
+ events = slices.Delete(events, i, i+1)
+ n -= 1
+ continue
+ }
+ i++
+ }
+
+ return n, err
+ }
+}
+
+type temporaryError interface {
+ Temporary() bool
+}
+
+// wakeWaitForClose unblocks Wait if it's blocked in epoll_wait.
+func (p *Poller) wakeWaitForClose() error {
+ p.eventMu.Lock()
+ defer p.eventMu.Unlock()
+
+ if p.closeEvent == nil {
+ return fmt.Errorf("epoll wake: %w", os.ErrClosed)
+ }
+
+ return p.closeEvent.add(1)
+}
+
+// Flush unblocks Wait if it's blocked in epoll_wait, so that pending samples can be read.
+func (p *Poller) Flush() error {
+ p.eventMu.Lock()
+ defer p.eventMu.Unlock()
+
+ if p.flushEvent == nil {
+ return fmt.Errorf("epoll wake: %w", os.ErrClosed)
+ }
+
+ return p.flushEvent.add(1)
+}
+
+// eventFd wraps a Linux eventfd.
+//
+// An eventfd acts like a counter: writes add to the counter, reads retrieve
+// the counter and reset it to zero. Reads also block if the counter is zero.
+//
+// See man 2 eventfd.
+type eventFd struct {
+ file *os.File
+ // prefer raw over file.Fd(), since the latter puts the file into blocking
+ // mode.
+ raw int
+}
+
+func newEventFd() (*eventFd, error) {
+ fd, err := unix.Eventfd(0, unix.O_CLOEXEC|unix.O_NONBLOCK)
+ if err != nil {
+ return nil, err
+ }
+ file := os.NewFile(uintptr(fd), "event")
+ return &eventFd{file, fd}, nil
+}
+
+func (efd *eventFd) close() error {
+ return efd.file.Close()
+}
+
+func (efd *eventFd) add(n uint64) error {
+ var buf [8]byte
+ internal.NativeEndian.PutUint64(buf[:], n)
+ _, err := efd.file.Write(buf[:])
+ return err
+}
+
+func (efd *eventFd) read() (uint64, error) {
+ var buf [8]byte
+ _, err := efd.file.Read(buf[:])
+ return internal.NativeEndian.Uint64(buf[:]), err
+}
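A hedged usage sketch of the poller; the package is internal, so this assumes code inside the module, and ringFd stands in for any readable descriptor (for example a BPF ring buffer fd):

func pollOnce(ringFd int) error {
	p, err := epoll.New()
	if err != nil {
		return err
	}
	defer p.Close()

	if err := p.Add(ringFd, 0); err != nil { // the id (0) is echoed back in EpollEvent.Pad
		return err
	}

	events := make([]unix.EpollEvent, 1)
	n, err := p.Wait(events, time.Now().Add(100*time.Millisecond))
	switch {
	case errors.Is(err, os.ErrDeadlineExceeded):
		return nil // timed out, nothing was ready
	case errors.Is(err, epoll.ErrFlushed):
		return nil // Flush was called elsewhere; drain any buffered data
	case err != nil:
		return err // closed or failed
	}
	_ = events[:n] // ready descriptors
	return nil
}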
diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go
new file mode 100644
index 000000000..83a371ad3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/errors.go
@@ -0,0 +1,181 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier
+// log buffer.
+//
+// The default error output is a summary of the full log. The latter can be
+// accessed via VerifierError.Log or by formatting the error, see Format.
+func ErrorWithLog(source string, err error, log []byte) *VerifierError {
+ const whitespace = "\t\r\v\n "
+
+ // Convert verifier log C string by truncating it on the first 0 byte
+ // and trimming trailing whitespace before interpreting as a Go string.
+ if i := bytes.IndexByte(log, 0); i != -1 {
+ log = log[:i]
+ }
+
+ log = bytes.Trim(log, whitespace)
+ if len(log) == 0 {
+ return &VerifierError{source, err, nil, false}
+ }
+
+ logLines := bytes.Split(log, []byte{'\n'})
+ lines := make([]string, 0, len(logLines))
+ for _, line := range logLines {
+ // Don't remove leading white space on individual lines. We rely on it
+ // when outputting logs.
+ lines = append(lines, string(bytes.TrimRight(line, whitespace)))
+ }
+
+ return &VerifierError{source, err, lines, false}
+}
+
+// VerifierError includes information from the eBPF verifier.
+//
+// It summarises the log output, see Format if you want to output the full contents.
+type VerifierError struct {
+ source string
+ // The error which caused this error.
+ Cause error
+ // The verifier output split into lines.
+ Log []string
+ // Deprecated: the log is never truncated anymore.
+ Truncated bool
+}
+
+func (le *VerifierError) Unwrap() error {
+ return le.Cause
+}
+
+func (le *VerifierError) Error() string {
+ log := le.Log
+ if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") {
+ // Get rid of "processed 39 insns (limit 1000000) ..." from summary.
+ log = log[:n-1]
+ }
+
+ var b strings.Builder
+ fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error())
+
+ n := len(log)
+ if n == 0 {
+ return b.String()
+ }
+
+ lines := log[n-1:]
+ if n >= 2 && includePreviousLine(log[n-1]) {
+ // Add one more line of context if it aids understanding the error.
+ lines = log[n-2:]
+ }
+
+ for _, line := range lines {
+ b.WriteString(": ")
+ b.WriteString(strings.TrimSpace(line))
+ }
+
+ omitted := len(le.Log) - len(lines)
+ if omitted > 0 {
+ fmt.Fprintf(&b, " (%d line(s) omitted)", omitted)
+ }
+
+ return b.String()
+}
+
+// includePreviousLine returns true if the given line likely is better
+// understood with additional context from the preceding line.
+func includePreviousLine(line string) bool {
+ // We need to find a good trade off between understandable error messages
+ // and too much complexity here. Checking the string prefix is ok, requiring
+ // regular expressions to do it is probably overkill.
+
+ if strings.HasPrefix(line, "\t") {
+ // [13] STRUCT drm_rect size=16 vlen=4
+ // \tx1 type_id=2
+ return true
+ }
+
+ if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' {
+ // 0: (95) exit
+ // R0 !read_ok
+ return true
+ }
+
+ if strings.HasPrefix(line, "invalid bpf_context access") {
+ // 0: (79) r6 = *(u64 *)(r1 +0)
+ // func '__x64_sys_recvfrom' arg0 type FWD is not a struct
+ // invalid bpf_context access off=0 size=8
+ return true
+ }
+
+ return false
+}
+
+// Format the error.
+//
+// Understood verbs are %s and %v, which are equivalent to calling Error(). %v
+// allows outputting additional information using the following flags:
+//
+// %+v: Output the first lines, or all lines if no width is given.
+// %-v: Output the last lines, or all lines if no width is given.
+//
+// Use width to specify how many lines to output. Use the '-' flag to output
+// lines from the end of the log instead of the beginning.
+func (le *VerifierError) Format(f fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ _, _ = io.WriteString(f, le.Error())
+
+ case 'v':
+ n, haveWidth := f.Width()
+ if !haveWidth || n > len(le.Log) {
+ n = len(le.Log)
+ }
+
+ if !f.Flag('+') && !f.Flag('-') {
+ if haveWidth {
+ _, _ = io.WriteString(f, "%!v(BADWIDTH)")
+ return
+ }
+
+ _, _ = io.WriteString(f, le.Error())
+ return
+ }
+
+ if f.Flag('+') && f.Flag('-') {
+ _, _ = io.WriteString(f, "%!v(BADFLAG)")
+ return
+ }
+
+ fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error())
+
+ omitted := len(le.Log) - n
+ lines := le.Log[:n]
+ if f.Flag('-') {
+ // Print last instead of first lines.
+ lines = le.Log[len(le.Log)-n:]
+ if omitted > 0 {
+ fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted)
+ }
+ }
+
+ for _, line := range lines {
+ fmt.Fprintf(f, "\n\t%s", line)
+ }
+
+ if !f.Flag('-') {
+ if omitted > 0 {
+ fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted)
+ }
+ }
+
+ default:
+ fmt.Fprintf(f, "%%!%c(BADVERB)", verb)
+ }
+}
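A short sketch of the formatting verbs described above, assuming err wraps a *VerifierError (the type is re-exported by the top-level ebpf package):

func printVerifierLog(err error) {
	var verr *ebpf.VerifierError
	if errors.As(err, &verr) {
		fmt.Printf("%v\n", verr)    // one-line summary, same as Error()
		fmt.Printf("%+10v\n", verr) // first 10 log lines, with an omitted-lines note if the log is longer
		fmt.Printf("%-10v\n", verr) // last 10 log lines
	}
}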
diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go
new file mode 100644
index 000000000..2b856c735
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/feature.go
@@ -0,0 +1,184 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+)
+
+// ErrNotSupported indicates that a feature is not supported by the current kernel.
+var ErrNotSupported = errors.New("not supported")
+
+// UnsupportedFeatureError is returned by FeatureTest() functions.
+type UnsupportedFeatureError struct {
+ // The minimum Linux mainline version required for this feature.
+ // Used for the error string, and for sanity checking during testing.
+ MinimumVersion Version
+
+ // The name of the feature that isn't supported.
+ Name string
+}
+
+func (ufe *UnsupportedFeatureError) Error() string {
+ if ufe.MinimumVersion.Unspecified() {
+ return fmt.Sprintf("%s not supported", ufe.Name)
+ }
+ return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
+}
+
+// Is indicates that UnsupportedFeatureError is ErrNotSupported.
+func (ufe *UnsupportedFeatureError) Is(target error) bool {
+ return target == ErrNotSupported
+}
+
+// FeatureTest caches the result of a [FeatureTestFn].
+//
+// Fields should not be modified after creation.
+type FeatureTest struct {
+ // The name of the feature being detected.
+ Name string
+ // Version in the form Major.Minor[.Patch].
+ Version string
+ // The feature test itself.
+ Fn FeatureTestFn
+
+ mu sync.RWMutex
+ done bool
+ result error
+}
+
+// FeatureTestFn is used to determine whether the kernel supports
+// a certain feature.
+//
+// The return values have the following semantics:
+//
+// err == ErrNotSupported: the feature is not available
+// err == nil: the feature is available
+// err != nil: the test couldn't be executed
+type FeatureTestFn func() error
+
+// NewFeatureTest is a convenient way to create a single [FeatureTest].
+func NewFeatureTest(name, version string, fn FeatureTestFn) func() error {
+ ft := &FeatureTest{
+ Name: name,
+ Version: version,
+ Fn: fn,
+ }
+
+ return ft.execute
+}
+
+// execute the feature test.
+//
+// The result is cached if the test is conclusive.
+//
+// See [FeatureTestFn] for the meaning of the returned error.
+func (ft *FeatureTest) execute() error {
+ ft.mu.RLock()
+ result, done := ft.result, ft.done
+ ft.mu.RUnlock()
+
+ if done {
+ return result
+ }
+
+ ft.mu.Lock()
+ defer ft.mu.Unlock()
+
+ // The test may have been executed by another caller while we were
+ // waiting to acquire ft.mu.
+ if ft.done {
+ return ft.result
+ }
+
+ err := ft.Fn()
+ if err == nil {
+ ft.done = true
+ return nil
+ }
+
+ if errors.Is(err, ErrNotSupported) {
+ var v Version
+ if ft.Version != "" {
+ v, err = NewVersion(ft.Version)
+ if err != nil {
+ return fmt.Errorf("feature %s: %w", ft.Name, err)
+ }
+ }
+
+ ft.done = true
+ ft.result = &UnsupportedFeatureError{
+ MinimumVersion: v,
+ Name: ft.Name,
+ }
+
+ return ft.result
+ }
+
+ // We couldn't execute the feature test to a point
+ // where it could make a determination.
+ // Don't cache the result, just return it.
+ return fmt.Errorf("detect support for %s: %w", ft.Name, err)
+}
+
+// FeatureMatrix groups multiple related feature tests into a map.
+//
+// Useful when there is a small number of discrete features which are known
+// at compile time.
+//
+// It must not be modified concurrently with calling [FeatureMatrix.Result].
+type FeatureMatrix[K comparable] map[K]*FeatureTest
+
+// Result returns the outcome of the feature test for the given key.
+//
+// It's safe to call this function concurrently.
+func (fm FeatureMatrix[K]) Result(key K) error {
+ ft, ok := fm[key]
+ if !ok {
+ return fmt.Errorf("no feature probe for %v", key)
+ }
+
+ return ft.execute()
+}
+
+// FeatureCache caches a potentially unlimited number of feature probes.
+//
+// Useful when there is a high cardinality for a feature test.
+type FeatureCache[K comparable] struct {
+ mu sync.RWMutex
+ newTest func(K) *FeatureTest
+ features map[K]*FeatureTest
+}
+
+func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] {
+ return &FeatureCache[K]{
+ newTest: newTest,
+ features: make(map[K]*FeatureTest),
+ }
+}
+
+func (fc *FeatureCache[K]) Result(key K) error {
+ // NB: Executing the feature test happens without fc.mu taken.
+ return fc.retrieve(key).execute()
+}
+
+func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest {
+ fc.mu.RLock()
+ ft := fc.features[key]
+ fc.mu.RUnlock()
+
+ if ft != nil {
+ return ft
+ }
+
+ fc.mu.Lock()
+ defer fc.mu.Unlock()
+
+ if ft := fc.features[key]; ft != nil {
+ return ft
+ }
+
+ ft = fc.newTest(key)
+ fc.features[key] = ft
+ return ft
+}
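A hedged sketch of FeatureMatrix; the key and probe body below are made up for illustration:

var exampleFeatures = internal.FeatureMatrix[string]{
	"example": {
		Name:    "example feature",
		Version: "5.8",
		Fn:      func() error { return internal.ErrNotSupported },
	},
}

func checkExample() error {
	// Executes and caches the probe; here it yields an
	// *UnsupportedFeatureError that unwraps to ErrNotSupported.
	return exampleFeatures.Result("example")
}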
diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go
new file mode 100644
index 000000000..1eaf4775a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/io.go
@@ -0,0 +1,128 @@
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized
+// buffered reader. It is a convenience function for reading subsections of
+// ELF sections while minimizing the amount of read() syscalls made.
+//
+// Syscall overhead is non-negligible in continuous integration context
+// where ELFs might be accessed over virtual filesystems with poor random
+// access performance. Buffering reads makes sense because (sub)sections
+// end up being read completely anyway.
+//
+// Use instead of the r.Seek() + io.LimitReader() pattern.
+func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader {
+ // Clamp the size of the buffer to one page to avoid slurping large parts
+ // of a file into memory. bufio.NewReader uses a hardcoded default buffer
+ // of 4096. Allow arches with larger pages to allocate more, but don't
+ // allocate a fixed 4k buffer if we only need to read a small segment.
+ buf := n
+ if ps := int64(os.Getpagesize()); n > ps {
+ buf = ps
+ }
+
+ return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf))
+}
+
+// DiscardZeroes makes sure that all written bytes are zero
+// before discarding them.
+type DiscardZeroes struct{}
+
+func (DiscardZeroes) Write(p []byte) (int, error) {
+ for _, b := range p {
+ if b != 0 {
+ return 0, errors.New("encountered non-zero byte")
+ }
+ }
+ return len(p), nil
+}
+
+// ReadAllCompressed decompresses a gzipped file into memory.
+func ReadAllCompressed(file string) ([]byte, error) {
+ fh, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ gz, err := gzip.NewReader(fh)
+ if err != nil {
+ return nil, err
+ }
+ defer gz.Close()
+
+ return io.ReadAll(gz)
+}
+
+// ReadUint64FromFile reads a uint64 from a file.
+//
+// format specifies the contents of the file in fmt.Scanf syntax.
+func ReadUint64FromFile(format string, path ...string) (uint64, error) {
+ filename := filepath.Join(path...)
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ return 0, fmt.Errorf("reading file %q: %w", filename, err)
+ }
+
+ var value uint64
+ n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
+ if err != nil {
+ return 0, fmt.Errorf("parsing file %q: %w", filename, err)
+ }
+ if n != 1 {
+ return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
+ }
+
+ return value, nil
+}
+
+type uint64FromFileKey struct {
+ format, path string
+}
+
+var uint64FromFileCache = struct {
+ sync.RWMutex
+ values map[uint64FromFileKey]uint64
+}{
+ values: map[uint64FromFileKey]uint64{},
+}
+
+// ReadUint64FromFileOnce is like ReadUint64FromFile but memoizes the result.
+func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) {
+ filename := filepath.Join(path...)
+ key := uint64FromFileKey{format, filename}
+
+ uint64FromFileCache.RLock()
+ if value, ok := uint64FromFileCache.values[key]; ok {
+ uint64FromFileCache.RUnlock()
+ return value, nil
+ }
+ uint64FromFileCache.RUnlock()
+
+ value, err := ReadUint64FromFile(format, filename)
+ if err != nil {
+ return 0, err
+ }
+
+ uint64FromFileCache.Lock()
+ defer uint64FromFileCache.Unlock()
+
+ if value, ok := uint64FromFileCache.values[key]; ok {
+ // Someone else got here before us, use what is cached.
+ return value, nil
+ }
+
+ uint64FromFileCache.values[key] = value
+ return value, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go
new file mode 100644
index 000000000..776c7a10a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go
@@ -0,0 +1,74 @@
+package kallsyms
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "os"
+ "sync"
+)
+
+var kernelModules struct {
+ sync.RWMutex
+ // function to kernel module mapping
+ kmods map[string]string
+}
+
+// KernelModule returns the kernel module, if any, that a probe-able function is contained in.
+func KernelModule(fn string) (string, error) {
+ kernelModules.RLock()
+ kmods := kernelModules.kmods
+ kernelModules.RUnlock()
+
+ if kmods == nil {
+ kernelModules.Lock()
+ defer kernelModules.Unlock()
+ kmods = kernelModules.kmods
+ }
+
+ if kmods != nil {
+ return kmods[fn], nil
+ }
+
+ f, err := os.Open("/proc/kallsyms")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ kmods, err = loadKernelModuleMapping(f)
+ if err != nil {
+ return "", err
+ }
+
+ kernelModules.kmods = kmods
+ return kmods[fn], nil
+}
+
+// FlushKernelModuleCache removes any cached information about function to kernel module mapping.
+func FlushKernelModuleCache() {
+ kernelModules.Lock()
+ defer kernelModules.Unlock()
+
+ kernelModules.kmods = nil
+}
+
+func loadKernelModuleMapping(f io.Reader) (map[string]string, error) {
+ mods := make(map[string]string)
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ fields := bytes.Fields(scanner.Bytes())
+ if len(fields) < 4 {
+ continue
+ }
+ switch string(fields[1]) {
+ case "t", "T":
+ mods[string(fields[2])] = string(bytes.Trim(fields[3], "[]"))
+ default:
+ continue
+ }
+ }
+ if scanner.Err() != nil {
+ return nil, scanner.Err()
+ }
+ return mods, nil
+}
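A small usage sketch; the symbol name is hypothetical, and built-in or unknown symbols yield an empty module name:

func moduleOf(fn string) (string, error) {
	mod, err := kallsyms.KernelModule(fn) // e.g. fn = "nf_nat_manip_pkt"
	if err != nil {
		return "", err
	}
	// mod is "" for built-in symbols, otherwise the module name, e.g. "nf_nat".
	return mod, nil
}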
diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
new file mode 100644
index 000000000..1921e4f15
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
@@ -0,0 +1,293 @@
+package kconfig
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+)
+
+// Find finds a kconfig file on the host.
+// It first reads /boot/config- for the currently running kernel and falls back to
+// /proc/config.gz if nothing was found in /boot.
+// If neither file provides a kconfig, it returns an error.
+func Find() (*os.File, error) {
+ kernelRelease, err := internal.KernelRelease()
+ if err != nil {
+ return nil, fmt.Errorf("cannot get kernel release: %w", err)
+ }
+
+ path := "/boot/config-" + kernelRelease
+ f, err := os.Open(path)
+ if err == nil {
+ return f, nil
+ }
+
+ f, err = os.Open("/proc/config.gz")
+ if err == nil {
+ return f, nil
+ }
+
+ return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path)
+}
+
+// Parse parses the kconfig file for which a reader is given.
+// All the CONFIG_* entries which are in filter and which are set will be
+// put in the returned map as keys, with their corresponding values as map values.
+// If filter is nil, no filtering will occur.
+// If the kconfig file is not valid, an error will be returned.
+func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) {
+ var r io.Reader
+ zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64))
+ if err != nil {
+ r = io.NewSectionReader(source, 0, math.MaxInt64)
+ } else {
+ // Source is gzip compressed, transparently decompress.
+ r = zr
+ }
+
+ ret := make(map[string]string, len(filter))
+
+ s := bufio.NewScanner(r)
+
+ for s.Scan() {
+ line := s.Bytes()
+ err = processKconfigLine(line, ret, filter)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse line: %w", err)
+ }
+
+ if filter != nil && len(ret) == len(filter) {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, fmt.Errorf("cannot parse: %w", err)
+ }
+
+ if zr != nil {
+ return ret, zr.Close()
+ }
+
+ return ret, nil
+}
+
+// Golang translation of libbpf bpf_object__process_kconfig_line():
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874
+// It does the same checks but does not put the data inside the BPF map.
+func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error {
+ // Ignore empty lines and "# CONFIG_* is not set".
+ if !bytes.HasPrefix(line, []byte("CONFIG_")) {
+ return nil
+ }
+
+ key, value, found := bytes.Cut(line, []byte{'='})
+ if !found {
+ return fmt.Errorf("line %q does not contain separator '='", line)
+ }
+
+ if len(value) == 0 {
+ return fmt.Errorf("line %q has no value", line)
+ }
+
+ if filter != nil {
+ // NB: map[string(key)] gets special optimisation help from the compiler
+ // and doesn't allocate. Don't turn this into a variable.
+ _, ok := filter[string(key)]
+ if !ok {
+ return nil
+ }
+ }
+
+	// This can seem odd, but libbpf only sets the value the first time the key
+	// is encountered:
+ // https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908
+ _, ok := m[string(key)]
+ if !ok {
+ m[string(key)] = string(value)
+ }
+
+ return nil
+}
+
+// PutValue translates the given value according to the BTF type and writes
+// the translated value into the byte array.
+func PutValue(data []byte, typ btf.Type, value string) error {
+ typ = btf.UnderlyingType(typ)
+
+ switch value {
+ case "y", "n", "m":
+ return putValueTri(data, typ, value)
+ default:
+ if strings.HasPrefix(value, `"`) {
+ return putValueString(data, typ, value)
+ }
+ return putValueNumber(data, typ, value)
+ }
+}
+
+// Golang translation of libbpf_tristate enum:
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169
+type triState int
+
+const (
+ TriNo triState = 0
+ TriYes triState = 1
+ TriModule triState = 2
+)
+
+func putValueTri(data []byte, typ btf.Type, value string) error {
+ switch v := typ.(type) {
+ case *btf.Int:
+ if v.Encoding != btf.Bool {
+ return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding)
+ }
+
+ if v.Size != 1 {
+ return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size)
+ }
+
+ switch value {
+ case "y":
+ data[0] = 1
+ case "n":
+ data[0] = 0
+ default:
+ return fmt.Errorf("cannot use %q for btf.Bool", value)
+ }
+ case *btf.Enum:
+ if v.Name != "libbpf_tristate" {
+ return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name)
+ }
+
+ var tri triState
+ switch value {
+ case "y":
+ tri = TriYes
+ case "m":
+ tri = TriModule
+ case "n":
+ tri = TriNo
+ default:
+			return fmt.Errorf("value %q is not supported for libbpf_tristate", value)
+ }
+
+ internal.NativeEndian.PutUint64(data, uint64(tri))
+ default:
+ return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v)
+ }
+
+ return nil
+}
+
+func putValueString(data []byte, typ btf.Type, value string) error {
+ array, ok := typ.(*btf.Array)
+ if !ok {
+ return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array)
+ }
+
+ contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int)
+ if !ok {
+ return fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType)
+ }
+
+ // Any Int, which is not bool, of one byte could be used to store char:
+ // https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638
+ if contentType.Size != 1 && contentType.Encoding != btf.Bool {
+ return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size)
+ }
+
+ if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) {
+ return fmt.Errorf(`value %q must start and finish with '"'`, value)
+ }
+
+ str := strings.Trim(value, `"`)
+
+	// We need to trim the string if the BPF array is smaller.
+ if uint32(len(str)) >= array.Nelems {
+ str = str[:array.Nelems]
+ }
+
+ // Write the string content to .kconfig.
+ copy(data, str)
+
+ return nil
+}
+
+func putValueNumber(data []byte, typ btf.Type, value string) error {
+ integer, ok := typ.(*btf.Int)
+ if !ok {
+ return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer)
+ }
+
+ size := integer.Size
+ sizeInBits := size * 8
+
+ var n uint64
+ var err error
+ if integer.Encoding == btf.Signed {
+ parsed, e := strconv.ParseInt(value, 0, int(sizeInBits))
+
+ n = uint64(parsed)
+ err = e
+ } else {
+ parsed, e := strconv.ParseUint(value, 0, int(sizeInBits))
+
+ n = uint64(parsed)
+ err = e
+ }
+
+ if err != nil {
+ return fmt.Errorf("cannot parse value: %w", err)
+ }
+
+ return PutInteger(data, integer, n)
+}
+
+// PutInteger writes n into data.
+//
+// integer determines how much is written into data and what the valid values
+// are.
+func PutInteger(data []byte, integer *btf.Int, n uint64) error {
+ // This function should match set_kcfg_value_num in libbpf.
+ if integer.Encoding == btf.Bool && n > 1 {
+ return fmt.Errorf("invalid boolean value: %d", n)
+ }
+
+ if len(data) < int(integer.Size) {
+ return fmt.Errorf("can't fit an integer of size %d into a byte slice of length %d", integer.Size, len(data))
+ }
+
+ switch integer.Size {
+ case 1:
+ if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt8 || int64(n) < math.MinInt8) {
+ return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
+ }
+ data[0] = byte(n)
+ case 2:
+ if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt16 || int64(n) < math.MinInt16) {
+ return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
+ }
+ internal.NativeEndian.PutUint16(data, uint16(n))
+ case 4:
+ if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt32 || int64(n) < math.MinInt32) {
+ return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
+ }
+ internal.NativeEndian.PutUint32(data, uint32(n))
+ case 8:
+ internal.NativeEndian.PutUint64(data, uint64(n))
+ default:
+ return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", integer.Size)
+ }
+
+ return nil
+}
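
A sketch of how Find, Parse, and the filter are meant to compose, assuming the caller only needs a couple of options; the filter lets Parse stop early instead of materialising the whole config.

	// import "github.com/cilium/ebpf/internal/kconfig"
	f, err := kconfig.Find() // /boot/config-<release>, falling back to /proc/config.gz
	if err != nil {
		return err
	}
	defer f.Close()

	filter := map[string]struct{}{
		"CONFIG_BPF":     {},
		"CONFIG_BPF_JIT": {},
	}
	opts, err := kconfig.Parse(f, filter)
	if err != nil {
		return err
	}
	_ = opts["CONFIG_BPF"] // typically "y" on distribution kernels
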
diff --git a/vendor/github.com/cilium/ebpf/internal/math.go b/vendor/github.com/cilium/ebpf/internal/math.go
new file mode 100644
index 000000000..e95c8efde
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/math.go
@@ -0,0 +1,13 @@
+package internal
+
+import "golang.org/x/exp/constraints"
+
+// Align returns 'n' updated to 'alignment' boundary.
+func Align[I constraints.Integer](n, alignment I) I {
+ return (n + alignment - 1) / alignment * alignment
+}
+
+// IsPow returns true if n is a power of two.
+func IsPow[I constraints.Integer](n I) bool {
+ return n != 0 && (n&(n-1)) == 0
+}
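
A couple of worked values for the generic helpers above:

	internal.Align(13, 8) // 16: rounds up to the next multiple of 8
	internal.Align(16, 8) // 16: already-aligned values are unchanged
	internal.IsPow(64)    // true
	internal.IsPow(6)     // false: 6 has two bits set
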
diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go
new file mode 100644
index 000000000..dd6e6cbaf
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/output.go
@@ -0,0 +1,97 @@
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "go/format"
+ "go/scanner"
+ "io"
+ "reflect"
+ "strings"
+ "unicode"
+)
+
+// Identifier turns a C style type or field name into an exportable Go equivalent.
+func Identifier(str string) string {
+ prev := rune(-1)
+ return strings.Map(func(r rune) rune {
+ // See https://golang.org/ref/spec#Identifiers
+ switch {
+ case unicode.IsLetter(r):
+ if prev == -1 {
+ r = unicode.ToUpper(r)
+ }
+
+ case r == '_':
+ switch {
+ // The previous rune was deleted, or we are at the
+ // beginning of the string.
+ case prev == -1:
+ fallthrough
+
+ // The previous rune is a lower case letter or a digit.
+ case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)):
+ // delete the current rune, and force the
+ // next character to be uppercased.
+ r = -1
+ }
+
+ case unicode.IsDigit(r):
+
+ default:
+ // Delete the current rune. prev is unchanged.
+ return -1
+ }
+
+ prev = r
+ return r
+ }, str)
+}
+
+// WriteFormatted outputs a formatted src into out.
+//
+// If formatting fails it returns an informative error message.
+func WriteFormatted(src []byte, out io.Writer) error {
+ formatted, err := format.Source(src)
+ if err == nil {
+ _, err = out.Write(formatted)
+ return err
+ }
+
+ var el scanner.ErrorList
+ if !errors.As(err, &el) {
+ return err
+ }
+
+ var nel scanner.ErrorList
+ for _, err := range el {
+ if !err.Pos.IsValid() {
+ nel = append(nel, err)
+ continue
+ }
+
+ buf := src[err.Pos.Offset:]
+ nl := bytes.IndexRune(buf, '\n')
+ if nl == -1 {
+ nel = append(nel, err)
+ continue
+ }
+
+ err.Msg += ": " + string(buf[:nl])
+ nel = append(nel, err)
+ }
+
+ return nel
+}
+
+// GoTypeName is like %T, but elides the package name.
+//
+// Pointers to a type are peeled off.
+func GoTypeName(t any) string {
+ rT := reflect.TypeOf(t)
+ for rT.Kind() == reflect.Pointer {
+ rT = rT.Elem()
+ }
+ // Doesn't return the correct Name for generic types due to https://github.com/golang/go/issues/55924
+ return rT.Name()
+}
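
For reference, a few conversions the Identifier mapping above produces:

	internal.Identifier("task_struct")  // "TaskStruct"
	internal.Identifier("__sk_buff")    // "SkBuff"
	internal.Identifier("bpf_map_type") // "BpfMapType"
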
diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go
new file mode 100644
index 000000000..01d892f93
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/pinning.go
@@ -0,0 +1,65 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+func Pin(currentPath, newPath string, fd *sys.FD) error {
+ if newPath == "" {
+ return errors.New("given pinning path cannot be empty")
+ }
+ if currentPath == newPath {
+ return nil
+ }
+
+ fsType, err := FSType(filepath.Dir(newPath))
+ if err != nil {
+ return err
+ }
+ if fsType != unix.BPF_FS_MAGIC {
+ return fmt.Errorf("%s is not on a bpf filesystem", newPath)
+ }
+
+ defer runtime.KeepAlive(fd)
+
+ if currentPath == "" {
+ return sys.ObjPin(&sys.ObjPinAttr{
+ Pathname: sys.NewStringPointer(newPath),
+ BpfFd: fd.Uint(),
+ })
+ }
+
+ // Renameat2 is used instead of os.Rename to disallow the new path replacing
+ // an existing path.
+ err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE)
+ if err == nil {
+ // Object is now moved to the new pinning path.
+ return nil
+ }
+ if !os.IsNotExist(err) {
+ return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err)
+ }
+ // Internal state not in sync with the file system so let's fix it.
+ return sys.ObjPin(&sys.ObjPinAttr{
+ Pathname: sys.NewStringPointer(newPath),
+ BpfFd: fd.Uint(),
+ })
+}
+
+func Unpin(pinnedPath string) error {
+ if pinnedPath == "" {
+ return nil
+ }
+ err := os.Remove(pinnedPath)
+ if err == nil || os.IsNotExist(err) {
+ return nil
+ }
+ return err
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/platform.go b/vendor/github.com/cilium/ebpf/internal/platform.go
new file mode 100644
index 000000000..6e90f2ef7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/platform.go
@@ -0,0 +1,43 @@
+package internal
+
+import (
+ "runtime"
+)
+
+// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by
+// the linux kernel.
+//
+// Based on https://github.com/golang/go/blob/master/src/go/build/syslist.go
+// and https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047
+func PlatformPrefix() string {
+ switch runtime.GOARCH {
+ case "386":
+ return "__ia32_"
+ case "amd64", "amd64p32":
+ return "__x64_"
+
+ case "arm", "armbe":
+ return "__arm_"
+ case "arm64", "arm64be":
+ return "__arm64_"
+
+ case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le":
+ return "__mips_"
+
+ case "s390":
+ return "__s390_"
+ case "s390x":
+ return "__s390x_"
+
+ case "riscv", "riscv64":
+ return "__riscv_"
+
+ case "ppc":
+ return "__powerpc_"
+ case "ppc64", "ppc64le":
+ return "__powerpc64_"
+
+ default:
+ return ""
+ }
+}
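
A short sketch of the intended use, wrapped in a hypothetical helper: kprobes on syscalls usually target the architecture-specific wrapper symbol rather than the bare name.

	// execveSymbol is a hypothetical helper illustrating PlatformPrefix.
	func execveSymbol() string {
		// "__x64_sys_execve" on amd64, "__arm64_sys_execve" on arm64; an empty
		// prefix means no wrapper convention is known for the platform.
		return internal.PlatformPrefix() + "sys_execve"
	}
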
diff --git a/vendor/github.com/cilium/ebpf/internal/prog.go b/vendor/github.com/cilium/ebpf/internal/prog.go
new file mode 100644
index 000000000..d629145b6
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/prog.go
@@ -0,0 +1,11 @@
+package internal
+
+// EmptyBPFContext is the smallest-possible BPF input context to be used for
+// invoking `Program.{Run,Benchmark,Test}`.
+//
+// Programs require a context input buffer of at least 15 bytes. Looking in
+// net/bpf/test_run.c, bpf_test_init() requires that the input is at least
+// ETH_HLEN (14) bytes. As of Linux commit fd18942 ("bpf: Don't redirect packets
+// with invalid pkt_len"), it also requires the skb to be non-empty after
+// removing the Layer 2 header.
+var EmptyBPFContext = make([]byte, 15)
diff --git a/vendor/github.com/cilium/ebpf/internal/statfs.go b/vendor/github.com/cilium/ebpf/internal/statfs.go
new file mode 100644
index 000000000..44c02d676
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/statfs.go
@@ -0,0 +1,23 @@
+package internal
+
+import (
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+func FSType(path string) (int64, error) {
+ var statfs unix.Statfs_t
+ if err := unix.Statfs(path, &statfs); err != nil {
+ return 0, err
+ }
+
+ fsType := int64(statfs.Type)
+ if unsafe.Sizeof(statfs.Type) == 4 {
+ // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a
+ // negative number when interpreted as int32 so we need to cast via
+ // uint32 to avoid sign extension.
+ fsType = int64(uint32(statfs.Type))
+ }
+ return fsType, nil
+}
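
As Pin above does, a caller can use FSType to verify that a directory is backed by bpffs before pinning into it; a minimal sketch, assuming the conventional /sys/fs/bpf mount point:

	fsType, err := internal.FSType("/sys/fs/bpf")
	if err != nil {
		return err
	}
	if fsType != unix.BPF_FS_MAGIC {
		return fmt.Errorf("/sys/fs/bpf is not on a bpf filesystem")
	}
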
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/vendor/github.com/cilium/ebpf/internal/sys/doc.go
new file mode 100644
index 000000000..dfe174448
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/doc.go
@@ -0,0 +1,6 @@
+// Package sys contains bindings for the BPF syscall.
+package sys
+
+// Regenerate types.go by invoking go generate in the current directory.
+
+//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
new file mode 100644
index 000000000..941a56fb9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
@@ -0,0 +1,133 @@
+package sys
+
+import (
+ "fmt"
+ "math"
+ "os"
+ "runtime"
+ "strconv"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var ErrClosedFd = unix.EBADF
+
+type FD struct {
+ raw int
+}
+
+func newFD(value int) *FD {
+ if onLeakFD != nil {
+ // Attempt to store the caller's stack for the given fd value.
+ // Panic if fds contains an existing stack for the fd.
+ old, exist := fds.LoadOrStore(value, callersFrames())
+ if exist {
+ f := old.(*runtime.Frames)
+ panic(fmt.Sprintf("found existing stack for fd %d:\n%s", value, FormatFrames(f)))
+ }
+ }
+
+ fd := &FD{value}
+ runtime.SetFinalizer(fd, (*FD).finalize)
+ return fd
+}
+
+// finalize is set as the FD's runtime finalizer and
+// sends a leak trace before calling FD.Close().
+func (fd *FD) finalize() {
+ if fd.raw < 0 {
+ return
+ }
+
+ // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback
+ // is invoked at most once for one sys.FD allocation, runtime.Frames can only
+ // be unwound once.
+ f, ok := fds.LoadAndDelete(fd.Int())
+ if ok && onLeakFD != nil {
+ onLeakFD(f.(*runtime.Frames))
+ }
+
+ _ = fd.Close()
+}
+
+// NewFD wraps a raw fd with a finalizer.
+//
+// You must not use the raw fd after calling this function, since the underlying
+// file descriptor number may change. This is because the BPF UAPI assumes that
+// zero is not a valid fd value.
+func NewFD(value int) (*FD, error) {
+ if value < 0 {
+ return nil, fmt.Errorf("invalid fd %d", value)
+ }
+
+ fd := newFD(value)
+ if value != 0 {
+ return fd, nil
+ }
+
+ dup, err := fd.Dup()
+ _ = fd.Close()
+ return dup, err
+}
+
+func (fd *FD) String() string {
+ return strconv.FormatInt(int64(fd.raw), 10)
+}
+
+func (fd *FD) Int() int {
+ return fd.raw
+}
+
+func (fd *FD) Uint() uint32 {
+ if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 {
+ // Best effort: this is the number most likely to be an invalid file
+ // descriptor. It is equal to -1 (on two's complement arches).
+ return math.MaxUint32
+ }
+ return uint32(fd.raw)
+}
+
+func (fd *FD) Close() error {
+ if fd.raw < 0 {
+ return nil
+ }
+
+ return unix.Close(fd.disown())
+}
+
+func (fd *FD) disown() int {
+ value := int(fd.raw)
+ fds.Delete(int(value))
+ fd.raw = -1
+
+ runtime.SetFinalizer(fd, nil)
+ return value
+}
+
+func (fd *FD) Dup() (*FD, error) {
+ if fd.raw < 0 {
+ return nil, ErrClosedFd
+ }
+
+ // Always require the fd to be larger than zero: the BPF API treats the value
+ // as "no argument provided".
+ dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1)
+ if err != nil {
+ return nil, fmt.Errorf("can't dup fd: %v", err)
+ }
+
+ return newFD(dup), nil
+}
+
+// File takes ownership of FD and turns it into an [*os.File].
+//
+// You must not use the FD after the call returns.
+//
+// Returns nil if the FD is not valid.
+func (fd *FD) File(name string) *os.File {
+ if fd.raw < 0 {
+ return nil
+ }
+
+ return os.NewFile(uintptr(fd.disown()), name)
+}
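
A sketch of the ownership rules above: wrap a raw descriptor once, then either Close it explicitly or hand it off via File, never both.

	fd, err := sys.NewFD(raw) // raw: an int returned by a bpf(2) call; 0 is dup'd away
	if err != nil {
		return err
	}
	defer fd.Close()

	// Alternatively, transfer ownership to an *os.File; fd must not be used afterwards:
	// f := fd.File("bpf-prog")
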
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
new file mode 100644
index 000000000..cd50dd1f6
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
@@ -0,0 +1,93 @@
+package sys
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sync"
+)
+
+// OnLeakFD controls tracing [FD] lifetime to detect resources that are not
+// closed by Close().
+//
+// If fn is not nil, tracing is enabled for all FDs created going forward. fn is
+// invoked for all FDs that are closed by the garbage collector instead of an
+// explicit Close() by a caller. Calling OnLeakFD twice with a non-nil fn
+// (without disabling tracing in the meantime) will cause a panic.
+//
+// If fn is nil, tracing will be disabled. Any FDs that have not been closed are
+// considered to be leaked, fn will be invoked for them, and the process will be
+// terminated.
+//
+// fn will be invoked at most once for every unique sys.FD allocation since a
+// runtime.Frames can only be unwound once.
+func OnLeakFD(fn func(*runtime.Frames)) {
+ // Enable leak tracing if new fn is provided.
+ if fn != nil {
+ if onLeakFD != nil {
+ panic("OnLeakFD called twice with non-nil fn")
+ }
+
+ onLeakFD = fn
+ return
+ }
+
+ // fn is nil past this point.
+
+ if onLeakFD == nil {
+ return
+ }
+
+ // Call onLeakFD for all open fds.
+ if fs := flushFrames(); len(fs) != 0 {
+ for _, f := range fs {
+ onLeakFD(f)
+ }
+ }
+
+ onLeakFD = nil
+}
+
+var onLeakFD func(*runtime.Frames)
+
+// fds is a registry of all file descriptors wrapped into sys.FD that were
+// created while an fd tracer was active.
+var fds sync.Map // map[int]*runtime.Frames
+
+// flushFrames removes all elements from fds and returns them as a slice. This
+// deals with the fact that a runtime.Frames can only be unwound once using
+// Next().
+func flushFrames() []*runtime.Frames {
+ var frames []*runtime.Frames
+ fds.Range(func(key, value any) bool {
+ frames = append(frames, value.(*runtime.Frames))
+ fds.Delete(key)
+ return true
+ })
+ return frames
+}
+
+func callersFrames() *runtime.Frames {
+ c := make([]uintptr, 32)
+
+ // Skip runtime.Callers and this function.
+ i := runtime.Callers(2, c)
+ if i == 0 {
+ return nil
+ }
+
+ return runtime.CallersFrames(c)
+}
+
+// FormatFrames formats a runtime.Frames as a human-readable string.
+func FormatFrames(fs *runtime.Frames) string {
+ var b bytes.Buffer
+ for {
+ f, more := fs.Next()
+ b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line))
+ if !more {
+ break
+ }
+ }
+ return b.String()
+}
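
A sketch of how a test harness might use these hooks (imports of log and runtime assumed), simply logging every fd that was collected by the garbage collector instead of being closed:

	sys.OnLeakFD(func(fs *runtime.Frames) {
		log.Printf("leaked BPF fd, created at:\n%s", sys.FormatFrames(fs))
	})
	// ... exercise code that creates and closes FDs ...
	sys.OnLeakFD(nil) // disable tracing; any still-open FDs are reported as leaks
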
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
new file mode 100644
index 000000000..d9fe21722
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
@@ -0,0 +1,53 @@
+// Code generated by "stringer -type MapFlags"; DO NOT EDIT.
+
+package sys
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[BPF_F_NO_PREALLOC-1]
+ _ = x[BPF_F_NO_COMMON_LRU-2]
+ _ = x[BPF_F_NUMA_NODE-4]
+ _ = x[BPF_F_RDONLY-8]
+ _ = x[BPF_F_WRONLY-16]
+ _ = x[BPF_F_STACK_BUILD_ID-32]
+ _ = x[BPF_F_ZERO_SEED-64]
+ _ = x[BPF_F_RDONLY_PROG-128]
+ _ = x[BPF_F_WRONLY_PROG-256]
+ _ = x[BPF_F_CLONE-512]
+ _ = x[BPF_F_MMAPABLE-1024]
+ _ = x[BPF_F_PRESERVE_ELEMS-2048]
+ _ = x[BPF_F_INNER_MAP-4096]
+ _ = x[BPF_F_LINK-8192]
+ _ = x[BPF_F_PATH_FD-16384]
+}
+
+const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAPBPF_F_LINKBPF_F_PATH_FD"
+
+var _MapFlags_map = map[MapFlags]string{
+ 1: _MapFlags_name[0:17],
+ 2: _MapFlags_name[17:36],
+ 4: _MapFlags_name[36:51],
+ 8: _MapFlags_name[51:63],
+ 16: _MapFlags_name[63:75],
+ 32: _MapFlags_name[75:95],
+ 64: _MapFlags_name[95:110],
+ 128: _MapFlags_name[110:127],
+ 256: _MapFlags_name[127:144],
+ 512: _MapFlags_name[144:155],
+ 1024: _MapFlags_name[155:169],
+ 2048: _MapFlags_name[169:189],
+ 4096: _MapFlags_name[189:204],
+ 8192: _MapFlags_name[204:214],
+ 16384: _MapFlags_name[214:227],
+}
+
+func (i MapFlags) String() string {
+ if str, ok := _MapFlags_map[i]; ok {
+ return str
+ }
+ return "MapFlags(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
new file mode 100644
index 000000000..e9bb59059
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
@@ -0,0 +1,52 @@
+package sys
+
+import (
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// NewPointer creates a 64-bit pointer from an unsafe Pointer.
+func NewPointer(ptr unsafe.Pointer) Pointer {
+ return Pointer{ptr: ptr}
+}
+
+// NewSlicePointer creates a 64-bit pointer from a byte slice.
+func NewSlicePointer(buf []byte) Pointer {
+ if len(buf) == 0 {
+ return Pointer{}
+ }
+
+ return Pointer{ptr: unsafe.Pointer(&buf[0])}
+}
+
+// NewSlicePointerLen creates a 64-bit pointer from a byte slice.
+//
+// Useful to assign both the pointer and the length in one go.
+func NewSlicePointerLen(buf []byte) (Pointer, uint32) {
+ return NewSlicePointer(buf), uint32(len(buf))
+}
+
+// NewStringPointer creates a 64-bit pointer from a string.
+func NewStringPointer(str string) Pointer {
+ p, err := unix.BytePtrFromString(str)
+ if err != nil {
+ return Pointer{}
+ }
+
+ return Pointer{ptr: unsafe.Pointer(p)}
+}
+
+// NewStringSlicePointer allocates an array of Pointers to each string in the
+// given slice of strings and returns a 64-bit pointer to the start of the
+// resulting array.
+//
+// Use this function to pass arrays of strings as syscall arguments.
+func NewStringSlicePointer(strings []string) Pointer {
+ sp := make([]Pointer, 0, len(strings))
+ for _, s := range strings {
+ sp = append(sp, NewStringPointer(s))
+ }
+
+ return Pointer{ptr: unsafe.Pointer(&sp[0])}
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
new file mode 100644
index 000000000..6278c79c9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
@@ -0,0 +1,14 @@
+//go:build armbe || mips || mips64p32
+
+package sys
+
+import (
+ "unsafe"
+)
+
+// Pointer wraps an unsafe.Pointer to be 64bit to
+// conform to the syscall specification.
+type Pointer struct {
+ pad uint32
+ ptr unsafe.Pointer
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
new file mode 100644
index 000000000..c27b537e8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
@@ -0,0 +1,14 @@
+//go:build 386 || amd64p32 || arm || mipsle || mips64p32le
+
+package sys
+
+import (
+ "unsafe"
+)
+
+// Pointer wraps an unsafe.Pointer to be 64bit to
+// conform to the syscall specification.
+type Pointer struct {
+ ptr unsafe.Pointer
+ pad uint32
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
new file mode 100644
index 000000000..2d7828230
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
@@ -0,0 +1,13 @@
+//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32
+
+package sys
+
+import (
+ "unsafe"
+)
+
+// Pointer wraps an unsafe.Pointer to be 64bit to
+// conform to the syscall specification.
+type Pointer struct {
+ ptr unsafe.Pointer
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/vendor/github.com/cilium/ebpf/internal/sys/signals.go
new file mode 100644
index 000000000..e5337191d
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/signals.go
@@ -0,0 +1,83 @@
+package sys
+
+import (
+ "fmt"
+ "runtime"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// A sigset containing only SIGPROF.
+var profSet unix.Sigset_t
+
+func init() {
+ // See sigsetAdd for details on the implementation. Open coded here so
+ // that the compiler will check the constant calculations for us.
+ profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits)
+}
+
+// maskProfilerSignal locks the calling goroutine to its underlying OS thread
+// and adds SIGPROF to the thread's signal mask. This prevents pprof from
+// interrupting expensive syscalls like e.g. BPF_PROG_LOAD.
+//
+// The caller must defer unmaskProfilerSignal() to reverse the operation.
+func maskProfilerSignal() {
+ runtime.LockOSThread()
+
+ if err := unix.PthreadSigmask(unix.SIG_BLOCK, &profSet, nil); err != nil {
+ runtime.UnlockOSThread()
+ panic(fmt.Errorf("masking profiler signal: %w", err))
+ }
+}
+
+// unmaskProfilerSignal removes SIGPROF from the underlying thread's signal
+// mask, allowing it to be interrupted for profiling once again.
+//
+// It also unlocks the current goroutine from its underlying OS thread.
+func unmaskProfilerSignal() {
+ defer runtime.UnlockOSThread()
+
+ if err := unix.PthreadSigmask(unix.SIG_UNBLOCK, &profSet, nil); err != nil {
+ panic(fmt.Errorf("unmasking profiler signal: %w", err))
+ }
+}
+
+const (
+ // Signal is the nth bit in the bitfield.
+ sigprofBit = int(unix.SIGPROF - 1)
+ // The number of bits in one Sigset_t word.
+ wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8
+)
+
+// sigsetAdd adds signal to set.
+//
+// Note: Sigset_t.Val's value type is uint32 or uint64 depending on the arch.
+// This function must be able to deal with both and so must avoid any direct
+// references to u32 or u64 types.
+func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error {
+ if signal < 1 {
+ return fmt.Errorf("signal %d must be larger than 0", signal)
+ }
+
+ // For amd64, runtime.sigaddset() performs the following operation:
+ // set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31)
+ //
+ // This trick depends on sigset being two u32's, causing a signal in the
+ // bottom 31 bits to be written to the low word if bit 32 is low, or the high
+ // word if bit 32 is high.
+
+ // Signal is the nth bit in the bitfield.
+ bit := int(signal - 1)
+ // Word within the sigset the bit needs to be written to.
+ word := bit / wordBits
+
+ if word >= len(set.Val) {
+ return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal)
+ }
+
+ // Write the signal bit into its corresponding word at the corrected offset.
+ set.Val[word] |= 1 << (bit % wordBits)
+
+ return nil
+}
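
A worked instance of the bit arithmetic above, for SIGPROF (signal 27) on an architecture where Sigset_t.Val holds 64-bit words; sigsetAdd is unexported, so this only applies from inside the package.

	var set unix.Sigset_t
	_ = sigsetAdd(&set, unix.SIGPROF)
	// bit = 27 - 1 = 26, word = 26 / 64 = 0, so set.Val[0] gains bit 26 (0x4000000),
	// matching what the open-coded init() above computes for profSet.
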
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
new file mode 100644
index 000000000..f6b6e9345
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
@@ -0,0 +1,229 @@
+package sys
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// ENOTSUPP is a Linux internal error code that has leaked into UAPI.
+//
+// It is not the same as ENOTSUP or EOPNOTSUPP.
+const ENOTSUPP = syscall.Errno(524)
+
+// BPF wraps SYS_BPF.
+//
+// Any pointers contained in attr must use the Pointer type from this package.
+func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
+ // Prevent the Go profiler from repeatedly interrupting the verifier,
+ // which could otherwise lead to a livelock due to receiving EAGAIN.
+ if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN {
+ maskProfilerSignal()
+ defer unmaskProfilerSignal()
+ }
+
+ for {
+ r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
+ runtime.KeepAlive(attr)
+
+ // As of ~4.20 the verifier can be interrupted by a signal,
+ // and returns EAGAIN in that case.
+ if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD {
+ continue
+ }
+
+ var err error
+ if errNo != 0 {
+ err = wrappedErrno{errNo}
+ }
+
+ return r1, err
+ }
+}
+
+// Info is implemented by all structs that can be passed to the ObjInfo syscall.
+//
+// MapInfo
+// ProgInfo
+// LinkInfo
+// BtfInfo
+type Info interface {
+ info() (unsafe.Pointer, uint32)
+}
+
+var _ Info = (*MapInfo)(nil)
+
+func (i *MapInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*ProgInfo)(nil)
+
+func (i *ProgInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*LinkInfo)(nil)
+
+func (i *LinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *TracingLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *CgroupLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *NetNsLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *XDPLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *TcxLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *NetfilterLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *NetkitLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *KprobeMultiLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *KprobeLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*BtfInfo)(nil)
+
+func (i *BtfInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+func (i *PerfEventLinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+// ObjInfo retrieves information about a BPF Fd.
+//
+// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo.
+func ObjInfo(fd *FD, info Info) error {
+ ptr, len := info.info()
+ err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{
+ BpfFd: fd.Uint(),
+ InfoLen: len,
+ Info: NewPointer(ptr),
+ })
+ runtime.KeepAlive(fd)
+ return err
+}
+
+// ObjName is a null-terminated string made up of
+// 'A-Za-z0-9_' characters.
+type ObjName [unix.BPF_OBJ_NAME_LEN]byte
+
+// NewObjName truncates the result if it is too long.
+func NewObjName(name string) ObjName {
+ var result ObjName
+ copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
+ return result
+}
+
+// LogLevel controls the verbosity of the kernel's eBPF program verifier.
+type LogLevel uint32
+
+const (
+ BPF_LOG_LEVEL1 LogLevel = 1 << iota
+ BPF_LOG_LEVEL2
+ BPF_LOG_STATS
+)
+
+// LinkID uniquely identifies a bpf_link.
+type LinkID uint32
+
+// BTFID uniquely identifies a BTF blob loaded into the kernel.
+type BTFID uint32
+
+// TypeID identifies a type in a BTF blob.
+type TypeID uint32
+
+// MapFlags control map behaviour.
+type MapFlags uint32
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -type MapFlags
+
+const (
+ BPF_F_NO_PREALLOC MapFlags = 1 << iota
+ BPF_F_NO_COMMON_LRU
+ BPF_F_NUMA_NODE
+ BPF_F_RDONLY
+ BPF_F_WRONLY
+ BPF_F_STACK_BUILD_ID
+ BPF_F_ZERO_SEED
+ BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG
+ BPF_F_CLONE
+ BPF_F_MMAPABLE
+ BPF_F_PRESERVE_ELEMS
+ BPF_F_INNER_MAP
+ BPF_F_LINK
+ BPF_F_PATH_FD
+)
+
+// Flags used by bpf_mprog.
+const (
+ BPF_F_REPLACE = 1 << (iota + 2)
+ BPF_F_BEFORE
+ BPF_F_AFTER
+ BPF_F_ID
+ BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK
+)
+
+// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
+// syscall.E* or unix.E* constants.
+//
+// You should never export an error of this type.
+type wrappedErrno struct {
+ syscall.Errno
+}
+
+func (we wrappedErrno) Unwrap() error {
+ return we.Errno
+}
+
+func (we wrappedErrno) Error() string {
+ if we.Errno == ENOTSUPP {
+ return "operation not supported"
+ }
+ return we.Errno.Error()
+}
+
+type syscallError struct {
+ error
+ errno syscall.Errno
+}
+
+func Error(err error, errno syscall.Errno) error {
+ return &syscallError{err, errno}
+}
+
+func (se *syscallError) Is(target error) bool {
+ return target == se.error
+}
+
+func (se *syscallError) Unwrap() error {
+ return se.errno
+}
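
A sketch of how the error wrappers above behave with errors.Is, assuming a hypothetical package-level sentinel:

	var ErrKeyNotExist = errors.New("key does not exist") // hypothetical sentinel

	err := sys.Error(ErrKeyNotExist, unix.ENOENT)
	errors.Is(err, ErrKeyNotExist) // true, via syscallError.Is
	errors.Is(err, unix.ENOENT)    // true, via Unwrap to the underlying errno
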
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go
new file mode 100644
index 000000000..70e754de7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go
@@ -0,0 +1,1383 @@
+// Code generated by internal/cmd/gentypes; DO NOT EDIT.
+
+package sys
+
+import (
+ "unsafe"
+)
+
+type AdjRoomMode uint32
+
+const (
+ BPF_ADJ_ROOM_NET AdjRoomMode = 0
+ BPF_ADJ_ROOM_MAC AdjRoomMode = 1
+)
+
+type AttachType uint32
+
+const (
+ BPF_CGROUP_INET_INGRESS AttachType = 0
+ BPF_CGROUP_INET_EGRESS AttachType = 1
+ BPF_CGROUP_INET_SOCK_CREATE AttachType = 2
+ BPF_CGROUP_SOCK_OPS AttachType = 3
+ BPF_SK_SKB_STREAM_PARSER AttachType = 4
+ BPF_SK_SKB_STREAM_VERDICT AttachType = 5
+ BPF_CGROUP_DEVICE AttachType = 6
+ BPF_SK_MSG_VERDICT AttachType = 7
+ BPF_CGROUP_INET4_BIND AttachType = 8
+ BPF_CGROUP_INET6_BIND AttachType = 9
+ BPF_CGROUP_INET4_CONNECT AttachType = 10
+ BPF_CGROUP_INET6_CONNECT AttachType = 11
+ BPF_CGROUP_INET4_POST_BIND AttachType = 12
+ BPF_CGROUP_INET6_POST_BIND AttachType = 13
+ BPF_CGROUP_UDP4_SENDMSG AttachType = 14
+ BPF_CGROUP_UDP6_SENDMSG AttachType = 15
+ BPF_LIRC_MODE2 AttachType = 16
+ BPF_FLOW_DISSECTOR AttachType = 17
+ BPF_CGROUP_SYSCTL AttachType = 18
+ BPF_CGROUP_UDP4_RECVMSG AttachType = 19
+ BPF_CGROUP_UDP6_RECVMSG AttachType = 20
+ BPF_CGROUP_GETSOCKOPT AttachType = 21
+ BPF_CGROUP_SETSOCKOPT AttachType = 22
+ BPF_TRACE_RAW_TP AttachType = 23
+ BPF_TRACE_FENTRY AttachType = 24
+ BPF_TRACE_FEXIT AttachType = 25
+ BPF_MODIFY_RETURN AttachType = 26
+ BPF_LSM_MAC AttachType = 27
+ BPF_TRACE_ITER AttachType = 28
+ BPF_CGROUP_INET4_GETPEERNAME AttachType = 29
+ BPF_CGROUP_INET6_GETPEERNAME AttachType = 30
+ BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31
+ BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32
+ BPF_XDP_DEVMAP AttachType = 33
+ BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34
+ BPF_XDP_CPUMAP AttachType = 35
+ BPF_SK_LOOKUP AttachType = 36
+ BPF_XDP AttachType = 37
+ BPF_SK_SKB_VERDICT AttachType = 38
+ BPF_SK_REUSEPORT_SELECT AttachType = 39
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40
+ BPF_PERF_EVENT AttachType = 41
+ BPF_TRACE_KPROBE_MULTI AttachType = 42
+ BPF_LSM_CGROUP AttachType = 43
+ BPF_STRUCT_OPS AttachType = 44
+ BPF_NETFILTER AttachType = 45
+ BPF_TCX_INGRESS AttachType = 46
+ BPF_TCX_EGRESS AttachType = 47
+ BPF_TRACE_UPROBE_MULTI AttachType = 48
+ BPF_CGROUP_UNIX_CONNECT AttachType = 49
+ BPF_CGROUP_UNIX_SENDMSG AttachType = 50
+ BPF_CGROUP_UNIX_RECVMSG AttachType = 51
+ BPF_CGROUP_UNIX_GETPEERNAME AttachType = 52
+ BPF_CGROUP_UNIX_GETSOCKNAME AttachType = 53
+ BPF_NETKIT_PRIMARY AttachType = 54
+ BPF_NETKIT_PEER AttachType = 55
+ __MAX_BPF_ATTACH_TYPE AttachType = 56
+)
+
+type Cmd uint32
+
+const (
+ BPF_MAP_CREATE Cmd = 0
+ BPF_MAP_LOOKUP_ELEM Cmd = 1
+ BPF_MAP_UPDATE_ELEM Cmd = 2
+ BPF_MAP_DELETE_ELEM Cmd = 3
+ BPF_MAP_GET_NEXT_KEY Cmd = 4
+ BPF_PROG_LOAD Cmd = 5
+ BPF_OBJ_PIN Cmd = 6
+ BPF_OBJ_GET Cmd = 7
+ BPF_PROG_ATTACH Cmd = 8
+ BPF_PROG_DETACH Cmd = 9
+ BPF_PROG_TEST_RUN Cmd = 10
+ BPF_PROG_RUN Cmd = 10
+ BPF_PROG_GET_NEXT_ID Cmd = 11
+ BPF_MAP_GET_NEXT_ID Cmd = 12
+ BPF_PROG_GET_FD_BY_ID Cmd = 13
+ BPF_MAP_GET_FD_BY_ID Cmd = 14
+ BPF_OBJ_GET_INFO_BY_FD Cmd = 15
+ BPF_PROG_QUERY Cmd = 16
+ BPF_RAW_TRACEPOINT_OPEN Cmd = 17
+ BPF_BTF_LOAD Cmd = 18
+ BPF_BTF_GET_FD_BY_ID Cmd = 19
+ BPF_TASK_FD_QUERY Cmd = 20
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21
+ BPF_MAP_FREEZE Cmd = 22
+ BPF_BTF_GET_NEXT_ID Cmd = 23
+ BPF_MAP_LOOKUP_BATCH Cmd = 24
+ BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25
+ BPF_MAP_UPDATE_BATCH Cmd = 26
+ BPF_MAP_DELETE_BATCH Cmd = 27
+ BPF_LINK_CREATE Cmd = 28
+ BPF_LINK_UPDATE Cmd = 29
+ BPF_LINK_GET_FD_BY_ID Cmd = 30
+ BPF_LINK_GET_NEXT_ID Cmd = 31
+ BPF_ENABLE_STATS Cmd = 32
+ BPF_ITER_CREATE Cmd = 33
+ BPF_LINK_DETACH Cmd = 34
+ BPF_PROG_BIND_MAP Cmd = 35
+)
+
+type FunctionId uint32
+
+const (
+ BPF_FUNC_unspec FunctionId = 0
+ BPF_FUNC_map_lookup_elem FunctionId = 1
+ BPF_FUNC_map_update_elem FunctionId = 2
+ BPF_FUNC_map_delete_elem FunctionId = 3
+ BPF_FUNC_probe_read FunctionId = 4
+ BPF_FUNC_ktime_get_ns FunctionId = 5
+ BPF_FUNC_trace_printk FunctionId = 6
+ BPF_FUNC_get_prandom_u32 FunctionId = 7
+ BPF_FUNC_get_smp_processor_id FunctionId = 8
+ BPF_FUNC_skb_store_bytes FunctionId = 9
+ BPF_FUNC_l3_csum_replace FunctionId = 10
+ BPF_FUNC_l4_csum_replace FunctionId = 11
+ BPF_FUNC_tail_call FunctionId = 12
+ BPF_FUNC_clone_redirect FunctionId = 13
+ BPF_FUNC_get_current_pid_tgid FunctionId = 14
+ BPF_FUNC_get_current_uid_gid FunctionId = 15
+ BPF_FUNC_get_current_comm FunctionId = 16
+ BPF_FUNC_get_cgroup_classid FunctionId = 17
+ BPF_FUNC_skb_vlan_push FunctionId = 18
+ BPF_FUNC_skb_vlan_pop FunctionId = 19
+ BPF_FUNC_skb_get_tunnel_key FunctionId = 20
+ BPF_FUNC_skb_set_tunnel_key FunctionId = 21
+ BPF_FUNC_perf_event_read FunctionId = 22
+ BPF_FUNC_redirect FunctionId = 23
+ BPF_FUNC_get_route_realm FunctionId = 24
+ BPF_FUNC_perf_event_output FunctionId = 25
+ BPF_FUNC_skb_load_bytes FunctionId = 26
+ BPF_FUNC_get_stackid FunctionId = 27
+ BPF_FUNC_csum_diff FunctionId = 28
+ BPF_FUNC_skb_get_tunnel_opt FunctionId = 29
+ BPF_FUNC_skb_set_tunnel_opt FunctionId = 30
+ BPF_FUNC_skb_change_proto FunctionId = 31
+ BPF_FUNC_skb_change_type FunctionId = 32
+ BPF_FUNC_skb_under_cgroup FunctionId = 33
+ BPF_FUNC_get_hash_recalc FunctionId = 34
+ BPF_FUNC_get_current_task FunctionId = 35
+ BPF_FUNC_probe_write_user FunctionId = 36
+ BPF_FUNC_current_task_under_cgroup FunctionId = 37
+ BPF_FUNC_skb_change_tail FunctionId = 38
+ BPF_FUNC_skb_pull_data FunctionId = 39
+ BPF_FUNC_csum_update FunctionId = 40
+ BPF_FUNC_set_hash_invalid FunctionId = 41
+ BPF_FUNC_get_numa_node_id FunctionId = 42
+ BPF_FUNC_skb_change_head FunctionId = 43
+ BPF_FUNC_xdp_adjust_head FunctionId = 44
+ BPF_FUNC_probe_read_str FunctionId = 45
+ BPF_FUNC_get_socket_cookie FunctionId = 46
+ BPF_FUNC_get_socket_uid FunctionId = 47
+ BPF_FUNC_set_hash FunctionId = 48
+ BPF_FUNC_setsockopt FunctionId = 49
+ BPF_FUNC_skb_adjust_room FunctionId = 50
+ BPF_FUNC_redirect_map FunctionId = 51
+ BPF_FUNC_sk_redirect_map FunctionId = 52
+ BPF_FUNC_sock_map_update FunctionId = 53
+ BPF_FUNC_xdp_adjust_meta FunctionId = 54
+ BPF_FUNC_perf_event_read_value FunctionId = 55
+ BPF_FUNC_perf_prog_read_value FunctionId = 56
+ BPF_FUNC_getsockopt FunctionId = 57
+ BPF_FUNC_override_return FunctionId = 58
+ BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59
+ BPF_FUNC_msg_redirect_map FunctionId = 60
+ BPF_FUNC_msg_apply_bytes FunctionId = 61
+ BPF_FUNC_msg_cork_bytes FunctionId = 62
+ BPF_FUNC_msg_pull_data FunctionId = 63
+ BPF_FUNC_bind FunctionId = 64
+ BPF_FUNC_xdp_adjust_tail FunctionId = 65
+ BPF_FUNC_skb_get_xfrm_state FunctionId = 66
+ BPF_FUNC_get_stack FunctionId = 67
+ BPF_FUNC_skb_load_bytes_relative FunctionId = 68
+ BPF_FUNC_fib_lookup FunctionId = 69
+ BPF_FUNC_sock_hash_update FunctionId = 70
+ BPF_FUNC_msg_redirect_hash FunctionId = 71
+ BPF_FUNC_sk_redirect_hash FunctionId = 72
+ BPF_FUNC_lwt_push_encap FunctionId = 73
+ BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74
+ BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75
+ BPF_FUNC_lwt_seg6_action FunctionId = 76
+ BPF_FUNC_rc_repeat FunctionId = 77
+ BPF_FUNC_rc_keydown FunctionId = 78
+ BPF_FUNC_skb_cgroup_id FunctionId = 79
+ BPF_FUNC_get_current_cgroup_id FunctionId = 80
+ BPF_FUNC_get_local_storage FunctionId = 81
+ BPF_FUNC_sk_select_reuseport FunctionId = 82
+ BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83
+ BPF_FUNC_sk_lookup_tcp FunctionId = 84
+ BPF_FUNC_sk_lookup_udp FunctionId = 85
+ BPF_FUNC_sk_release FunctionId = 86
+ BPF_FUNC_map_push_elem FunctionId = 87
+ BPF_FUNC_map_pop_elem FunctionId = 88
+ BPF_FUNC_map_peek_elem FunctionId = 89
+ BPF_FUNC_msg_push_data FunctionId = 90
+ BPF_FUNC_msg_pop_data FunctionId = 91
+ BPF_FUNC_rc_pointer_rel FunctionId = 92
+ BPF_FUNC_spin_lock FunctionId = 93
+ BPF_FUNC_spin_unlock FunctionId = 94
+ BPF_FUNC_sk_fullsock FunctionId = 95
+ BPF_FUNC_tcp_sock FunctionId = 96
+ BPF_FUNC_skb_ecn_set_ce FunctionId = 97
+ BPF_FUNC_get_listener_sock FunctionId = 98
+ BPF_FUNC_skc_lookup_tcp FunctionId = 99
+ BPF_FUNC_tcp_check_syncookie FunctionId = 100
+ BPF_FUNC_sysctl_get_name FunctionId = 101
+ BPF_FUNC_sysctl_get_current_value FunctionId = 102
+ BPF_FUNC_sysctl_get_new_value FunctionId = 103
+ BPF_FUNC_sysctl_set_new_value FunctionId = 104
+ BPF_FUNC_strtol FunctionId = 105
+ BPF_FUNC_strtoul FunctionId = 106
+ BPF_FUNC_sk_storage_get FunctionId = 107
+ BPF_FUNC_sk_storage_delete FunctionId = 108
+ BPF_FUNC_send_signal FunctionId = 109
+ BPF_FUNC_tcp_gen_syncookie FunctionId = 110
+ BPF_FUNC_skb_output FunctionId = 111
+ BPF_FUNC_probe_read_user FunctionId = 112
+ BPF_FUNC_probe_read_kernel FunctionId = 113
+ BPF_FUNC_probe_read_user_str FunctionId = 114
+ BPF_FUNC_probe_read_kernel_str FunctionId = 115
+ BPF_FUNC_tcp_send_ack FunctionId = 116
+ BPF_FUNC_send_signal_thread FunctionId = 117
+ BPF_FUNC_jiffies64 FunctionId = 118
+ BPF_FUNC_read_branch_records FunctionId = 119
+ BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120
+ BPF_FUNC_xdp_output FunctionId = 121
+ BPF_FUNC_get_netns_cookie FunctionId = 122
+ BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123
+ BPF_FUNC_sk_assign FunctionId = 124
+ BPF_FUNC_ktime_get_boot_ns FunctionId = 125
+ BPF_FUNC_seq_printf FunctionId = 126
+ BPF_FUNC_seq_write FunctionId = 127
+ BPF_FUNC_sk_cgroup_id FunctionId = 128
+ BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129
+ BPF_FUNC_ringbuf_output FunctionId = 130
+ BPF_FUNC_ringbuf_reserve FunctionId = 131
+ BPF_FUNC_ringbuf_submit FunctionId = 132
+ BPF_FUNC_ringbuf_discard FunctionId = 133
+ BPF_FUNC_ringbuf_query FunctionId = 134
+ BPF_FUNC_csum_level FunctionId = 135
+ BPF_FUNC_skc_to_tcp6_sock FunctionId = 136
+ BPF_FUNC_skc_to_tcp_sock FunctionId = 137
+ BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138
+ BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139
+ BPF_FUNC_skc_to_udp6_sock FunctionId = 140
+ BPF_FUNC_get_task_stack FunctionId = 141
+ BPF_FUNC_load_hdr_opt FunctionId = 142
+ BPF_FUNC_store_hdr_opt FunctionId = 143
+ BPF_FUNC_reserve_hdr_opt FunctionId = 144
+ BPF_FUNC_inode_storage_get FunctionId = 145
+ BPF_FUNC_inode_storage_delete FunctionId = 146
+ BPF_FUNC_d_path FunctionId = 147
+ BPF_FUNC_copy_from_user FunctionId = 148
+ BPF_FUNC_snprintf_btf FunctionId = 149
+ BPF_FUNC_seq_printf_btf FunctionId = 150
+ BPF_FUNC_skb_cgroup_classid FunctionId = 151
+ BPF_FUNC_redirect_neigh FunctionId = 152
+ BPF_FUNC_per_cpu_ptr FunctionId = 153
+ BPF_FUNC_this_cpu_ptr FunctionId = 154
+ BPF_FUNC_redirect_peer FunctionId = 155
+ BPF_FUNC_task_storage_get FunctionId = 156
+ BPF_FUNC_task_storage_delete FunctionId = 157
+ BPF_FUNC_get_current_task_btf FunctionId = 158
+ BPF_FUNC_bprm_opts_set FunctionId = 159
+ BPF_FUNC_ktime_get_coarse_ns FunctionId = 160
+ BPF_FUNC_ima_inode_hash FunctionId = 161
+ BPF_FUNC_sock_from_file FunctionId = 162
+ BPF_FUNC_check_mtu FunctionId = 163
+ BPF_FUNC_for_each_map_elem FunctionId = 164
+ BPF_FUNC_snprintf FunctionId = 165
+ BPF_FUNC_sys_bpf FunctionId = 166
+ BPF_FUNC_btf_find_by_name_kind FunctionId = 167
+ BPF_FUNC_sys_close FunctionId = 168
+ BPF_FUNC_timer_init FunctionId = 169
+ BPF_FUNC_timer_set_callback FunctionId = 170
+ BPF_FUNC_timer_start FunctionId = 171
+ BPF_FUNC_timer_cancel FunctionId = 172
+ BPF_FUNC_get_func_ip FunctionId = 173
+ BPF_FUNC_get_attach_cookie FunctionId = 174
+ BPF_FUNC_task_pt_regs FunctionId = 175
+ BPF_FUNC_get_branch_snapshot FunctionId = 176
+ BPF_FUNC_trace_vprintk FunctionId = 177
+ BPF_FUNC_skc_to_unix_sock FunctionId = 178
+ BPF_FUNC_kallsyms_lookup_name FunctionId = 179
+ BPF_FUNC_find_vma FunctionId = 180
+ BPF_FUNC_loop FunctionId = 181
+ BPF_FUNC_strncmp FunctionId = 182
+ BPF_FUNC_get_func_arg FunctionId = 183
+ BPF_FUNC_get_func_ret FunctionId = 184
+ BPF_FUNC_get_func_arg_cnt FunctionId = 185
+ BPF_FUNC_get_retval FunctionId = 186
+ BPF_FUNC_set_retval FunctionId = 187
+ BPF_FUNC_xdp_get_buff_len FunctionId = 188
+ BPF_FUNC_xdp_load_bytes FunctionId = 189
+ BPF_FUNC_xdp_store_bytes FunctionId = 190
+ BPF_FUNC_copy_from_user_task FunctionId = 191
+ BPF_FUNC_skb_set_tstamp FunctionId = 192
+ BPF_FUNC_ima_file_hash FunctionId = 193
+ BPF_FUNC_kptr_xchg FunctionId = 194
+ BPF_FUNC_map_lookup_percpu_elem FunctionId = 195
+ BPF_FUNC_skc_to_mptcp_sock FunctionId = 196
+ BPF_FUNC_dynptr_from_mem FunctionId = 197
+ BPF_FUNC_ringbuf_reserve_dynptr FunctionId = 198
+ BPF_FUNC_ringbuf_submit_dynptr FunctionId = 199
+ BPF_FUNC_ringbuf_discard_dynptr FunctionId = 200
+ BPF_FUNC_dynptr_read FunctionId = 201
+ BPF_FUNC_dynptr_write FunctionId = 202
+ BPF_FUNC_dynptr_data FunctionId = 203
+ BPF_FUNC_tcp_raw_gen_syncookie_ipv4 FunctionId = 204
+ BPF_FUNC_tcp_raw_gen_syncookie_ipv6 FunctionId = 205
+ BPF_FUNC_tcp_raw_check_syncookie_ipv4 FunctionId = 206
+ BPF_FUNC_tcp_raw_check_syncookie_ipv6 FunctionId = 207
+ BPF_FUNC_ktime_get_tai_ns FunctionId = 208
+ BPF_FUNC_user_ringbuf_drain FunctionId = 209
+ BPF_FUNC_cgrp_storage_get FunctionId = 210
+ BPF_FUNC_cgrp_storage_delete FunctionId = 211
+ __BPF_FUNC_MAX_ID FunctionId = 212
+)
+
+type HdrStartOff uint32
+
+const (
+ BPF_HDR_START_MAC HdrStartOff = 0
+ BPF_HDR_START_NET HdrStartOff = 1
+)
+
+type LinkType uint32
+
+const (
+ BPF_LINK_TYPE_UNSPEC LinkType = 0
+ BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1
+ BPF_LINK_TYPE_TRACING LinkType = 2
+ BPF_LINK_TYPE_CGROUP LinkType = 3
+ BPF_LINK_TYPE_ITER LinkType = 4
+ BPF_LINK_TYPE_NETNS LinkType = 5
+ BPF_LINK_TYPE_XDP LinkType = 6
+ BPF_LINK_TYPE_PERF_EVENT LinkType = 7
+ BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8
+ BPF_LINK_TYPE_STRUCT_OPS LinkType = 9
+ BPF_LINK_TYPE_NETFILTER LinkType = 10
+ BPF_LINK_TYPE_TCX LinkType = 11
+ BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12
+ BPF_LINK_TYPE_NETKIT LinkType = 13
+ __MAX_BPF_LINK_TYPE LinkType = 14
+)
+
+type MapType uint32
+
+const (
+ BPF_MAP_TYPE_UNSPEC MapType = 0
+ BPF_MAP_TYPE_HASH MapType = 1
+ BPF_MAP_TYPE_ARRAY MapType = 2
+ BPF_MAP_TYPE_PROG_ARRAY MapType = 3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4
+ BPF_MAP_TYPE_PERCPU_HASH MapType = 5
+ BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6
+ BPF_MAP_TYPE_STACK_TRACE MapType = 7
+ BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8
+ BPF_MAP_TYPE_LRU_HASH MapType = 9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10
+ BPF_MAP_TYPE_LPM_TRIE MapType = 11
+ BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12
+ BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13
+ BPF_MAP_TYPE_DEVMAP MapType = 14
+ BPF_MAP_TYPE_SOCKMAP MapType = 15
+ BPF_MAP_TYPE_CPUMAP MapType = 16
+ BPF_MAP_TYPE_XSKMAP MapType = 17
+ BPF_MAP_TYPE_SOCKHASH MapType = 18
+ BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19
+ BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21
+ BPF_MAP_TYPE_QUEUE MapType = 22
+ BPF_MAP_TYPE_STACK MapType = 23
+ BPF_MAP_TYPE_SK_STORAGE MapType = 24
+ BPF_MAP_TYPE_DEVMAP_HASH MapType = 25
+ BPF_MAP_TYPE_STRUCT_OPS MapType = 26
+ BPF_MAP_TYPE_RINGBUF MapType = 27
+ BPF_MAP_TYPE_INODE_STORAGE MapType = 28
+ BPF_MAP_TYPE_TASK_STORAGE MapType = 29
+ BPF_MAP_TYPE_BLOOM_FILTER MapType = 30
+ BPF_MAP_TYPE_USER_RINGBUF MapType = 31
+ BPF_MAP_TYPE_CGRP_STORAGE MapType = 32
+)
+
+type PerfEventType uint32
+
+const (
+ BPF_PERF_EVENT_UNSPEC PerfEventType = 0
+ BPF_PERF_EVENT_UPROBE PerfEventType = 1
+ BPF_PERF_EVENT_URETPROBE PerfEventType = 2
+ BPF_PERF_EVENT_KPROBE PerfEventType = 3
+ BPF_PERF_EVENT_KRETPROBE PerfEventType = 4
+ BPF_PERF_EVENT_TRACEPOINT PerfEventType = 5
+ BPF_PERF_EVENT_EVENT PerfEventType = 6
+)
+
+type ProgType uint32
+
+const (
+ BPF_PROG_TYPE_UNSPEC ProgType = 0
+ BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1
+ BPF_PROG_TYPE_KPROBE ProgType = 2
+ BPF_PROG_TYPE_SCHED_CLS ProgType = 3
+ BPF_PROG_TYPE_SCHED_ACT ProgType = 4
+ BPF_PROG_TYPE_TRACEPOINT ProgType = 5
+ BPF_PROG_TYPE_XDP ProgType = 6
+ BPF_PROG_TYPE_PERF_EVENT ProgType = 7
+ BPF_PROG_TYPE_CGROUP_SKB ProgType = 8
+ BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9
+ BPF_PROG_TYPE_LWT_IN ProgType = 10
+ BPF_PROG_TYPE_LWT_OUT ProgType = 11
+ BPF_PROG_TYPE_LWT_XMIT ProgType = 12
+ BPF_PROG_TYPE_SOCK_OPS ProgType = 13
+ BPF_PROG_TYPE_SK_SKB ProgType = 14
+ BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15
+ BPF_PROG_TYPE_SK_MSG ProgType = 16
+ BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18
+ BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19
+ BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20
+ BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21
+ BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22
+ BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24
+ BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25
+ BPF_PROG_TYPE_TRACING ProgType = 26
+ BPF_PROG_TYPE_STRUCT_OPS ProgType = 27
+ BPF_PROG_TYPE_EXT ProgType = 28
+ BPF_PROG_TYPE_LSM ProgType = 29
+ BPF_PROG_TYPE_SK_LOOKUP ProgType = 30
+ BPF_PROG_TYPE_SYSCALL ProgType = 31
+ BPF_PROG_TYPE_NETFILTER ProgType = 32
+)
+
+type RetCode uint32
+
+const (
+ BPF_OK RetCode = 0
+ BPF_DROP RetCode = 2
+ BPF_REDIRECT RetCode = 7
+ BPF_LWT_REROUTE RetCode = 128
+ BPF_FLOW_DISSECTOR_CONTINUE RetCode = 129
+)
+
+type SkAction uint32
+
+const (
+ SK_DROP SkAction = 0
+ SK_PASS SkAction = 1
+)
+
+type StackBuildIdStatus uint32
+
+const (
+ BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0
+ BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1
+ BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2
+)
+
+type StatsType uint32
+
+const (
+ BPF_STATS_RUN_TIME StatsType = 0
+)
+
+type TcxActionBase int32
+
+const (
+ TCX_NEXT TcxActionBase = -1
+ TCX_PASS TcxActionBase = 0
+ TCX_DROP TcxActionBase = 2
+ TCX_REDIRECT TcxActionBase = 7
+)
+
+type XdpAction uint32
+
+const (
+ XDP_ABORTED XdpAction = 0
+ XDP_DROP XdpAction = 1
+ XDP_PASS XdpAction = 2
+ XDP_TX XdpAction = 3
+ XDP_REDIRECT XdpAction = 4
+)
+
+type BtfInfo struct {
+ Btf Pointer
+ BtfSize uint32
+ Id BTFID
+ Name Pointer
+ NameLen uint32
+ KernelBtf uint32
+}
+
+type FuncInfo struct {
+ InsnOff uint32
+ TypeId uint32
+}
+
+type LineInfo struct {
+ InsnOff uint32
+ FileNameOff uint32
+ LineOff uint32
+ LineCol uint32
+}
+
+type LinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ Extra [48]uint8
+}
+
+type MapInfo struct {
+ Type uint32
+ Id uint32
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+ MapFlags MapFlags
+ Name ObjName
+ Ifindex uint32
+ BtfVmlinuxValueTypeId TypeID
+ NetnsDev uint64
+ NetnsIno uint64
+ BtfId uint32
+ BtfKeyTypeId TypeID
+ BtfValueTypeId TypeID
+ _ [4]byte
+ MapExtra uint64
+}
+
+type ProgInfo struct {
+ Type uint32
+ Id uint32
+ Tag [8]uint8
+ JitedProgLen uint32
+ XlatedProgLen uint32
+ JitedProgInsns uint64
+ XlatedProgInsns Pointer
+ LoadTime uint64
+ CreatedByUid uint32
+ NrMapIds uint32
+ MapIds Pointer
+ Name ObjName
+ Ifindex uint32
+ _ [4]byte /* unsupported bitfield */
+ NetnsDev uint64
+ NetnsIno uint64
+ NrJitedKsyms uint32
+ NrJitedFuncLens uint32
+ JitedKsyms uint64
+ JitedFuncLens uint64
+ BtfId BTFID
+ FuncInfoRecSize uint32
+ FuncInfo Pointer
+ NrFuncInfo uint32
+ NrLineInfo uint32
+ LineInfo Pointer
+ JitedLineInfo uint64
+ NrJitedLineInfo uint32
+ LineInfoRecSize uint32
+ JitedLineInfoRecSize uint32
+ NrProgTags uint32
+ ProgTags uint64
+ RunTimeNs uint64
+ RunCnt uint64
+ RecursionMisses uint64
+ VerifiedInsns uint32
+ AttachBtfObjId BTFID
+ AttachBtfId TypeID
+ _ [4]byte
+}
+
+type SkLookup struct {
+ Cookie uint64
+ Family uint32
+ Protocol uint32
+ RemoteIp4 [4]uint8
+ RemoteIp6 [16]uint8
+ RemotePort uint16
+ _ [2]byte
+ LocalIp4 [4]uint8
+ LocalIp6 [16]uint8
+ LocalPort uint32
+ IngressIfindex uint32
+ _ [4]byte
+}
+
+type XdpMd struct {
+ Data uint32
+ DataEnd uint32
+ DataMeta uint32
+ IngressIfindex uint32
+ RxQueueIndex uint32
+ EgressIfindex uint32
+}
+
+type BtfGetFdByIdAttr struct{ Id uint32 }
+
+func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type BtfGetNextIdAttr struct {
+ Id BTFID
+ NextId BTFID
+}
+
+func BtfGetNextId(attr *BtfGetNextIdAttr) error {
+ _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type BtfLoadAttr struct {
+ Btf Pointer
+ BtfLogBuf Pointer
+ BtfSize uint32
+ BtfLogSize uint32
+ BtfLogLevel uint32
+ BtfLogTrueSize uint32
+}
+
+func BtfLoad(attr *BtfLoadAttr) (*FD, error) {
+ fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type EnableStatsAttr struct{ Type uint32 }
+
+func EnableStats(attr *EnableStatsAttr) (*FD, error) {
+ fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type IterCreateAttr struct {
+ LinkFd uint32
+ Flags uint32
+}
+
+func IterCreate(attr *IterCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ TargetBtfId TypeID
+ _ [44]byte
+}
+
+func LinkCreate(attr *LinkCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateIterAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ IterInfo Pointer
+ IterInfoLen uint32
+ _ [36]byte
+}
+
+func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateKprobeMultiAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ KprobeMultiFlags uint32
+ Count uint32
+ Syms Pointer
+ Addrs Pointer
+ Cookies Pointer
+ _ [16]byte
+}
+
+func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateNetfilterAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ Pf uint32
+ Hooknum uint32
+ Priority int32
+ NetfilterFlags uint32
+ _ [32]byte
+}
+
+func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateNetkitAttr struct {
+ ProgFd uint32
+ TargetIfindex uint32
+ AttachType AttachType
+ Flags uint32
+ RelativeFdOrId uint32
+ _ [4]byte
+ ExpectedRevision uint64
+ _ [32]byte
+}
+
+func LinkCreateNetkit(attr *LinkCreateNetkitAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreatePerfEventAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ BpfCookie uint64
+ _ [40]byte
+}
+
+func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateTcxAttr struct {
+ ProgFd uint32
+ TargetIfindex uint32
+ AttachType AttachType
+ Flags uint32
+ RelativeFdOrId uint32
+ _ [4]byte
+ ExpectedRevision uint64
+ _ [32]byte
+}
+
+func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateTracingAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ TargetBtfId BTFID
+ _ [4]byte
+ Cookie uint64
+ _ [32]byte
+}
+
+func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateUprobeMultiAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ Path Pointer
+ Offsets Pointer
+ RefCtrOffsets Pointer
+ Cookies Pointer
+ Count uint32
+ UprobeMultiFlags uint32
+ Pid uint32
+ _ [4]byte
+}
+
+func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkGetFdByIdAttr struct{ Id LinkID }
+
+func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkGetNextIdAttr struct {
+ Id LinkID
+ NextId LinkID
+}
+
+func LinkGetNextId(attr *LinkGetNextIdAttr) error {
+ _, err := BPF(BPF_LINK_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type LinkUpdateAttr struct {
+ LinkFd uint32
+ NewProgFd uint32
+ Flags uint32
+ OldProgFd uint32
+}
+
+func LinkUpdate(attr *LinkUpdateAttr) error {
+ _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapCreateAttr struct {
+ MapType MapType
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+ MapFlags MapFlags
+ InnerMapFd uint32
+ NumaNode uint32
+ MapName ObjName
+ MapIfindex uint32
+ BtfFd uint32
+ BtfKeyTypeId TypeID
+ BtfValueTypeId TypeID
+ BtfVmlinuxValueTypeId TypeID
+ MapExtra uint64
+}
+
+func MapCreate(attr *MapCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type MapDeleteBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapDeleteBatch(attr *MapDeleteBatchAttr) error {
+ _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapDeleteElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapDeleteElem(attr *MapDeleteElemAttr) error {
+ _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapFreezeAttr struct{ MapFd uint32 }
+
+func MapFreeze(attr *MapFreezeAttr) error {
+ _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapGetFdByIdAttr struct{ Id uint32 }
+
+func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type MapGetNextIdAttr struct {
+ Id uint32
+ NextId uint32
+}
+
+func MapGetNextId(attr *MapGetNextIdAttr) error {
+ _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapGetNextKeyAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ NextKey Pointer
+}
+
+func MapGetNextKey(attr *MapGetNextKeyAttr) error {
+ _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupAndDeleteBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupAndDeleteElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapLookupBatch(attr *MapLookupBatchAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapLookupElem(attr *MapLookupElemAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapUpdateBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapUpdateBatch(attr *MapUpdateBatchAttr) error {
+ _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapUpdateElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapUpdateElem(attr *MapUpdateElemAttr) error {
+ _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ObjGetAttr struct {
+ Pathname Pointer
+ BpfFd uint32
+ FileFlags uint32
+ PathFd int32
+ _ [4]byte
+}
+
+func ObjGet(attr *ObjGetAttr) (*FD, error) {
+ fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ObjGetInfoByFdAttr struct {
+ BpfFd uint32
+ InfoLen uint32
+ Info Pointer
+}
+
+func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error {
+ _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ObjPinAttr struct {
+ Pathname Pointer
+ BpfFd uint32
+ FileFlags uint32
+ PathFd int32
+ _ [4]byte
+}
+
+func ObjPin(attr *ObjPinAttr) error {
+ _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgAttachAttr struct {
+ TargetFdOrIfindex uint32
+ AttachBpfFd uint32
+ AttachType uint32
+ AttachFlags uint32
+ ReplaceBpfFd uint32
+ RelativeFdOrId uint32
+ ExpectedRevision uint64
+}
+
+func ProgAttach(attr *ProgAttachAttr) error {
+ _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgBindMapAttr struct {
+ ProgFd uint32
+ MapFd uint32
+ Flags uint32
+}
+
+func ProgBindMap(attr *ProgBindMapAttr) error {
+ _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgDetachAttr struct {
+ TargetFdOrIfindex uint32
+ AttachBpfFd uint32
+ AttachType uint32
+ AttachFlags uint32
+ _ [4]byte
+ RelativeFdOrId uint32
+ ExpectedRevision uint64
+}
+
+func ProgDetach(attr *ProgDetachAttr) error {
+ _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgGetFdByIdAttr struct{ Id uint32 }
+
+func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ProgGetNextIdAttr struct {
+ Id uint32
+ NextId uint32
+}
+
+func ProgGetNextId(attr *ProgGetNextIdAttr) error {
+ _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgLoadAttr struct {
+ ProgType ProgType
+ InsnCnt uint32
+ Insns Pointer
+ License Pointer
+ LogLevel LogLevel
+ LogSize uint32
+ LogBuf Pointer
+ KernVersion uint32
+ ProgFlags uint32
+ ProgName ObjName
+ ProgIfindex uint32
+ ExpectedAttachType AttachType
+ ProgBtfFd uint32
+ FuncInfoRecSize uint32
+ FuncInfo Pointer
+ FuncInfoCnt uint32
+ LineInfoRecSize uint32
+ LineInfo Pointer
+ LineInfoCnt uint32
+ AttachBtfId TypeID
+ AttachBtfObjFd uint32
+ CoreReloCnt uint32
+ FdArray Pointer
+ CoreRelos Pointer
+ CoreReloRecSize uint32
+ LogTrueSize uint32
+}
+
+func ProgLoad(attr *ProgLoadAttr) (*FD, error) {
+ fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ProgQueryAttr struct {
+ TargetFdOrIfindex uint32
+ AttachType AttachType
+ QueryFlags uint32
+ AttachFlags uint32
+ ProgIds Pointer
+ Count uint32
+ _ [4]byte
+ ProgAttachFlags Pointer
+ LinkIds Pointer
+ LinkAttachFlags Pointer
+ Revision uint64
+}
+
+func ProgQuery(attr *ProgQueryAttr) error {
+ _, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgRunAttr struct {
+ ProgFd uint32
+ Retval uint32
+ DataSizeIn uint32
+ DataSizeOut uint32
+ DataIn Pointer
+ DataOut Pointer
+ Repeat uint32
+ Duration uint32
+ CtxSizeIn uint32
+ CtxSizeOut uint32
+ CtxIn Pointer
+ CtxOut Pointer
+ Flags uint32
+ Cpu uint32
+ BatchSize uint32
+ _ [4]byte
+}
+
+func ProgRun(attr *ProgRunAttr) error {
+ _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type RawTracepointOpenAttr struct {
+ Name Pointer
+ ProgFd uint32
+ _ [4]byte
+}
+
+func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) {
+ fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type CgroupLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ CgroupId uint64
+ AttachType AttachType
+ _ [36]byte
+}
+
+type IterLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ TargetName Pointer
+ TargetNameLen uint32
+}
+
+type KprobeLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ PerfEventType PerfEventType
+ _ [4]byte
+ FuncName Pointer
+ NameLen uint32
+ Offset uint32
+ Addr uint64
+ Missed uint64
+ _ [8]byte
+}
+
+type KprobeMultiLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ Addrs Pointer
+ Count uint32
+ Flags uint32
+ Missed uint64
+ _ [24]byte
+}
+
+type NetNsLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ NetnsIno uint32
+ AttachType AttachType
+ _ [40]byte
+}
+
+type NetfilterLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ Pf uint32
+ Hooknum uint32
+ Priority int32
+ Flags uint32
+ _ [32]byte
+}
+
+type NetkitLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ Ifindex uint32
+ AttachType AttachType
+ _ [40]byte
+}
+
+type PerfEventLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ PerfEventType PerfEventType
+}
+
+type RawTracepointLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ TpName Pointer
+ TpNameLen uint32
+ _ [36]byte
+}
+
+type TcxLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ Ifindex uint32
+ AttachType AttachType
+ _ [40]byte
+}
+
+type TracingLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ AttachType AttachType
+ TargetObjId uint32
+ TargetBtfId TypeID
+ _ [36]byte
+}
+
+type XDPLinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ Ifindex uint32
+ _ [44]byte
+}
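
Each wrapper above mirrors one command of the kernel's union bpf_attr: the caller fills the corresponding *Attr struct and the helper passes it to the package's generic BPF() syscall entry point, returning a file descriptor or an error. As a hedged illustration only (not part of the vendored code), a caller inside this internal/sys package could create a one-element array map like this:

// Sketch, not vendored code: assumes it lives in the internal/sys package
// on Linux. Map type 2 is BPF_MAP_TYPE_ARRAY in the kernel UAPI.
func createScratchArray() (*FD, error) {
	attr := &MapCreateAttr{
		MapType:    2, // BPF_MAP_TYPE_ARRAY
		KeySize:    4, // array keys are always 4-byte indices
		ValueSize:  8,
		MaxEntries: 1,
	}
	return MapCreate(attr)
}
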
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go
new file mode 100644
index 000000000..d184ea196
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go
@@ -0,0 +1,83 @@
+package sysenc
+
+import (
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type Buffer struct {
+ ptr unsafe.Pointer
+ // Size of the buffer. syscallPointerOnly if created from UnsafeBuffer or when using
+ // zero-copy unmarshaling.
+ size int
+}
+
+const syscallPointerOnly = -1
+
+func newBuffer(buf []byte) Buffer {
+ if len(buf) == 0 {
+ return Buffer{}
+ }
+ return Buffer{unsafe.Pointer(&buf[0]), len(buf)}
+}
+
+// UnsafeBuffer constructs a Buffer for zero-copy unmarshaling.
+//
+// [Pointer] is the only valid method to call on such a Buffer.
+// Use [SyscallBuffer] instead if possible.
+func UnsafeBuffer(ptr unsafe.Pointer) Buffer {
+ return Buffer{ptr, syscallPointerOnly}
+}
+
+// SyscallOutput prepares a Buffer for a syscall to write into.
+//
+// size is the length of the desired buffer in bytes.
+// The buffer may point at the underlying memory of dst, in which case [Unmarshal]
+// becomes a no-op.
+//
+// The contents of the buffer are undefined and may be non-zero.
+func SyscallOutput(dst any, size int) Buffer {
+ if dstBuf := unsafeBackingMemory(dst); len(dstBuf) == size {
+ buf := newBuffer(dstBuf)
+ buf.size = syscallPointerOnly
+ return buf
+ }
+
+ return newBuffer(make([]byte, size))
+}
+
+// CopyTo copies the buffer into dst.
+//
+// Returns the number of copied bytes.
+func (b Buffer) CopyTo(dst []byte) int {
+ return copy(dst, b.unsafeBytes())
+}
+
+// AppendTo appends the buffer onto dst.
+func (b Buffer) AppendTo(dst []byte) []byte {
+ return append(dst, b.unsafeBytes()...)
+}
+
+// Pointer returns the location where a syscall should write.
+func (b Buffer) Pointer() sys.Pointer {
+	// NB: This deliberately ignores b.size to support zero-copy
+	// marshaling / unmarshaling using unsafe.Pointer.
+ return sys.NewPointer(b.ptr)
+}
+
+// Unmarshal the buffer into the provided value.
+func (b Buffer) Unmarshal(data any) error {
+ if b.size == syscallPointerOnly {
+ return nil
+ }
+
+ return Unmarshal(data, b.unsafeBytes())
+}
+
+func (b Buffer) unsafeBytes() []byte {
+ if b.size == syscallPointerOnly {
+ return nil
+ }
+ return unsafe.Slice((*byte)(b.ptr), b.size)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go
new file mode 100644
index 000000000..676ad98ba
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go
@@ -0,0 +1,3 @@
+// Package sysenc provides efficient conversion of Go values to system
+// call interfaces.
+package sysenc
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go
new file mode 100644
index 000000000..52d111e7a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go
@@ -0,0 +1,41 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found at https://go.dev/LICENSE.
+
+package sysenc
+
+import (
+ "reflect"
+ "sync"
+)
+
+var hasUnexportedFieldsCache sync.Map // map[reflect.Type]bool
+
+func hasUnexportedFields(typ reflect.Type) bool {
+ switch typ.Kind() {
+ case reflect.Slice, reflect.Array, reflect.Pointer:
+ return hasUnexportedFields(typ.Elem())
+
+ case reflect.Struct:
+ if unexported, ok := hasUnexportedFieldsCache.Load(typ); ok {
+ return unexported.(bool)
+ }
+
+ unexported := false
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ field := typ.Field(i)
+ // Package binary allows _ fields but always writes zeroes into them.
+ if (!field.IsExported() && field.Name != "_") || hasUnexportedFields(field.Type) {
+ unexported = true
+ break
+ }
+ }
+
+ hasUnexportedFieldsCache.Store(typ, unexported)
+ return unexported
+
+ default:
+ // NB: It's not clear what this means for Chan and so on.
+ return false
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go
new file mode 100644
index 000000000..0026af8f2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go
@@ -0,0 +1,177 @@
+package sysenc
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+ "slices"
+ "sync"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+)
+
+// Marshal turns data into a byte slice using the system's native endianness.
+//
+// If possible, avoids allocations by directly using the backing memory
+// of data. This means that the variable must not be modified for the lifetime
+// of the returned [Buffer].
+//
+// Returns an error if the data can't be turned into a byte slice according to
+// the behaviour of [binary.Write].
+func Marshal(data any, size int) (Buffer, error) {
+ if data == nil {
+ return Buffer{}, errors.New("can't marshal a nil value")
+ }
+
+ var buf []byte
+ var err error
+ switch value := data.(type) {
+ case encoding.BinaryMarshaler:
+ buf, err = value.MarshalBinary()
+ case string:
+ buf = unsafe.Slice(unsafe.StringData(value), len(value))
+ case []byte:
+ buf = value
+ case int16:
+ buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), uint16(value))
+ case uint16:
+ buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), value)
+ case int32:
+ buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), uint32(value))
+ case uint32:
+ buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), value)
+ case int64:
+ buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), uint64(value))
+ case uint64:
+ buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), value)
+ default:
+ if buf := unsafeBackingMemory(data); len(buf) == size {
+ return newBuffer(buf), nil
+ }
+
+ wr := internal.NewBuffer(make([]byte, 0, size))
+ defer internal.PutBuffer(wr)
+
+ err = binary.Write(wr, internal.NativeEndian, value)
+ buf = wr.Bytes()
+ }
+ if err != nil {
+ return Buffer{}, err
+ }
+
+ if len(buf) != size {
+ return Buffer{}, fmt.Errorf("%T doesn't marshal to %d bytes", data, size)
+ }
+
+ return newBuffer(buf), nil
+}
+
+var bytesReaderPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Reader)
+ },
+}
+
+// Unmarshal a byte slice in the system's native endianness into data.
+//
+// Returns an error if buf can't be unmarshalled according to the behaviour
+// of [binary.Read].
+func Unmarshal(data interface{}, buf []byte) error {
+ switch value := data.(type) {
+ case encoding.BinaryUnmarshaler:
+ return value.UnmarshalBinary(buf)
+
+ case *string:
+ *value = string(buf)
+ return nil
+
+ case *[]byte:
+ // Backwards compat: unmarshaling into a slice replaces the whole slice.
+ *value = slices.Clone(buf)
+ return nil
+
+ default:
+ if dataBuf := unsafeBackingMemory(data); len(dataBuf) == len(buf) {
+ copy(dataBuf, buf)
+ return nil
+ }
+
+ rd := bytesReaderPool.Get().(*bytes.Reader)
+ defer bytesReaderPool.Put(rd)
+
+ rd.Reset(buf)
+
+ if err := binary.Read(rd, internal.NativeEndian, value); err != nil {
+ return err
+ }
+
+ if rd.Len() != 0 {
+ return fmt.Errorf("unmarshaling %T doesn't consume all data", data)
+ }
+
+ return nil
+ }
+}
+
+// unsafeBackingMemory returns the backing memory of data if it can be used
+// instead of calling into package binary.
+//
+// Returns nil if the value is not a pointer or a slice, or if it contains
+// padding or unexported fields.
+func unsafeBackingMemory(data any) []byte {
+ if data == nil {
+ return nil
+ }
+
+ value := reflect.ValueOf(data)
+ var valueSize int
+ switch value.Kind() {
+ case reflect.Pointer:
+ if value.IsNil() {
+ return nil
+ }
+
+ if elemType := value.Type().Elem(); elemType.Kind() != reflect.Slice {
+ valueSize = int(elemType.Size())
+ break
+ }
+
+ // We're dealing with a pointer to a slice. Dereference and
+ // handle it like a regular slice.
+ value = value.Elem()
+ fallthrough
+
+ case reflect.Slice:
+ valueSize = int(value.Type().Elem().Size()) * value.Len()
+
+ default:
+ // Prevent Value.UnsafePointer from panicking.
+ return nil
+ }
+
+ // Some nil pointer types currently crash binary.Size. Call it after our own
+ // code so that the panic isn't reachable.
+ // See https://github.com/golang/go/issues/60892
+ if size := binary.Size(data); size == -1 || size != valueSize {
+ // The type contains padding or unsupported types.
+ return nil
+ }
+
+ if hasUnexportedFields(reflect.TypeOf(data)) {
+ return nil
+ }
+
+ // Reinterpret the pointer as a byte slice. This violates the unsafe.Pointer
+ // rules because it's very unlikely that the source data has "an equivalent
+ // memory layout". However, we can make it safe-ish because of the
+ // following reasons:
+ // - There is no alignment mismatch since we cast to a type with an
+ // alignment of 1.
+ // - There are no pointers in the source type so we don't upset the GC.
+ // - The length is verified at runtime.
+ return unsafe.Slice((*byte)(value.UnsafePointer()), valueSize)
+}
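
The zero-copy fast path in Marshal, Unmarshal and unsafeBackingMemory only applies when binary.Size of the value equals its in-memory size, which rules out padded structs and unexported fields. The following stand-alone sketch (standard library only, not part of this change) shows the size comparison that decides between the two paths:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// Packed has no padding: its encoded size equals its in-memory size, so a
// sysenc-style marshaler could reuse the backing memory directly.
type Packed struct {
	A uint32
	B uint32
}

// Padded has 4 bytes of padding after A, so the sizes differ and the slower
// encoding/binary path would be taken instead.
type Padded struct {
	A uint32
	B uint64
}

func main() {
	fmt.Println(binary.Size(Packed{}), unsafe.Sizeof(Packed{})) // 8 8
	fmt.Println(binary.Size(Padded{}), unsafe.Sizeof(Padded{})) // 12 16
}
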
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
new file mode 100644
index 000000000..897740fec
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
@@ -0,0 +1,360 @@
+package tracefs
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ ErrInvalidInput = errors.New("invalid input")
+
+ ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes")
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -type=ProbeType -linecomment
+
+type ProbeType uint8
+
+const (
+ Kprobe ProbeType = iota // kprobe
+ Uprobe // uprobe
+)
+
+func (pt ProbeType) eventsFile() (*os.File, error) {
+ path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String()))
+ if err != nil {
+ return nil, err
+ }
+
+ return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666)
+}
+
+type ProbeArgs struct {
+ Type ProbeType
+ Symbol, Group, Path string
+ Offset, RefCtrOffset, Cookie uint64
+ Pid, RetprobeMaxActive int
+ Ret bool
+}
+
+// RandomGroup generates a pseudorandom string for use as a tracefs group name.
+// Returns an error when the output string would exceed 63 characters (a kernel
+// limitation), when rand.Read() fails, or when prefix contains characters not
+// accepted by validIdentifier.
+func RandomGroup(prefix string) (string, error) {
+ if !validIdentifier(prefix) {
+ return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput)
+ }
+
+ b := make([]byte, 8)
+ if _, err := rand.Read(b); err != nil {
+ return "", fmt.Errorf("reading random bytes: %w", err)
+ }
+
+ group := fmt.Sprintf("%s_%x", prefix, b)
+ if len(group) > 63 {
+ return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput)
+ }
+
+ return group, nil
+}
+
+// validIdentifier implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
+//
+// Trace event groups, names and kernel symbols must adhere to this set
+// of characters. Non-empty, first character must not be a number, all
+// characters must be alphanumeric or underscore.
+func validIdentifier(s string) bool {
+ if len(s) < 1 {
+ return false
+ }
+ for i, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c == '_':
+ case i > 0 && c >= '0' && c <= '9':
+
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+func sanitizeTracefsPath(path ...string) (string, error) {
+ base, err := getTracefsPath()
+ if err != nil {
+ return "", err
+ }
+ l := filepath.Join(path...)
+ p := filepath.Join(base, l)
+ if !strings.HasPrefix(p, base) {
+ return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput)
+ }
+ return p, nil
+}
+
+// getTracefsPath returns a correct path to the tracefs mount point.
+// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing,
+// but it may also be available at /sys/kernel/debug/tracing if debugfs is mounted.
+// The available tracefs paths depend on distribution choices.
+var getTracefsPath = sync.OnceValues(func() (string, error) {
+ for _, p := range []struct {
+ path string
+ fsType int64
+ }{
+ {"/sys/kernel/tracing", unix.TRACEFS_MAGIC},
+ {"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC},
+ // RHEL/CentOS
+ {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC},
+ } {
+ if fsType, err := internal.FSType(p.path); err == nil && fsType == p.fsType {
+ return p.path, nil
+ }
+ }
+
+ return "", errors.New("neither debugfs nor tracefs are mounted")
+})
+
+// sanitizeIdentifier replaces every character that is invalid for the tracefs API with an underscore.
+//
+// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(s, "_").
+func sanitizeIdentifier(s string) string {
+ var skip bool
+ return strings.Map(func(c rune) rune {
+ switch {
+ case c >= 'a' && c <= 'z',
+ c >= 'A' && c <= 'Z',
+ c >= '0' && c <= '9':
+ skip = false
+ return c
+
+ case skip:
+ return -1
+
+ default:
+ skip = true
+ return '_'
+ }
+ }, s)
+}
+
+// EventID reads a trace event's ID from tracefs given its group and name.
+// The kernel requires group and name to be alphanumeric or underscore.
+func EventID(group, name string) (uint64, error) {
+ if !validIdentifier(group) {
+ return 0, fmt.Errorf("invalid tracefs group: %q", group)
+ }
+
+ if !validIdentifier(name) {
+ return 0, fmt.Errorf("invalid tracefs name: %q", name)
+ }
+
+ path, err := sanitizeTracefsPath("events", group, name, "id")
+ if err != nil {
+ return 0, err
+ }
+ tid, err := internal.ReadUint64FromFile("%d\n", path)
+ if errors.Is(err, os.ErrNotExist) {
+ return 0, err
+ }
+ if err != nil {
+ return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
+ }
+
+ return tid, nil
+}
+
+func probePrefix(ret bool, maxActive int) string {
+ if ret {
+ if maxActive > 0 {
+ return fmt.Sprintf("r%d", maxActive)
+ }
+ return "r"
+ }
+ return "p"
+}
+
+// Event represents an entry in a tracefs probe events file.
+type Event struct {
+ typ ProbeType
+ group, name string
+ // event id allocated by the kernel. 0 if the event has already been removed.
+ id uint64
+}
+
+// NewEvent creates a new ephemeral trace event.
+//
+// Returns os.ErrNotExist if symbol is not a valid
+// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
+// if a probe with the same group and symbol already exists. Returns an error if
+// args.RetprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if
+// the kernel is too old to support kretprobe maxactive.
+func NewEvent(args ProbeArgs) (*Event, error) {
+ // Before attempting to create a trace event through tracefs,
+ // check if an event with the same group and name already exists.
+ // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
+ // entry, so we need to rely on reads for detecting uniqueness.
+ eventName := sanitizeIdentifier(args.Symbol)
+ _, err := EventID(args.Group, eventName)
+ if err == nil {
+ return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist)
+ }
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err)
+ }
+
+ // Open the kprobe_events file in tracefs.
+ f, err := args.Type.eventsFile()
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var pe, token string
+ switch args.Type {
+ case Kprobe:
+ // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
+ // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
+ // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
+ // -:[GRP/]EVENT : Clear a probe
+ //
+ // Some examples:
+ // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
+ // p:ebpf_5678/p_my_kprobe __x64_sys_execve
+ //
+ // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
+ // kernel default to NR_CPUS. This is desired in most eBPF cases since
+ // subsampling or rate limiting logic can be more accurately implemented in
+ // the eBPF program itself.
+ // See Documentation/kprobes.txt for more details.
+ if args.RetprobeMaxActive != 0 && !args.Ret {
+ return nil, ErrInvalidMaxActive
+ }
+ token = KprobeToken(args)
+ pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token)
+ case Uprobe:
+ // The uprobe_events syntax is as follows:
+ // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
+ // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
+ // -:[GRP/]EVENT : Clear a probe
+ //
+ // Some examples:
+ // r:ebpf_1234/readline /bin/bash:0x12345
+ // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
+ //
+ // See Documentation/trace/uprobetracer.txt for more details.
+ if args.RetprobeMaxActive != 0 {
+ return nil, ErrInvalidMaxActive
+ }
+ token = UprobeToken(args)
+ pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token)
+ }
+ _, err = f.WriteString(pe)
+
+ // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+ // when trying to create a retprobe for a missing symbol.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("token %s: not found: %w", token, err)
+ }
+ // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+ // to an invalid insn boundary. The exact conditions that trigger this error are
+ // arch specific however.
+ if errors.Is(err, syscall.EILSEQ) {
+ return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+ }
+ // ERANGE is returned when the `SYM[+offs]` token is too big and cannot
+ // be resolved.
+ if errors.Is(err, syscall.ERANGE) {
+ return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err)
+ }
+
+ // Get the newly-created trace event's id.
+ tid, err := EventID(args.Group, eventName)
+ if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) {
+ // Kernels < 4.12 don't support maxactive and therefore auto generate
+ // group and event names from the symbol and offset. The symbol is used
+ // without any sanitization.
+ // See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712
+ event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset)
+ if err := removeEvent(args.Type, event); err != nil {
+ return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err)
+ }
+ return nil, fmt.Errorf("create trace event with non-default maxactive: %w", internal.ErrNotSupported)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("get trace event id: %w", err)
+ }
+
+ evt := &Event{args.Type, args.Group, eventName, tid}
+ runtime.SetFinalizer(evt, (*Event).Close)
+ return evt, nil
+}
+
+// Close removes the event from tracefs.
+//
+// Returns os.ErrClosed if the event has already been closed before.
+func (evt *Event) Close() error {
+ if evt.id == 0 {
+ return os.ErrClosed
+ }
+
+ evt.id = 0
+ runtime.SetFinalizer(evt, nil)
+ pe := fmt.Sprintf("%s/%s", evt.group, evt.name)
+ return removeEvent(evt.typ, pe)
+}
+
+func removeEvent(typ ProbeType, pe string) error {
+ f, err := typ.eventsFile()
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // See [k,u]probe_events syntax above. The probe type does not need to be specified
+ // for removals.
+ if _, err = f.WriteString("-:" + pe); err != nil {
+ return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err)
+ }
+
+ return nil
+}
+
+// ID returns the tracefs ID associated with the event.
+func (evt *Event) ID() uint64 {
+ return evt.id
+}
+
+// Group returns the tracefs group used by the event.
+func (evt *Event) Group() string {
+ return evt.group
+}
+
+// KprobeToken creates the SYM[+offs] token for the tracefs api.
+func KprobeToken(args ProbeArgs) string {
+ po := args.Symbol
+
+ if args.Offset != 0 {
+ po += fmt.Sprintf("+%#x", args.Offset)
+ }
+
+ return po
+}
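
NewEvent boils down to writing one line in the [ku]probe_events syntax quoted in the comments above. A stand-alone sketch (values made up, standard library only) of the line that would register a kretprobe with maxactive 100 on do_sys_open+0x10:

package main

import "fmt"

func main() {
	// probePrefix(true, 100) yields "r100", KprobeToken appends "+0x10"
	// to the symbol; the group imitates what RandomGroup("ebpf") returns.
	group, event := "ebpf_1a2b3c4d5e6f7a8b", "do_sys_open"
	line := fmt.Sprintf("%s:%s/%s %s+%#x", "r100", group, event, "do_sys_open", 0x10)
	fmt.Println(line) // r100:ebpf_1a2b3c4d5e6f7a8b/do_sys_open do_sys_open+0x10
}
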
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
new file mode 100644
index 000000000..87cb0a059
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT.
+
+package tracefs
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Kprobe-0]
+ _ = x[Uprobe-1]
+}
+
+const _ProbeType_name = "kprobeuprobe"
+
+var _ProbeType_index = [...]uint8{0, 6, 12}
+
+func (i ProbeType) String() string {
+ if i >= ProbeType(len(_ProbeType_index)-1) {
+ return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _ProbeType_name[_ProbeType_index[i]:_ProbeType_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
new file mode 100644
index 000000000..994f31260
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
@@ -0,0 +1,16 @@
+package tracefs
+
+import "fmt"
+
+// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.
+func UprobeToken(args ProbeArgs) string {
+ po := fmt.Sprintf("%s:%#x", args.Path, args.Offset)
+
+ if args.RefCtrOffset != 0 {
+ // This is not documented in Documentation/trace/uprobetracer.txt.
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564
+ po += fmt.Sprintf("(%#x)", args.RefCtrOffset)
+ }
+
+ return po
+}
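
For uprobes the token is PATH:OFFSET, optionally followed by the (undocumented) reference counter offset in parentheses. A stand-alone sketch with made-up values:

package main

import "fmt"

func main() {
	// With Path "/bin/bash", Offset 0x12345 and RefCtrOffset 0x6789,
	// UprobeToken formats the probe location like this:
	fmt.Printf("%s:%#x(%#x)\n", "/bin/bash", 0x12345, 0x6789)
	// Output: /bin/bash:0x12345(0x6789)
}
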
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/doc.go b/vendor/github.com/cilium/ebpf/internal/unix/doc.go
new file mode 100644
index 000000000..d168d36f1
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/doc.go
@@ -0,0 +1,11 @@
+// Package unix re-exports Linux specific parts of golang.org/x/sys/unix.
+//
+// It avoids breaking compilation on other OSes by providing stubs as follows:
+// - Invoking a function always returns an error.
+// - Errnos have distinct, non-zero values.
+// - Constants have distinct but meaningless values.
+// - Types use the same names for members, but may or may not follow the
+// Linux layout.
+package unix
+
+// Note: please don't add any custom API to this package. Use internal/sys instead.
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
new file mode 100644
index 000000000..d725cfaa3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
@@ -0,0 +1,216 @@
+//go:build linux
+
+package unix
+
+import (
+ "syscall"
+
+ linux "golang.org/x/sys/unix"
+)
+
+const (
+ ENOENT = linux.ENOENT
+ EEXIST = linux.EEXIST
+ EAGAIN = linux.EAGAIN
+ ENOSPC = linux.ENOSPC
+ EINVAL = linux.EINVAL
+ EPOLLIN = linux.EPOLLIN
+ EINTR = linux.EINTR
+ EPERM = linux.EPERM
+ ESRCH = linux.ESRCH
+ ENODEV = linux.ENODEV
+ EBADF = linux.EBADF
+ E2BIG = linux.E2BIG
+ EFAULT = linux.EFAULT
+ EACCES = linux.EACCES
+ EILSEQ = linux.EILSEQ
+ EOPNOTSUPP = linux.EOPNOTSUPP
+ ESTALE = linux.ESTALE
+)
+
+const (
+ BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
+ BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
+ BPF_F_RDONLY = linux.BPF_F_RDONLY
+ BPF_F_WRONLY = linux.BPF_F_WRONLY
+ BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
+ BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE
+ BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS
+ BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
+ BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
+ BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN
+ BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN
+ BPF_F_LOCK = linux.BPF_F_LOCK
+ BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
+ BPF_TAG_SIZE = linux.BPF_TAG_SIZE
+ BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT
+ BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT
+ BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ
+ SYS_BPF = linux.SYS_BPF
+ F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC
+ EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD
+ EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC
+ O_CLOEXEC = linux.O_CLOEXEC
+ O_NONBLOCK = linux.O_NONBLOCK
+ PROT_NONE = linux.PROT_NONE
+ PROT_READ = linux.PROT_READ
+ PROT_WRITE = linux.PROT_WRITE
+ MAP_ANON = linux.MAP_ANON
+ MAP_SHARED = linux.MAP_SHARED
+ MAP_PRIVATE = linux.MAP_PRIVATE
+ PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1
+ PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
+ PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT
+ PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
+ PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE
+ PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE
+ PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF
+ PerfBitWatermark = linux.PerfBitWatermark
+ PerfBitWriteBackward = linux.PerfBitWriteBackward
+ PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
+ PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
+ RLIM_INFINITY = linux.RLIM_INFINITY
+ RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
+ BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
+ PERF_RECORD_LOST = linux.PERF_RECORD_LOST
+ PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE
+ AT_FDCWD = linux.AT_FDCWD
+ RENAME_NOREPLACE = linux.RENAME_NOREPLACE
+ SO_ATTACH_BPF = linux.SO_ATTACH_BPF
+ SO_DETACH_BPF = linux.SO_DETACH_BPF
+ SOL_SOCKET = linux.SOL_SOCKET
+ SIGPROF = linux.SIGPROF
+ SIG_BLOCK = linux.SIG_BLOCK
+ SIG_UNBLOCK = linux.SIG_UNBLOCK
+ EM_NONE = linux.EM_NONE
+ EM_BPF = linux.EM_BPF
+ BPF_FS_MAGIC = linux.BPF_FS_MAGIC
+ TRACEFS_MAGIC = linux.TRACEFS_MAGIC
+ DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC
+ BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP
+ BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP
+)
+
+type Statfs_t = linux.Statfs_t
+type Stat_t = linux.Stat_t
+type Rlimit = linux.Rlimit
+type Signal = linux.Signal
+type Sigset_t = linux.Sigset_t
+type PerfEventMmapPage = linux.PerfEventMmapPage
+type EpollEvent = linux.EpollEvent
+type PerfEventAttr = linux.PerfEventAttr
+type Utsname = linux.Utsname
+type CPUSet = linux.CPUSet
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ return linux.Syscall(trap, a1, a2, a3)
+}
+
+func PthreadSigmask(how int, set, oldset *Sigset_t) error {
+ return linux.PthreadSigmask(how, set, oldset)
+}
+
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return linux.FcntlInt(fd, cmd, arg)
+}
+
+func IoctlSetInt(fd int, req uint, value int) error {
+ return linux.IoctlSetInt(fd, req, value)
+}
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ return linux.Statfs(path, buf)
+}
+
+func Close(fd int) (err error) {
+ return linux.Close(fd)
+}
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ return linux.EpollWait(epfd, events, msec)
+}
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ return linux.EpollCtl(epfd, op, fd, event)
+}
+
+func Eventfd(initval uint, flags int) (fd int, err error) {
+ return linux.Eventfd(initval, flags)
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ return linux.Write(fd, p)
+}
+
+func EpollCreate1(flag int) (fd int, err error) {
+ return linux.EpollCreate1(flag)
+}
+
+func SetNonblock(fd int, nonblocking bool) (err error) {
+ return linux.SetNonblock(fd, nonblocking)
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return linux.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return linux.Munmap(b)
+}
+
+func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
+ return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags)
+}
+
+func Uname(buf *Utsname) (err error) {
+ return linux.Uname(buf)
+}
+
+func Getpid() int {
+ return linux.Getpid()
+}
+
+func Gettid() int {
+ return linux.Gettid()
+}
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+ return linux.Tgkill(tgid, tid, sig)
+}
+
+func BytePtrFromString(s string) (*byte, error) {
+ return linux.BytePtrFromString(s)
+}
+
+func ByteSliceToString(s []byte) string {
+ return linux.ByteSliceToString(s)
+}
+
+func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
+ return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags)
+}
+
+func Prlimit(pid, resource int, new, old *Rlimit) error {
+ return linux.Prlimit(pid, resource, new, old)
+}
+
+func Open(path string, mode int, perm uint32) (int, error) {
+ return linux.Open(path, mode, perm)
+}
+
+func Fstat(fd int, stat *Stat_t) error {
+ return linux.Fstat(fd, stat)
+}
+
+func SetsockoptInt(fd, level, opt, value int) error {
+ return linux.SetsockoptInt(fd, level, opt, value)
+}
+
+func SchedSetaffinity(pid int, set *CPUSet) error {
+ return linux.SchedSetaffinity(pid, set)
+}
+
+func SchedGetaffinity(pid int, set *CPUSet) error {
+ return linux.SchedGetaffinity(pid, set)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
new file mode 100644
index 000000000..3ff896271
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
@@ -0,0 +1,311 @@
+//go:build !linux
+
+package unix
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+
+// Errnos are distinct and non-zero.
+const (
+ ENOENT syscall.Errno = iota + 1
+ EEXIST
+ EAGAIN
+ ENOSPC
+ EINVAL
+ EINTR
+ EPERM
+ ESRCH
+ ENODEV
+ EBADF
+ E2BIG
+ EFAULT
+ EACCES
+ EILSEQ
+ EOPNOTSUPP
+ ESTALE
+)
+
+// Constants are distinct to avoid breaking switch statements.
+const (
+ BPF_F_NO_PREALLOC = iota
+ BPF_F_NUMA_NODE
+ BPF_F_RDONLY
+ BPF_F_WRONLY
+ BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG
+ BPF_F_SLEEPABLE
+ BPF_F_MMAPABLE
+ BPF_F_INNER_MAP
+ BPF_F_KPROBE_MULTI_RETURN
+ BPF_F_UPROBE_MULTI_RETURN
+ BPF_F_XDP_HAS_FRAGS
+ BPF_OBJ_NAME_LEN
+ BPF_TAG_SIZE
+ BPF_RINGBUF_BUSY_BIT
+ BPF_RINGBUF_DISCARD_BIT
+ BPF_RINGBUF_HDR_SZ
+ SYS_BPF
+ F_DUPFD_CLOEXEC
+ EPOLLIN
+ EPOLL_CTL_ADD
+ EPOLL_CLOEXEC
+ O_CLOEXEC
+ O_NONBLOCK
+ PROT_NONE
+ PROT_READ
+ PROT_WRITE
+ MAP_ANON
+ MAP_SHARED
+ MAP_PRIVATE
+ PERF_ATTR_SIZE_VER1
+ PERF_TYPE_SOFTWARE
+ PERF_TYPE_TRACEPOINT
+ PERF_COUNT_SW_BPF_OUTPUT
+ PERF_EVENT_IOC_DISABLE
+ PERF_EVENT_IOC_ENABLE
+ PERF_EVENT_IOC_SET_BPF
+ PerfBitWatermark
+ PerfBitWriteBackward
+ PERF_SAMPLE_RAW
+ PERF_FLAG_FD_CLOEXEC
+ RLIM_INFINITY
+ RLIMIT_MEMLOCK
+ BPF_STATS_RUN_TIME
+ PERF_RECORD_LOST
+ PERF_RECORD_SAMPLE
+ AT_FDCWD
+ RENAME_NOREPLACE
+ SO_ATTACH_BPF
+ SO_DETACH_BPF
+ SOL_SOCKET
+ SIGPROF
+ SIG_BLOCK
+ SIG_UNBLOCK
+ EM_NONE
+ EM_BPF
+ BPF_FS_MAGIC
+ TRACEFS_MAGIC
+ DEBUGFS_MAGIC
+ BPF_RB_NO_WAKEUP
+ BPF_RB_FORCE_WAKEUP
+ BPF_F_LOCK
+)
+
+type Statfs_t struct {
+ Type int64
+ Bsize int64
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid [2]int32
+ Namelen int64
+ Frsize int64
+ Flags int64
+ Spare [4]int64
+}
+
+type Stat_t struct {
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint32
+ Uid uint32
+ Gid uint32
+ _ int32
+ Rdev uint64
+ Size int64
+ Blksize int64
+ Blocks int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type Signal int
+
+type Sigset_t struct {
+ Val [4]uint64
+}
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ return 0, 0, syscall.ENOTSUP
+}
+
+func PthreadSigmask(how int, set, oldset *Sigset_t) error {
+ return errNonLinux
+}
+
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return -1, errNonLinux
+}
+
+func IoctlSetInt(fd int, req uint, value int) error {
+ return errNonLinux
+}
+
+func Statfs(path string, buf *Statfs_t) error {
+ return errNonLinux
+}
+
+func Close(fd int) (err error) {
+ return errNonLinux
+}
+
+type EpollEvent struct {
+ Events uint32
+ Fd int32
+ Pad int32
+}
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ return 0, errNonLinux
+}
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ return errNonLinux
+}
+
+func Eventfd(initval uint, flags int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ return 0, errNonLinux
+}
+
+func EpollCreate1(flag int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+type PerfEventMmapPage struct {
+ Version uint32
+ Compat_version uint32
+ Lock uint32
+ Index uint32
+ Offset int64
+ Time_enabled uint64
+ Time_running uint64
+ Capabilities uint64
+ Pmc_width uint16
+ Time_shift uint16
+ Time_mult uint32
+ Time_offset uint64
+ Time_zero uint64
+ Size uint32
+
+ Data_head uint64
+ Data_tail uint64
+ Data_offset uint64
+ Data_size uint64
+ Aux_head uint64
+ Aux_tail uint64
+ Aux_offset uint64
+ Aux_size uint64
+}
+
+func SetNonblock(fd int, nonblocking bool) (err error) {
+ return errNonLinux
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return []byte{}, errNonLinux
+}
+
+func Munmap(b []byte) (err error) {
+ return errNonLinux
+}
+
+type PerfEventAttr struct {
+ Type uint32
+ Size uint32
+ Config uint64
+ Sample uint64
+ Sample_type uint64
+ Read_format uint64
+ Bits uint64
+ Wakeup uint32
+ Bp_type uint32
+ Ext1 uint64
+ Ext2 uint64
+ Branch_sample_type uint64
+ Sample_regs_user uint64
+ Sample_stack_user uint32
+ Clockid int32
+ Sample_regs_intr uint64
+ Aux_watermark uint32
+ Sample_max_stack uint16
+}
+
+func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+type Utsname struct {
+ Release [65]byte
+ Version [65]byte
+}
+
+func Uname(buf *Utsname) (err error) {
+ return errNonLinux
+}
+
+func Getpid() int {
+ return -1
+}
+
+func Gettid() int {
+ return -1
+}
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+ return errNonLinux
+}
+
+func BytePtrFromString(s string) (*byte, error) {
+ return nil, errNonLinux
+}
+
+func ByteSliceToString(s []byte) string {
+ return ""
+}
+
+func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
+ return errNonLinux
+}
+
+func Prlimit(pid, resource int, new, old *Rlimit) error {
+ return errNonLinux
+}
+
+func Open(path string, mode int, perm uint32) (int, error) {
+ return -1, errNonLinux
+}
+
+func Fstat(fd int, stat *Stat_t) error {
+ return errNonLinux
+}
+
+func SetsockoptInt(fd, level, opt, value int) error {
+ return errNonLinux
+}
+
+type CPUSet struct{}
+
+func (*CPUSet) Set(int) {}
+
+func SchedSetaffinity(pid int, set *CPUSet) error {
+ return errNonLinux
+}
+
+func SchedGetaffinity(pid int, set *CPUSet) error {
+ return errNonLinux
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go
new file mode 100644
index 000000000..104927855
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/vdso.go
@@ -0,0 +1,143 @@
+package internal
+
+import (
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ errAuxvNoVDSO = errors.New("no vdso address found in auxv")
+)
+
+// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library
+// linked into the current process image.
+func vdsoVersion() (uint32, error) {
+ av, err := newAuxvRuntimeReader()
+ if err != nil {
+ return 0, err
+ }
+
+ defer av.Close()
+
+ vdsoAddr, err := vdsoMemoryAddress(av)
+ if err != nil {
+ return 0, fmt.Errorf("finding vDSO memory address: %w", err)
+ }
+
+ // Use /proc/self/mem rather than unsafe.Pointer tricks.
+ mem, err := os.Open("/proc/self/mem")
+ if err != nil {
+ return 0, fmt.Errorf("opening mem: %w", err)
+ }
+ defer mem.Close()
+
+ // Open ELF at provided memory address, as offset into /proc/self/mem.
+ c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64))
+ if err != nil {
+ return 0, fmt.Errorf("reading linux version code: %w", err)
+ }
+
+ return c, nil
+}
+
+// vdsoMemoryAddress returns the memory address of the vDSO library
+// linked into the current process image. r iterates over the tag/value pairs of an auxv blob.
+func vdsoMemoryAddress(r auxvPairReader) (uintptr, error) {
+ // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`,
+ // the address of a page containing the virtual Dynamic Shared Object (vDSO).
+ for {
+ tag, value, err := r.ReadAuxvPair()
+ if err != nil {
+ return 0, err
+ }
+
+ switch tag {
+ case _AT_SYSINFO_EHDR:
+ if value != 0 {
+ return uintptr(value), nil
+ }
+ return 0, fmt.Errorf("invalid vDSO address in auxv")
+ // _AT_NULL is always the last tag/val pair in the aux vector
+ // and can be treated like EOF.
+ case _AT_NULL:
+ return 0, errAuxvNoVDSO
+ }
+ }
+}
+
+// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)'
+type elfNoteHeader struct {
+ NameSize int32
+ DescSize int32
+ Type int32
+}
+
+// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in
+// the ELF notes section of the binary provided by the reader.
+func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) {
+ hdr, err := NewSafeELFFile(r)
+ if err != nil {
+ return 0, fmt.Errorf("reading vDSO ELF: %w", err)
+ }
+
+ sections := hdr.SectionsByType(elf.SHT_NOTE)
+ if len(sections) == 0 {
+ return 0, fmt.Errorf("no note section found in vDSO ELF")
+ }
+
+ for _, sec := range sections {
+ sr := sec.Open()
+ var n elfNoteHeader
+
+ // Read notes until we find one named 'Linux'.
+ for {
+ if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil {
+ if errors.Is(err, io.EOF) {
+ // We looked at all the notes in this section
+ break
+ }
+ return 0, fmt.Errorf("reading note header: %w", err)
+ }
+
+ // If a note name is defined, it follows the note header.
+ var name string
+ if n.NameSize > 0 {
+ // Read the note name, aligned to 4 bytes.
+ buf := make([]byte, Align(n.NameSize, 4))
+ if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil {
+ return 0, fmt.Errorf("reading note name: %w", err)
+ }
+
+ // Read nul-terminated string.
+ name = unix.ByteSliceToString(buf[:n.NameSize])
+ }
+
+ // If a note descriptor is defined, it follows the name.
+ // It is possible for a note to have a descriptor but not a name.
+ if n.DescSize > 0 {
+ // LINUX_VERSION_CODE is a uint32 value.
+ if name == "Linux" && n.DescSize == 4 && n.Type == 0 {
+ var version uint32
+ if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil {
+ return 0, fmt.Errorf("reading note descriptor: %w", err)
+ }
+ return version, nil
+ }
+
+ // Discard the note descriptor if it exists but we're not interested in it.
+ if _, err := io.CopyN(io.Discard, sr, int64(Align(n.DescSize, 4))); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ return 0, fmt.Errorf("no Linux note in ELF")
+}
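
The auxv reader used above is not part of this diff, but the lookup it performs is simple: the auxiliary vector is a sequence of native-word tag/value pairs, and AT_SYSINFO_EHDR (tag 33 on common Linux architectures) carries the vDSO base address. A stand-alone sketch (not vendored code) that scans /proc/self/auxv for it, assuming a little-endian 64-bit system:

package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

func main() {
	// Illustrative only: assumes a little-endian 64-bit Linux system and
	// AT_SYSINFO_EHDR == 33, which holds for common architectures.
	buf, err := os.ReadFile("/proc/self/auxv")
	if err != nil {
		panic(err)
	}
	for i := 0; i+16 <= len(buf); i += 16 {
		tag := binary.LittleEndian.Uint64(buf[i:])
		val := binary.LittleEndian.Uint64(buf[i+8:])
		if tag == 33 { // AT_SYSINFO_EHDR
			fmt.Printf("vDSO mapped at %#x\n", val)
			return
		}
	}
	fmt.Println("no vDSO address found in auxv")
}
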
diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go
new file mode 100644
index 000000000..acd4650af
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/version.go
@@ -0,0 +1,107 @@
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+const (
+ // Version constant used in ELF binaries indicating that the loader needs to
+ // substitute the eBPF program's version with the value of the kernel's
+ // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf
+ // and RedSift.
+ MagicKernelVersion = 0xFFFFFFFE
+)
+
+// A Version in the form Major.Minor.Patch.
+type Version [3]uint16
+
+// NewVersion creates a version from a string like "Major.Minor.Patch".
+//
+// Patch is optional.
+func NewVersion(ver string) (Version, error) {
+ var major, minor, patch uint16
+ n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
+ if n < 2 {
+ return Version{}, fmt.Errorf("invalid version: %s", ver)
+ }
+ return Version{major, minor, patch}, nil
+}
+
+// NewVersionFromCode creates a version from a LINUX_VERSION_CODE.
+func NewVersionFromCode(code uint32) Version {
+ return Version{
+ uint16(uint8(code >> 16)),
+ uint16(uint8(code >> 8)),
+ uint16(uint8(code)),
+ }
+}
+
+func (v Version) String() string {
+ if v[2] == 0 {
+ return fmt.Sprintf("v%d.%d", v[0], v[1])
+ }
+ return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
+}
+
+// Less returns true if the version is less than another version.
+func (v Version) Less(other Version) bool {
+ for i, a := range v {
+ if a == other[i] {
+ continue
+ }
+ return a < other[i]
+ }
+ return false
+}
+
+// Unspecified returns true if the version is all zero.
+func (v Version) Unspecified() bool {
+ return v[0] == 0 && v[1] == 0 && v[2] == 0
+}
+
+// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h.
+// It represents the kernel version and patch level as a single value.
+func (v Version) Kernel() uint32 {
+
+ // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid
+ // overflowing into PATCHLEVEL.
+ // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255").
+ s := v[2]
+ if s > 255 {
+ s = 255
+ }
+
+ // Truncate members to uint8 to prevent them from spilling over into
+ // each other when overflowing 8 bits.
+ return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s))
+}
+
+// KernelVersion returns the version of the currently running kernel.
+var KernelVersion = sync.OnceValues(func() (Version, error) {
+ return detectKernelVersion()
+})
+
+// detectKernelVersion returns the version of the running kernel.
+func detectKernelVersion() (Version, error) {
+ vc, err := vdsoVersion()
+ if err != nil {
+ return Version{}, err
+ }
+ return NewVersionFromCode(vc), nil
+}
+
+// KernelRelease returns the release string of the running kernel.
+// Its format depends on the Linux distribution and corresponds to directory
+// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and
+// 4.19.0-16-amd64.
+func KernelRelease() (string, error) {
+ var uname unix.Utsname
+ if err := unix.Uname(&uname); err != nil {
+ return "", fmt.Errorf("uname failed: %w", err)
+ }
+
+ return unix.ByteSliceToString(uname.Release[:]), nil
+}
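
NewVersionFromCode and Version.Kernel are inverses over the KERNEL_VERSION encoding, major<<16 | minor<<8 | patch with every component truncated to 8 bits. A quick stand-alone check of the arithmetic (not part of the vendored code):

package main

import "fmt"

func main() {
	// KERNEL_VERSION(5, 15, 17) as encoded by the kernel macro:
	code := uint32(5)<<16 | uint32(15)<<8 | uint32(17)
	fmt.Printf("%#x\n", code) // 0x50f11

	// Decoding it the way NewVersionFromCode does:
	major, minor, patch := uint8(code>>16), uint8(code>>8), uint8(code)
	fmt.Printf("v%d.%d.%d\n", major, minor, patch) // v5.15.17
}
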
diff --git a/vendor/github.com/cilium/ebpf/link/anchor.go b/vendor/github.com/cilium/ebpf/link/anchor.go
new file mode 100644
index 000000000..1a3b5f768
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/anchor.go
@@ -0,0 +1,137 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+const anchorFlags = sys.BPF_F_REPLACE |
+ sys.BPF_F_BEFORE |
+ sys.BPF_F_AFTER |
+ sys.BPF_F_ID |
+ sys.BPF_F_LINK_MPROG
+
+// Anchor is a reference to a link or program.
+//
+// It is used to describe where an attachment or detachment should take place
+// for link types which support multiple attachment.
+type Anchor interface {
+ // anchor returns an fd or ID and a set of flags.
+ //
+ // By default fdOrID is taken to reference a program, but BPF_F_LINK_MPROG
+ // changes this to refer to a link instead.
+ //
+ // BPF_F_BEFORE, BPF_F_AFTER, BPF_F_REPLACE modify where a link or program
+ // is attached. The default behaviour if none of these flags is specified
+ // matches BPF_F_AFTER.
+ anchor() (fdOrID, flags uint32, _ error)
+}
+
+type firstAnchor struct{}
+
+func (firstAnchor) anchor() (fdOrID, flags uint32, _ error) {
+ return 0, sys.BPF_F_BEFORE, nil
+}
+
+// Head is the position before all other programs or links.
+func Head() Anchor {
+ return firstAnchor{}
+}
+
+type lastAnchor struct{}
+
+func (lastAnchor) anchor() (fdOrID, flags uint32, _ error) {
+ return 0, sys.BPF_F_AFTER, nil
+}
+
+// Tail is the position after all other programs or links.
+func Tail() Anchor {
+ return lastAnchor{}
+}
+
+// Before is the position just in front of target.
+func BeforeLink(target Link) Anchor {
+ return anchor{target, sys.BPF_F_BEFORE}
+}
+
+// After is the position just after target.
+func AfterLink(target Link) Anchor {
+ return anchor{target, sys.BPF_F_AFTER}
+}
+
+// Before is the position just in front of target.
+func BeforeLinkByID(target ID) Anchor {
+ return anchor{target, sys.BPF_F_BEFORE}
+}
+
+// After is the position just after target.
+func AfterLinkByID(target ID) Anchor {
+ return anchor{target, sys.BPF_F_AFTER}
+}
+
+// Before is the position just in front of target.
+func BeforeProgram(target *ebpf.Program) Anchor {
+ return anchor{target, sys.BPF_F_BEFORE}
+}
+
+// AfterProgram is the position just after target.
+func AfterProgram(target *ebpf.Program) Anchor {
+ return anchor{target, sys.BPF_F_AFTER}
+}
+
+// ReplaceProgram replaces the target itself.
+func ReplaceProgram(target *ebpf.Program) Anchor {
+ return anchor{target, sys.BPF_F_REPLACE}
+}
+
+// BeforeProgramByID is the position just in front of target.
+func BeforeProgramByID(target ebpf.ProgramID) Anchor {
+ return anchor{target, sys.BPF_F_BEFORE}
+}
+
+// AfterProgramByID is the position just after target.
+func AfterProgramByID(target ebpf.ProgramID) Anchor {
+ return anchor{target, sys.BPF_F_AFTER}
+}
+
+// ReplaceProgramByID replaces the target itself.
+func ReplaceProgramByID(target ebpf.ProgramID) Anchor {
+ return anchor{target, sys.BPF_F_REPLACE}
+}
+
+type anchor struct {
+ target any
+ position uint32
+}
+
+func (ap anchor) anchor() (fdOrID, flags uint32, _ error) {
+ var typeFlag uint32
+ switch target := ap.target.(type) {
+ case *ebpf.Program:
+ fd := target.FD()
+ if fd < 0 {
+ return 0, 0, sys.ErrClosedFd
+ }
+ fdOrID = uint32(fd)
+ typeFlag = 0
+ case ebpf.ProgramID:
+ fdOrID = uint32(target)
+ typeFlag = sys.BPF_F_ID
+ case interface{ FD() int }:
+ fd := target.FD()
+ if fd < 0 {
+ return 0, 0, sys.ErrClosedFd
+ }
+ fdOrID = uint32(fd)
+ typeFlag = sys.BPF_F_LINK_MPROG
+ case ID:
+ fdOrID = uint32(target)
+ typeFlag = sys.BPF_F_LINK_MPROG | sys.BPF_F_ID
+ default:
+ return 0, 0, fmt.Errorf("invalid target %T", ap.target)
+ }
+
+ return fdOrID, ap.position | typeFlag, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go
new file mode 100644
index 000000000..f17d34f03
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/cgroup.go
@@ -0,0 +1,208 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type cgroupAttachFlags uint32
+
+const (
+ // Allow programs attached to sub-cgroups to override the verdict of this
+ // program.
+ flagAllowOverride cgroupAttachFlags = 1 << iota
+ // Allow attaching multiple programs to the cgroup. Only works if the cgroup
+ // has zero or more programs attached using the Multi flag. Implies override.
+ flagAllowMulti
+ // Set automatically by progAttachCgroup.Update(). Used for updating a
+ // specific given program attached in multi-mode.
+ flagReplace
+)
+
+type CgroupOptions struct {
+ // Path to a cgroupv2 folder.
+ Path string
+ // One of the AttachCgroup* constants
+ Attach ebpf.AttachType
+ // Program must be of type CGroup*, and the attach type must match Attach.
+ Program *ebpf.Program
+}
+
+// AttachCgroup links a BPF program to a cgroup.
+//
+// If the running kernel doesn't support bpf_link, attempts to emulate its
+// semantics using the legacy PROG_ATTACH mechanism. If bpf_link is not
+// available, the returned [Link] will not support pinning to bpffs.
+//
+// If you need more control over attachment flags or the attachment mechanism
+// used, look at [RawAttachProgram] and [AttachRawLink] instead.
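+//
+// A minimal usage sketch (the cgroup path and attach type below are
+// illustrative assumptions, not requirements):
+//
+//	cg, err := AttachCgroup(CgroupOptions{
+//		Path:    "/sys/fs/cgroup/my-service",
+//		Attach:  ebpf.AttachCGroupInetIngress,
+//		Program: prog,
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer cg.Close()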
+func AttachCgroup(opts CgroupOptions) (cg Link, err error) {
+ cgroup, err := os.Open(opts.Path)
+ if err != nil {
+ return nil, fmt.Errorf("can't open cgroup: %s", err)
+ }
+ defer func() {
+ if _, ok := cg.(*progAttachCgroup); ok {
+ // Skip closing the cgroup handle if we return a valid progAttachCgroup,
+ // where the handle is retained to implement Update().
+ return
+ }
+ cgroup.Close()
+ }()
+
+ cg, err = newLinkCgroup(cgroup, opts.Attach, opts.Program)
+ if err == nil {
+ return cg, nil
+ }
+
+ if errors.Is(err, ErrNotSupported) {
+ cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowMulti)
+ }
+ if errors.Is(err, ErrNotSupported) {
+ cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowOverride)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return cg, nil
+}
+
+type progAttachCgroup struct {
+ cgroup *os.File
+ current *ebpf.Program
+ attachType ebpf.AttachType
+ flags cgroupAttachFlags
+}
+
+var _ Link = (*progAttachCgroup)(nil)
+
+func (cg *progAttachCgroup) isLink() {}
+
+// newProgAttachCgroup attaches prog to cgroup using BPF_PROG_ATTACH.
+// cgroup and prog are retained by [progAttachCgroup].
+func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
+ if flags&flagAllowMulti > 0 {
+ if err := haveProgAttachReplace(); err != nil {
+ return nil, fmt.Errorf("can't support multiple programs: %w", err)
+ }
+ }
+
+ // Use a program handle that cannot be closed by the caller.
+ clone, err := prog.Clone()
+ if err != nil {
+ return nil, err
+ }
+
+ err = RawAttachProgram(RawAttachProgramOptions{
+ Target: int(cgroup.Fd()),
+ Program: clone,
+ Flags: uint32(flags),
+ Attach: attach,
+ })
+ if err != nil {
+ clone.Close()
+ return nil, fmt.Errorf("cgroup: %w", err)
+ }
+
+ return &progAttachCgroup{cgroup, clone, attach, flags}, nil
+}
+
+func (cg *progAttachCgroup) Close() error {
+ defer cg.cgroup.Close()
+ defer cg.current.Close()
+
+ err := RawDetachProgram(RawDetachProgramOptions{
+ Target: int(cg.cgroup.Fd()),
+ Program: cg.current,
+ Attach: cg.attachType,
+ })
+ if err != nil {
+ return fmt.Errorf("close cgroup: %s", err)
+ }
+ return nil
+}
+
+func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
+ new, err := prog.Clone()
+ if err != nil {
+ return err
+ }
+
+ args := RawAttachProgramOptions{
+ Target: int(cg.cgroup.Fd()),
+ Program: prog,
+ Attach: cg.attachType,
+ Flags: uint32(cg.flags),
+ }
+
+ if cg.flags&flagAllowMulti > 0 {
+ // Atomically replacing multiple programs requires at least
+ // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
+ // program in MULTI mode")
+ args.Anchor = ReplaceProgram(cg.current)
+ }
+
+ if err := RawAttachProgram(args); err != nil {
+ new.Close()
+ return fmt.Errorf("can't update cgroup: %s", err)
+ }
+
+ cg.current.Close()
+ cg.current = new
+ return nil
+}
+
+func (cg *progAttachCgroup) Pin(string) error {
+ return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
+}
+
+func (cg *progAttachCgroup) Unpin() error {
+ return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported)
+}
+
+func (cg *progAttachCgroup) Info() (*Info, error) {
+ return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported)
+}
+
+type linkCgroup struct {
+ RawLink
+}
+
+var _ Link = (*linkCgroup)(nil)
+
+// newLinkCgroup attaches prog to cgroup using BPF_LINK_CREATE.
+func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
+ link, err := AttachRawLink(RawLinkOptions{
+ Target: int(cgroup.Fd()),
+ Program: prog,
+ Attach: attach,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &linkCgroup{*link}, err
+}
+
+func (cg *linkCgroup) Info() (*Info, error) {
+ var info sys.CgroupLinkInfo
+ if err := sys.ObjInfo(cg.fd, &info); err != nil {
+ return nil, fmt.Errorf("cgroup link info: %s", err)
+ }
+ extra := &CgroupInfo{
+ CgroupId: info.CgroupId,
+ AttachType: info.AttachType,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/doc.go b/vendor/github.com/cilium/ebpf/link/doc.go
new file mode 100644
index 000000000..2bde35ed7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/doc.go
@@ -0,0 +1,2 @@
+// Package link allows attaching eBPF programs to various kernel hooks.
+package link
diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go
new file mode 100644
index 000000000..0a39faef8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/iter.go
@@ -0,0 +1,84 @@
+package link
+
+import (
+ "fmt"
+ "io"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type IterOptions struct {
+ // Program must be of type Tracing with attach type
+ // AttachTraceIter. The kind of iterator to attach to is
+ // determined at load time via the AttachTo field.
+ //
+ // AttachTo requires the kernel to include BTF of itself,
+	// and to have been compiled with a recent pahole (>= 1.16).
+ Program *ebpf.Program
+
+ // Map specifies the target map for bpf_map_elem and sockmap iterators.
+ // It may be nil.
+ Map *ebpf.Map
+}
+
+// AttachIter attaches a BPF seq_file iterator.
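+//
+// A minimal usage sketch (assumes prog is a Tracing program loaded with
+// AttachType AttachTraceIter and a valid AttachTo target, for example
+// "bpf_map"):
+//
+//	it, err := AttachIter(IterOptions{Program: prog})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer it.Close()
+//
+//	rd, err := it.Open()
+//	if err != nil {
+//		// handle error
+//	}
+//	defer rd.Close()
+//	data, _ := io.ReadAll(rd) // reading triggers the BPF program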
+func AttachIter(opts IterOptions) (*Iter, error) {
+ progFd := opts.Program.FD()
+ if progFd < 0 {
+ return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+ }
+
+ var info bpfIterLinkInfoMap
+ if opts.Map != nil {
+ mapFd := opts.Map.FD()
+ if mapFd < 0 {
+ return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd)
+ }
+ info.map_fd = uint32(mapFd)
+ }
+
+ attr := sys.LinkCreateIterAttr{
+ ProgFd: uint32(progFd),
+ AttachType: sys.AttachType(ebpf.AttachTraceIter),
+ IterInfo: sys.NewPointer(unsafe.Pointer(&info)),
+ IterInfoLen: uint32(unsafe.Sizeof(info)),
+ }
+
+ fd, err := sys.LinkCreateIter(&attr)
+ if err != nil {
+ if haveFeatErr := haveBPFLink(); haveFeatErr != nil {
+ return nil, haveFeatErr
+ }
+ return nil, fmt.Errorf("can't link iterator: %w", err)
+ }
+
+ return &Iter{RawLink{fd, ""}}, err
+}
+
+// Iter represents an attached bpf_iter.
+type Iter struct {
+ RawLink
+}
+
+// Open creates a new instance of the iterator.
+//
+// Reading from the returned reader triggers the BPF program.
+func (it *Iter) Open() (io.ReadCloser, error) {
+ attr := &sys.IterCreateAttr{
+ LinkFd: it.fd.Uint(),
+ }
+
+ fd, err := sys.IterCreate(attr)
+ if err != nil {
+ return nil, fmt.Errorf("can't create iterator: %w", err)
+ }
+
+ return fd.File("bpf_iter"), nil
+}
+
+// union bpf_iter_link_info.map
+type bpfIterLinkInfoMap struct {
+ map_fd uint32
+}
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go
new file mode 100644
index 000000000..fe3f17c37
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe.go
@@ -0,0 +1,365 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// KprobeOptions defines additional parameters that will be used
+// when loading Kprobes.
+type KprobeOptions struct {
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+ // Offset of the kprobe relative to the traced symbol.
+ // Can be used to insert kprobes at arbitrary offsets in kernel functions,
+ // e.g. in places where functions have been inlined.
+ Offset uint64
+ // Increase the maximum number of concurrent invocations of a kretprobe.
+ // Required when tracing some long running functions in the kernel.
+ //
+ // Deprecated: this setting forces the use of an outdated kernel API and is not portable
+ // across kernel versions.
+ RetprobeMaxActive int
+ // Prefix used for the event name if the kprobe must be attached using tracefs.
+	// The group name is formatted as `<prefix>_<random suffix>`.
+ // The default empty string is equivalent to "ebpf" as the prefix.
+ TraceFSPrefix string
+}
+
+func (ko *KprobeOptions) cookie() uint64 {
+ if ko == nil {
+ return 0
+ }
+ return ko.Cookie
+}
+
+// Kprobe attaches the given eBPF program to a perf event that fires when the
+// given kernel symbol starts executing. See /proc/kallsyms for available
+// symbols. For example, printk():
+//
+// kp, err := Kprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+//
+// The returned Link may implement [PerfEvent].
+func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+ k, err := kprobe(symbol, prog, opts, false)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(k, prog, opts.cookie())
+ if err != nil {
+ k.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// Kretprobe attaches the given eBPF program to a perf event that fires right
+// before the given kernel symbol exits, with the function stack left intact.
+// See /proc/kallsyms for available symbols. For example, printk():
+//
+// kp, err := Kretprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kretprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+//
+// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol
+// incorrectly returns unix.EINVAL instead of os.ErrNotExist.
+//
+// The returned Link may implement [PerfEvent].
+func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+ k, err := kprobe(symbol, prog, opts, true)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(k, prog, opts.cookie())
+ if err != nil {
+ k.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// isValidKprobeSymbol implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_.]*$".
+func isValidKprobeSymbol(s string) bool {
+ if len(s) < 1 {
+ return false
+ }
+
+ for i, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c == '_':
+ case i > 0 && c >= '0' && c <= '9':
+
+ // Allow `.` in symbol name. GCC-compiled kernel may change symbol name
+ // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`.
+ // See: https://gcc.gnu.org/gcc-10/changes.html
+ case i > 0 && c == '.':
+
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+// kprobe opens a perf event on the given symbol and attaches prog to it.
+// If ret is true, create a kretprobe.
+func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) {
+ if symbol == "" {
+ return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if !isValidKprobeSymbol(symbol) {
+ return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput)
+ }
+ if prog.Type() != ebpf.Kprobe {
+ return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
+ }
+
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Kprobe,
+ Pid: perfAllThreads,
+ Symbol: symbol,
+ Ret: ret,
+ }
+
+ if opts != nil {
+ args.RetprobeMaxActive = opts.RetprobeMaxActive
+ args.Cookie = opts.Cookie
+ args.Offset = opts.Offset
+ args.Group = opts.TraceFSPrefix
+ }
+
+ // Use kprobe PMU if the kernel has it available.
+ tp, err := pmuProbe(args)
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+ if prefix := internal.PlatformPrefix(); prefix != "" {
+ args.Symbol = prefix + symbol
+ tp, err = pmuProbe(args)
+ }
+ }
+ if err == nil {
+ return tp, nil
+ }
+ if err != nil && !errors.Is(err, ErrNotSupported) {
+ return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err)
+ }
+
+ // Use tracefs if kprobe PMU is missing.
+ args.Symbol = symbol
+ tp, err = tracefsProbe(args)
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+ if prefix := internal.PlatformPrefix(); prefix != "" {
+ args.Symbol = prefix + symbol
+ tp, err = tracefsProbe(args)
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("creating tracefs event (arch-specific fallback for %q): %w", symbol, err)
+ }
+
+ return tp, nil
+}
+
+// pmuProbe opens a perf event based on a Performance Monitoring Unit.
+//
+// Requires at least a 4.17 kernel.
+// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU"
+// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
+//
+// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
+func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
+ // Getting the PMU type will fail if the kernel doesn't support
+ // the perf_[k,u]probe PMU.
+ eventType, err := internal.ReadUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", args.Type.String(), "type")
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("%s: %w", args.Type, ErrNotSupported)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Use tracefs if we want to set kretprobe's retprobeMaxActive.
+ if args.RetprobeMaxActive != 0 {
+ return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported)
+ }
+
+ var config uint64
+ if args.Ret {
+ bit, err := internal.ReadUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", args.Type.String(), "/format/retprobe")
+ if err != nil {
+ return nil, err
+ }
+ config |= 1 << bit
+ }
+
+ var (
+ attr unix.PerfEventAttr
+ sp unsafe.Pointer
+ token string
+ )
+ switch args.Type {
+ case tracefs.Kprobe:
+ // Create a pointer to a NUL-terminated string for the kernel.
+ sp, err = unsafeStringPtr(args.Symbol)
+ if err != nil {
+ return nil, err
+ }
+
+ token = tracefs.KprobeToken(args)
+
+ attr = unix.PerfEventAttr{
+ // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1,
+ // since it added the config2 (Ext2) field. Use Ext2 as probe_offset.
+ Size: unix.PERF_ATTR_SIZE_VER1,
+ Type: uint32(eventType), // PMU event type read from sysfs
+ Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
+ Ext2: args.Offset, // Kernel symbol offset
+ Config: config, // Retprobe flag
+ }
+ case tracefs.Uprobe:
+ sp, err = unsafeStringPtr(args.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ if args.RefCtrOffset != 0 {
+ config |= args.RefCtrOffset << uprobeRefCtrOffsetShift
+ }
+
+ token = tracefs.UprobeToken(args)
+
+ attr = unix.PerfEventAttr{
+ // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
+ // since it added the config2 (Ext2) field. The Size field controls the
+ // size of the internal buffer the kernel allocates for reading the
+ // perf_event_attr argument from userspace.
+ Size: unix.PERF_ATTR_SIZE_VER1,
+ Type: uint32(eventType), // PMU event type read from sysfs
+ Ext1: uint64(uintptr(sp)), // Uprobe path
+ Ext2: args.Offset, // Uprobe offset
+ Config: config, // RefCtrOffset, Retprobe flag
+ }
+ }
+
+ cpu := 0
+ if args.Pid != perfAllThreads {
+ cpu = -1
+ }
+ rawFd, err := unix.PerfEventOpen(&attr, args.Pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC)
+
+	// On some old kernels, the kprobe PMU doesn't allow `.` in symbol names and
+	// returns -EINVAL. Return ErrNotSupported to allow falling back to tracefs.
+ // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343
+ if errors.Is(err, unix.EINVAL) && strings.Contains(args.Symbol, ".") {
+ return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported)
+ }
+ // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+ // when trying to create a retprobe for a missing symbol.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("token %s: not found: %w", token, err)
+ }
+ // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+ // to an invalid insn boundary. The exact conditions that trigger this error are
+ // arch specific however.
+ if errors.Is(err, unix.EILSEQ) {
+ return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+ }
+ // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned
+ // when attempting to set a uprobe on a trap instruction.
+ if errors.Is(err, sys.ENOTSUPP) {
+ return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.Offset, err)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("token %s: opening perf event: %w", token, err)
+ }
+
+ // Ensure the string pointer is not collected before PerfEventOpen returns.
+ runtime.KeepAlive(sp)
+
+ fd, err := sys.NewFD(rawFd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Kernel has perf_[k,u]probe PMU available, initialize perf event.
+ return newPerfEvent(fd, nil), nil
+}
+
+// tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events.
+// A new trace event group name is generated on every call to support creating
+// multiple trace events for the same kernel or userspace symbol.
+// Path and offset are only set in the case of uprobe(s) and are used to set
+// the executable/library path on the filesystem and the offset where the probe is inserted.
+// A perf event is then opened on the newly-created trace event and returned to the caller.
+func tracefsProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
+ groupPrefix := "ebpf"
+ if args.Group != "" {
+ groupPrefix = args.Group
+ }
+
+ // Generate a random string for each trace event we attempt to create.
+ // This value is used as the 'group' token in tracefs to allow creating
+ // multiple kprobe trace events with the same name.
+ group, err := tracefs.RandomGroup(groupPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("randomizing group name: %w", err)
+ }
+ args.Group = group
+
+ // Create the [k,u]probe trace event using tracefs.
+ evt, err := tracefs.NewEvent(args)
+ if err != nil {
+ return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
+ }
+
+ // Kprobes are ephemeral tracepoints and share the same perf event type.
+ fd, err := openTracepointPerfEvent(evt.ID(), args.Pid)
+ if err != nil {
+ // Make sure we clean up the created tracefs event when we return error.
+ // If a livepatch handler is already active on the symbol, the write to
+ // tracefs will succeed, a trace event will show up, but creating the
+ // perf event will fail with EBUSY.
+ _ = evt.Close()
+ return nil, err
+ }
+
+ return newPerfEvent(fd, evt), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
new file mode 100644
index 000000000..f7a8291f9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
@@ -0,0 +1,191 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// KprobeMultiOptions defines additional parameters that will be used
+// when opening a KprobeMulti Link.
+type KprobeMultiOptions struct {
+ // Symbols takes a list of kernel symbol names to attach an ebpf program to.
+ //
+ // Mutually exclusive with Addresses.
+ Symbols []string
+
+	// Addresses takes a list of kernel symbol addresses in case they cannot
+ // be referred to by name.
+ //
+ // Note that only start addresses can be specified, since the fprobe API
+ // limits the attach point to the function entry or return.
+ //
+ // Mutually exclusive with Symbols.
+ Addresses []uintptr
+
+ // Cookies specifies arbitrary values that can be fetched from an eBPF
+ // program via `bpf_get_attach_cookie()`.
+ //
+ // If set, its length should be equal to the length of Symbols or Addresses.
+ // Each Cookie is assigned to the Symbol or Address specified at the
+ // corresponding slice index.
+ Cookies []uint64
+}
+
+// KprobeMulti attaches the given eBPF program to the entry point of a given set
+// of kernel symbols.
+//
+// The difference with Kprobe() is that multi-kprobe accomplishes this in a
+// single system call, making it significantly faster than attaching many
+// probes one at a time.
+//
+// Requires at least Linux 5.18.
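+//
+// A minimal usage sketch (the symbol names are illustrative):
+//
+//	km, err := KprobeMulti(prog, KprobeMultiOptions{
+//		Symbols: []string{"tcp_v4_connect", "tcp_v6_connect"},
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer km.Close()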
+func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) {
+ return kprobeMulti(prog, opts, 0)
+}
+
+// KretprobeMulti attaches the given eBPF program to the return point of a given
+// set of kernel symbols.
+//
+// The difference with Kretprobe() is that multi-kprobe accomplishes this in a
+// single system call, making it significantly faster than attaching many
+// probes one at a time.
+//
+// Requires at least Linux 5.18.
+func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) {
+ return kprobeMulti(prog, opts, unix.BPF_F_KPROBE_MULTI_RETURN)
+}
+
+func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) {
+ if prog == nil {
+ return nil, errors.New("cannot attach a nil program")
+ }
+
+ syms := uint32(len(opts.Symbols))
+ addrs := uint32(len(opts.Addresses))
+ cookies := uint32(len(opts.Cookies))
+
+ if syms == 0 && addrs == 0 {
+ return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput)
+ }
+ if syms != 0 && addrs != 0 {
+ return nil, fmt.Errorf("Symbols and Addresses are mutually exclusive: %w", errInvalidInput)
+ }
+ if cookies > 0 && cookies != syms && cookies != addrs {
+ return nil, fmt.Errorf("Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput)
+ }
+
+ attr := &sys.LinkCreateKprobeMultiAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_TRACE_KPROBE_MULTI,
+ KprobeMultiFlags: flags,
+ }
+
+ switch {
+ case syms != 0:
+ attr.Count = syms
+ attr.Syms = sys.NewStringSlicePointer(opts.Symbols)
+
+ case addrs != 0:
+ attr.Count = addrs
+ attr.Addrs = sys.NewPointer(unsafe.Pointer(&opts.Addresses[0]))
+ }
+
+ if cookies != 0 {
+ attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0]))
+ }
+
+ fd, err := sys.LinkCreateKprobeMulti(attr)
+ if errors.Is(err, unix.ESRCH) {
+ return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist)
+ }
+ if errors.Is(err, unix.EINVAL) {
+ return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not AttachTraceKprobeMulti?)", err)
+ }
+
+ if err != nil {
+ if haveFeatErr := haveBPFLinkKprobeMulti(); haveFeatErr != nil {
+ return nil, haveFeatErr
+ }
+ return nil, err
+ }
+
+ return &kprobeMultiLink{RawLink{fd, ""}}, nil
+}
+
+type kprobeMultiLink struct {
+ RawLink
+}
+
+var _ Link = (*kprobeMultiLink)(nil)
+
+func (kml *kprobeMultiLink) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported)
+}
+
+func (kml *kprobeMultiLink) Info() (*Info, error) {
+ var info sys.KprobeMultiLinkInfo
+ if err := sys.ObjInfo(kml.fd, &info); err != nil {
+ return nil, fmt.Errorf("kprobe multi link info: %s", err)
+ }
+ extra := &KprobeMultiInfo{
+ count: info.Count,
+ flags: info.Flags,
+ missed: info.Missed,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
+
+var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Name: "probe_kpm_link",
+ Type: ebpf.Kprobe,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ AttachType: ebpf.AttachTraceKprobeMulti,
+ License: "MIT",
+ })
+ if errors.Is(err, unix.E2BIG) {
+ // Kernel doesn't support AttachType field.
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_TRACE_KPROBE_MULTI,
+ Count: 1,
+ Syms: sys.NewStringSlicePointer([]string{"vprintk"}),
+ })
+ switch {
+ case errors.Is(err, unix.EINVAL):
+ return internal.ErrNotSupported
+ // If CONFIG_FPROBE isn't set.
+ case errors.Is(err, unix.EOPNOTSUPP):
+ return internal.ErrNotSupported
+ case err != nil:
+ return err
+ }
+
+ fd.Close()
+
+ return nil
+})
diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go
new file mode 100644
index 000000000..9c34616c9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/link.go
@@ -0,0 +1,530 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+var ErrNotSupported = internal.ErrNotSupported
+
+// Link represents a Program attached to a BPF hook.
+type Link interface {
+ // Replace the current program with a new program.
+ //
+ // Passing a nil program is an error. May return an error wrapping ErrNotSupported.
+ Update(*ebpf.Program) error
+
+ // Persist a link by pinning it into a bpffs.
+ //
+ // May return an error wrapping ErrNotSupported.
+ Pin(string) error
+
+ // Undo a previous call to Pin.
+ //
+ // May return an error wrapping ErrNotSupported.
+ Unpin() error
+
+ // Close frees resources.
+ //
+ // The link will be broken unless it has been successfully pinned.
+ // A link may continue past the lifetime of the process if Close is
+ // not called.
+ Close() error
+
+ // Info returns metadata on a link.
+ //
+ // May return an error wrapping ErrNotSupported.
+ Info() (*Info, error)
+
+ // Prevent external users from implementing this interface.
+ isLink()
+}
+
+// NewLinkFromFD creates a link from a raw fd.
+//
+// Deprecated: use [NewFromFD] instead.
+func NewLinkFromFD(fd int) (Link, error) {
+ return NewFromFD(fd)
+}
+
+// NewFromFD creates a link from a raw fd.
+//
+// You should not use fd after calling this function.
+func NewFromFD(fd int) (Link, error) {
+ sysFD, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return wrapRawLink(&RawLink{fd: sysFD})
+}
+
+// NewFromID returns the link associated with the given id.
+//
+// Returns ErrNotExist if there is no link with the given id.
+func NewFromID(id ID) (Link, error) {
+ getFdAttr := &sys.LinkGetFdByIdAttr{Id: id}
+ fd, err := sys.LinkGetFdById(getFdAttr)
+ if err != nil {
+ return nil, fmt.Errorf("get link fd from ID %d: %w", id, err)
+ }
+
+ return wrapRawLink(&RawLink{fd, ""})
+}
+
+// LoadPinnedLink loads a link that was persisted into a bpffs.
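+//
+// A minimal usage sketch (the bpffs path is an illustrative assumption):
+//
+//	l, err := LoadPinnedLink("/sys/fs/bpf/my_link", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer l.Close()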
+func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
+ raw, err := loadPinnedRawLink(fileName, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return wrapRawLink(raw)
+}
+
+// wrap a RawLink in a more specific type if possible.
+//
+// The function takes ownership of raw and closes it on error.
+func wrapRawLink(raw *RawLink) (_ Link, err error) {
+ defer func() {
+ if err != nil {
+ raw.Close()
+ }
+ }()
+
+ info, err := raw.Info()
+ if err != nil {
+ return nil, err
+ }
+
+ switch info.Type {
+ case RawTracepointType:
+ return &rawTracepoint{*raw}, nil
+ case TracingType:
+ return &tracing{*raw}, nil
+ case CgroupType:
+ return &linkCgroup{*raw}, nil
+ case IterType:
+ return &Iter{*raw}, nil
+ case NetNsType:
+ return &NetNsLink{*raw}, nil
+ case KprobeMultiType:
+ return &kprobeMultiLink{*raw}, nil
+ case UprobeMultiType:
+ return &uprobeMultiLink{*raw}, nil
+ case PerfEventType:
+ return &perfEventLink{*raw, nil}, nil
+ case TCXType:
+ return &tcxLink{*raw}, nil
+ case NetfilterType:
+ return &netfilterLink{*raw}, nil
+ case NetkitType:
+ return &netkitLink{*raw}, nil
+ case XDPType:
+ return &xdpLink{*raw}, nil
+ default:
+ return raw, nil
+ }
+}
+
+// ID uniquely identifies a BPF link.
+type ID = sys.LinkID
+
+// RawLinkOptions control the creation of a raw link.
+type RawLinkOptions struct {
+ // File descriptor to attach to. This differs for each attach type.
+ Target int
+ // Program to attach.
+ Program *ebpf.Program
+ // Attach must match the attach type of Program.
+ Attach ebpf.AttachType
+ // BTF is the BTF of the attachment target.
+ BTF btf.TypeID
+ // Flags control the attach behaviour.
+ Flags uint32
+}
+
+// Info contains metadata on a link.
+type Info struct {
+ Type Type
+ ID ID
+ Program ebpf.ProgramID
+ extra interface{}
+}
+
+type TracingInfo struct {
+ AttachType sys.AttachType
+ TargetObjId uint32
+ TargetBtfId sys.TypeID
+}
+
+type CgroupInfo struct {
+ CgroupId uint64
+ AttachType sys.AttachType
+ _ [4]byte
+}
+
+type NetNsInfo struct {
+ NetnsIno uint32
+ AttachType sys.AttachType
+}
+
+type TCXInfo struct {
+ Ifindex uint32
+ AttachType sys.AttachType
+}
+
+type XDPInfo struct {
+ Ifindex uint32
+}
+
+type NetfilterInfo struct {
+ Pf uint32
+ Hooknum uint32
+ Priority int32
+ Flags uint32
+}
+
+type NetkitInfo struct {
+ Ifindex uint32
+ AttachType sys.AttachType
+}
+
+type KprobeMultiInfo struct {
+ count uint32
+ flags uint32
+ missed uint64
+}
+
+// AddressCount is the number of addresses hooked by the kprobe.
+func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) {
+ return kpm.count, kpm.count > 0
+}
+
+// Flags returns the flags the kprobe-multi link was created with.
+// The second return value is false if the information is unavailable.
+func (kpm *KprobeMultiInfo) Flags() (uint32, bool) {
+	return kpm.flags, kpm.count > 0
+}
+
+// Missed returns the number of missed executions of the attached program.
+// The second return value is false if the information is unavailable.
+func (kpm *KprobeMultiInfo) Missed() (uint64, bool) {
+	return kpm.missed, kpm.count > 0
+}
+
+type PerfEventInfo struct {
+ Type sys.PerfEventType
+ extra interface{}
+}
+
+// Kprobe returns the kprobe-specific information, or nil if this perf event
+// is not a kprobe.
+func (r *PerfEventInfo) Kprobe() *KprobeInfo {
+	e, _ := r.extra.(*KprobeInfo)
+	return e
+}
+
+type KprobeInfo struct {
+ address uint64
+ missed uint64
+}
+
+// Address returns the resolved address of the probed symbol.
+// The second return value is false if the information is unavailable.
+func (kp *KprobeInfo) Address() (uint64, bool) {
+	return kp.address, kp.address > 0
+}
+
+// Missed returns the number of missed executions of the attached program.
+// The second return value is false if the information is unavailable.
+func (kp *KprobeInfo) Missed() (uint64, bool) {
+	return kp.missed, kp.address > 0
+}
+
+// Tracing returns tracing type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Tracing() *TracingInfo {
+ e, _ := r.extra.(*TracingInfo)
+ return e
+}
+
+// Cgroup returns cgroup type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Cgroup() *CgroupInfo {
+ e, _ := r.extra.(*CgroupInfo)
+ return e
+}
+
+// NetNs returns netns type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) NetNs() *NetNsInfo {
+ e, _ := r.extra.(*NetNsInfo)
+ return e
+}
+
+// XDP returns XDP type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) XDP() *XDPInfo {
+ e, _ := r.extra.(*XDPInfo)
+ return e
+}
+
+// TCX returns TCX type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) TCX() *TCXInfo {
+ e, _ := r.extra.(*TCXInfo)
+ return e
+}
+
+// Netfilter returns netfilter type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Netfilter() *NetfilterInfo {
+ e, _ := r.extra.(*NetfilterInfo)
+ return e
+}
+
+// Netkit returns netkit type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Netkit() *NetkitInfo {
+ e, _ := r.extra.(*NetkitInfo)
+ return e
+}
+
+// KprobeMulti returns kprobe-multi type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) KprobeMulti() *KprobeMultiInfo {
+ e, _ := r.extra.(*KprobeMultiInfo)
+ return e
+}
+
+// PerfEvent returns perf-event type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) PerfEvent() *PerfEventInfo {
+ e, _ := r.extra.(*PerfEventInfo)
+ return e
+}
+
+// RawLink is the low-level API to bpf_link.
+//
+// You should consider using the higher level interfaces in this
+// package instead.
+type RawLink struct {
+ fd *sys.FD
+ pinnedPath string
+}
+
+// AttachRawLink creates a raw link.
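+//
+// A minimal usage sketch (the target file descriptor and attach type are
+// illustrative assumptions; they must match the program type):
+//
+//	l, err := AttachRawLink(RawLinkOptions{
+//		Target:  cgroupFD, // e.g. an open cgroupv2 directory fd
+//		Program: prog,
+//		Attach:  ebpf.AttachCGroupInetEgress,
+//	})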
+func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
+ if err := haveBPFLink(); err != nil {
+ return nil, err
+ }
+
+ if opts.Target < 0 {
+ return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd)
+ }
+
+ progFd := opts.Program.FD()
+ if progFd < 0 {
+ return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+ }
+
+ attr := sys.LinkCreateAttr{
+ TargetFd: uint32(opts.Target),
+ ProgFd: uint32(progFd),
+ AttachType: sys.AttachType(opts.Attach),
+ TargetBtfId: opts.BTF,
+ Flags: opts.Flags,
+ }
+ fd, err := sys.LinkCreate(&attr)
+ if err != nil {
+ return nil, fmt.Errorf("create link: %w", err)
+ }
+
+ return &RawLink{fd, ""}, nil
+}
+
+func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("load pinned link: %w", err)
+ }
+
+ return &RawLink{fd, fileName}, nil
+}
+
+func (l *RawLink) isLink() {}
+
+// FD returns the raw file descriptor.
+func (l *RawLink) FD() int {
+ return l.fd.Int()
+}
+
+// Close breaks the link.
+//
+// Use Pin if you want to make the link persistent.
+func (l *RawLink) Close() error {
+ return l.fd.Close()
+}
+
+// Pin persists a link past the lifetime of the process.
+//
+// Calling Close on a pinned Link will not break the link
+// until the pin is removed.
+func (l *RawLink) Pin(fileName string) error {
+ if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil {
+ return err
+ }
+ l.pinnedPath = fileName
+ return nil
+}
+
+// Unpin implements the Link interface.
+func (l *RawLink) Unpin() error {
+ if err := internal.Unpin(l.pinnedPath); err != nil {
+ return err
+ }
+ l.pinnedPath = ""
+ return nil
+}
+
+// IsPinned returns true if the Link has a non-empty pinned path.
+func (l *RawLink) IsPinned() bool {
+ return l.pinnedPath != ""
+}
+
+// Update implements the Link interface.
+func (l *RawLink) Update(new *ebpf.Program) error {
+ return l.UpdateArgs(RawLinkUpdateOptions{
+ New: new,
+ })
+}
+
+// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs.
+type RawLinkUpdateOptions struct {
+ New *ebpf.Program
+ Old *ebpf.Program
+ Flags uint32
+}
+
+// UpdateArgs updates a link based on args.
+func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
+ newFd := opts.New.FD()
+ if newFd < 0 {
+ return fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+ }
+
+ var oldFd int
+ if opts.Old != nil {
+ oldFd = opts.Old.FD()
+ if oldFd < 0 {
+ return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd)
+ }
+ }
+
+ attr := sys.LinkUpdateAttr{
+ LinkFd: l.fd.Uint(),
+ NewProgFd: uint32(newFd),
+ OldProgFd: uint32(oldFd),
+ Flags: opts.Flags,
+ }
+ return sys.LinkUpdate(&attr)
+}
+
+// Info returns metadata about the link.
+//
+// Linktype specific metadata is not included and can be retrieved
+// via the linktype specific Info() method.
+func (l *RawLink) Info() (*Info, error) {
+ var info sys.LinkInfo
+
+ if err := sys.ObjInfo(l.fd, &info); err != nil {
+ return nil, fmt.Errorf("link info: %s", err)
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ nil,
+ }, nil
+}
+
+// Iterator allows iterating over links attached into the kernel.
+type Iterator struct {
+	// The ID of the current link. Only valid after a call to Next.
+ ID ID
+ // The current link. Only valid until a call to Next.
+ // See Take if you want to retain the link.
+ Link Link
+ err error
+}
+
+// Next retrieves the next link.
+//
+// Returns true if another link was found. Call [Iterator.Err] after the function returns false.
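+//
+// A typical loop looks like this (a sketch; error handling abbreviated):
+//
+//	var it Iterator
+//	defer it.Close()
+//	for it.Next() {
+//		fmt.Println(it.ID, it.Link)
+//	}
+//	if err := it.Err(); err != nil {
+//		// handle error
+//	}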
+func (it *Iterator) Next() bool {
+ id := it.ID
+ for {
+ getIdAttr := &sys.LinkGetNextIdAttr{Id: id}
+ err := sys.LinkGetNextId(getIdAttr)
+ if errors.Is(err, os.ErrNotExist) {
+ // There are no more links.
+ break
+ } else if err != nil {
+ it.err = fmt.Errorf("get next link ID: %w", err)
+ break
+ }
+
+ id = getIdAttr.NextId
+ l, err := NewFromID(id)
+ if errors.Is(err, os.ErrNotExist) {
+ // Couldn't load the link fast enough. Try next ID.
+ continue
+ } else if err != nil {
+ it.err = fmt.Errorf("get link for ID %d: %w", id, err)
+ break
+ }
+
+ if it.Link != nil {
+ it.Link.Close()
+ }
+ it.ID, it.Link = id, l
+ return true
+ }
+
+ // No more links or we encountered an error.
+ if it.Link != nil {
+ it.Link.Close()
+ }
+ it.Link = nil
+ return false
+}
+
+// Take the ownership of the current link.
+//
+// It's the caller's responsibility to close the link.
+func (it *Iterator) Take() Link {
+ l := it.Link
+ it.Link = nil
+ return l
+}
+
+// Err returns an error if iteration failed for some reason.
+func (it *Iterator) Err() error {
+ return it.err
+}
+
+func (it *Iterator) Close() {
+ if it.Link != nil {
+ it.Link.Close()
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/link/netfilter.go b/vendor/github.com/cilium/ebpf/link/netfilter.go
new file mode 100644
index 000000000..34be39085
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/netfilter.go
@@ -0,0 +1,90 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+const NetfilterIPDefrag NetfilterAttachFlags = 0 // Enable IP packet defragmentation
+
+type NetfilterAttachFlags uint32
+
+type NetfilterOptions struct {
+ // Program must be a netfilter BPF program.
+ Program *ebpf.Program
+ // The protocol family.
+ ProtocolFamily uint32
+ // The number of the hook you are interested in.
+ HookNumber uint32
+ // Priority within hook
+ Priority int32
+ // Extra link flags
+ Flags uint32
+ // Netfilter flags
+ NetfilterFlags NetfilterAttachFlags
+}
+
+type netfilterLink struct {
+ RawLink
+}
+
+// AttachNetfilter links a netfilter BPF program to a netfilter hook.
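+//
+// A minimal usage sketch (the protocol family, hook number and priority are
+// illustrative; 2 and 1 correspond to NFPROTO_IPV4 and NF_INET_LOCAL_IN in
+// the kernel headers):
+//
+//	l, err := AttachNetfilter(NetfilterOptions{
+//		Program:        prog,
+//		ProtocolFamily: 2, // NFPROTO_IPV4
+//		HookNumber:     1, // NF_INET_LOCAL_IN
+//		Priority:       -128,
+//	})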
+func AttachNetfilter(opts NetfilterOptions) (Link, error) {
+ if opts.Program == nil {
+ return nil, fmt.Errorf("netfilter program is nil")
+ }
+
+ if t := opts.Program.Type(); t != ebpf.Netfilter {
+ return nil, fmt.Errorf("invalid program type %s, expected netfilter", t)
+ }
+
+ progFd := opts.Program.FD()
+ if progFd < 0 {
+ return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+ }
+
+ attr := sys.LinkCreateNetfilterAttr{
+ ProgFd: uint32(opts.Program.FD()),
+ AttachType: sys.BPF_NETFILTER,
+ Flags: opts.Flags,
+ Pf: uint32(opts.ProtocolFamily),
+ Hooknum: uint32(opts.HookNumber),
+ Priority: opts.Priority,
+ NetfilterFlags: uint32(opts.NetfilterFlags),
+ }
+
+ fd, err := sys.LinkCreateNetfilter(&attr)
+ if err != nil {
+ return nil, fmt.Errorf("attach netfilter link: %w", err)
+ }
+
+ return &netfilterLink{RawLink{fd, ""}}, nil
+}
+
+func (*netfilterLink) Update(new *ebpf.Program) error {
+ return fmt.Errorf("netfilter update: %w", ErrNotSupported)
+}
+
+func (nf *netfilterLink) Info() (*Info, error) {
+ var info sys.NetfilterLinkInfo
+ if err := sys.ObjInfo(nf.fd, &info); err != nil {
+ return nil, fmt.Errorf("netfilter link info: %s", err)
+ }
+ extra := &NetfilterInfo{
+ Pf: info.Pf,
+ Hooknum: info.Hooknum,
+ Priority: info.Priority,
+ Flags: info.Flags,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
+
+var _ Link = (*netfilterLink)(nil)
diff --git a/vendor/github.com/cilium/ebpf/link/netkit.go b/vendor/github.com/cilium/ebpf/link/netkit.go
new file mode 100644
index 000000000..5eee3b023
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/netkit.go
@@ -0,0 +1,89 @@
+package link
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type NetkitOptions struct {
+ // Index of the interface to attach to.
+ Interface int
+ // Program to attach.
+ Program *ebpf.Program
+ // One of the AttachNetkit* constants.
+ Attach ebpf.AttachType
+ // Attach relative to an anchor. Optional.
+ Anchor Anchor
+ // Only attach if the expected revision matches.
+ ExpectedRevision uint64
+ // Flags control the attach behaviour. Specify an Anchor instead of
+	// F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
+ Flags uint32
+}
+
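+// AttachNetkit attaches a BPF program to a netkit device.
+//
+// A minimal usage sketch (the interface index and attach type are
+// illustrative assumptions):
+//
+//	l, err := AttachNetkit(NetkitOptions{
+//		Interface: ifindex,
+//		Program:   prog,
+//		Attach:    ebpf.AttachNetkitPrimary,
+//	})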
+func AttachNetkit(opts NetkitOptions) (Link, error) {
+ if opts.Interface < 0 {
+ return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface)
+ }
+
+ if opts.Flags&anchorFlags != 0 {
+ return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target")
+ }
+
+ attr := sys.LinkCreateNetkitAttr{
+ ProgFd: uint32(opts.Program.FD()),
+ AttachType: sys.AttachType(opts.Attach),
+ TargetIfindex: uint32(opts.Interface),
+ ExpectedRevision: opts.ExpectedRevision,
+ Flags: opts.Flags,
+ }
+
+ if opts.Anchor != nil {
+ fdOrID, flags, err := opts.Anchor.anchor()
+ if err != nil {
+ return nil, fmt.Errorf("attach netkit link: %w", err)
+ }
+
+ attr.RelativeFdOrId = fdOrID
+ attr.Flags |= flags
+ }
+
+ fd, err := sys.LinkCreateNetkit(&attr)
+ runtime.KeepAlive(opts.Program)
+ runtime.KeepAlive(opts.Anchor)
+ if err != nil {
+ if haveFeatErr := haveNetkit(); haveFeatErr != nil {
+ return nil, haveFeatErr
+ }
+ return nil, fmt.Errorf("attach netkit link: %w", err)
+ }
+
+ return &netkitLink{RawLink{fd, ""}}, nil
+}
+
+type netkitLink struct {
+ RawLink
+}
+
+var _ Link = (*netkitLink)(nil)
+
+func (netkit *netkitLink) Info() (*Info, error) {
+ var info sys.NetkitLinkInfo
+ if err := sys.ObjInfo(netkit.fd, &info); err != nil {
+ return nil, fmt.Errorf("netkit link info: %s", err)
+ }
+ extra := &NetkitInfo{
+ Ifindex: info.Ifindex,
+ AttachType: info.AttachType,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go
new file mode 100644
index 000000000..b1edd340a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/netns.go
@@ -0,0 +1,55 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+// NetNsLink is a program attached to a network namespace.
+type NetNsLink struct {
+ RawLink
+}
+
+// AttachNetNs attaches a program to a network namespace.
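+//
+// A minimal usage sketch (assumes nsFD is an open handle to a network
+// namespace, e.g. obtained from /proc/self/ns/net, and prog is an SkLookup
+// or FlowDissector program):
+//
+//	l, err := AttachNetNs(nsFD, prog)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer l.Close()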
+func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) {
+ var attach ebpf.AttachType
+ switch t := prog.Type(); t {
+ case ebpf.FlowDissector:
+ attach = ebpf.AttachFlowDissector
+ case ebpf.SkLookup:
+ attach = ebpf.AttachSkLookup
+ default:
+ return nil, fmt.Errorf("can't attach %v to network namespace", t)
+ }
+
+ link, err := AttachRawLink(RawLinkOptions{
+ Target: ns,
+ Program: prog,
+ Attach: attach,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &NetNsLink{*link}, nil
+}
+
+func (ns *NetNsLink) Info() (*Info, error) {
+ var info sys.NetNsLinkInfo
+ if err := sys.ObjInfo(ns.fd, &info); err != nil {
+ return nil, fmt.Errorf("netns link info: %s", err)
+ }
+ extra := &NetNsInfo{
+ NetnsIno: info.NetnsIno,
+ AttachType: info.AttachType,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go
new file mode 100644
index 000000000..1d8feb58c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/perf_event.go
@@ -0,0 +1,332 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Getting the terminology right is usually the hardest part. For posterity and
+// for staying sane during implementation:
+//
+// - trace event: Representation of a kernel runtime hook. Filesystem entries
+//   under <tracefs>/events. Can be tracepoints (static), kprobes or uprobes.
+//   Can be instantiated into perf events (see below).
+// - tracepoint: A predetermined hook point in the kernel. Exposed as trace
+//   events in (sub)directories under <tracefs>/events. Cannot be closed or
+//   removed; they are static.
+// - k(ret)probe: Ephemeral trace events based on entry or exit points of
+//   exported kernel symbols. kprobe-based (tracefs) trace events can be
+//   created system-wide by writing to the <tracefs>/kprobe_events file, or
+//   they can be scoped to the current process by creating PMU perf events.
+// - u(ret)probe: Ephemeral trace events based on user-provided ELF binaries
+//   and offsets. uprobe-based (tracefs) trace events can be
+//   created system-wide by writing to the <tracefs>/uprobe_events file, or
+//   they can be scoped to the current process by creating PMU perf events.
+// - perf event: An object instantiated based on an existing trace event or
+//   kernel symbol. Referred to by fd in userspace.
+//   Exactly one eBPF program can be attached to a perf event. Multiple perf
+//   events can be created from a single trace event. Closing a perf event
+//   stops any further invocations of the attached eBPF program.
+
+var (
+ errInvalidInput = tracefs.ErrInvalidInput
+)
+
+const (
+ perfAllThreads = -1
+)
+
+// A perfEvent represents a perf event kernel object. Exactly one eBPF program
+// can be attached to it. It is created based on a tracefs trace event or a
+// Performance Monitoring Unit (PMU).
+type perfEvent struct {
+ // Trace event backing this perfEvent. May be nil.
+ tracefsEvent *tracefs.Event
+
+ // This is the perf event FD.
+ fd *sys.FD
+}
+
+func newPerfEvent(fd *sys.FD, event *tracefs.Event) *perfEvent {
+ pe := &perfEvent{event, fd}
+ // Both event and fd have their own finalizer, but we want to
+ // guarantee that they are closed in a certain order.
+ runtime.SetFinalizer(pe, (*perfEvent).Close)
+ return pe
+}
+
+func (pe *perfEvent) Close() error {
+ runtime.SetFinalizer(pe, nil)
+
+ if err := pe.fd.Close(); err != nil {
+ return fmt.Errorf("closing perf event fd: %w", err)
+ }
+
+ if pe.tracefsEvent != nil {
+ return pe.tracefsEvent.Close()
+ }
+
+ return nil
+}
+
+// PerfEvent is implemented by some Link types which use a perf event under
+// the hood.
+type PerfEvent interface {
+ // PerfEvent returns a file for the underlying perf event.
+ //
+	// It is the caller's responsibility to close the returned file.
+	//
+	// Making changes to the associated perf event leads to
+	// undefined behaviour.
+ PerfEvent() (*os.File, error)
+}
+
+// perfEventLink represents a bpf perf link.
+type perfEventLink struct {
+ RawLink
+ pe *perfEvent
+}
+
+func (pl *perfEventLink) isLink() {}
+
+func (pl *perfEventLink) Close() error {
+ if err := pl.fd.Close(); err != nil {
+ return fmt.Errorf("perf link close: %w", err)
+ }
+
+ // when created from pinned link
+ if pl.pe == nil {
+ return nil
+ }
+
+ if err := pl.pe.Close(); err != nil {
+ return fmt.Errorf("perf event close: %w", err)
+ }
+ return nil
+}
+
+func (pl *perfEventLink) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("perf event link update: %w", ErrNotSupported)
+}
+
+var _ PerfEvent = (*perfEventLink)(nil)
+
+func (pl *perfEventLink) PerfEvent() (*os.File, error) {
+ // when created from pinned link
+ if pl.pe == nil {
+ return nil, ErrNotSupported
+ }
+
+ fd, err := pl.pe.fd.Dup()
+ if err != nil {
+ return nil, err
+ }
+
+ return fd.File("perf-event"), nil
+}
+
+func (pl *perfEventLink) Info() (*Info, error) {
+ var info sys.PerfEventLinkInfo
+ if err := sys.ObjInfo(pl.fd, &info); err != nil {
+ return nil, fmt.Errorf("perf event link info: %s", err)
+ }
+
+ var extra2 interface{}
+ switch info.PerfEventType {
+ case sys.BPF_PERF_EVENT_KPROBE, sys.BPF_PERF_EVENT_KRETPROBE:
+ var kprobeInfo sys.KprobeLinkInfo
+ if err := sys.ObjInfo(pl.fd, &kprobeInfo); err != nil {
+ return nil, fmt.Errorf("kprobe link info: %s", err)
+ }
+ extra2 = &KprobeInfo{
+ address: kprobeInfo.Addr,
+ missed: kprobeInfo.Missed,
+ }
+ }
+
+ extra := &PerfEventInfo{
+ Type: info.PerfEventType,
+ extra: extra2,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
+
+// perfEventIoctl implements Link and handles the perf event lifecycle
+// via ioctl().
+type perfEventIoctl struct {
+ *perfEvent
+}
+
+func (pi *perfEventIoctl) isLink() {}
+
+// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"),
+// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array
+// owned by the perf event, which means multiple programs can be attached
+// simultaneously.
+//
+// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event
+// returns EEXIST.
+//
+// Detaching a program from a perf event is currently not possible, so a
+// program replacement mechanism cannot be implemented for perf events.
+func (pi *perfEventIoctl) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Pin(string) error {
+ return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Unpin() error {
+ return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Info() (*Info, error) {
+ return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported)
+}
+
+var _ PerfEvent = (*perfEventIoctl)(nil)
+
+func (pi *perfEventIoctl) PerfEvent() (*os.File, error) {
+ fd, err := pi.fd.Dup()
+ if err != nil {
+ return nil, err
+ }
+
+ return fd.File("perf-event"), nil
+}
+
+// attach the given eBPF prog to the perf event stored in pe.
+// pe must contain a valid perf event fd.
+// prog's type must match the program type stored in pe.
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program, cookie uint64) (Link, error) {
+ if prog == nil {
+ return nil, errors.New("cannot attach a nil program")
+ }
+ if prog.FD() < 0 {
+ return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
+ }
+
+ if err := haveBPFLinkPerfEvent(); err == nil {
+ return attachPerfEventLink(pe, prog, cookie)
+ }
+
+ if cookie != 0 {
+ return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
+ }
+
+ return attachPerfEventIoctl(pe, prog)
+}
+
+func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) {
+ // Assign the eBPF program to the perf event.
+ err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
+ if err != nil {
+ return nil, fmt.Errorf("setting perf event bpf program: %w", err)
+ }
+
+ // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values.
+ if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
+ return nil, fmt.Errorf("enable perf event: %s", err)
+ }
+
+ return &perfEventIoctl{pe}, nil
+}
+
+// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+).
+//
+// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
+func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program, cookie uint64) (*perfEventLink, error) {
+ fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
+ ProgFd: uint32(prog.FD()),
+ TargetFd: pe.fd.Uint(),
+ AttachType: sys.BPF_PERF_EVENT,
+ BpfCookie: cookie,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("cannot create bpf perf link: %v", err)
+ }
+
+ return &perfEventLink{RawLink{fd: fd}, pe}, nil
+}
+
+// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str.
+func unsafeStringPtr(str string) (unsafe.Pointer, error) {
+ p, err := unix.BytePtrFromString(str)
+ if err != nil {
+ return nil, err
+ }
+ return unsafe.Pointer(p), nil
+}
+
+// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
+// [k,u]probes created by writing to <tracefs>/[k,u]probe_events are tracepoints
+// behind the scenes, and can be attached to using these perf events.
+func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) {
+ attr := unix.PerfEventAttr{
+ Type: unix.PERF_TYPE_TRACEPOINT,
+ Config: tid,
+ Sample_type: unix.PERF_SAMPLE_RAW,
+ Sample: 1,
+ Wakeup: 1,
+ }
+
+ cpu := 0
+ if pid != perfAllThreads {
+ cpu = -1
+ }
+ fd, err := unix.PerfEventOpen(&attr, pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC)
+ if err != nil {
+ return nil, fmt.Errorf("opening tracepoint perf event: %w", err)
+ }
+
+ return sys.NewFD(fd)
+}
+
+// Probe BPF perf link.
+//
+// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307
+// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
+var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Name: "probe_bpf_perf_link",
+ Type: ebpf.Kprobe,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ License: "MIT",
+ })
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_PERF_EVENT,
+ })
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go
new file mode 100644
index 000000000..d8a2a15f9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/program.go
@@ -0,0 +1,107 @@
+package link
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type RawAttachProgramOptions struct {
+	// Target to attach to. This is usually a file descriptor but may refer to
+ // something else based on the attach type.
+ Target int
+ // Program to attach.
+ Program *ebpf.Program
+ // Attach must match the attach type of Program.
+ Attach ebpf.AttachType
+ // Attach relative to an anchor. Optional.
+ Anchor Anchor
+ // Flags control the attach behaviour. Specify an Anchor instead of
+ // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
+ Flags uint32
+ // Only attach if the internal revision matches the given value.
+ ExpectedRevision uint64
+}
+
+// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH.
+//
+// You should use one of the higher level abstractions available in this
+// package if possible.
+func RawAttachProgram(opts RawAttachProgramOptions) error {
+ if opts.Flags&anchorFlags != 0 {
+ return fmt.Errorf("disallowed flags: use Anchor to specify attach target")
+ }
+
+ attr := sys.ProgAttachAttr{
+ TargetFdOrIfindex: uint32(opts.Target),
+ AttachBpfFd: uint32(opts.Program.FD()),
+ AttachType: uint32(opts.Attach),
+ AttachFlags: uint32(opts.Flags),
+ ExpectedRevision: opts.ExpectedRevision,
+ }
+
+ if opts.Anchor != nil {
+ fdOrID, flags, err := opts.Anchor.anchor()
+ if err != nil {
+ return fmt.Errorf("attach program: %w", err)
+ }
+
+ if flags == sys.BPF_F_REPLACE {
+ // Ensure that replacing a program works on old kernels.
+ attr.ReplaceBpfFd = fdOrID
+ } else {
+ attr.RelativeFdOrId = fdOrID
+ attr.AttachFlags |= flags
+ }
+ }
+
+ if err := sys.ProgAttach(&attr); err != nil {
+ if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
+ return haveFeatErr
+ }
+ return fmt.Errorf("attach program: %w", err)
+ }
+ runtime.KeepAlive(opts.Program)
+
+ return nil
+}
+
+type RawDetachProgramOptions RawAttachProgramOptions
+
+// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH.
+//
+// You should use one of the higher level abstractions available in this
+// package if possible.
+func RawDetachProgram(opts RawDetachProgramOptions) error {
+ if opts.Flags&anchorFlags != 0 {
+ return fmt.Errorf("disallowed flags: use Anchor to specify attach target")
+ }
+
+ attr := sys.ProgDetachAttr{
+ TargetFdOrIfindex: uint32(opts.Target),
+ AttachBpfFd: uint32(opts.Program.FD()),
+ AttachType: uint32(opts.Attach),
+ ExpectedRevision: opts.ExpectedRevision,
+ }
+
+ if opts.Anchor != nil {
+ fdOrID, flags, err := opts.Anchor.anchor()
+ if err != nil {
+ return fmt.Errorf("detach program: %w", err)
+ }
+
+ attr.RelativeFdOrId = fdOrID
+ attr.AttachFlags |= flags
+ }
+
+ if err := sys.ProgDetach(&attr); err != nil {
+ if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
+ return haveFeatErr
+ }
+ return fmt.Errorf("can't detach program: %w", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go
new file mode 100644
index 000000000..fe534f8ef
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/query.go
@@ -0,0 +1,111 @@
+package link
+
+import (
+ "fmt"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+// QueryOptions defines additional parameters when querying for programs.
+type QueryOptions struct {
+ // Target to query. This is usually a file descriptor but may refer to
+ // something else based on the attach type.
+ Target int
+ // Attach specifies the AttachType of the programs queried for
+ Attach ebpf.AttachType
+ // QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE
+ QueryFlags uint32
+}
+
+// QueryResult describes which programs and links are active.
+type QueryResult struct {
+ // List of attached programs.
+ Programs []AttachedProgram
+
+ // Incremented by one every time the set of attached programs changes.
+ // May be zero if not supported by the [ebpf.AttachType].
+ Revision uint64
+}
+
+// HaveLinkInfo returns true if the kernel supports querying link information
+// for a particular [ebpf.AttachType].
+func (qr *QueryResult) HaveLinkInfo() bool {
+ return qr.Revision > 0
+}
+
+type AttachedProgram struct {
+ ID ebpf.ProgramID
+ linkID ID
+}
+
+// LinkID returns the ID associated with the program.
+//
+// Returns 0, false if the kernel doesn't support retrieving the ID or if the
+// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you
+// need to tell the two apart.
+func (ap *AttachedProgram) LinkID() (ID, bool) {
+ return ap.linkID, ap.linkID != 0
+}
+
+// QueryPrograms retrieves a list of programs for the given AttachType.
+//
+// Returns a slice of attached programs, which may be empty.
+// revision counts how many times the set of attached programs has changed and
+// may be zero if not supported by the [ebpf.AttachType].
+// Returns ErrNotSupported on a kernel without BPF_PROG_QUERY.
+func QueryPrograms(opts QueryOptions) (*QueryResult, error) {
+ // query the number of programs to allocate correct slice size
+ attr := sys.ProgQueryAttr{
+ TargetFdOrIfindex: uint32(opts.Target),
+ AttachType: sys.AttachType(opts.Attach),
+ QueryFlags: opts.QueryFlags,
+ }
+ err := sys.ProgQuery(&attr)
+ if err != nil {
+ if haveFeatErr := haveProgQuery(); haveFeatErr != nil {
+ return nil, fmt.Errorf("query programs: %w", haveFeatErr)
+ }
+ return nil, fmt.Errorf("query programs: %w", err)
+ }
+ if attr.Count == 0 {
+ return &QueryResult{Revision: attr.Revision}, nil
+ }
+
+ // The minimum bpf_mprog revision is 1, so we can use the field to detect
+ // whether the attach type supports link ids.
+ haveLinkIDs := attr.Revision != 0
+
+ count := attr.Count
+ progIds := make([]ebpf.ProgramID, count)
+ attr = sys.ProgQueryAttr{
+ TargetFdOrIfindex: uint32(opts.Target),
+ AttachType: sys.AttachType(opts.Attach),
+ QueryFlags: opts.QueryFlags,
+ Count: count,
+ ProgIds: sys.NewPointer(unsafe.Pointer(&progIds[0])),
+ }
+
+ var linkIds []ID
+ if haveLinkIDs {
+ linkIds = make([]ID, count)
+ attr.LinkIds = sys.NewPointer(unsafe.Pointer(&linkIds[0]))
+ }
+
+ if err := sys.ProgQuery(&attr); err != nil {
+ return nil, fmt.Errorf("query programs: %w", err)
+ }
+
+ // NB: attr.Count might have changed between the two syscalls.
+ var programs []AttachedProgram
+ for i, id := range progIds[:attr.Count] {
+ ap := AttachedProgram{ID: id}
+ if haveLinkIDs {
+ ap.linkID = linkIds[i]
+ }
+ programs = append(programs, ap)
+ }
+
+ return &QueryResult{programs, attr.Revision}, nil
+}
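+
+// Usage sketch (illustrative only, not part of the upstream cilium/ebpf file):
+// listing the programs attached to a cgroup with QueryPrograms. The cgroup
+// path and attach type are assumptions; error handling is shortened.
+//
+//	cgroup, err := os.Open("/sys/fs/cgroup/unified") // assumed cgroup mount
+//	if err != nil { ... }
+//	defer cgroup.Close()
+//
+//	result, err := QueryPrograms(QueryOptions{
+//		Target: int(cgroup.Fd()),
+//		Attach: ebpf.AttachCGroupInetIngress,
+//	})
+//	if err != nil { ... }
+//	for _, p := range result.Programs {
+//		if id, ok := p.LinkID(); ok {
+//			fmt.Println("program", p.ID, "attached via link", id)
+//		}
+//	}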
diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
new file mode 100644
index 000000000..925e621cb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
@@ -0,0 +1,87 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type RawTracepointOptions struct {
+ // Tracepoint name.
+ Name string
+ // Program must be of type RawTracepoint*
+ Program *ebpf.Program
+}
+
+// AttachRawTracepoint links a BPF program to a raw_tracepoint.
+//
+// Requires at least Linux 4.17.
+func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable {
+ return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t)
+ }
+ if opts.Program.FD() < 0 {
+ return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
+ }
+
+ fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+ Name: sys.NewStringPointer(opts.Name),
+ ProgFd: uint32(opts.Program.FD()),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ err = haveBPFLink()
+ if errors.Is(err, ErrNotSupported) {
+ // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction")
+ // raw_tracepoints are just a plain fd.
+ return &simpleRawTracepoint{fd}, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &rawTracepoint{RawLink{fd: fd}}, nil
+}
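+
+// Usage sketch (illustrative only, not part of the upstream cilium/ebpf file):
+// attaching a RawTracepoint program to the sched_switch raw tracepoint. The
+// program is assumed to be loaded already; error handling is shortened.
+//
+//	tp, err := AttachRawTracepoint(RawTracepointOptions{
+//		Name:    "sched_switch",
+//		Program: prog, // assumed *ebpf.Program of type RawTracepoint
+//	})
+//	if err != nil { ... }
+//	defer tp.Close()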
+
+type simpleRawTracepoint struct {
+ fd *sys.FD
+}
+
+var _ Link = (*simpleRawTracepoint)(nil)
+
+func (frt *simpleRawTracepoint) isLink() {}
+
+func (frt *simpleRawTracepoint) Close() error {
+ return frt.fd.Close()
+}
+
+func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error {
+ return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Pin(string) error {
+ return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Unpin() error {
+ return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Info() (*Info, error) {
+ return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported)
+}
+
+type rawTracepoint struct {
+ RawLink
+}
+
+var _ Link = (*rawTracepoint)(nil)
+
+func (rt *rawTracepoint) Update(_ *ebpf.Program) error {
+ return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go
new file mode 100644
index 000000000..84f0b656f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go
@@ -0,0 +1,40 @@
+package link
+
+import (
+ "syscall"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// AttachSocketFilter attaches a SocketFilter BPF program to a socket.
+func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error {
+ rawConn, err := conn.SyscallConn()
+ if err != nil {
+ return err
+ }
+ var ssoErr error
+ err = rawConn.Control(func(fd uintptr) {
+ ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD())
+ })
+ if ssoErr != nil {
+ return ssoErr
+ }
+ return err
+}
+
+// DetachSocketFilter detaches a SocketFilter BPF program from a socket.
+func DetachSocketFilter(conn syscall.Conn) error {
+ rawConn, err := conn.SyscallConn()
+ if err != nil {
+ return err
+ }
+ var ssoErr error
+ err = rawConn.Control(func(fd uintptr) {
+ ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0)
+ })
+ if ssoErr != nil {
+ return ssoErr
+ }
+ return err
+}
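+
+// Usage sketch (illustrative only, not part of the upstream cilium/ebpf file):
+// attaching and detaching a SocketFilter program on a UDP socket.
+// *net.UDPConn satisfies syscall.Conn; error handling is shortened.
+//
+//	conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
+//	if err != nil { ... }
+//	defer conn.Close()
+//
+//	if err := AttachSocketFilter(conn, prog); err != nil { ... }
+//	// ... read filtered traffic ...
+//	if err := DetachSocketFilter(conn); err != nil { ... }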
diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go
new file mode 100644
index 000000000..d09b5acb0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/syscalls.go
@@ -0,0 +1,200 @@
+package link
+
+import (
+ "errors"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Type is the kind of link.
+type Type = sys.LinkType
+
+// Valid link types.
+const (
+ UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC
+ RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT
+ TracingType = sys.BPF_LINK_TYPE_TRACING
+ CgroupType = sys.BPF_LINK_TYPE_CGROUP
+ IterType = sys.BPF_LINK_TYPE_ITER
+ NetNsType = sys.BPF_LINK_TYPE_NETNS
+ XDPType = sys.BPF_LINK_TYPE_XDP
+ PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT
+ KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI
+ TCXType = sys.BPF_LINK_TYPE_TCX
+ UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI
+ NetfilterType = sys.BPF_LINK_TYPE_NETFILTER
+ NetkitType = sys.BPF_LINK_TYPE_NETKIT
+)
+
+var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Type: ebpf.CGroupSKB,
+ License: "MIT",
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ })
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ // BPF_PROG_ATTACH was introduced at the same time as CGroupSKB,
+ // so being able to load the program is enough to infer that we
+ // have the syscall.
+ prog.Close()
+ return nil
+})
+
+var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", "5.5", func() error {
+ if err := haveProgAttach(); err != nil {
+ return err
+ }
+
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Type: ebpf.CGroupSKB,
+ AttachType: ebpf.AttachCGroupInetIngress,
+ License: "MIT",
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ })
+
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ defer prog.Close()
+
+ // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs.
+ // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't
+ // present.
+ attr := sys.ProgAttachAttr{
+ // We rely on this being checked after attachFlags.
+ TargetFdOrIfindex: ^uint32(0),
+ AttachBpfFd: uint32(prog.FD()),
+ AttachType: uint32(ebpf.AttachCGroupInetIngress),
+ AttachFlags: uint32(flagReplace),
+ }
+
+ err = sys.ProgAttach(&attr)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
+
+var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error {
+ attr := sys.LinkCreateAttr{
+ // This is a hopefully invalid file descriptor, which triggers EBADF.
+ TargetFd: ^uint32(0),
+ ProgFd: ^uint32(0),
+ AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+ }
+ _, err := sys.LinkCreate(&attr)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
+
+var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() error {
+ attr := sys.ProgQueryAttr{
+ // We rely on this being checked during the syscall.
+ // With an otherwise correct payload we expect EBADF here
+ // as an indication that the feature is present.
+ TargetFdOrIfindex: ^uint32(0),
+ AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+ }
+
+ err := sys.ProgQuery(&attr)
+
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ if err != nil {
+ return ErrNotSupported
+ }
+ return errors.New("syscall succeeded unexpectedly")
+})
+
+var haveTCX = internal.NewFeatureTest("tcx", "6.6", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Type: ebpf.SchedCLS,
+ License: "MIT",
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ })
+
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ defer prog.Close()
+ attr := sys.LinkCreateTcxAttr{
+ // We rely on this being checked during the syscall.
+ // With an otherwise correct payload we expect ENODEV here
+ // as an indication that the feature is present.
+ TargetIfindex: ^uint32(0),
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.AttachType(ebpf.AttachTCXIngress),
+ }
+
+ _, err = sys.LinkCreateTcx(&attr)
+
+ if errors.Is(err, unix.ENODEV) {
+ return nil
+ }
+ if err != nil {
+ return ErrNotSupported
+ }
+ return errors.New("syscall succeeded unexpectedly")
+})
+
+var haveNetkit = internal.NewFeatureTest("netkit", "6.7", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Type: ebpf.SchedCLS,
+ License: "MIT",
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ })
+
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ defer prog.Close()
+ attr := sys.LinkCreateNetkitAttr{
+ // We rely on this being checked during the syscall.
+ // With an otherwise correct payload we expect ENODEV here
+ // as an indication that the feature is present.
+ TargetIfindex: ^uint32(0),
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.AttachType(ebpf.AttachNetkitPrimary),
+ }
+
+ _, err = sys.LinkCreateNetkit(&attr)
+
+ if errors.Is(err, unix.ENODEV) {
+ return nil
+ }
+ if err != nil {
+ return ErrNotSupported
+ }
+ return errors.New("syscall succeeded unexpectedly")
+})
diff --git a/vendor/github.com/cilium/ebpf/link/tcx.go b/vendor/github.com/cilium/ebpf/link/tcx.go
new file mode 100644
index 000000000..ac045b71d
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tcx.go
@@ -0,0 +1,89 @@
+package link
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type TCXOptions struct {
+ // Index of the interface to attach to.
+ Interface int
+ // Program to attach.
+ Program *ebpf.Program
+ // One of the AttachTCX* constants.
+ Attach ebpf.AttachType
+ // Attach relative to an anchor. Optional.
+ Anchor Anchor
+ // Only attach if the expected revision matches.
+ ExpectedRevision uint64
+ // Flags control the attach behaviour. Specify an Anchor instead of
+ // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
+ Flags uint32
+}
+
+func AttachTCX(opts TCXOptions) (Link, error) {
+ if opts.Interface < 0 {
+ return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface)
+ }
+
+ if opts.Flags&anchorFlags != 0 {
+ return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target")
+ }
+
+ attr := sys.LinkCreateTcxAttr{
+ ProgFd: uint32(opts.Program.FD()),
+ AttachType: sys.AttachType(opts.Attach),
+ TargetIfindex: uint32(opts.Interface),
+ ExpectedRevision: opts.ExpectedRevision,
+ Flags: opts.Flags,
+ }
+
+ if opts.Anchor != nil {
+ fdOrID, flags, err := opts.Anchor.anchor()
+ if err != nil {
+ return nil, fmt.Errorf("attach tcx link: %w", err)
+ }
+
+ attr.RelativeFdOrId = fdOrID
+ attr.Flags |= flags
+ }
+
+ fd, err := sys.LinkCreateTcx(&attr)
+ runtime.KeepAlive(opts.Program)
+ runtime.KeepAlive(opts.Anchor)
+ if err != nil {
+ if haveFeatErr := haveTCX(); haveFeatErr != nil {
+ return nil, haveFeatErr
+ }
+ return nil, fmt.Errorf("attach tcx link: %w", err)
+ }
+
+ return &tcxLink{RawLink{fd, ""}}, nil
+}
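+
+// Usage sketch (illustrative only, not part of the upstream cilium/ebpf file):
+// attaching a SchedCLS program to the TCX ingress hook of an interface. The
+// interface name is an assumption; error handling is shortened.
+//
+//	iface, err := net.InterfaceByName("eth0")
+//	if err != nil { ... }
+//
+//	l, err := AttachTCX(TCXOptions{
+//		Interface: iface.Index,
+//		Program:   prog, // assumed *ebpf.Program of type SchedCLS
+//		Attach:    ebpf.AttachTCXIngress,
+//	})
+//	if err != nil { ... }
+//	defer l.Close()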
+
+type tcxLink struct {
+ RawLink
+}
+
+var _ Link = (*tcxLink)(nil)
+
+func (tcx *tcxLink) Info() (*Info, error) {
+ var info sys.TcxLinkInfo
+ if err := sys.ObjInfo(tcx.fd, &info); err != nil {
+ return nil, fmt.Errorf("tcx link info: %s", err)
+ }
+ extra := &TCXInfo{
+ Ifindex: info.Ifindex,
+ AttachType: info.AttachType,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go
new file mode 100644
index 000000000..6fc78b982
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go
@@ -0,0 +1,70 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/tracefs"
+)
+
+// TracepointOptions defines additional parameters that will be used
+// when loading Tracepoints.
+type TracepointOptions struct {
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+}
+
+// Tracepoint attaches the given eBPF program to the tracepoint with the given
+// group and name. See /sys/kernel/tracing/events to find available
+// tracepoints. The top-level directory is the group, the event's subdirectory
+// is the name. Example:
+//
+// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil)
+//
+// Losing the reference to the resulting Link (tp) will close the Tracepoint
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is
+// only possible as of kernel 4.14 (commit cf5f5ce).
+//
+// The returned Link may implement [PerfEvent].
+func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) {
+ if group == "" || name == "" {
+ return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if prog.Type() != ebpf.TracePoint {
+ return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput)
+ }
+
+ tid, err := tracefs.EventID(group, name)
+ if err != nil {
+ return nil, err
+ }
+
+ fd, err := openTracepointPerfEvent(tid, perfAllThreads)
+ if err != nil {
+ return nil, err
+ }
+
+ var cookie uint64
+ if opts != nil {
+ cookie = opts.Cookie
+ }
+
+ pe := newPerfEvent(fd, nil)
+
+ lnk, err := attachPerfEvent(pe, prog, cookie)
+ if err != nil {
+ pe.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go
new file mode 100644
index 000000000..9e570afc9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tracing.go
@@ -0,0 +1,218 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+type tracing struct {
+ RawLink
+}
+
+func (f *tracing) Update(new *ebpf.Program) error {
+ return fmt.Errorf("tracing update: %w", ErrNotSupported)
+}
+
+func (f *tracing) Info() (*Info, error) {
+ var info sys.TracingLinkInfo
+ if err := sys.ObjInfo(f.fd, &info); err != nil {
+ return nil, fmt.Errorf("tracing link info: %s", err)
+ }
+ extra := &TracingInfo{
+ TargetObjId: info.TargetObjId,
+ TargetBtfId: info.TargetBtfId,
+ AttachType: info.AttachType,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
+
+// AttachFreplace attaches the given eBPF program to the function it replaces.
+//
+// The program and name can either be provided at link time, or can be provided
+// at program load time. If they were provided at load time, they should be nil
+// and empty respectively here, as they will be ignored by the kernel.
+// Examples:
+//
+// AttachFreplace(dispatcher, "function", replacement)
+// AttachFreplace(nil, "", replacement)
+func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) {
+ if (name == "") != (targetProg == nil) {
+ return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if prog.Type() != ebpf.Extension {
+ return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput)
+ }
+
+ var (
+ target int
+ typeID btf.TypeID
+ )
+ if targetProg != nil {
+ btfHandle, err := targetProg.Handle()
+ if err != nil {
+ return nil, err
+ }
+ defer btfHandle.Close()
+
+ spec, err := btfHandle.Spec(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var function *btf.Func
+ if err := spec.TypeByName(name, &function); err != nil {
+ return nil, err
+ }
+
+ target = targetProg.FD()
+ typeID, err = spec.TypeID(function)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ link, err := AttachRawLink(RawLinkOptions{
+ Target: target,
+ Program: prog,
+ Attach: ebpf.AttachNone,
+ BTF: typeID,
+ })
+ if errors.Is(err, sys.ENOTSUPP) {
+ // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+ return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &tracing{*link}, nil
+}
+
+type TracingOptions struct {
+ // Program must be of type Tracing with attach type
+ // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or
+ // AttachTraceRawTp.
+ Program *ebpf.Program
+ // Program attach type. Can be one of:
+ // - AttachTraceFEntry
+ // - AttachTraceFExit
+ // - AttachModifyReturn
+ // - AttachTraceRawTp
+ // This field is optional.
+ AttachType ebpf.AttachType
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ Cookie uint64
+}
+
+type LSMOptions struct {
+ // Program must be of type LSM with attach type
+ // AttachLSMMac.
+ Program *ebpf.Program
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ Cookie uint64
+}
+
+// attachBTFID links a BPF program of type Tracing or LSM to the btf_id it attaches to.
+func attachBTFID(program *ebpf.Program, at ebpf.AttachType, cookie uint64) (Link, error) {
+ if program.FD() < 0 {
+ return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd)
+ }
+
+ var (
+ fd *sys.FD
+ err error
+ )
+ switch at {
+ case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachTraceRawTp,
+ ebpf.AttachModifyReturn, ebpf.AttachLSMMac:
+ // Attach via BPF link
+ fd, err = sys.LinkCreateTracing(&sys.LinkCreateTracingAttr{
+ ProgFd: uint32(program.FD()),
+ AttachType: sys.AttachType(at),
+ Cookie: cookie,
+ })
+ if err == nil {
+ break
+ }
+ if !errors.Is(err, unix.EINVAL) && !errors.Is(err, sys.ENOTSUPP) {
+ return nil, fmt.Errorf("create tracing link: %w", err)
+ }
+ fallthrough
+ case ebpf.AttachNone:
+ // Attach via RawTracepointOpen
+ if cookie > 0 {
+ return nil, fmt.Errorf("create raw tracepoint with cookie: %w", ErrNotSupported)
+ }
+
+ fd, err = sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+ ProgFd: uint32(program.FD()),
+ })
+ if errors.Is(err, sys.ENOTSUPP) {
+ // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+ return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("create raw tracepoint: %w", err)
+ }
+ default:
+ return nil, fmt.Errorf("invalid attach type: %s", at.String())
+ }
+
+ raw := RawLink{fd: fd}
+ info, err := raw.Info()
+ if err != nil {
+ raw.Close()
+ return nil, err
+ }
+
+ if info.Type == RawTracepointType {
+ // Sadness upon sadness: a Tracing program with AttachRawTp returns
+ // a raw_tracepoint link. Other types return a tracing link.
+ return &rawTracepoint{raw}, nil
+ }
+ return &tracing{raw}, nil
+}
+
+// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or
+// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined
+// in kernel modules.
+func AttachTracing(opts TracingOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.Tracing {
+ return nil, fmt.Errorf("invalid program type %s, expected Tracing", t)
+ }
+
+ switch opts.AttachType {
+ case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachModifyReturn,
+ ebpf.AttachTraceRawTp, ebpf.AttachNone:
+ default:
+ return nil, fmt.Errorf("invalid attach type: %s", opts.AttachType.String())
+ }
+
+ return attachBTFID(opts.Program, opts.AttachType, opts.Cookie)
+}
+
+// AttachLSM links a Linux security module (LSM) BPF Program to a BPF
+// hook defined in kernel modules.
+func AttachLSM(opts LSMOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.LSM {
+ return nil, fmt.Errorf("invalid program type %s, expected LSM", t)
+ }
+
+ return attachBTFID(opts.Program, ebpf.AttachLSMMac, opts.Cookie)
+}
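+
+// Usage sketch (illustrative only, not part of the upstream cilium/ebpf file):
+// attaching a Tracing program as an fentry and an LSM program to its hook.
+// Both programs are assumed to be loaded with matching attach types; error
+// handling is shortened.
+//
+//	fentry, err := AttachTracing(TracingOptions{
+//		Program:    tracingProg, // assumed program loaded with AttachTraceFEntry
+//		AttachType: ebpf.AttachTraceFEntry,
+//	})
+//	if err != nil { ... }
+//	defer fentry.Close()
+//
+//	lsm, err := AttachLSM(LSMOptions{Program: lsmProg}) // assumed program loaded with AttachLSMMac
+//	if err != nil { ... }
+//	defer lsm.Close()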
diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go
new file mode 100644
index 000000000..194d1d319
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/uprobe.go
@@ -0,0 +1,335 @@
+package link
+
+import (
+ "debug/elf"
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/tracefs"
+)
+
+var (
+ uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799
+ uprobeRefCtrOffsetShift = 32
+ haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", "4.20", func() error {
+ _, err := os.Stat(uprobeRefCtrOffsetPMUPath)
+ if errors.Is(err, os.ErrNotExist) {
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+
+ // ErrNoSymbol indicates that the given symbol was not found
+ // in the ELF symbols table.
+ ErrNoSymbol = errors.New("not found")
+)
+
+// Executable defines an executable program on the filesystem.
+type Executable struct {
+ // Path of the executable on the filesystem.
+ path string
+ // Parsed ELF and dynamic symbols' cachedAddresses.
+ cachedAddresses map[string]uint64
+ // Keep track of symbol table lazy load.
+ cacheAddressesOnce sync.Once
+}
+
+// UprobeOptions defines additional parameters that will be used
+// when loading Uprobes.
+type UprobeOptions struct {
+ // Symbol address. Must be provided in case of external symbols (shared libs).
+ // If set, overrides the address eventually parsed from the executable.
+ Address uint64
+ // The offset relative to the given symbol. Useful when tracing an arbitrary point
+ // inside the frame of the given symbol.
+ //
+ // Note: this field changed from being an absolute offset to being relative
+ // to Address.
+ Offset uint64
+ // Only set the uprobe on the given process ID. Useful when tracing
+ // shared library calls or programs that have many running instances.
+ PID int
+ // Automatically manage SDT reference counts (semaphores).
+ //
+ // If this field is set, the Kernel will increment/decrement the
+ // semaphore located in the process memory at the provided address on
+ // probe attach/detach.
+ //
+ // See also:
+ // sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling)
+ // github.com/torvalds/linux/commit/1cc33161a83d
+ // github.com/torvalds/linux/commit/a6ca88b241d5
+ RefCtrOffset uint64
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+ // Prefix used for the event name if the uprobe must be attached using tracefs.
+ // The group name will be formatted as `<prefix>_<randomstring>`.
+ // The default empty string is equivalent to "ebpf" as the prefix.
+ TraceFSPrefix string
+}
+
+func (uo *UprobeOptions) cookie() uint64 {
+ if uo == nil {
+ return 0
+ }
+ return uo.Cookie
+}
+
+// To open a new Executable, use:
+//
+// OpenExecutable("/bin/bash")
+//
+// The returned value can then be used to open Uprobe(s).
+func OpenExecutable(path string) (*Executable, error) {
+ if path == "" {
+ return nil, fmt.Errorf("path cannot be empty")
+ }
+
+ f, err := internal.OpenSafeELFFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("parse ELF file: %w", err)
+ }
+ defer f.Close()
+
+ if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN {
+ // ELF is not an executable or a shared object.
+ return nil, errors.New("the given file is not an executable or a shared object")
+ }
+
+ return &Executable{
+ path: path,
+ cachedAddresses: make(map[string]uint64),
+ }, nil
+}
+
+func (ex *Executable) load(f *internal.SafeELFFile) error {
+ syms, err := f.Symbols()
+ if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+ return err
+ }
+
+ dynsyms, err := f.DynamicSymbols()
+ if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+ return err
+ }
+
+ syms = append(syms, dynsyms...)
+
+ for _, s := range syms {
+ if elf.ST_TYPE(s.Info) != elf.STT_FUNC {
+ // Symbol not associated with a function or other executable code.
+ continue
+ }
+
+ address := s.Value
+
+ // Loop over ELF segments.
+ for _, prog := range f.Progs {
+ // Skip uninteresting segments.
+ if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 {
+ continue
+ }
+
+ if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) {
+ // If the symbol value is contained in the segment, calculate
+ // the symbol offset.
+ //
+ // fn symbol offset = fn symbol VA - .text VA + .text offset
+ //
+ // stackoverflow.com/a/40249502
+ address = s.Value - prog.Vaddr + prog.Off
+ break
+ }
+ }
+
+ ex.cachedAddresses[s.Name] = address
+ }
+
+ return nil
+}
+
+// address calculates the address of a symbol in the executable.
+//
+// opts must not be nil.
+func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) {
+ if address > 0 {
+ return address + offset, nil
+ }
+
+ var err error
+ ex.cacheAddressesOnce.Do(func() {
+ var f *internal.SafeELFFile
+ f, err = internal.OpenSafeELFFile(ex.path)
+ if err != nil {
+ err = fmt.Errorf("parse ELF file: %w", err)
+ return
+ }
+ defer f.Close()
+
+ err = ex.load(f)
+ })
+ if err != nil {
+ return 0, fmt.Errorf("lazy load symbols: %w", err)
+ }
+
+ address, ok := ex.cachedAddresses[symbol]
+ if !ok {
+ return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
+ }
+
+ // Symbols with location 0 from section undef are shared library calls and
+ // are relocated before the binary is executed. Dynamic linking is not
+ // implemented by the library, so mark this as unsupported for now.
+ //
+ // Since only offset values are stored and not elf.Symbol, if the value is 0,
+ // assume it's an external symbol.
+ if address == 0 {
+ return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+
+ "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported)
+ }
+
+ return address + offset, nil
+}
+
+// Uprobe attaches the given eBPF program to a perf event that fires when the
+// given symbol starts executing in the given Executable.
+// For example, /bin/bash::main():
+//
+// ex, _ = OpenExecutable("/bin/bash")
+// ex.Uprobe("main", prog, nil)
+//
+// When using symbols which belong to shared libraries,
+// an offset must be provided via options:
+//
+// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+//
+// The returned Link may implement [PerfEvent].
+func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+ u, err := ex.uprobe(symbol, prog, opts, false)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(u, prog, opts.cookie())
+ if err != nil {
+ u.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// Uretprobe attaches the given eBPF program to a perf event that fires right
+// before the given symbol exits. For example, /bin/bash::main():
+//
+// ex, _ = OpenExecutable("/bin/bash")
+// ex.Uretprobe("main", prog, nil)
+//
+// When using symbols which belong to shared libraries,
+// an offset must be provided via options:
+//
+// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+//
+// The returned Link may implement [PerfEvent].
+func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+ u, err := ex.uprobe(symbol, prog, opts, true)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(u, prog, opts.cookie())
+ if err != nil {
+ u.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
+// If ret is true, create a uretprobe.
+func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) {
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if prog.Type() != ebpf.Kprobe {
+ return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput)
+ }
+ if opts == nil {
+ opts = &UprobeOptions{}
+ }
+
+ offset, err := ex.address(symbol, opts.Address, opts.Offset)
+ if err != nil {
+ return nil, err
+ }
+
+ pid := opts.PID
+ if pid == 0 {
+ pid = perfAllThreads
+ }
+
+ if opts.RefCtrOffset != 0 {
+ if err := haveRefCtrOffsetPMU(); err != nil {
+ return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err)
+ }
+ }
+
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Uprobe,
+ Symbol: symbol,
+ Path: ex.path,
+ Offset: offset,
+ Pid: pid,
+ RefCtrOffset: opts.RefCtrOffset,
+ Ret: ret,
+ Cookie: opts.Cookie,
+ Group: opts.TraceFSPrefix,
+ }
+
+ // Use uprobe PMU if the kernel has it available.
+ tp, err := pmuProbe(args)
+ if err == nil {
+ return tp, nil
+ }
+ if err != nil && !errors.Is(err, ErrNotSupported) {
+ return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err)
+ }
+
+ // Use tracefs if uprobe PMU is missing.
+ tp, err = tracefsProbe(args)
+ if err != nil {
+ return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
+ }
+
+ return tp, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/uprobe_multi.go b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go
new file mode 100644
index 000000000..aea807b32
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go
@@ -0,0 +1,216 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// UprobeMultiOptions defines additional parameters that will be used
+// when opening a UprobeMulti Link.
+type UprobeMultiOptions struct {
+ // Symbol addresses. If set, overrides the addresses eventually parsed from
+ // the executable. Mutually exclusive with UprobeMulti's symbols argument.
+ Addresses []uint64
+
+ // Offsets into functions provided by UprobeMulti's symbols argument.
+ // For example: to set uprobes to main+5 and _start+10, call UprobeMulti
+ // with:
+ // symbols: "main", "_start"
+ // opt.Offsets: 5, 10
+ Offsets []uint64
+
+ // Optional list of associated ref counter offsets.
+ RefCtrOffsets []uint64
+
+ // Optional list of associated BPF cookies.
+ Cookies []uint64
+
+ // Only set the uprobe_multi link on the given process ID; a zero PID means
+ // system-wide.
+ PID uint32
+}
+
+func (ex *Executable) UprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) {
+ return ex.uprobeMulti(symbols, prog, opts, 0)
+}
+
+func (ex *Executable) UretprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) {
+
+ // The return probe is not limited for symbols entry, so there's no special
+ // setup for return uprobes (other than the extra flag). The symbols, opts.Offsets
+ // and opts.Addresses arrays follow the same logic as for entry uprobes.
+ return ex.uprobeMulti(symbols, prog, opts, unix.BPF_F_UPROBE_MULTI_RETURN)
+}
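+
+// Usage sketch (illustrative only, not part of the upstream cilium/ebpf file):
+// attaching one Kprobe-type program to several symbols of an executable in a
+// single uprobe_multi link. The binary path and symbol names are assumptions;
+// error handling is shortened.
+//
+//	ex, err := OpenExecutable("/usr/bin/bash")
+//	if err != nil { ... }
+//
+//	l, err := ex.UprobeMulti([]string{"main", "readline"}, prog, nil)
+//	if err != nil { ... }
+//	defer l.Close()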
+
+func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions, flags uint32) (Link, error) {
+ if prog == nil {
+ return nil, errors.New("cannot attach a nil program")
+ }
+
+ if opts == nil {
+ opts = &UprobeMultiOptions{}
+ }
+
+ addresses, err := ex.addresses(symbols, opts.Addresses, opts.Offsets)
+ if err != nil {
+ return nil, err
+ }
+
+ addrs := len(addresses)
+ cookies := len(opts.Cookies)
+ refCtrOffsets := len(opts.RefCtrOffsets)
+
+ if addrs == 0 {
+ return nil, fmt.Errorf("Addresses are required: %w", errInvalidInput)
+ }
+ if refCtrOffsets > 0 && refCtrOffsets != addrs {
+ return nil, fmt.Errorf("RefCtrOffsets must be exactly Addresses in length: %w", errInvalidInput)
+ }
+ if cookies > 0 && cookies != addrs {
+ return nil, fmt.Errorf("Cookies must be exactly Addresses in length: %w", errInvalidInput)
+ }
+
+ attr := &sys.LinkCreateUprobeMultiAttr{
+ Path: sys.NewStringPointer(ex.path),
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_TRACE_UPROBE_MULTI,
+ UprobeMultiFlags: flags,
+ Count: uint32(addrs),
+ Offsets: sys.NewPointer(unsafe.Pointer(&addresses[0])),
+ Pid: opts.PID,
+ }
+
+ if refCtrOffsets != 0 {
+ attr.RefCtrOffsets = sys.NewPointer(unsafe.Pointer(&opts.RefCtrOffsets[0]))
+ }
+ if cookies != 0 {
+ attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0]))
+ }
+
+ fd, err := sys.LinkCreateUprobeMulti(attr)
+ if errors.Is(err, unix.ESRCH) {
+ return nil, fmt.Errorf("%w (specified pid not found?)", os.ErrNotExist)
+ }
+ if errors.Is(err, unix.EINVAL) {
+ return nil, fmt.Errorf("%w (missing symbol or prog's AttachType not AttachTraceUprobeMulti?)", err)
+ }
+
+ if err != nil {
+ if haveFeatErr := haveBPFLinkUprobeMulti(); haveFeatErr != nil {
+ return nil, haveFeatErr
+ }
+ return nil, err
+ }
+
+ return &uprobeMultiLink{RawLink{fd, ""}}, nil
+}
+
+func (ex *Executable) addresses(symbols []string, addresses, offsets []uint64) ([]uint64, error) {
+ n := len(symbols)
+ if n == 0 {
+ n = len(addresses)
+ }
+
+ if n == 0 {
+ return nil, fmt.Errorf("%w: neither symbols nor addresses given", errInvalidInput)
+ }
+
+ if symbols != nil && len(symbols) != n {
+ return nil, fmt.Errorf("%w: have %d symbols but want %d", errInvalidInput, len(symbols), n)
+ }
+
+ if addresses != nil && len(addresses) != n {
+ return nil, fmt.Errorf("%w: have %d addresses but want %d", errInvalidInput, len(addresses), n)
+ }
+
+ if offsets != nil && len(offsets) != n {
+ return nil, fmt.Errorf("%w: have %d offsets but want %d", errInvalidInput, len(offsets), n)
+ }
+
+ results := make([]uint64, 0, n)
+ for i := 0; i < n; i++ {
+ var sym string
+ if symbols != nil {
+ sym = symbols[i]
+ }
+
+ var addr, off uint64
+ if addresses != nil {
+ addr = addresses[i]
+ }
+
+ if offsets != nil {
+ off = offsets[i]
+ }
+
+ result, err := ex.address(sym, addr, off)
+ if err != nil {
+ return nil, err
+ }
+
+ results = append(results, result)
+ }
+
+ return results, nil
+}
+
+type uprobeMultiLink struct {
+ RawLink
+}
+
+var _ Link = (*uprobeMultiLink)(nil)
+
+func (kml *uprobeMultiLink) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("update uprobe_multi: %w", ErrNotSupported)
+}
+
+var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", "6.6", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Name: "probe_upm_link",
+ Type: ebpf.Kprobe,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ AttachType: ebpf.AttachTraceUprobeMulti,
+ License: "MIT",
+ })
+ if errors.Is(err, unix.E2BIG) {
+ // Kernel doesn't support AttachType field.
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ // We try to create a uprobe_multi link on the '/' path, which fails with
+ // EBADF if uprobe_multi links are supported.
+ fd, err := sys.LinkCreateUprobeMulti(&sys.LinkCreateUprobeMultiAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_TRACE_UPROBE_MULTI,
+ Path: sys.NewStringPointer("/"),
+ Offsets: sys.NewPointer(unsafe.Pointer(&[]uint64{0})),
+ Count: 1,
+ })
+ switch {
+ case errors.Is(err, unix.EBADF):
+ return nil
+ case errors.Is(err, unix.EINVAL):
+ return internal.ErrNotSupported
+ case err != nil:
+ return err
+ }
+
+ // should not happen
+ fd.Close()
+ return errors.New("successfully attached uprobe_multi to /, kernel bug?")
+})
diff --git a/vendor/github.com/cilium/ebpf/link/xdp.go b/vendor/github.com/cilium/ebpf/link/xdp.go
new file mode 100644
index 000000000..2ec441229
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/xdp.go
@@ -0,0 +1,80 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+// XDPAttachFlags represents how an XDP program is attached to an interface.
+type XDPAttachFlags uint32
+
+const (
+ // XDPGenericMode (SKB) links XDP BPF program for drivers which do
+ // not yet support native XDP.
+ XDPGenericMode XDPAttachFlags = 1 << (iota + 1)
+ // XDPDriverMode links XDP BPF program into the driver’s receive path.
+ XDPDriverMode
+ // XDPOffloadMode offloads the entire XDP BPF program into hardware.
+ XDPOffloadMode
+)
+
+type XDPOptions struct {
+ // Program must be an XDP BPF program.
+ Program *ebpf.Program
+
+ // Interface is the interface index to attach program to.
+ Interface int
+
+ // Flags is one of XDPAttachFlags (optional).
+ //
+ // Only one XDP mode should be set. If no flag is set, the kernel
+ // defaults to driver/generic mode (best effort).
+ Flags XDPAttachFlags
+}
+
+// AttachXDP links an XDP BPF program to an XDP hook.
+func AttachXDP(opts XDPOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.XDP {
+ return nil, fmt.Errorf("invalid program type %s, expected XDP", t)
+ }
+
+ if opts.Interface < 1 {
+ return nil, fmt.Errorf("invalid interface index: %d", opts.Interface)
+ }
+
+ rawLink, err := AttachRawLink(RawLinkOptions{
+ Program: opts.Program,
+ Attach: ebpf.AttachXDP,
+ Target: opts.Interface,
+ Flags: uint32(opts.Flags),
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to attach link: %w", err)
+ }
+
+ return &xdpLink{*rawLink}, nil
+}
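+
+// Usage sketch (illustrative only, not part of the upstream cilium/ebpf file):
+// attaching an XDP program to an interface in generic (SKB) mode. The
+// interface name is an assumption; error handling is shortened.
+//
+//	iface, err := net.InterfaceByName("eth0")
+//	if err != nil { ... }
+//
+//	l, err := AttachXDP(XDPOptions{
+//		Program:   prog, // assumed *ebpf.Program of type XDP
+//		Interface: iface.Index,
+//		Flags:     XDPGenericMode,
+//	})
+//	if err != nil { ... }
+//	defer l.Close()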
+
+type xdpLink struct {
+ RawLink
+}
+
+func (xdp *xdpLink) Info() (*Info, error) {
+ var info sys.XDPLinkInfo
+ if err := sys.ObjInfo(xdp.fd, &info); err != nil {
+ return nil, fmt.Errorf("xdp link info: %s", err)
+ }
+ extra := &XDPInfo{
+ Ifindex: info.Ifindex,
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go
new file mode 100644
index 000000000..788f21b7b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/linker.go
@@ -0,0 +1,459 @@
+package ebpf
+
+import (
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "math"
+ "slices"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+)
+
+// handles stores handle objects to avoid gc cleanup
+type handles []*btf.Handle
+
+func (hs *handles) add(h *btf.Handle) (int, error) {
+ if h == nil {
+ return 0, nil
+ }
+
+ if len(*hs) == math.MaxInt16 {
+ return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16)
+ }
+
+ *hs = append(*hs, h)
+
+ // return length of slice so that indexes start at 1
+ return len(*hs), nil
+}
+
+func (hs handles) fdArray() []int32 {
+ // first element of fda is reserved as no module can be indexed with 0
+ fda := []int32{0}
+ for _, h := range hs {
+ fda = append(fda, int32(h.FD()))
+ }
+
+ return fda
+}
+
+func (hs *handles) Close() error {
+ var errs []error
+ for _, h := range *hs {
+ errs = append(errs, h.Close())
+ }
+ return errors.Join(errs...)
+}
+
+// splitSymbols splits insns into subsections delimited by Symbol Instructions.
+// insns cannot be empty and must start with a Symbol Instruction.
+//
+// The resulting map is indexed by Symbol name.
+func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) {
+ if len(insns) == 0 {
+ return nil, errors.New("insns is empty")
+ }
+
+ currentSym := insns[0].Symbol()
+ if currentSym == "" {
+ return nil, errors.New("insns must start with a Symbol")
+ }
+
+ start := 0
+ progs := make(map[string]asm.Instructions)
+ for i, ins := range insns[1:] {
+ i := i + 1
+
+ sym := ins.Symbol()
+ if sym == "" {
+ continue
+ }
+
+ // New symbol, flush the old one out.
+ progs[currentSym] = slices.Clone(insns[start:i])
+
+ if progs[sym] != nil {
+ return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym)
+ }
+ currentSym = sym
+ start = i
+ }
+
+ if tail := insns[start:]; len(tail) > 0 {
+ progs[currentSym] = slices.Clone(tail)
+ }
+
+ return progs, nil
+}
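+
+// For example (sketch only, not part of the upstream cilium/ebpf file), an
+// instruction stream containing the symbols "prog_a" and "prog_b" is split
+// into two entries:
+//
+//	progs, _ := splitSymbols(insns)
+//	// progs["prog_a"] holds the instructions from the "prog_a" symbol up to
+//	// (but not including) the "prog_b" symbol; progs["prog_b"] holds the rest.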
+
+// The linker is responsible for resolving bpf-to-bpf calls between programs
+// within an ELF. Each BPF program must be a self-contained binary blob,
+// so when an instruction in one ELF program section wants to jump to
+// a function in another, the linker needs to pull in the bytecode
+// (and BTF info) of the target function and concatenate the instruction
+// streams.
+//
+// Later on in the pipeline, all call sites are fixed up with relative jumps
+// within this newly-created instruction stream to then finally hand off to
+// the kernel with BPF_PROG_LOAD.
+//
+// Each function is denoted by an ELF symbol and the compiler takes care of
+// register setup before each jump instruction.
+
+// hasFunctionReferences returns true if insns contains one or more bpf2bpf
+// function references.
+func hasFunctionReferences(insns asm.Instructions) bool {
+ for _, i := range insns {
+ if i.IsFunctionReference() {
+ return true
+ }
+ }
+ return false
+}
+
+// applyRelocations collects and applies any CO-RE relocations in insns.
+//
+// Passing a nil target will relocate against the running kernel. insns are
+// modified in place.
+func applyRelocations(insns asm.Instructions, targets []*btf.Spec, kmodName string, bo binary.ByteOrder, b *btf.Builder) error {
+ var relos []*btf.CORERelocation
+ var reloInsns []*asm.Instruction
+ iter := insns.Iterate()
+ for iter.Next() {
+ if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil {
+ relos = append(relos, relo)
+ reloInsns = append(reloInsns, iter.Ins)
+ }
+ }
+
+ if len(relos) == 0 {
+ return nil
+ }
+
+ if bo == nil {
+ bo = internal.NativeEndian
+ }
+
+ if len(targets) == 0 {
+ kernelTarget, err := btf.LoadKernelSpec()
+ if err != nil {
+ return fmt.Errorf("load kernel spec: %w", err)
+ }
+ targets = append(targets, kernelTarget)
+
+ if kmodName != "" {
+ kmodTarget, err := btf.LoadKernelModuleSpec(kmodName)
+ // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES disabled.
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return fmt.Errorf("load kernel module spec: %w", err)
+ }
+ if err == nil {
+ targets = append(targets, kmodTarget)
+ }
+ }
+ }
+
+ fixups, err := btf.CORERelocate(relos, targets, bo, b.Add)
+ if err != nil {
+ return err
+ }
+
+ for i, fixup := range fixups {
+ if err := fixup.Apply(reloInsns[i]); err != nil {
+ return fmt.Errorf("fixup for %s: %w", relos[i], err)
+ }
+ }
+
+ return nil
+}
+
+// flattenPrograms resolves bpf-to-bpf calls for a set of programs.
+//
+// Links all programs in names by modifying their ProgramSpec in progs.
+func flattenPrograms(progs map[string]*ProgramSpec, names []string) {
+ // Pre-calculate all function references.
+ refs := make(map[*ProgramSpec][]string)
+ for _, prog := range progs {
+ refs[prog] = prog.Instructions.FunctionReferences()
+ }
+
+ // Create a flattened instruction stream, but don't modify progs yet to
+ // avoid linking multiple times.
+ flattened := make([]asm.Instructions, 0, len(names))
+ for _, name := range names {
+ flattened = append(flattened, flattenInstructions(name, progs, refs))
+ }
+
+ // Finally, assign the flattened instructions.
+ for i, name := range names {
+ progs[name].Instructions = flattened[i]
+ }
+}
+
+// flattenInstructions resolves bpf-to-bpf calls for a single program.
+//
+// Flattens the instructions of prog by concatenating the instructions of all
+// direct and indirect dependencies.
+//
+// progs contains all referenceable programs, while refs contain the direct
+// dependencies of each program.
+func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions {
+ prog := progs[name]
+
+ insns := make(asm.Instructions, len(prog.Instructions))
+ copy(insns, prog.Instructions)
+
+ // Add all direct references of prog to the list of to be linked programs.
+ pending := make([]string, len(refs[prog]))
+ copy(pending, refs[prog])
+
+ // All references for which we've appended instructions.
+ linked := make(map[string]bool)
+
+ // Iterate all pending references. We can't use a range since pending is
+ // modified in the body below.
+ for len(pending) > 0 {
+ var ref string
+ ref, pending = pending[0], pending[1:]
+
+ if linked[ref] {
+ // We've already linked this ref, don't append instructions again.
+ continue
+ }
+
+ progRef := progs[ref]
+ if progRef == nil {
+ // We don't have instructions that go with this reference. This
+ // happens when calling extern functions.
+ continue
+ }
+
+ insns = append(insns, progRef.Instructions...)
+ linked[ref] = true
+
+ // Make sure we link indirect references.
+ pending = append(pending, refs[progRef]...)
+ }
+
+ return insns
+}
+
+// fixupAndValidate is called by the ELF reader right before marshaling the
+// instruction stream. It performs last-minute adjustments to the program and
+// runs some sanity checks before sending it off to the kernel.
+func fixupAndValidate(insns asm.Instructions) error {
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+
+ // Map load was tagged with a Reference, but does not contain a Map pointer.
+ needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil
+ if ins.IsLoadFromMap() && needsMap && ins.Map() == nil {
+ return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference)
+ }
+
+ fixupProbeReadKernel(ins)
+ }
+
+ return nil
+}
+
+// POISON_CALL_KFUNC_BASE in libbpf.
+// https://github.com/libbpf/libbpf/blob/2778cbce609aa1e2747a69349f7f46a2f94f0522/src/libbpf.c#L5767
+const kfuncCallPoisonBase = 2002000000
+
+// fixupKfuncs loops over all instructions in search for kfunc calls.
+// If at least one is found, the current kernel's BTF and module BTFs are searched to set Instruction.Constant
+// and Instruction.Offset to the correct values.
+func fixupKfuncs(insns asm.Instructions) (_ handles, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+ if metadata := ins.Metadata.Get(kfuncMetaKey{}); metadata != nil {
+ goto fixups
+ }
+ }
+
+ return nil, nil
+
+fixups:
+ // only load the kernel spec if we found at least one kfunc call
+ kernelSpec, err := btf.LoadKernelSpec()
+ if err != nil {
+ return nil, err
+ }
+
+ fdArray := make(handles, 0)
+ defer closeOnError(&fdArray)
+
+ for {
+ ins := iter.Ins
+
+ metadata := ins.Metadata.Get(kfuncMetaKey{})
+ if metadata == nil {
+ if !iter.Next() {
+ // break loop if this was the last instruction in the stream.
+ break
+ }
+ continue
+ }
+
+ // check meta, if no meta return err
+ kfm, _ := metadata.(*kfuncMeta)
+ if kfm == nil {
+ return nil, fmt.Errorf("kfuncMetaKey doesn't contain kfuncMeta")
+ }
+
+ target := btf.Type((*btf.Func)(nil))
+ spec, module, err := findTargetInKernel(kernelSpec, kfm.Func.Name, &target)
+ if kfm.Binding == elf.STB_WEAK && errors.Is(err, btf.ErrNotFound) {
+ if ins.IsKfuncCall() {
+ // If the kfunc call is weak and not found, poison the call. Use a recognizable constant
+ // to make it easier to debug. And set src to zero so the verifier doesn't complain
+ // about the invalid imm/offset values before dead-code elimination.
+ ins.Constant = kfuncCallPoisonBase
+ ins.Src = 0
+ } else if ins.OpCode.IsDWordLoad() {
+ // If the kfunc DWordLoad is weak and not found, set its address to 0.
+ ins.Constant = 0
+ ins.Src = 0
+ } else {
+ return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata")
+ }
+
+ iter.Next()
+ continue
+ }
+ // Error on non-weak kfunc not found.
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil, fmt.Errorf("kfunc %q: %w", kfm.Func.Name, ErrNotSupported)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ idx, err := fdArray.add(module)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := btf.CheckTypeCompatibility(kfm.Func.Type, target.(*btf.Func).Type); err != nil {
+ return nil, &incompatibleKfuncError{kfm.Func.Name, err}
+ }
+
+ id, err := spec.TypeID(target)
+ if err != nil {
+ return nil, err
+ }
+
+ ins.Constant = int64(id)
+ ins.Offset = int16(idx)
+
+ if !iter.Next() {
+ break
+ }
+ }
+
+ return fdArray, nil
+}
+
+type incompatibleKfuncError struct {
+ name string
+ err error
+}
+
+func (ike *incompatibleKfuncError) Error() string {
+ return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err)
+}
+
+// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str)
+// with bpf_probe_read(_str) on kernels that don't support it yet.
+func fixupProbeReadKernel(ins *asm.Instruction) {
+ if !ins.IsBuiltinCall() {
+ return
+ }
+
+ // Kernel supports bpf_probe_read_kernel, nothing to do.
+ if haveProbeReadKernel() == nil {
+ return
+ }
+
+ switch asm.BuiltinFunc(ins.Constant) {
+ case asm.FnProbeReadKernel, asm.FnProbeReadUser:
+ ins.Constant = int64(asm.FnProbeRead)
+ case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr:
+ ins.Constant = int64(asm.FnProbeReadStr)
+ }
+}
+
+// resolveKconfigReferences creates and populates a .kconfig map if necessary.
+//
+// Returns a nil Map and no error if no references exist.
+func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
+ var spec *MapSpec
+ iter := insns.Iterate()
+ for iter.Next() {
+ meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta)
+ if meta != nil {
+ spec = meta.Map
+ break
+ }
+ }
+
+ if spec == nil {
+ return nil, nil
+ }
+
+ cpy := spec.Copy()
+ if err := resolveKconfig(cpy); err != nil {
+ return nil, err
+ }
+
+ kconfig, err := NewMap(cpy)
+ if err != nil {
+ return nil, err
+ }
+ defer closeOnError(kconfig)
+
+ // Resolve all instructions which load from .kconfig map with actual map
+ // and offset inside it.
+ iter = insns.Iterate()
+ for iter.Next() {
+ meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta)
+ if meta == nil {
+ continue
+ }
+
+ if meta.Map != spec {
+ return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index)
+ }
+
+ if err := iter.Ins.AssociateMap(kconfig); err != nil {
+ return nil, fmt.Errorf("instruction %d: %w", iter.Index, err)
+ }
+
+ // Encode a map read at the offset of the var in the datasec.
+ iter.Ins.Constant = int64(uint64(meta.Offset) << 32)
+ iter.Ins.Metadata.Set(kconfigMetaKey{}, nil)
+ }
+
+ return kconfig, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go
new file mode 100644
index 000000000..0b62101c3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/map.go
@@ -0,0 +1,1669 @@
+package ebpf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "slices"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/sysenc"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Errors returned by Map and MapIterator methods.
+var (
+ ErrKeyNotExist = errors.New("key does not exist")
+ ErrKeyExist = errors.New("key already exists")
+ ErrIterationAborted = errors.New("iteration aborted")
+ ErrMapIncompatible = errors.New("map spec is incompatible with existing map")
+ errMapNoBTFValue = errors.New("map spec does not contain a BTF Value")
+
+	// pre-allocating these errors here since they may be returned in hot code paths
+	// and would otherwise cause unnecessary memory allocations
+ errMapLookupKeyNotExist = fmt.Errorf("lookup: %w", sysErrKeyNotExist)
+)
+
+// MapOptions control loading a map into the kernel.
+type MapOptions struct {
+ // The base path to pin maps in if requested via PinByName.
+ // Existing maps will be re-used if they are compatible, otherwise an
+ // error is returned.
+ PinPath string
+ LoadPinOptions LoadPinOptions
+}
+
+// MapID represents the unique ID of an eBPF map
+type MapID uint32
+
+// MapSpec defines a Map.
+type MapSpec struct {
+ // Name is passed to the kernel as a debug aid. Must only contain
+	// alphanumeric and '_' characters.
+ Name string
+ Type MapType
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+
+ // Flags is passed to the kernel and specifies additional map
+ // creation attributes.
+ Flags uint32
+
+ // Automatically pin and load a map from MapOptions.PinPath.
+ // Generates an error if an existing pinned map is incompatible with the MapSpec.
+ Pinning PinType
+
+ // Specify numa node during map creation
+ // (effective only if unix.BPF_F_NUMA_NODE flag is set,
+ // which can be imported from golang.org/x/sys/unix)
+ NumaNode uint32
+
+ // The initial contents of the map. May be nil.
+ Contents []MapKV
+
+ // Whether to freeze a map after setting its initial contents.
+ Freeze bool
+
+ // InnerMap is used as a template for ArrayOfMaps and HashOfMaps
+ InnerMap *MapSpec
+
+ // Extra trailing bytes found in the ELF map definition when using structs
+ // larger than libbpf's bpf_map_def. nil if no trailing bytes were present.
+ // Must be nil or empty before instantiating the MapSpec into a Map.
+ Extra *bytes.Reader
+
+ // The key and value type of this map. May be nil.
+ Key, Value btf.Type
+}
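The InnerMap field above is what drives map-in-map creation. A minimal sketch of a hash-of-maps spec (names and sizes are illustrative, and nested maps need a kernel that supports them); the outer ValueSize is left at zero and fixed up to 4 bytes (an fd) automatically:

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// InnerMap acts as the template for the maps stored in the outer map.
	spec := &ebpf.MapSpec{
		Name:       "outer_hash",
		Type:       ebpf.HashOfMaps,
		KeySize:    4,
		MaxEntries: 8,
		InnerMap: &ebpf.MapSpec{
			Type:       ebpf.Array,
			KeySize:    4,
			ValueSize:  8,
			MaxEntries: 16,
		},
	}

	m, err := ebpf.NewMap(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()
}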
+
+func (ms *MapSpec) String() string {
+ return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags)
+}
+
+// Copy returns a copy of the spec.
+//
+// MapSpec.Contents is a shallow copy.
+func (ms *MapSpec) Copy() *MapSpec {
+ if ms == nil {
+ return nil
+ }
+
+ cpy := *ms
+ cpy.Contents = slices.Clone(cpy.Contents)
+ cpy.Key = btf.Copy(cpy.Key)
+ cpy.Value = btf.Copy(cpy.Value)
+
+ if cpy.InnerMap == ms {
+ cpy.InnerMap = &cpy
+ } else {
+ cpy.InnerMap = ms.InnerMap.Copy()
+ }
+
+ if cpy.Extra != nil {
+ extra := *cpy.Extra
+ cpy.Extra = &extra
+ }
+
+ return &cpy
+}
+
+// fixupMagicFields fills fields of MapSpec which are usually
+// left empty in ELF or which depend on runtime information.
+//
+// The method doesn't modify Spec, instead returning a copy.
+// The copy is only performed if fixups are necessary, so callers mustn't mutate
+// the returned spec.
+func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) {
+ switch spec.Type {
+ case ArrayOfMaps, HashOfMaps:
+ if spec.ValueSize != 0 && spec.ValueSize != 4 {
+ return nil, errors.New("ValueSize must be zero or four for map of map")
+ }
+
+ spec = spec.Copy()
+ spec.ValueSize = 4
+
+ case PerfEventArray:
+ if spec.KeySize != 0 && spec.KeySize != 4 {
+ return nil, errors.New("KeySize must be zero or four for perf event array")
+ }
+
+ if spec.ValueSize != 0 && spec.ValueSize != 4 {
+ return nil, errors.New("ValueSize must be zero or four for perf event array")
+ }
+
+ spec = spec.Copy()
+ spec.KeySize = 4
+ spec.ValueSize = 4
+
+ n, err := PossibleCPU()
+ if err != nil {
+ return nil, fmt.Errorf("fixup perf event array: %w", err)
+ }
+
+ if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n {
+			// MaxEntries should be zero most of the time, but there is code
+			// out there which hardcodes large constants. Clamp the number
+			// of entries to at most the number of CPUs. Allow creating maps with
+			// fewer than n entries since some kernel selftests relied on this
+			// behaviour in the past.
+ spec.MaxEntries = n
+ }
+ }
+
+ return spec, nil
+}
+
+// dataSection returns the contents and BTF Datasec descriptor of the spec.
+func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) {
+ if ms.Value == nil {
+ return nil, nil, errMapNoBTFValue
+ }
+
+ ds, ok := ms.Value.(*btf.Datasec)
+ if !ok {
+ return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value)
+ }
+
+ if n := len(ms.Contents); n != 1 {
+ return nil, nil, fmt.Errorf("expected one key, found %d", n)
+ }
+
+ kv := ms.Contents[0]
+ value, ok := kv.Value.([]byte)
+ if !ok {
+ return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value)
+ }
+
+ return value, ds, nil
+}
+
+// MapKV is used to initialize the contents of a Map.
+type MapKV struct {
+ Key interface{}
+ Value interface{}
+}
+
+// Compatible returns nil if an existing map may be used instead of creating
+// one from the spec.
+//
+// Returns an error wrapping [ErrMapIncompatible] otherwise.
+func (ms *MapSpec) Compatible(m *Map) error {
+ ms, err := ms.fixupMagicFields()
+ if err != nil {
+ return err
+ }
+
+ diffs := []string{}
+ if m.typ != ms.Type {
+ diffs = append(diffs, fmt.Sprintf("Type: %s changed to %s", m.typ, ms.Type))
+ }
+ if m.keySize != ms.KeySize {
+ diffs = append(diffs, fmt.Sprintf("KeySize: %d changed to %d", m.keySize, ms.KeySize))
+ }
+ if m.valueSize != ms.ValueSize {
+ diffs = append(diffs, fmt.Sprintf("ValueSize: %d changed to %d", m.valueSize, ms.ValueSize))
+ }
+ if m.maxEntries != ms.MaxEntries {
+ diffs = append(diffs, fmt.Sprintf("MaxEntries: %d changed to %d", m.maxEntries, ms.MaxEntries))
+ }
+
+ // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow this
+ // mismatch.
+ if !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) &&
+ m.flags != ms.Flags {
+ diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, ms.Flags))
+ }
+
+ if len(diffs) == 0 {
+ return nil
+ }
+
+ return fmt.Errorf("%s: %w", strings.Join(diffs, ", "), ErrMapIncompatible)
+}
+
+// Map represents a Map file descriptor.
+//
+// It is not safe to close a map which is used by other goroutines.
+//
+// Methods which take interface{} arguments by default encode
+// them using binary.Read/Write in the machine's native endianness.
+//
+// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler
+// if you require custom encoding.
+type Map struct {
+ name string
+ fd *sys.FD
+ typ MapType
+ keySize uint32
+ valueSize uint32
+ maxEntries uint32
+ flags uint32
+ pinnedPath string
+ // Per CPU maps return values larger than the size in the spec
+ fullValueSize int
+}
+
+// NewMapFromFD creates a map from a raw fd.
+//
+// You should not use fd after calling this function.
+func NewMapFromFD(fd int) (*Map, error) {
+ f, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return newMapFromFD(f)
+}
+
+func newMapFromFD(fd *sys.FD) (*Map, error) {
+ info, err := newMapInfoFromFd(fd)
+ if err != nil {
+ fd.Close()
+ return nil, fmt.Errorf("get map info: %w", err)
+ }
+
+ return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags)
+}
+
+// NewMap creates a new Map.
+//
+// It's equivalent to calling NewMapWithOptions with default options.
+func NewMap(spec *MapSpec) (*Map, error) {
+ return NewMapWithOptions(spec, MapOptions{})
+}
+
+// NewMapWithOptions creates a new Map.
+//
+// Creating a map for the first time will perform feature detection
+// by creating small, temporary maps.
+//
+// The caller is responsible for ensuring the process' rlimit is set
+// sufficiently high for locking memory during map creation. This can be done
+// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions.
+//
+// May return an error wrapping ErrMapIncompatible.
+func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
+ m, err := newMapWithOptions(spec, opts)
+ if err != nil {
+ return nil, fmt.Errorf("creating map: %w", err)
+ }
+
+ if err := m.finalize(spec); err != nil {
+ m.Close()
+ return nil, fmt.Errorf("populating map: %w", err)
+ }
+
+ return m, nil
+}
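A minimal usage sketch for NewMapWithOptions with PinByName, assuming bpffs is mounted at /sys/fs/bpf and that the rlimit helper package is available; the spec values and pin path are illustrative:

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/rlimit"
)

func main() {
	// Lift the memlock rlimit as recommended in the NewMapWithOptions docs.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatal(err)
	}

	spec := &ebpf.MapSpec{
		Name:       "pinned_hash",
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 16,
		Pinning:    ebpf.PinByName, // reuse a compatible existing pin if present
	}

	m, err := ebpf.NewMapWithOptions(spec, ebpf.MapOptions{PinPath: "/sys/fs/bpf"})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()
}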
+
+func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
+ switch spec.Pinning {
+ case PinByName:
+ if spec.Name == "" {
+ return nil, fmt.Errorf("pin by name: missing Name")
+ }
+
+ if opts.PinPath == "" {
+ return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath")
+ }
+
+ path := filepath.Join(opts.PinPath, spec.Name)
+ m, err := LoadPinnedMap(path, &opts.LoadPinOptions)
+ if errors.Is(err, unix.ENOENT) {
+ break
+ }
+ if err != nil {
+ return nil, fmt.Errorf("load pinned map: %w", err)
+ }
+ defer closeOnError(m)
+
+ if err := spec.Compatible(m); err != nil {
+ return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err)
+ }
+
+ return m, nil
+
+ case PinNone:
+ // Nothing to do here
+
+ default:
+ return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported)
+ }
+
+ var innerFd *sys.FD
+ if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps {
+ if spec.InnerMap == nil {
+ return nil, fmt.Errorf("%s requires InnerMap", spec.Type)
+ }
+
+ if spec.InnerMap.Pinning != PinNone {
+ return nil, errors.New("inner maps cannot be pinned")
+ }
+
+ template, err := spec.InnerMap.createMap(nil, opts)
+ if err != nil {
+ return nil, fmt.Errorf("inner map: %w", err)
+ }
+ defer template.Close()
+
+ // Intentionally skip populating and freezing (finalizing)
+ // the inner map template since it will be removed shortly.
+
+ innerFd = template.fd
+ }
+
+ m, err := spec.createMap(innerFd, opts)
+ if err != nil {
+ return nil, err
+ }
+ defer closeOnError(m)
+
+ if spec.Pinning == PinByName {
+ path := filepath.Join(opts.PinPath, spec.Name)
+ if err := m.Pin(path); err != nil {
+ return nil, fmt.Errorf("pin map to %s: %w", path, err)
+ }
+ }
+
+ return m, nil
+}
+
+// createMap validates the spec's properties and creates the map in the kernel
+// using the given opts. It does not populate or freeze the map.
+func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err error) {
+ closeOnError := func(closer io.Closer) {
+ if err != nil {
+ closer.Close()
+ }
+ }
+
+ // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained
+ // additional 'inner_map_idx' and later 'numa_node' fields.
+ // In order to support loading these definitions, tolerate the presence of
+ // extra bytes, but require them to be zeroes.
+ if spec.Extra != nil {
+ if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil {
+ return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map")
+ }
+ }
+
+ spec, err = spec.fixupMagicFields()
+ if err != nil {
+ return nil, err
+ }
+
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(spec.Type),
+ KeySize: spec.KeySize,
+ ValueSize: spec.ValueSize,
+ MaxEntries: spec.MaxEntries,
+ MapFlags: sys.MapFlags(spec.Flags),
+ NumaNode: spec.NumaNode,
+ }
+
+ if inner != nil {
+ attr.InnerMapFd = inner.Uint()
+ }
+
+ if haveObjName() == nil {
+ attr.MapName = sys.NewObjName(spec.Name)
+ }
+
+ if spec.Key != nil || spec.Value != nil {
+ handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value)
+ if err != nil && !errors.Is(err, btf.ErrNotSupported) {
+ return nil, fmt.Errorf("load BTF: %w", err)
+ }
+
+ if handle != nil {
+ defer handle.Close()
+
+ // Use BTF k/v during map creation.
+ attr.BtfFd = uint32(handle.FD())
+ attr.BtfKeyTypeId = keyTypeID
+ attr.BtfValueTypeId = valueTypeID
+ }
+ }
+
+ fd, err := sys.MapCreate(&attr)
+
+ // Some map types don't support BTF k/v in earlier kernel versions.
+ // Remove BTF metadata and retry map creation.
+ if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 {
+ attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0
+ fd, err = sys.MapCreate(&attr)
+ }
+ if err != nil {
+ return nil, handleMapCreateError(attr, spec, err)
+ }
+
+ defer closeOnError(fd)
+ m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags)
+ if err != nil {
+ return nil, fmt.Errorf("map create: %w", err)
+ }
+ return m, nil
+}
+
+func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) error {
+ if errors.Is(err, unix.EPERM) {
+ return fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
+ }
+ if errors.Is(err, unix.EINVAL) && spec.MaxEntries == 0 {
+ return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err)
+ }
+ if errors.Is(err, unix.EINVAL) && spec.Type == UnspecifiedMap {
+ return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap)
+ }
+ if errors.Is(err, unix.EINVAL) && spec.Flags&unix.BPF_F_NO_PREALLOC > 0 {
+ return fmt.Errorf("map create: %w (noPrealloc flag may be incompatible with map type %s)", err, spec.Type)
+ }
+
+ switch spec.Type {
+ case ArrayOfMaps, HashOfMaps:
+ if haveFeatErr := haveNestedMaps(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if spec.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze {
+ if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if spec.Flags&unix.BPF_F_MMAPABLE > 0 {
+ if haveFeatErr := haveMmapableMaps(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if spec.Flags&unix.BPF_F_INNER_MAP > 0 {
+ if haveFeatErr := haveInnerMaps(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 {
+ if haveFeatErr := haveNoPreallocMaps(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ // BPF_MAP_TYPE_RINGBUF's max_entries must be a power-of-2 multiple of kernel's page size.
+ if errors.Is(err, unix.EINVAL) &&
+ (attr.MapType == sys.BPF_MAP_TYPE_RINGBUF || attr.MapType == sys.BPF_MAP_TYPE_USER_RINGBUF) {
+ pageSize := uint32(os.Getpagesize())
+ maxEntries := attr.MaxEntries
+ if maxEntries%pageSize != 0 || !internal.IsPow(maxEntries) {
+ return fmt.Errorf("map create: %w (ring map size %d not a multiple of page size %d)", err, maxEntries, pageSize)
+ }
+ }
+
+ return fmt.Errorf("map create: %w", err)
+}
+
+// newMap allocates and returns a new Map structure.
+// Sets the fullValueSize on per-CPU maps.
+func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) {
+ m := &Map{
+ name,
+ fd,
+ typ,
+ keySize,
+ valueSize,
+ maxEntries,
+ flags,
+ "",
+ int(valueSize),
+ }
+
+ if !typ.hasPerCPUValue() {
+ return m, nil
+ }
+
+ possibleCPUs, err := PossibleCPU()
+ if err != nil {
+ return nil, err
+ }
+
+ m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs
+ return m, nil
+}
+
+func (m *Map) String() string {
+ if m.name != "" {
+ return fmt.Sprintf("%s(%s)#%v", m.typ, m.name, m.fd)
+ }
+ return fmt.Sprintf("%s#%v", m.typ, m.fd)
+}
+
+// Type returns the underlying type of the map.
+func (m *Map) Type() MapType {
+ return m.typ
+}
+
+// KeySize returns the size of the map key in bytes.
+func (m *Map) KeySize() uint32 {
+ return m.keySize
+}
+
+// ValueSize returns the size of the map value in bytes.
+func (m *Map) ValueSize() uint32 {
+ return m.valueSize
+}
+
+// MaxEntries returns the maximum number of elements the map can hold.
+func (m *Map) MaxEntries() uint32 {
+ return m.maxEntries
+}
+
+// Flags returns the flags of the map.
+func (m *Map) Flags() uint32 {
+ return m.flags
+}
+
+// Info returns metadata about the map.
+func (m *Map) Info() (*MapInfo, error) {
+ return newMapInfoFromFd(m.fd)
+}
+
+// Handle returns a reference to the Map's type information in the kernel.
+//
+// Returns ErrNotSupported if the kernel has no BTF support, or if there is no
+// BTF associated with the Map.
+func (m *Map) Handle() (*btf.Handle, error) {
+ info, err := m.Info()
+ if err != nil {
+ return nil, err
+ }
+
+ id, ok := info.BTFID()
+ if !ok {
+ return nil, fmt.Errorf("map %s: retrieve BTF ID: %w", m, ErrNotSupported)
+ }
+
+ return btf.NewHandleFromID(id)
+}
+
+// MapLookupFlags controls the behaviour of the map lookup calls.
+type MapLookupFlags uint64
+
+// LookupLock looks up the value of a spin-locked map.
+const LookupLock MapLookupFlags = unix.BPF_F_LOCK
+
+// Lookup retrieves a value from a Map.
+//
+// Calls Close() on valueOut if it is of type **Map or **Program,
+// and *valueOut is not nil.
+//
+// Returns an error if the key doesn't exist, see ErrKeyNotExist.
+func (m *Map) Lookup(key, valueOut interface{}) error {
+ return m.LookupWithFlags(key, valueOut, 0)
+}
+
+// LookupWithFlags retrieves a value from a Map with flags.
+//
+// Passing LookupLock flag will look up the value of a spin-locked
+// map without returning the lock. This must be specified if the
+// elements contain a spinlock.
+//
+// Calls Close() on valueOut if it is of type **Map or **Program,
+// and *valueOut is not nil.
+//
+// Returns an error if the key doesn't exist, see ErrKeyNotExist.
+func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
+ if m.typ.hasPerCPUValue() {
+ return m.lookupPerCPU(key, valueOut, flags)
+ }
+
+ valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize)
+ if err := m.lookup(key, valueBytes.Pointer(), flags); err != nil {
+ return err
+ }
+
+ return m.unmarshalValue(valueOut, valueBytes)
+}
+
+// LookupAndDelete retrieves and deletes a value from a Map.
+//
+// Returns ErrKeyNotExist if the key doesn't exist.
+func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
+ return m.LookupAndDeleteWithFlags(key, valueOut, 0)
+}
+
+// LookupAndDeleteWithFlags retrieves and deletes a value from a Map.
+//
+// Passing LookupLock flag will look up and delete the value of a spin-locked
+// map without returning the lock. This must be specified if the elements
+// contain a spinlock.
+//
+// Returns ErrKeyNotExist if the key doesn't exist.
+func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
+ if m.typ.hasPerCPUValue() {
+ return m.lookupAndDeletePerCPU(key, valueOut, flags)
+ }
+
+ valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize)
+ if err := m.lookupAndDelete(key, valueBytes.Pointer(), flags); err != nil {
+ return err
+ }
+ return m.unmarshalValue(valueOut, valueBytes)
+}
+
+// LookupBytes gets a value from a Map.
+//
+// Returns a nil value if the key doesn't exist.
+func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
+ valueBytes := make([]byte, m.fullValueSize)
+ valuePtr := sys.NewSlicePointer(valueBytes)
+
+ err := m.lookup(key, valuePtr, 0)
+ if errors.Is(err, ErrKeyNotExist) {
+ return nil, nil
+ }
+
+ return valueBytes, err
+}
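Tying Lookup together with Put (defined further down in this file), a round-trip sketch on a plain hash map might look like this; the key and value are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 8,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// Keys and values are encoded in the machine's native endianness, so
	// fixed-size integer types line up with KeySize and ValueSize.
	var key uint32 = 1
	var value uint64 = 42
	if err := m.Put(key, value); err != nil {
		log.Fatal(err)
	}

	var out uint64
	if err := m.Lookup(key, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // 42
}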
+
+func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error {
+ slice, err := ensurePerCPUSlice(valueOut)
+ if err != nil {
+ return err
+ }
+ valueBytes := make([]byte, m.fullValueSize)
+ if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
+ return err
+ }
+ return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes)
+}
+
+func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+
+ attr := sys.MapLookupElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valueOut,
+ Flags: uint64(flags),
+ }
+
+ if err = sys.MapLookupElem(&attr); err != nil {
+ if errors.Is(err, unix.ENOENT) {
+ return errMapLookupKeyNotExist
+ }
+ return fmt.Errorf("lookup: %w", wrapMapError(err))
+ }
+ return nil
+}
+
+func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error {
+ slice, err := ensurePerCPUSlice(valueOut)
+ if err != nil {
+ return err
+ }
+ valueBytes := make([]byte, m.fullValueSize)
+ if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
+ return err
+ }
+ return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes)
+}
+
+// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary.
+func ensurePerCPUSlice(sliceOrPtr any) (any, error) {
+ sliceOrPtrType := reflect.TypeOf(sliceOrPtr)
+ if sliceOrPtrType.Kind() == reflect.Slice {
+ // The target is a slice, the caller is responsible for ensuring that
+ // size is correct.
+ return sliceOrPtr, nil
+ }
+
+ slicePtrType := sliceOrPtrType
+ if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
+ return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice")
+ }
+
+ possibleCPUs, err := PossibleCPU()
+ if err != nil {
+ return nil, err
+ }
+
+ sliceType := slicePtrType.Elem()
+ slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs)
+
+ sliceElemType := sliceType.Elem()
+ sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
+ reflect.ValueOf(sliceOrPtr).Elem().Set(slice)
+ if !sliceElemIsPointer {
+ return slice.Interface(), nil
+ }
+ sliceElemType = sliceElemType.Elem()
+
+ for i := 0; i < possibleCPUs; i++ {
+ newElem := reflect.New(sliceElemType)
+ slice.Index(i).Set(newElem)
+ }
+
+ return slice.Interface(), nil
+}
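For per-CPU map types, the caller receives one element per possible CPU. A small sketch, assuming a PerCPUArray map with 8-byte values; passing a pointer to a nil slice lets the library allocate it to the right length:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.PerCPUArray,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// Lookup fills one element per possible CPU for per-CPU maps.
	var perCPU []uint64
	if err := m.Lookup(uint32(0), &perCPU); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(perCPU)) // number of possible CPUs
}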
+
+func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+
+ attr := sys.MapLookupAndDeleteElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valuePtr,
+ Flags: uint64(flags),
+ }
+
+ if err := sys.MapLookupAndDeleteElem(&attr); err != nil {
+ return fmt.Errorf("lookup and delete: %w", wrapMapError(err))
+ }
+
+ return nil
+}
+
+// MapUpdateFlags controls the behaviour of the Map.Update call.
+//
+// The exact semantics depend on the specific MapType.
+type MapUpdateFlags uint64
+
+const (
+	// UpdateAny creates a new element or updates an existing one.
+ UpdateAny MapUpdateFlags = iota
+ // UpdateNoExist creates a new element.
+ UpdateNoExist MapUpdateFlags = 1 << (iota - 1)
+ // UpdateExist updates an existing element.
+ UpdateExist
+ // UpdateLock updates elements under bpf_spin_lock.
+ UpdateLock
+)
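These flags change how Update treats keys that are already present. A short sketch, assuming the kernel's EEXIST for the conflicting insert is surfaced as ErrKeyExist (the key and values are illustrative):

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 4,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	key := uint32(1)

	// UpdateNoExist only succeeds if the key is not present yet.
	if err := m.Update(key, uint32(10), ebpf.UpdateNoExist); err != nil {
		log.Fatal(err)
	}
	err = m.Update(key, uint32(20), ebpf.UpdateNoExist)
	fmt.Println(errors.Is(err, ebpf.ErrKeyExist)) // true

	// UpdateExist only succeeds for keys that are already present.
	if err := m.Update(key, uint32(20), ebpf.UpdateExist); err != nil {
		log.Fatal(err)
	}
}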
+
+// Put replaces or creates a value in the map.
+//
+// It is equivalent to calling Update with UpdateAny.
+func (m *Map) Put(key, value interface{}) error {
+ return m.Update(key, value, UpdateAny)
+}
+
+// Update changes the value of a key.
+func (m *Map) Update(key, value any, flags MapUpdateFlags) error {
+ if m.typ.hasPerCPUValue() {
+ return m.updatePerCPU(key, value, flags)
+ }
+
+ valuePtr, err := m.marshalValue(value)
+ if err != nil {
+ return fmt.Errorf("marshal value: %w", err)
+ }
+
+ return m.update(key, valuePtr, flags)
+}
+
+func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error {
+ valuePtr, err := marshalPerCPUValue(value, int(m.valueSize))
+ if err != nil {
+ return fmt.Errorf("marshal value: %w", err)
+ }
+
+ return m.update(key, valuePtr, flags)
+}
+
+func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("marshal key: %w", err)
+ }
+
+ attr := sys.MapUpdateElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valuePtr,
+ Flags: uint64(flags),
+ }
+
+ if err = sys.MapUpdateElem(&attr); err != nil {
+ return fmt.Errorf("update: %w", wrapMapError(err))
+ }
+
+ return nil
+}
+
+// Delete removes a value.
+//
+// Returns ErrKeyNotExist if the key does not exist.
+func (m *Map) Delete(key interface{}) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+
+ attr := sys.MapDeleteElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ }
+
+ if err = sys.MapDeleteElem(&attr); err != nil {
+ return fmt.Errorf("delete: %w", wrapMapError(err))
+ }
+ return nil
+}
+
+// NextKey finds the key following an initial key.
+//
+// See NextKeyBytes for details.
+//
+// Returns ErrKeyNotExist if there is no next key.
+func (m *Map) NextKey(key, nextKeyOut interface{}) error {
+ nextKeyBytes := makeMapSyscallOutput(nextKeyOut, int(m.keySize))
+
+ if err := m.nextKey(key, nextKeyBytes.Pointer()); err != nil {
+ return err
+ }
+
+ if err := nextKeyBytes.Unmarshal(nextKeyOut); err != nil {
+ return fmt.Errorf("can't unmarshal next key: %w", err)
+ }
+ return nil
+}
+
+// NextKeyBytes returns the key following an initial key as a byte slice.
+//
+// Passing nil will return the first key.
+//
+// Use Iterate if you want to traverse all entries in the map.
+//
+// Returns nil if there are no more keys.
+func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
+ nextKey := make([]byte, m.keySize)
+ nextKeyPtr := sys.NewSlicePointer(nextKey)
+
+ err := m.nextKey(key, nextKeyPtr)
+ if errors.Is(err, ErrKeyNotExist) {
+ return nil, nil
+ }
+
+ return nextKey, err
+}
+
+func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error {
+ var (
+ keyPtr sys.Pointer
+ err error
+ )
+
+ if key != nil {
+ keyPtr, err = m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+ }
+
+ attr := sys.MapGetNextKeyAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ NextKey: nextKeyOut,
+ }
+
+ if err = sys.MapGetNextKey(&attr); err != nil {
+ // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the
+ // first map element when a nil key pointer is specified.
+ if key == nil && errors.Is(err, unix.EFAULT) {
+ var guessKey []byte
+ guessKey, err = m.guessNonExistentKey()
+ if err != nil {
+ return err
+ }
+
+ // Retry the syscall with a valid non-existing key.
+ attr.Key = sys.NewSlicePointer(guessKey)
+ if err = sys.MapGetNextKey(&attr); err == nil {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("next key: %w", wrapMapError(err))
+ }
+
+ return nil
+}
+
+var mmapProtectedPage = sync.OnceValues(func() ([]byte, error) {
+ return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED)
+})
+
+// guessNonExistentKey attempts to perform a map lookup that returns ENOENT.
+// This is necessary on kernels before 4.4.132, since those don't support
+// iterating maps from the start by providing an invalid key pointer.
+func (m *Map) guessNonExistentKey() ([]byte, error) {
+ // Map a protected page and use that as the value pointer. This saves some
+ // work copying out the value, which we're not interested in.
+ page, err := mmapProtectedPage()
+ if err != nil {
+ return nil, err
+ }
+ valuePtr := sys.NewSlicePointer(page)
+
+ randKey := make([]byte, int(m.keySize))
+
+ for i := 0; i < 4; i++ {
+ switch i {
+ // For hash maps, the 0 key is less likely to be occupied. They're often
+ // used for storing data related to pointers, and their access pattern is
+ // generally scattered across the keyspace.
+ case 0:
+ // An all-0xff key is guaranteed to be out of bounds of any array, since
+ // those have a fixed key size of 4 bytes. The only corner case being
+ // arrays with 2^32 max entries, but those are prohibitively expensive
+ // in many environments.
+ case 1:
+ for r := range randKey {
+ randKey[r] = 0xff
+ }
+ // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so
+ // is unlikely to be taken.
+ case 2:
+ for r := range randKey {
+ randKey[r] = 0x55
+ }
+ // Last ditch effort, generate a random key.
+ case 3:
+ rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey)
+ }
+
+ err := m.lookup(randKey, valuePtr, 0)
+ if errors.Is(err, ErrKeyNotExist) {
+ return randKey, nil
+ }
+ }
+
+ return nil, errors.New("couldn't find non-existing key")
+}
+
+// BatchLookup looks up many elements in a map at once.
+//
+// "keysOut" and "valuesOut" must be of type slice, a pointer
+// to a slice or buffer will not work.
+// "cursor" is an pointer to an opaque handle. It must be non-nil. Pass
+// "cursor" to subsequent calls of this function to continue the batching
+// operation in the case of chunking.
+//
+// Warning: This API is not very safe to use as the kernel implementation for
+// batching relies on the user to be aware of subtle details with regarding to
+// different map type implementations.
+//
+// ErrKeyNotExist is returned when the batch lookup has reached
+// the end of all possible results, even when partial results
+// are returned. It should be used to evaluate when lookup is "done".
+func (m *Map) BatchLookup(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+ n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts)
+ if err != nil {
+ return n, fmt.Errorf("map batch lookup: %w", err)
+ }
+ return n, nil
+}
+
+// BatchLookupAndDelete looks up many elements in a map at once.
+//
+// It then deletes all those elements.
+// "keysOut" and "valuesOut" must be slices; a pointer to a slice or a
+// buffer will not work.
+// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
+// "cursor" to subsequent calls of this function to continue the batching
+// operation in the case of chunking.
+//
+// Warning: This API is not very safe to use as the kernel implementation for
+// batching relies on the user to be aware of subtle details regarding
+// different map type implementations.
+//
+// ErrKeyNotExist is returned when the batch lookup has reached
+// the end of all possible results, even when partial results
+// are returned. It should be used to evaluate when lookup is "done".
+func (m *Map) BatchLookupAndDelete(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+ n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts)
+ if err != nil {
+ return n, fmt.Errorf("map batch lookup and delete: %w", err)
+ }
+ return n, nil
+}
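A usage sketch for the batch API, assuming a kernel recent enough to support BPF_MAP_LOOKUP_BATCH; the entry count and chunk size of 128 are arbitrary:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 1024,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	for i := uint32(0); i < 1000; i++ {
		if err := m.Put(i, i*2); err != nil {
			log.Fatal(err)
		}
	}

	// Read the map in chunks of 128. ErrKeyNotExist signals the end of
	// iteration, possibly together with a final partial batch.
	var (
		cursor ebpf.MapBatchCursor
		keys   = make([]uint32, 128)
		values = make([]uint32, 128)
	)
	for {
		n, err := m.BatchLookup(&cursor, keys, values, nil)
		fmt.Printf("got %d entries\n", n)
		if errors.Is(err, ebpf.ErrKeyNotExist) {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}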
+
+// MapBatchCursor represents a starting point for a batch operation.
+type MapBatchCursor struct {
+ m *Map
+ opaque []byte
+}
+
+func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+ if m.typ.hasPerCPUValue() {
+ return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts)
+ }
+
+ count, err := batchCount(keysOut, valuesOut)
+ if err != nil {
+ return 0, err
+ }
+
+ valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize))
+
+ n, err := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts)
+ if errors.Is(err, unix.ENOSPC) {
+ // Hash tables return ENOSPC when the size of the batch is smaller than
+ // any bucket.
+ return n, fmt.Errorf("%w (batch size too small?)", err)
+ } else if err != nil {
+ return n, err
+ }
+
+ err = valueBuf.Unmarshal(valuesOut)
+ if err != nil {
+ return 0, err
+ }
+
+ return n, nil
+}
+
+func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+ count, err := sliceLen(keysOut)
+ if err != nil {
+ return 0, fmt.Errorf("keys: %w", err)
+ }
+
+ valueBuf := make([]byte, count*int(m.fullValueSize))
+ valuePtr := sys.NewSlicePointer(valueBuf)
+
+ n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valuePtr, opts)
+ if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
+		return 0, sysErr
+ }
+
+ err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), valueBuf)
+ if err != nil {
+ return 0, err
+ }
+
+ return n, sysErr
+}
+
+func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) {
+ cursorLen := int(m.keySize)
+ if cursorLen < 4 {
+ // * generic_map_lookup_batch requires that batch_out is key_size bytes.
+ // This is used by array and LPM maps.
+ //
+ // * __htab_map_lookup_and_delete_batch requires u32. This is used by the
+ // various hash maps.
+ //
+ // Use a minimum of 4 bytes to avoid having to distinguish between the two.
+ cursorLen = 4
+ }
+
+ inBatch := cursor.opaque
+ if inBatch == nil {
+ // This is the first lookup, allocate a buffer to hold the cursor.
+ cursor.opaque = make([]byte, cursorLen)
+ cursor.m = m
+ } else if cursor.m != m {
+ // Prevent reuse of a cursor across maps. First, it's unlikely to work.
+ // Second, the maps may require different cursorLen and cursor.opaque
+ // may therefore be too short. This could lead to the kernel clobbering
+ // user space memory.
+ return 0, errors.New("a cursor may not be reused across maps")
+ }
+
+ if err := haveBatchAPI(); err != nil {
+ return 0, err
+ }
+
+ keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize))
+
+ attr := sys.MapLookupBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyBuf.Pointer(),
+ Values: valuePtr,
+ Count: uint32(count),
+ InBatch: sys.NewSlicePointer(inBatch),
+ OutBatch: sys.NewSlicePointer(cursor.opaque),
+ }
+
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+ sysErr = wrapMapError(sysErr)
+ if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
+ return 0, sysErr
+ }
+
+ if err := keyBuf.Unmarshal(keysOut); err != nil {
+ return 0, err
+ }
+
+ return int(attr.Count), sysErr
+}
+
+// BatchUpdate updates the map with multiple keys and values
+// simultaneously.
+// "keys" and "values" must be of type slice, a pointer
+// to a slice or buffer will not work.
+func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) {
+ if m.typ.hasPerCPUValue() {
+ return m.batchUpdatePerCPU(keys, values, opts)
+ }
+
+ count, err := batchCount(keys, values)
+ if err != nil {
+ return 0, err
+ }
+
+ valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize))
+ if err != nil {
+ return 0, err
+ }
+
+ return m.batchUpdate(count, keys, valuePtr, opts)
+}
+
+func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) {
+ keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
+ if err != nil {
+ return 0, err
+ }
+
+ attr := sys.MapUpdateBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyPtr,
+ Values: valuePtr,
+ Count: uint32(count),
+ }
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ err = sys.MapUpdateBatch(&attr)
+ if err != nil {
+ if haveFeatErr := haveBatchAPI(); haveFeatErr != nil {
+ return 0, haveFeatErr
+ }
+ return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err))
+ }
+
+ return int(attr.Count), nil
+}
+
+func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) {
+ count, err := sliceLen(keys)
+ if err != nil {
+ return 0, fmt.Errorf("keys: %w", err)
+ }
+
+ valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize))
+ if err != nil {
+ return 0, err
+ }
+
+ return m.batchUpdate(count, keys, sys.NewSlicePointer(valueBuf), opts)
+}
+
+// BatchDelete batch deletes entries in the map by keys.
+// "keys" must be of type slice, a pointer to a slice or buffer will not work.
+func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
+ count, err := sliceLen(keys)
+ if err != nil {
+ return 0, fmt.Errorf("keys: %w", err)
+ }
+
+ keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
+ if err != nil {
+ return 0, fmt.Errorf("cannot marshal keys: %v", err)
+ }
+
+ attr := sys.MapDeleteBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyPtr,
+ Count: uint32(count),
+ }
+
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ if err = sys.MapDeleteBatch(&attr); err != nil {
+ if haveFeatErr := haveBatchAPI(); haveFeatErr != nil {
+ return 0, haveFeatErr
+ }
+ return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err))
+ }
+
+ return int(attr.Count), nil
+}
+
+func batchCount(keys, values any) (int, error) {
+ keysLen, err := sliceLen(keys)
+ if err != nil {
+ return 0, fmt.Errorf("keys: %w", err)
+ }
+
+ valuesLen, err := sliceLen(values)
+ if err != nil {
+ return 0, fmt.Errorf("values: %w", err)
+ }
+
+ if keysLen != valuesLen {
+ return 0, fmt.Errorf("keys and values must have the same length")
+ }
+
+ return keysLen, nil
+}
+
+// Iterate traverses a map.
+//
+// It's safe to create multiple iterators at the same time.
+//
+// It's not possible to guarantee that all keys in a map will be
+// returned if there are concurrent modifications to the map.
+func (m *Map) Iterate() *MapIterator {
+ return newMapIterator(m)
+}
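A small sketch of driving the iterator; note that Err must be checked once Next returns false:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func dump(m *ebpf.Map) error {
	var (
		key   uint32
		value uint64
	)
	iter := m.Iterate()
	for iter.Next(&key, &value) {
		fmt.Printf("%d -> %d\n", key, value)
	}
	// Err reports any error that aborted the iteration.
	return iter.Err()
}

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type: ebpf.Hash, KeySize: 4, ValueSize: 8, MaxEntries: 8,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	if err := m.Put(uint32(1), uint64(100)); err != nil {
		log.Fatal(err)
	}
	if err := dump(m); err != nil {
		log.Fatal(err)
	}
}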
+
+// Close the Map's underlying file descriptor, which could unload the
+// Map from the kernel if it is not pinned or in use by a loaded Program.
+func (m *Map) Close() error {
+ if m == nil {
+ // This makes it easier to clean up when iterating maps
+ // of maps / programs.
+ return nil
+ }
+
+ return m.fd.Close()
+}
+
+// FD gets the file descriptor of the Map.
+//
+// Calling this function is invalid after Close has been called.
+func (m *Map) FD() int {
+ return m.fd.Int()
+}
+
+// Clone creates a duplicate of the Map.
+//
+// Closing the duplicate does not affect the original, and vice versa.
+// However, changes made to the map are reflected by both instances.
+// If the original map was pinned, the cloned map will not be pinned by default.
+//
+// Cloning a nil Map returns nil.
+func (m *Map) Clone() (*Map, error) {
+ if m == nil {
+ return nil, nil
+ }
+
+ dup, err := m.fd.Dup()
+ if err != nil {
+ return nil, fmt.Errorf("can't clone map: %w", err)
+ }
+
+ return &Map{
+ m.name,
+ dup,
+ m.typ,
+ m.keySize,
+ m.valueSize,
+ m.maxEntries,
+ m.flags,
+ "",
+ m.fullValueSize,
+ }, nil
+}
+
+// Pin persists the map on the BPF virtual file system past the lifetime of
+// the process that created it.
+//
+// Calling Pin on a previously pinned map will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
+// You can Clone a map to pin it to a different path.
+//
+// This requires bpffs to be mounted above fileName.
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
+func (m *Map) Pin(fileName string) error {
+ if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil {
+ return err
+ }
+ m.pinnedPath = fileName
+ return nil
+}
+
+// Unpin removes the persisted state for the map from the BPF virtual filesystem.
+//
+// Failed calls to Unpin will not alter the state returned by IsPinned.
+//
+// Unpinning an unpinned Map returns nil.
+func (m *Map) Unpin() error {
+ if err := internal.Unpin(m.pinnedPath); err != nil {
+ return err
+ }
+ m.pinnedPath = ""
+ return nil
+}
+
+// IsPinned returns true if the map has a non-empty pinned path.
+func (m *Map) IsPinned() bool {
+ return m.pinnedPath != ""
+}
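A sketch of the pinning lifecycle, assuming bpffs is mounted at /sys/fs/bpf; the pin path is illustrative:

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type: ebpf.Array, KeySize: 4, ValueSize: 4, MaxEntries: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// Pin the map so it outlives this process, then load it back by path.
	const path = "/sys/fs/bpf/example_array"
	if err := m.Pin(path); err != nil {
		log.Fatal(err)
	}

	pinned, err := ebpf.LoadPinnedMap(path, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer pinned.Close()

	// Remove the pin again; the map stays alive while an fd is open.
	if err := pinned.Unpin(); err != nil {
		log.Fatal(err)
	}
}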
+
+// Freeze prevents a map from being modified from user space.
+//
+// It makes no changes to kernel-side restrictions.
+func (m *Map) Freeze() error {
+ attr := sys.MapFreezeAttr{
+ MapFd: m.fd.Uint(),
+ }
+
+ if err := sys.MapFreeze(&attr); err != nil {
+ if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil {
+ return fmt.Errorf("can't freeze map: %w", haveFeatErr)
+ }
+ return fmt.Errorf("can't freeze map: %w", err)
+ }
+ return nil
+}
+
+// finalize populates the Map according to the Contents specified
+// in spec and freezes the Map if requested by spec.
+func (m *Map) finalize(spec *MapSpec) error {
+ for _, kv := range spec.Contents {
+ if err := m.Put(kv.Key, kv.Value); err != nil {
+ return fmt.Errorf("putting value: key %v: %w", kv.Key, err)
+ }
+ }
+
+ if spec.Freeze {
+ if err := m.Freeze(); err != nil {
+ return fmt.Errorf("freezing map: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) {
+ if data == nil {
+ if m.keySize == 0 {
+ // Queues have a key length of zero, so passing nil here is valid.
+ return sys.NewPointer(nil), nil
+ }
+ return sys.Pointer{}, errors.New("can't use nil as key of map")
+ }
+
+ return marshalMapSyscallInput(data, int(m.keySize))
+}
+
+func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) {
+ var (
+ buf []byte
+ err error
+ )
+
+ switch value := data.(type) {
+ case *Map:
+ if !m.typ.canStoreMap() {
+ return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ)
+ }
+ buf, err = marshalMap(value, int(m.valueSize))
+
+ case *Program:
+ if !m.typ.canStoreProgram() {
+ return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ)
+ }
+ buf, err = marshalProgram(value, int(m.valueSize))
+
+ default:
+ return marshalMapSyscallInput(data, int(m.valueSize))
+ }
+
+ if err != nil {
+ return sys.Pointer{}, err
+ }
+
+ return sys.NewSlicePointer(buf), nil
+}
+
+func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error {
+ switch value := value.(type) {
+ case **Map:
+ if !m.typ.canStoreMap() {
+ return fmt.Errorf("can't read a map from %s", m.typ)
+ }
+
+ other, err := unmarshalMap(buf)
+ if err != nil {
+ return err
+ }
+
+ // The caller might close the map externally, so ignore errors.
+ _ = (*value).Close()
+
+ *value = other
+ return nil
+
+ case *Map:
+ if !m.typ.canStoreMap() {
+ return fmt.Errorf("can't read a map from %s", m.typ)
+ }
+ return errors.New("require pointer to *Map")
+
+ case **Program:
+ if !m.typ.canStoreProgram() {
+ return fmt.Errorf("can't read a program from %s", m.typ)
+ }
+
+ other, err := unmarshalProgram(buf)
+ if err != nil {
+ return err
+ }
+
+ // The caller might close the program externally, so ignore errors.
+ _ = (*value).Close()
+
+ *value = other
+ return nil
+
+ case *Program:
+ if !m.typ.canStoreProgram() {
+ return fmt.Errorf("can't read a program from %s", m.typ)
+ }
+ return errors.New("require pointer to *Program")
+ }
+
+ return buf.Unmarshal(value)
+}
+
+// LoadPinnedMap loads a Map from a BPF file.
+func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ m, err := newMapFromFD(fd)
+ if err == nil {
+ m.pinnedPath = fileName
+ }
+
+ return m, err
+}
+
+// unmarshalMap creates a map from a map ID encoded in host endianness.
+func unmarshalMap(buf sysenc.Buffer) (*Map, error) {
+ var id uint32
+ if err := buf.Unmarshal(&id); err != nil {
+ return nil, err
+ }
+ return NewMapFromID(MapID(id))
+}
+
+// marshalMap marshals the fd of a map into a buffer in host endianness.
+func marshalMap(m *Map, length int) ([]byte, error) {
+ if length != 4 {
+ return nil, fmt.Errorf("can't marshal map to %d bytes", length)
+ }
+
+ buf := make([]byte, 4)
+ internal.NativeEndian.PutUint32(buf, m.fd.Uint())
+ return buf, nil
+}
+
+// MapIterator iterates a Map.
+//
+// See Map.Iterate.
+type MapIterator struct {
+ target *Map
+ // Temporary storage to avoid allocations in Next(). This is any instead
+ // of []byte to avoid allocations.
+ cursor any
+ count, maxEntries uint32
+ done bool
+ err error
+}
+
+func newMapIterator(target *Map) *MapIterator {
+ return &MapIterator{
+ target: target,
+ maxEntries: target.maxEntries,
+ }
+}
+
+// Next decodes the next key and value.
+//
+// Iterating a hash map from which keys are being deleted is not
+// safe. You may see the same key multiple times. Iteration may
+// also abort with an error, see IsIterationAborted.
+//
+// Returns false if there are no more entries. You must check
+// the result of Err afterwards.
+//
+// See Map.Lookup for further caveats around valueOut.
+func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
+ if mi.err != nil || mi.done {
+ return false
+ }
+
+ // For array-like maps NextKey returns nil only after maxEntries
+ // iterations.
+ for mi.count <= mi.maxEntries {
+ if mi.cursor == nil {
+ // Pass nil interface to NextKey to make sure the Map's first key
+ // is returned. If we pass an uninitialized []byte instead, it'll see a
+ // non-nil interface and try to marshal it.
+ mi.cursor = make([]byte, mi.target.keySize)
+ mi.err = mi.target.NextKey(nil, mi.cursor)
+ } else {
+ mi.err = mi.target.NextKey(mi.cursor, mi.cursor)
+ }
+
+ if errors.Is(mi.err, ErrKeyNotExist) {
+ mi.done = true
+ mi.err = nil
+ return false
+ } else if mi.err != nil {
+ mi.err = fmt.Errorf("get next key: %w", mi.err)
+ return false
+ }
+
+ mi.count++
+ mi.err = mi.target.Lookup(mi.cursor, valueOut)
+ if errors.Is(mi.err, ErrKeyNotExist) {
+ // Even though the key should be valid, we couldn't look up
+ // its value. If we're iterating a hash map this is probably
+ // because a concurrent delete removed the value before we
+ // could get it. This means that the next call to NextKeyBytes
+ // is very likely to restart iteration.
+ // If we're iterating one of the fd maps like
+ // ProgramArray it means that a given slot doesn't have
+ // a valid fd associated. It's OK to continue to the next slot.
+ continue
+ }
+ if mi.err != nil {
+ mi.err = fmt.Errorf("look up next key: %w", mi.err)
+ return false
+ }
+
+ buf := mi.cursor.([]byte)
+ if ptr, ok := keyOut.(unsafe.Pointer); ok {
+ copy(unsafe.Slice((*byte)(ptr), len(buf)), buf)
+ } else {
+ mi.err = sysenc.Unmarshal(keyOut, buf)
+ }
+
+ return mi.err == nil
+ }
+
+ mi.err = fmt.Errorf("%w", ErrIterationAborted)
+ return false
+}
+
+// Err returns any encountered error.
+//
+// The method must be called after Next returns false.
+//
+// Returns ErrIterationAborted if it wasn't possible to do a full iteration.
+func (mi *MapIterator) Err() error {
+ return mi.err
+}
+
+// MapGetNextID returns the ID of the next eBPF map.
+//
+// Returns ErrNotExist if there is no next eBPF map.
+func MapGetNextID(startID MapID) (MapID, error) {
+ attr := &sys.MapGetNextIdAttr{Id: uint32(startID)}
+ return MapID(attr.NextId), sys.MapGetNextId(attr)
+}
+
+// NewMapFromID returns the map for a given id.
+//
+// Returns ErrNotExist if there is no eBPF map with the given id.
+func NewMapFromID(id MapID) (*Map, error) {
+ fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return newMapFromFD(fd)
+}
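MapGetNextID and NewMapFromID can be combined to enumerate every map loaded in the kernel. A sketch, assuming sufficient privileges and that the ENOENT returned at the end of the list satisfies errors.Is(err, os.ErrNotExist):

package main

import (
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/cilium/ebpf"
)

func main() {
	// Walk all maps currently loaded in the kernel and print their names and types.
	var id ebpf.MapID
	for {
		next, err := ebpf.MapGetNextID(id)
		if errors.Is(err, os.ErrNotExist) {
			break // no more maps
		}
		if err != nil {
			log.Fatal(err)
		}
		id = next

		m, err := ebpf.NewMapFromID(id)
		if err != nil {
			continue // the map may have been removed concurrently
		}
		if info, err := m.Info(); err == nil {
			fmt.Printf("%d: %s (%s)\n", id, info.Name, info.Type)
		}
		m.Close()
	}
}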
+
+// sliceLen returns the length of the value if it is a slice, or an error otherwise.
+func sliceLen(slice any) (int, error) {
+ sliceValue := reflect.ValueOf(slice)
+ if sliceValue.Kind() != reflect.Slice {
+ return 0, fmt.Errorf("%T is not a slice", slice)
+ }
+ return sliceValue.Len(), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go
new file mode 100644
index 000000000..57a0a8e88
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/marshalers.go
@@ -0,0 +1,210 @@
+package ebpf
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "slices"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/sysenc"
+)
+
+// marshalMapSyscallInput converts an arbitrary value into a pointer suitable
+// to be passed to the kernel.
+//
+// As an optimization, it returns the original value if it is an
+// unsafe.Pointer.
+func marshalMapSyscallInput(data any, length int) (sys.Pointer, error) {
+ if ptr, ok := data.(unsafe.Pointer); ok {
+ return sys.NewPointer(ptr), nil
+ }
+
+ buf, err := sysenc.Marshal(data, length)
+ if err != nil {
+ return sys.Pointer{}, err
+ }
+
+ return buf.Pointer(), nil
+}
+
+func makeMapSyscallOutput(dst any, length int) sysenc.Buffer {
+ if ptr, ok := dst.(unsafe.Pointer); ok {
+ return sysenc.UnsafeBuffer(ptr)
+ }
+
+ _, ok := dst.(encoding.BinaryUnmarshaler)
+ if ok {
+ return sysenc.SyscallOutput(nil, length)
+ }
+
+ return sysenc.SyscallOutput(dst, length)
+}
+
+// appendPerCPUSlice encodes a slice containing one value per
+// possible CPU into a buffer of bytes.
+//
+// Values are initialized to zero if the slice has fewer elements than CPUs.
+func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) {
+ sliceType := reflect.TypeOf(slice)
+ if sliceType.Kind() != reflect.Slice {
+ return nil, errors.New("per-CPU value requires slice")
+ }
+
+ sliceValue := reflect.ValueOf(slice)
+ sliceLen := sliceValue.Len()
+ if sliceLen > possibleCPUs {
+ return nil, fmt.Errorf("per-CPU value greater than number of CPUs")
+ }
+
+ // Grow increases the slice's capacity, _if_necessary_
+ buf = slices.Grow(buf, alignedElemLength*possibleCPUs)
+ for i := 0; i < sliceLen; i++ {
+ elem := sliceValue.Index(i).Interface()
+ elemBytes, err := sysenc.Marshal(elem, elemLength)
+ if err != nil {
+ return nil, err
+ }
+
+ buf = elemBytes.AppendTo(buf)
+ buf = append(buf, make([]byte, alignedElemLength-elemLength)...)
+ }
+
+ // Ensure buf is zero-padded full size.
+ buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...)
+
+ return buf, nil
+}
+
+// marshalPerCPUValue encodes a slice containing one value per
+// possible CPU into a buffer of bytes.
+//
+// Values are initialized to zero if the slice has fewer elements than CPUs.
+func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) {
+ possibleCPUs, err := PossibleCPU()
+ if err != nil {
+ return sys.Pointer{}, err
+ }
+
+ alignedElemLength := internal.Align(elemLength, 8)
+ buf := make([]byte, 0, alignedElemLength*possibleCPUs)
+ buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength)
+ if err != nil {
+ return sys.Pointer{}, err
+ }
+
+ return sys.NewSlicePointer(buf), nil
+}
+
+// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing
+// one value per possible CPU into a buffer of bytes.
+func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) {
+ sliceType := reflect.TypeOf(slice)
+ if sliceType.Kind() != reflect.Slice {
+ return nil, fmt.Errorf("batch value requires a slice")
+ }
+ sliceValue := reflect.ValueOf(slice)
+
+ possibleCPUs, err := PossibleCPU()
+ if err != nil {
+ return nil, err
+ }
+ if sliceValue.Len() != batchLen*possibleCPUs {
+ return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
+ batchLen*possibleCPUs, sliceValue.Len())
+ }
+ alignedElemLength := internal.Align(elemLength, 8)
+ buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs)
+ for i := 0; i < batchLen; i++ {
+ batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface()
+ buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength)
+ if err != nil {
+ return nil, fmt.Errorf("batch %d: %w", i, err)
+ }
+ }
+ return buf, nil
+}
+
+// unmarshalPerCPUValue decodes a buffer into a slice containing one value per
+// possible CPU.
+//
+// slice must be a literal slice and not a pointer.
+func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error {
+ sliceType := reflect.TypeOf(slice)
+ if sliceType.Kind() != reflect.Slice {
+ return fmt.Errorf("per-CPU value requires a slice")
+ }
+
+ possibleCPUs, err := PossibleCPU()
+ if err != nil {
+ return err
+ }
+
+ sliceValue := reflect.ValueOf(slice)
+ if sliceValue.Len() != possibleCPUs {
+ return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
+ possibleCPUs, sliceValue.Len())
+ }
+
+ sliceElemType := sliceType.Elem()
+ sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
+ stride := internal.Align(elemLength, 8)
+ for i := 0; i < possibleCPUs; i++ {
+ var elem any
+ v := sliceValue.Index(i)
+ if sliceElemIsPointer {
+ if !v.Elem().CanAddr() {
+ return fmt.Errorf("per-CPU slice elements cannot be nil")
+ }
+ elem = v.Elem().Addr().Interface()
+ } else {
+ elem = v.Addr().Interface()
+ }
+ err := sysenc.Unmarshal(elem, buf[:elemLength])
+ if err != nil {
+ return fmt.Errorf("cpu %d: %w", i, err)
+ }
+
+ buf = buf[stride:]
+ }
+ return nil
+}
+
+// unmarshalBatchPerCPUValue decodes a buffer into a batch-sized slice
+// containing one value per possible CPU.
+//
+// slice must have length batchLen * PossibleCPUs().
+func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error {
+ sliceType := reflect.TypeOf(slice)
+ if sliceType.Kind() != reflect.Slice {
+ return fmt.Errorf("batch requires a slice")
+ }
+
+ sliceValue := reflect.ValueOf(slice)
+ possibleCPUs, err := PossibleCPU()
+ if err != nil {
+ return err
+ }
+ if sliceValue.Len() != batchLen*possibleCPUs {
+ return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
+			batchLen*possibleCPUs, sliceValue.Len())
+ }
+
+ fullValueSize := possibleCPUs * internal.Align(elemLength, 8)
+ if len(buf) != batchLen*fullValueSize {
+ return fmt.Errorf("input buffer has incorrect length, expected %d, got %d",
+			batchLen*fullValueSize, len(buf))
+ }
+
+ for i := 0; i < batchLen; i++ {
+ elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface()
+ if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil {
+ return fmt.Errorf("batch %d: %w", i, err)
+ }
+ buf = buf[fullValueSize:]
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/netlify.toml b/vendor/github.com/cilium/ebpf/netlify.toml
new file mode 100644
index 000000000..67c83f3b3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/netlify.toml
@@ -0,0 +1,4 @@
+[build]
+ base = "docs/"
+ publish = "site/"
+ command = "mkdocs build"
diff --git a/vendor/github.com/cilium/ebpf/perf/doc.go b/vendor/github.com/cilium/ebpf/perf/doc.go
new file mode 100644
index 000000000..b92bc56af
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/perf/doc.go
@@ -0,0 +1,5 @@
+// Package perf allows reading from BPF perf event arrays.
+//
+// A perf event array contains multiple perf event ringbuffers which can be used
+// to exchange sample-like data with user space.
+package perf
diff --git a/vendor/github.com/cilium/ebpf/perf/reader.go b/vendor/github.com/cilium/ebpf/perf/reader.go
new file mode 100644
index 000000000..3c3d56942
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/perf/reader.go
@@ -0,0 +1,491 @@
+package perf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/epoll"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ ErrClosed = os.ErrClosed
+ ErrFlushed = epoll.ErrFlushed
+ errEOR = errors.New("end of ring")
+)
+
+var perfEventHeaderSize = binary.Size(perfEventHeader{})
+
+// perfEventHeader must match 'struct perf_event_header' in linux/perf_event.h.
+type perfEventHeader struct {
+ Type uint32
+ Misc uint16
+ Size uint16
+}
+
+// Record contains either a sample or a counter of the
+// number of lost samples.
+type Record struct {
+ // The CPU this record was generated on.
+ CPU int
+
+ // The data submitted via bpf_perf_event_output.
+ // Due to a kernel bug, this can contain between 0 and 7 bytes of trailing
+ // garbage from the ring depending on the input sample's length.
+ RawSample []byte
+
+ // The number of samples which could not be output, since
+ // the ring buffer was full.
+ LostSamples uint64
+
+ // The minimum number of bytes remaining in the per-CPU buffer after this Record has been read.
+ // Negative for overwritable buffers.
+ Remaining int
+}
+
+// Read a record from a reader and tag it as being from the given CPU.
+//
+// buf must be at least perfEventHeaderSize bytes long.
+func readRecord(rd io.Reader, rec *Record, buf []byte, overwritable bool) error {
+ // Assert that the buffer is large enough.
+ buf = buf[:perfEventHeaderSize]
+ _, err := io.ReadFull(rd, buf)
+ if errors.Is(err, io.EOF) {
+ return errEOR
+ } else if err != nil {
+ return fmt.Errorf("read perf event header: %v", err)
+ }
+
+ header := perfEventHeader{
+ internal.NativeEndian.Uint32(buf[0:4]),
+ internal.NativeEndian.Uint16(buf[4:6]),
+ internal.NativeEndian.Uint16(buf[6:8]),
+ }
+
+ switch header.Type {
+ case unix.PERF_RECORD_LOST:
+ rec.RawSample = rec.RawSample[:0]
+ rec.LostSamples, err = readLostRecords(rd)
+ return err
+
+ case unix.PERF_RECORD_SAMPLE:
+ rec.LostSamples = 0
+ // We can reuse buf here because perfEventHeaderSize > perfEventSampleSize.
+ rec.RawSample, err = readRawSample(rd, buf, rec.RawSample)
+ return err
+
+ default:
+ return &unknownEventError{header.Type}
+ }
+}
+
+func readLostRecords(rd io.Reader) (uint64, error) {
+	// lostHeader must match 'struct perf_event_lost' in the kernel sources.
+ var lostHeader struct {
+ ID uint64
+ Lost uint64
+ }
+
+ err := binary.Read(rd, internal.NativeEndian, &lostHeader)
+ if err != nil {
+ return 0, fmt.Errorf("can't read lost records header: %v", err)
+ }
+
+ return lostHeader.Lost, nil
+}
+
+var perfEventSampleSize = binary.Size(uint32(0))
+
+// This must match 'struct perf_event_sample' in kernel sources.
+type perfEventSample struct {
+ Size uint32
+}
+
+func readRawSample(rd io.Reader, buf, sampleBuf []byte) ([]byte, error) {
+ buf = buf[:perfEventSampleSize]
+ if _, err := io.ReadFull(rd, buf); err != nil {
+ return nil, fmt.Errorf("read sample size: %w", err)
+ }
+
+ sample := perfEventSample{
+ internal.NativeEndian.Uint32(buf),
+ }
+
+ var data []byte
+ if size := int(sample.Size); cap(sampleBuf) < size {
+ data = make([]byte, size)
+ } else {
+ data = sampleBuf[:size]
+ }
+
+ if _, err := io.ReadFull(rd, data); err != nil {
+ return nil, fmt.Errorf("read sample: %w", err)
+ }
+ return data, nil
+}
+
+// Reader allows reading bpf_perf_event_output
+// from user space.
+type Reader struct {
+ poller *epoll.Poller
+
+ // mu protects read/write access to the Reader structure with the
+ // exception of fields protected by 'pauseMu'.
+ // If locking both 'mu' and 'pauseMu', 'mu' must be locked first.
+ mu sync.Mutex
+ array *ebpf.Map
+ rings []*perfEventRing
+ epollEvents []unix.EpollEvent
+ epollRings []*perfEventRing
+ eventHeader []byte
+ deadline time.Time
+ overwritable bool
+ bufferSize int
+ pendingErr error
+
+ // pauseMu protects eventFds so that Pause / Resume can be invoked while
+ // Read is blocked.
+ pauseMu sync.Mutex
+ eventFds []*sys.FD
+ paused bool
+}
+
+// ReaderOptions control the behaviour of the user
+// space reader.
+type ReaderOptions struct {
+ // The number of events required in any per CPU buffer before
+ // Read will process data. This is mutually exclusive with Watermark.
+ // The default is zero, which means Watermark will take precedence.
+ WakeupEvents int
+ // The number of written bytes required in any per CPU buffer before
+ // Read will process data. Must be smaller than PerCPUBuffer.
+ // The default is to start processing as soon as data is available.
+ Watermark int
+ // This perf ring buffer is overwritable: once full, the oldest event will be
+ // overwritten by the newest.
+ Overwritable bool
+}
+
+// NewReader creates a new reader with default options.
+//
+// array must be a PerfEventArray. perCPUBuffer gives the size of the
+// per CPU buffer in bytes. It is rounded up to the nearest multiple
+// of the current page size.
+func NewReader(array *ebpf.Map, perCPUBuffer int) (*Reader, error) {
+ return NewReaderWithOptions(array, perCPUBuffer, ReaderOptions{})
+}
+
+// NewReaderWithOptions creates a new reader with the given options.
+func NewReaderWithOptions(array *ebpf.Map, perCPUBuffer int, opts ReaderOptions) (pr *Reader, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
+ if perCPUBuffer < 1 {
+ return nil, errors.New("perCPUBuffer must be larger than 0")
+ }
+ if opts.WakeupEvents > 0 && opts.Watermark > 0 {
+ return nil, errors.New("WakeupEvents and Watermark cannot both be non-zero")
+ }
+
+ var (
+ nCPU = int(array.MaxEntries())
+ rings = make([]*perfEventRing, 0, nCPU)
+ eventFds = make([]*sys.FD, 0, nCPU)
+ )
+
+ poller, err := epoll.New()
+ if err != nil {
+ return nil, err
+ }
+ defer closeOnError(poller)
+
+ // bpf_perf_event_output checks which CPU an event is enabled on,
+ // but doesn't allow using a wildcard like -1 to specify "all CPUs".
+ // Hence we have to create a ring for each CPU.
+ bufferSize := 0
+ for i := 0; i < nCPU; i++ {
+ event, ring, err := newPerfEventRing(i, perCPUBuffer, opts)
+ if errors.Is(err, unix.ENODEV) {
+ // The requested CPU is currently offline, skip it.
+ continue
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to create perf ring for CPU %d: %v", i, err)
+ }
+ defer closeOnError(event)
+ defer closeOnError(ring)
+
+ bufferSize = ring.size()
+ rings = append(rings, ring)
+ eventFds = append(eventFds, event)
+
+ if err := poller.Add(event.Int(), 0); err != nil {
+ return nil, err
+ }
+ }
+
+ // Closing a PERF_EVENT_ARRAY removes all event fds
+ // stored in it, so we keep a reference alive.
+ array, err = array.Clone()
+ if err != nil {
+ return nil, err
+ }
+
+ pr = &Reader{
+ array: array,
+ rings: rings,
+ poller: poller,
+ deadline: time.Time{},
+ epollEvents: make([]unix.EpollEvent, len(rings)),
+ epollRings: make([]*perfEventRing, 0, len(rings)),
+ eventHeader: make([]byte, perfEventHeaderSize),
+ eventFds: eventFds,
+ overwritable: opts.Overwritable,
+ bufferSize: bufferSize,
+ }
+ if err = pr.Resume(); err != nil {
+ return nil, err
+ }
+ runtime.SetFinalizer(pr, (*Reader).Close)
+ return pr, nil
+}
+
+// Close frees resources used by the reader.
+//
+// It interrupts calls to Read.
+//
+// Calls to perf_event_output from eBPF programs will return
+// ENOENT after calling this method.
+func (pr *Reader) Close() error {
+ if err := pr.poller.Close(); err != nil {
+ if errors.Is(err, os.ErrClosed) {
+ return nil
+ }
+ return fmt.Errorf("close poller: %w", err)
+ }
+
+ // Trying to poll will now fail, so Read() can't block anymore. Acquire the
+ // locks so that we can clean up.
+ pr.mu.Lock()
+ defer pr.mu.Unlock()
+
+ pr.pauseMu.Lock()
+ defer pr.pauseMu.Unlock()
+
+ for _, ring := range pr.rings {
+ ring.Close()
+ }
+ for _, event := range pr.eventFds {
+ event.Close()
+ }
+ pr.rings = nil
+ pr.eventFds = nil
+ pr.array.Close()
+
+ return nil
+}
+
+// SetDeadline controls how long Read and ReadInto will block waiting for samples.
+//
+// Passing a zero time.Time will remove the deadline. Passing a deadline in the
+// past will prevent the reader from blocking if there are no records to be read.
+func (pr *Reader) SetDeadline(t time.Time) {
+ pr.mu.Lock()
+ defer pr.mu.Unlock()
+
+ pr.deadline = t
+}
+
+// Read the next record from the perf ring buffer.
+//
+// The method blocks until there are at least Watermark bytes in one
+// of the per CPU buffers. Records from buffers below the Watermark
+// are not returned.
+//
+// Records can contain between 0 and 7 bytes of trailing garbage from the ring
+// depending on the input sample's length.
+//
+// Calling [Close] interrupts the method with [os.ErrClosed]. Calling [Flush]
+// makes it return all records currently in the ring buffer, followed by [ErrFlushed].
+//
+// Returns [os.ErrDeadlineExceeded] if a deadline was set and after all records
+// have been read from the ring.
+//
+// See [Reader.ReadInto] for a more efficient version of this method.
+func (pr *Reader) Read() (Record, error) {
+ var r Record
+
+ return r, pr.ReadInto(&r)
+}
+
+var errMustBePaused = fmt.Errorf("perf ringbuffer: must have been paused before reading overwritable buffer")
+
+// ReadInto is like [Reader.Read] except that it allows reusing Record and associated buffers.
+func (pr *Reader) ReadInto(rec *Record) error {
+ pr.mu.Lock()
+ defer pr.mu.Unlock()
+
+ pr.pauseMu.Lock()
+ defer pr.pauseMu.Unlock()
+
+ if pr.overwritable && !pr.paused {
+ return errMustBePaused
+ }
+
+ if pr.rings == nil {
+ return fmt.Errorf("perf ringbuffer: %w", ErrClosed)
+ }
+
+ for {
+ if len(pr.epollRings) == 0 {
+ if pe := pr.pendingErr; pe != nil {
+ // All rings have been emptied since the error occurred, return
+ // appropriate error.
+ pr.pendingErr = nil
+ return pe
+ }
+
+ // NB: The deferred pauseMu.Unlock will panic if Wait panics, which
+ // might obscure the original panic.
+ pr.pauseMu.Unlock()
+ _, err := pr.poller.Wait(pr.epollEvents, pr.deadline)
+ pr.pauseMu.Lock()
+
+ if errors.Is(err, os.ErrDeadlineExceeded) || errors.Is(err, ErrFlushed) {
+ // We've hit the deadline, check whether there is any data in
+ // the rings that we've not been woken up for.
+ pr.pendingErr = err
+ } else if err != nil {
+ return err
+ }
+
+ // Re-validate pr.paused since we dropped pauseMu.
+ if pr.overwritable && !pr.paused {
+ return errMustBePaused
+ }
+
+ // Waking up userspace is expensive, make the most of it by checking
+ // all rings.
+ for _, ring := range pr.rings {
+ ring.loadHead()
+ pr.epollRings = append(pr.epollRings, ring)
+ }
+ }
+
+ // Start at the last available event. The order in which we
+ // process them doesn't matter, and starting at the back allows
+ // resizing epollRings to keep track of processed rings.
+ err := pr.readRecordFromRing(rec, pr.epollRings[len(pr.epollRings)-1])
+ if err == errEOR {
+ // We've emptied the current ring buffer, process
+ // the next one.
+ pr.epollRings = pr.epollRings[:len(pr.epollRings)-1]
+ continue
+ }
+
+ return err
+ }
+}
+
+// Pause stops all notifications from this Reader.
+//
+// While the Reader is paused, any attempts to write to the event buffer from
+// BPF programs will return -ENOENT.
+//
+// Subsequent calls to Read will block until a call to Resume.
+func (pr *Reader) Pause() error {
+ pr.pauseMu.Lock()
+ defer pr.pauseMu.Unlock()
+
+ if pr.eventFds == nil {
+ return fmt.Errorf("%w", ErrClosed)
+ }
+
+ for i := range pr.eventFds {
+ if err := pr.array.Delete(uint32(i)); err != nil && !errors.Is(err, ebpf.ErrKeyNotExist) {
+ return fmt.Errorf("couldn't delete event fd for CPU %d: %w", i, err)
+ }
+ }
+
+ pr.paused = true
+
+ return nil
+}
+
+// Resume allows this perf reader to emit notifications.
+//
+// Subsequent calls to Read will block until the next event notification.
+func (pr *Reader) Resume() error {
+ pr.pauseMu.Lock()
+ defer pr.pauseMu.Unlock()
+
+ if pr.eventFds == nil {
+ return fmt.Errorf("%w", ErrClosed)
+ }
+
+ for i, fd := range pr.eventFds {
+ if fd == nil {
+ continue
+ }
+
+ if err := pr.array.Put(uint32(i), fd.Uint()); err != nil {
+ return fmt.Errorf("couldn't put event fd %d for CPU %d: %w", fd, i, err)
+ }
+ }
+
+ pr.paused = false
+
+ return nil
+}
+
+// BufferSize is the size in bytes of each per-CPU buffer.
+func (pr *Reader) BufferSize() int {
+ return pr.bufferSize
+}
+
+// Flush unblocks Read/ReadInto, and successive Read/ReadInto calls will return the samples pending at this point,
+// until you receive an [ErrFlushed] error.
+func (pr *Reader) Flush() error {
+ return pr.poller.Flush()
+}
+
+// NB: Has to be preceded by a call to ring.loadHead.
+func (pr *Reader) readRecordFromRing(rec *Record, ring *perfEventRing) error {
+ defer ring.writeTail()
+
+ rec.CPU = ring.cpu
+ err := readRecord(ring, rec, pr.eventHeader, pr.overwritable)
+ if pr.overwritable && (errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
+ return errEOR
+ }
+ rec.Remaining = ring.remaining()
+ return err
+}
+
+type unknownEventError struct {
+ eventType uint32
+}
+
+func (uev *unknownEventError) Error() string {
+ return fmt.Sprintf("unknown event type: %d", uev.eventType)
+}
+
+// IsUnknownEvent returns true if the error occurred
+// because an unknown event was submitted to the perf event ring.
+func IsUnknownEvent(err error) bool {
+ var uee *unknownEventError
+ return errors.As(err, &uee)
+}
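
For reference, a minimal sketch of how this vendored perf.Reader API is typically consumed from application code. The `events` map, the `readLoop` helper, and the log messages are hypothetical; only the perf package calls come from the code above.

    package main

    import (
    	"errors"
    	"log"
    	"os"

    	"github.com/cilium/ebpf"
    	"github.com/cilium/ebpf/perf"
    )

    // readLoop drains a PerfEventArray map until the reader is closed.
    func readLoop(events *ebpf.Map) error {
    	rd, err := perf.NewReader(events, os.Getpagesize())
    	if err != nil {
    		return err
    	}
    	defer rd.Close()

    	for {
    		record, err := rd.Read()
    		if errors.Is(err, perf.ErrClosed) {
    			return nil
    		}
    		if err != nil {
    			return err
    		}
    		if record.LostSamples > 0 {
    			log.Printf("ring full, lost %d samples", record.LostSamples)
    			continue
    		}
    		log.Printf("cpu %d: %d bytes", record.CPU, len(record.RawSample))
    	}
    }
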
diff --git a/vendor/github.com/cilium/ebpf/perf/ring.go b/vendor/github.com/cilium/ebpf/perf/ring.go
new file mode 100644
index 000000000..63555f323
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/perf/ring.go
@@ -0,0 +1,293 @@
+package perf
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "runtime"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// perfEventRing is a page of metadata followed by
+// a variable number of pages which form a ring buffer.
+type perfEventRing struct {
+ cpu int
+ mmap []byte
+ ringReader
+}
+
+func newPerfEventRing(cpu, perCPUBuffer int, opts ReaderOptions) (_ *sys.FD, _ *perfEventRing, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
+ if opts.Watermark >= perCPUBuffer {
+ return nil, nil, errors.New("watermark must be smaller than perCPUBuffer")
+ }
+
+ fd, err := createPerfEvent(cpu, opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer closeOnError(fd)
+
+ if err := unix.SetNonblock(fd.Int(), true); err != nil {
+ return nil, nil, err
+ }
+
+ protections := unix.PROT_READ
+ if !opts.Overwritable {
+ protections |= unix.PROT_WRITE
+ }
+
+ mmap, err := unix.Mmap(fd.Int(), 0, perfBufferSize(perCPUBuffer), protections, unix.MAP_SHARED)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't mmap: %v", err)
+ }
+
+ // This relies on the fact that we allocate an extra metadata page,
+ // and that the struct is smaller than an OS page.
+ // This use of unsafe.Pointer isn't explicitly sanctioned by the
+ // documentation, since a byte is smaller than sampledPerfEvent.
+ meta := (*unix.PerfEventMmapPage)(unsafe.Pointer(&mmap[0]))
+
+ var reader ringReader
+ if opts.Overwritable {
+ reader = newReverseReader(meta, mmap[meta.Data_offset:meta.Data_offset+meta.Data_size])
+ } else {
+ reader = newForwardReader(meta, mmap[meta.Data_offset:meta.Data_offset+meta.Data_size])
+ }
+
+ ring := &perfEventRing{
+ cpu: cpu,
+ mmap: mmap,
+ ringReader: reader,
+ }
+ runtime.SetFinalizer(ring, (*perfEventRing).Close)
+
+ return fd, ring, nil
+}
+
+// perfBufferSize returns a valid mmap buffer size for use with perf_event_open (1+2^n pages)
+func perfBufferSize(perCPUBuffer int) int {
+ pageSize := os.Getpagesize()
+
+ // Smallest whole number of pages
+ nPages := (perCPUBuffer + pageSize - 1) / pageSize
+
+ // Round up to nearest power of two number of pages
+ nPages = int(math.Pow(2, math.Ceil(math.Log2(float64(nPages)))))
+
+ // Add one for metadata
+ nPages += 1
+
+ return nPages * pageSize
+}
+
+func (ring *perfEventRing) Close() error {
+ runtime.SetFinalizer(ring, nil)
+ mmap := ring.mmap
+ ring.mmap = nil
+ return unix.Munmap(mmap)
+}
+
+func createPerfEvent(cpu int, opts ReaderOptions) (*sys.FD, error) {
+ wakeup := 0
+ bits := 0
+ if opts.WakeupEvents > 0 {
+ wakeup = opts.WakeupEvents
+ } else {
+ wakeup = opts.Watermark
+ if wakeup == 0 {
+ wakeup = 1
+ }
+ bits |= unix.PerfBitWatermark
+ }
+
+ if opts.Overwritable {
+ bits |= unix.PerfBitWriteBackward
+ }
+
+ attr := unix.PerfEventAttr{
+ Type: unix.PERF_TYPE_SOFTWARE,
+ Config: unix.PERF_COUNT_SW_BPF_OUTPUT,
+ Bits: uint64(bits),
+ Sample_type: unix.PERF_SAMPLE_RAW,
+ Wakeup: uint32(wakeup),
+ }
+
+ attr.Size = uint32(unsafe.Sizeof(attr))
+ fd, err := unix.PerfEventOpen(&attr, -1, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC)
+ if err != nil {
+ return nil, fmt.Errorf("can't create perf event: %w", err)
+ }
+ return sys.NewFD(fd)
+}
+
+type ringReader interface {
+ loadHead()
+ size() int
+ remaining() int
+ writeTail()
+ Read(p []byte) (int, error)
+}
+
+type forwardReader struct {
+ meta *unix.PerfEventMmapPage
+ head, tail uint64
+ mask uint64
+ ring []byte
+}
+
+func newForwardReader(meta *unix.PerfEventMmapPage, ring []byte) *forwardReader {
+ return &forwardReader{
+ meta: meta,
+ head: atomic.LoadUint64(&meta.Data_head),
+ tail: atomic.LoadUint64(&meta.Data_tail),
+ // cap is always a power of two
+ mask: uint64(cap(ring) - 1),
+ ring: ring,
+ }
+}
+
+func (rr *forwardReader) loadHead() {
+ rr.head = atomic.LoadUint64(&rr.meta.Data_head)
+}
+
+func (rr *forwardReader) size() int {
+ return len(rr.ring)
+}
+
+func (rr *forwardReader) remaining() int {
+ return int((rr.head - rr.tail) & rr.mask)
+}
+
+func (rr *forwardReader) writeTail() {
+ // Commit the new tail. This lets the kernel know that
+ // the ring buffer has been consumed.
+ atomic.StoreUint64(&rr.meta.Data_tail, rr.tail)
+}
+
+func (rr *forwardReader) Read(p []byte) (int, error) {
+ start := int(rr.tail & rr.mask)
+
+ n := len(p)
+ // Truncate if the read wraps in the ring buffer
+ if remainder := cap(rr.ring) - start; n > remainder {
+ n = remainder
+ }
+
+ // Truncate if there isn't enough data
+ if remainder := int(rr.head - rr.tail); n > remainder {
+ n = remainder
+ }
+
+ copy(p, rr.ring[start:start+n])
+ rr.tail += uint64(n)
+
+ if rr.tail == rr.head {
+ return n, io.EOF
+ }
+
+ return n, nil
+}
+
+type reverseReader struct {
+ meta *unix.PerfEventMmapPage
+ // head is the position where the kernel last wrote data.
+ head uint64
+ // read is the position we read the next data from. Updated as reads are made.
+ read uint64
+ // tail is the end of the ring buffer. No reads must be made past it.
+ tail uint64
+ mask uint64
+ ring []byte
+}
+
+func newReverseReader(meta *unix.PerfEventMmapPage, ring []byte) *reverseReader {
+ rr := &reverseReader{
+ meta: meta,
+ mask: uint64(cap(ring) - 1),
+ ring: ring,
+ }
+ rr.loadHead()
+ return rr
+}
+
+func (rr *reverseReader) loadHead() {
+ // The diagram below represents an overwritable perf ring buffer:
+ //
+ // head read tail
+ // | | |
+ // V V V
+ // +---+--------+------------+---------+--------+
+ // | |H-D....D|H-C........C|H-B.....B|H-A....A|
+ // +---+--------+------------+---------+--------+
+ // <--Write from right to left
+ // Read from left to right-->
+ // (H means header)
+ //
+ // The buffer is read left to right beginning from head to tail.
+ // [head, read) is the read portion of the buffer, [read, tail) the unread one.
+ // read is adjusted as we progress through the buffer.
+
+ // Avoid reading sample D multiple times by discarding unread samples C, B, A.
+ rr.tail = rr.head
+
+ // Get the new head and start reading from it.
+ rr.head = atomic.LoadUint64(&rr.meta.Data_head)
+ rr.read = rr.head
+
+ if rr.tail-rr.head > uint64(cap(rr.ring)) {
+ // ring has been fully written, only permit at most cap(rr.ring)
+ // bytes to be read.
+ rr.tail = rr.head + uint64(cap(rr.ring))
+ }
+}
+
+func (rr *reverseReader) size() int {
+ return len(rr.ring)
+}
+
+func (rr *reverseReader) remaining() int {
+ // remaining data is inaccurate for overwritable buffers
+ // once an overwrite happens, so return -1 here.
+ return -1
+}
+
+func (rr *reverseReader) writeTail() {
+ // We do not care about the tail for an overwritable perf buffer,
+ // so this function is a no-op.
+}
+
+func (rr *reverseReader) Read(p []byte) (int, error) {
+ start := int(rr.read & rr.mask)
+
+ n := len(p)
+ // Truncate if the read wraps in the ring buffer
+ if remainder := cap(rr.ring) - start; n > remainder {
+ n = remainder
+ }
+
+ // Truncate if there isn't enough data
+ if remainder := int(rr.tail - rr.read); n > remainder {
+ n = remainder
+ }
+
+ copy(p, rr.ring[start:start+n])
+ rr.read += uint64(n)
+
+ if rr.read == rr.tail {
+ return n, io.EOF
+ }
+
+ return n, nil
+}
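
As a worked illustration of the sizing rule in perfBufferSize above; the helper name and the numbers below are only an example, not part of the vendored code.

    package main

    import (
    	"fmt"
    	"math"
    	"os"
    )

    // roundedPerfBufferSize mirrors perfBufferSize: round the request up to a
    // power-of-two number of pages, then add one page of metadata.
    func roundedPerfBufferSize(perCPUBuffer int) int {
    	pageSize := os.Getpagesize()
    	nPages := (perCPUBuffer + pageSize - 1) / pageSize
    	nPages = int(math.Pow(2, math.Ceil(math.Log2(float64(nPages)))))
    	return (nPages + 1) * pageSize
    }

    func main() {
    	// With 4 KiB pages, a 10 KiB request needs 3 pages, rounded up to 4,
    	// plus 1 metadata page: 5 * 4096 = 20480 bytes.
    	fmt.Println(roundedPerfBufferSize(10 * 1024))
    }
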
diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go
new file mode 100644
index 000000000..9bc6325f8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/prog.go
@@ -0,0 +1,1141 @@
+package ebpf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+ "unsafe"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/kallsyms"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/sysenc"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// ErrNotSupported is returned whenever the kernel doesn't support a feature.
+var ErrNotSupported = internal.ErrNotSupported
+
+// errBadRelocation is returned when the verifier rejects a program due to a
+// bad CO-RE relocation.
+//
+// This error is detected based on heuristics and therefore may not be reliable.
+var errBadRelocation = errors.New("bad CO-RE relocation")
+
+// errUnknownKfunc is returned when the verifier rejects a program due to an
+// unknown kfunc.
+//
+// This error is detected based on heuristics and therefore may not be reliable.
+var errUnknownKfunc = errors.New("unknown kfunc")
+
+// ProgramID represents the unique ID of an eBPF program.
+type ProgramID uint32
+
+const (
+ // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN.
+ // This is currently the maximum of spare space allocated for SKB
+ // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN.
+ outputPad = 256 + 2
+)
+
+// Deprecated: the correct log size is now detected automatically and this
+// constant is unused.
+const DefaultVerifierLogSize = 64 * 1024
+
+// minVerifierLogSize is the default number of bytes allocated for the
+// verifier log.
+const minVerifierLogSize = 64 * 1024
+
+// ProgramOptions control loading a program into the kernel.
+type ProgramOptions struct {
+ // Bitmap controlling the detail emitted by the kernel's eBPF verifier log.
+ // LogLevel-type values can be ORed together to request specific kinds of
+ // verifier output. See the documentation on [ebpf.LogLevel] for details.
+ //
+ // opts.LogLevel = (ebpf.LogLevelBranch | ebpf.LogLevelStats)
+ //
+ // If left to its default value, the program will first be loaded without
+ // verifier output enabled. Upon error, the program load will be repeated
+ // with LogLevelBranch and the given (or default) LogSize value.
+ //
+ // Unless LogDisabled is set, setting this to a non-zero value will enable the verifier
+ // log, populating the [ebpf.Program.VerifierLog] field on successful loads
+ // and including detailed verifier errors if the program is rejected. This
+ // will always allocate an output buffer, but will result in only a single
+ // attempt at loading the program.
+ LogLevel LogLevel
+
+ // Deprecated: the correct log buffer size is determined automatically
+ // and this field is ignored.
+ LogSize int
+
+ // Disables the verifier log completely, regardless of other options.
+ LogDisabled bool
+
+ // Type information used for CO-RE relocations.
+ //
+ // This is useful in environments where the kernel BTF is not available
+ // (containers) or where it is in a non-standard location. Defaults to
+ // use the kernel BTF from a well-known location if nil.
+ KernelTypes *btf.Spec
+
+ // Type information used for CO-RE relocations of kernel modules,
+ // indexed by module name.
+ //
+ // This is useful in environments where the kernel BTF is not available
+ // (containers) or where it is in a non-standard location. Defaults to
+ // use the kernel module BTF from a well-known location if nil.
+ KernelModuleTypes map[string]*btf.Spec
+}
+
+// ProgramSpec defines a Program.
+type ProgramSpec struct {
+ // Name is passed to the kernel as a debug aid. Must only contain
+ // alphanumeric and '_' characters.
+ Name string
+
+ // Type determines at which hook in the kernel a program will run.
+ Type ProgramType
+
+ // AttachType of the program, needed to differentiate allowed context
+ // accesses in some newer program types like CGroupSockAddr.
+ //
+ // Available on kernels 4.17 and later.
+ AttachType AttachType
+
+ // Name of a kernel data structure or function to attach to. Its
+ // interpretation depends on Type and AttachType.
+ AttachTo string
+
+ // The program to attach to. Must be provided manually.
+ AttachTarget *Program
+
+ // The name of the ELF section this program originated from.
+ SectionName string
+
+ Instructions asm.Instructions
+
+ // Flags is passed to the kernel and specifies additional program
+ // load attributes.
+ Flags uint32
+
+ // License of the program. Some helpers are only available if
+ // the license is deemed compatible with the GPL.
+ //
+ // See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1
+ License string
+
+ // Version used by Kprobe programs.
+ //
+ // Deprecated on kernels 5.0 and later. Leave empty to let the library
+ // detect this value automatically.
+ KernelVersion uint32
+
+ // The byte order this program was compiled for, may be nil.
+ ByteOrder binary.ByteOrder
+}
+
+// Copy returns a copy of the spec.
+func (ps *ProgramSpec) Copy() *ProgramSpec {
+ if ps == nil {
+ return nil
+ }
+
+ cpy := *ps
+ cpy.Instructions = make(asm.Instructions, len(ps.Instructions))
+ copy(cpy.Instructions, ps.Instructions)
+ return &cpy
+}
+
+// Tag calculates the kernel tag for a series of instructions.
+//
+// Use asm.Instructions.Tag if you need to calculate for non-native endianness.
+func (ps *ProgramSpec) Tag() (string, error) {
+ return ps.Instructions.Tag(internal.NativeEndian)
+}
+
+// KernelModule returns the kernel module, if any, the AttachTo function is contained in.
+func (ps *ProgramSpec) KernelModule() (string, error) {
+ if ps.AttachTo == "" {
+ return "", nil
+ }
+
+ switch ps.Type {
+ default:
+ return "", nil
+ case Tracing:
+ switch ps.AttachType {
+ default:
+ return "", nil
+ case AttachTraceFEntry:
+ case AttachTraceFExit:
+ }
+ fallthrough
+ case Kprobe:
+ return kallsyms.KernelModule(ps.AttachTo)
+ }
+}
+
+// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a
+// program is rejected by the verifier.
+//
+// Use [errors.As] to access the error.
+type VerifierError = internal.VerifierError
+
+// Program represents a BPF program loaded into the kernel.
+//
+// It is not safe to close a Program which is used by other goroutines.
+type Program struct {
+ // Contains the output of the kernel verifier if enabled,
+ // otherwise it is empty.
+ VerifierLog string
+
+ fd *sys.FD
+ name string
+ pinnedPath string
+ typ ProgramType
+}
+
+// NewProgram creates a new Program.
+//
+// See [NewProgramWithOptions] for details.
+//
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
+func NewProgram(spec *ProgramSpec) (*Program, error) {
+ return NewProgramWithOptions(spec, ProgramOptions{})
+}
+
+// NewProgramWithOptions creates a new Program.
+//
+// Loading a program for the first time will perform
+// feature detection by loading small, temporary programs.
+//
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
+func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
+ if spec == nil {
+ return nil, errors.New("can't load a program from a nil spec")
+ }
+
+ prog, err := newProgramWithOptions(spec, opts)
+ if errors.Is(err, asm.ErrUnsatisfiedMapReference) {
+ return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
+ }
+ return prog, err
+}
+
+var (
+ coreBadLoad = []byte(fmt.Sprintf("(18) r10 = 0x%x\n", btf.COREBadRelocationSentinel))
+ // This log message was introduced by ebb676daa1a3 ("bpf: Print function name in
+ // addition to function id") which first appeared in v4.10 and has remained
+ // unchanged since.
+ coreBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", btf.COREBadRelocationSentinel))
+ kfuncBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", kfuncCallPoisonBase))
+)
+
+func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
+ if len(spec.Instructions) == 0 {
+ return nil, errors.New("instructions cannot be empty")
+ }
+
+ if spec.Type == UnspecifiedProgram {
+ return nil, errors.New("can't load program of unspecified type")
+ }
+
+ if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
+ return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
+ }
+
+ // Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load")
+ // require the version field to be set to the value of the KERNEL_VERSION
+ // macro for kprobe-type programs.
+ // Overwrite Kprobe program version if set to zero or the magic version constant.
+ kv := spec.KernelVersion
+ if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) {
+ v, err := internal.KernelVersion()
+ if err != nil {
+ return nil, fmt.Errorf("detecting kernel version: %w", err)
+ }
+ kv = v.Kernel()
+ }
+
+ attr := &sys.ProgLoadAttr{
+ ProgType: sys.ProgType(spec.Type),
+ ProgFlags: spec.Flags,
+ ExpectedAttachType: sys.AttachType(spec.AttachType),
+ License: sys.NewStringPointer(spec.License),
+ KernVersion: kv,
+ }
+
+ if haveObjName() == nil {
+ attr.ProgName = sys.NewObjName(spec.Name)
+ }
+
+ insns := make(asm.Instructions, len(spec.Instructions))
+ copy(insns, spec.Instructions)
+
+ kmodName, err := spec.KernelModule()
+ if err != nil {
+ return nil, fmt.Errorf("kernel module search: %w", err)
+ }
+
+ var targets []*btf.Spec
+ if opts.KernelTypes != nil {
+ targets = append(targets, opts.KernelTypes)
+ }
+ if kmodName != "" && opts.KernelModuleTypes != nil {
+ if modBTF, ok := opts.KernelModuleTypes[kmodName]; ok {
+ targets = append(targets, modBTF)
+ }
+ }
+
+ var b btf.Builder
+ if err := applyRelocations(insns, targets, kmodName, spec.ByteOrder, &b); err != nil {
+ return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
+ }
+
+ errExtInfos := haveProgramExtInfos()
+ if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) {
+ // There is at least one CO-RE relocation which relies on a stable local
+ // type ID.
+ // Return ErrNotSupported instead of E2BIG if there is no BTF support.
+ return nil, errExtInfos
+ }
+
+ if errExtInfos == nil {
+ // Only add func and line info if the kernel supports it. This allows
+ // BPF compiled with modern toolchains to work on old kernels.
+ fib, lib, err := btf.MarshalExtInfos(insns, &b)
+ if err != nil {
+ return nil, fmt.Errorf("marshal ext_infos: %w", err)
+ }
+
+ attr.FuncInfoRecSize = btf.FuncInfoSize
+ attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
+ attr.FuncInfo = sys.NewSlicePointer(fib)
+
+ attr.LineInfoRecSize = btf.LineInfoSize
+ attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize
+ attr.LineInfo = sys.NewSlicePointer(lib)
+ }
+
+ if !b.Empty() {
+ handle, err := btf.NewHandle(&b)
+ if err != nil {
+ return nil, fmt.Errorf("load BTF: %w", err)
+ }
+ defer handle.Close()
+
+ attr.ProgBtfFd = uint32(handle.FD())
+ }
+
+ kconfig, err := resolveKconfigReferences(insns)
+ if err != nil {
+ return nil, fmt.Errorf("resolve .kconfig: %w", err)
+ }
+ defer kconfig.Close()
+
+ if err := fixupAndValidate(insns); err != nil {
+ return nil, err
+ }
+
+ handles, err := fixupKfuncs(insns)
+ if err != nil {
+ return nil, fmt.Errorf("fixing up kfuncs: %w", err)
+ }
+ defer handles.Close()
+
+ if len(handles) > 0 {
+ fdArray := handles.fdArray()
+ attr.FdArray = sys.NewPointer(unsafe.Pointer(&fdArray[0]))
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+ err = insns.Marshal(buf, internal.NativeEndian)
+ if err != nil {
+ return nil, err
+ }
+
+ bytecode := buf.Bytes()
+ attr.Insns = sys.NewSlicePointer(bytecode)
+ attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize)
+
+ if spec.AttachTarget != nil {
+ targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType)
+ if err != nil {
+ return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
+ }
+
+ attr.AttachBtfId = targetID
+ attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD())
+ defer runtime.KeepAlive(spec.AttachTarget)
+ } else if spec.AttachTo != "" {
+ module, targetID, err := findProgramTargetInKernel(spec.AttachTo, spec.Type, spec.AttachType)
+ if err != nil && !errors.Is(err, errUnrecognizedAttachType) {
+ // We ignore errUnrecognizedAttachType since AttachTo may be non-empty
+ // for programs that don't attach anywhere.
+ return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
+ }
+
+ attr.AttachBtfId = targetID
+ if module != nil {
+ attr.AttachBtfObjFd = uint32(module.FD())
+ defer module.Close()
+ }
+ }
+
+ // The caller requested a specific verifier log level. Set up the log buffer
+ // so that there is a chance of loading the program in a single shot.
+ var logBuf []byte
+ if !opts.LogDisabled && opts.LogLevel != 0 {
+ logBuf = make([]byte, minVerifierLogSize)
+ attr.LogLevel = opts.LogLevel
+ attr.LogSize = uint32(len(logBuf))
+ attr.LogBuf = sys.NewSlicePointer(logBuf)
+ }
+
+ for {
+ var fd *sys.FD
+ fd, err = sys.ProgLoad(attr)
+ if err == nil {
+ return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil
+ }
+
+ if opts.LogDisabled {
+ break
+ }
+
+ if attr.LogTrueSize != 0 && attr.LogSize >= attr.LogTrueSize {
+ // The log buffer already has the correct size.
+ break
+ }
+
+ if attr.LogSize != 0 && !errors.Is(err, unix.ENOSPC) {
+ // Logging is enabled and the error is not ENOSPC, so we can infer
+ // that the log buffer is large enough.
+ break
+ }
+
+ if attr.LogLevel == 0 {
+ // Logging is not enabled but loading the program failed. Enable
+ // basic logging.
+ attr.LogLevel = LogLevelBranch
+ }
+
+ // Make an educated guess how large the buffer should be. Start
+ // at minVerifierLogSize and then double the size.
+ logSize := uint32(max(len(logBuf)*2, minVerifierLogSize))
+ if int(logSize) < len(logBuf) {
+ return nil, errors.New("overflow while probing log buffer size")
+ }
+
+ if attr.LogTrueSize != 0 {
+ // The kernel has given us a hint how large the log buffer has to be.
+ logSize = attr.LogTrueSize
+ }
+
+ logBuf = make([]byte, logSize)
+ attr.LogSize = logSize
+ attr.LogBuf = sys.NewSlicePointer(logBuf)
+ }
+
+ end := bytes.IndexByte(logBuf, 0)
+ if end < 0 {
+ end = len(logBuf)
+ }
+
+ tail := logBuf[max(end-256, 0):end]
+ switch {
+ case errors.Is(err, unix.EPERM):
+ if len(logBuf) > 0 && logBuf[0] == 0 {
+ // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can
+ // check that the log is empty to reduce false positives.
+ return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
+ }
+
+ case errors.Is(err, unix.EINVAL):
+ if bytes.Contains(tail, coreBadCall) {
+ err = errBadRelocation
+ break
+ } else if bytes.Contains(tail, kfuncBadCall) {
+ err = errUnknownKfunc
+ break
+ }
+
+ case errors.Is(err, unix.EACCES):
+ if bytes.Contains(tail, coreBadLoad) {
+ err = errBadRelocation
+ break
+ }
+ }
+
+ // hasFunctionReferences may be expensive, so check it last.
+ if (errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM)) &&
+ hasFunctionReferences(spec.Instructions) {
+ if err := haveBPFToBPFCalls(); err != nil {
+ return nil, fmt.Errorf("load program: %w", err)
+ }
+ }
+
+ return nil, internal.ErrorWithLog("load program", err, logBuf)
+}
+
+// NewProgramFromFD creates a program from a raw fd.
+//
+// You should not use fd after calling this function.
+//
+// Requires at least Linux 4.10.
+func NewProgramFromFD(fd int) (*Program, error) {
+ f, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return newProgramFromFD(f)
+}
+
+// NewProgramFromID returns the program for a given id.
+//
+// Returns ErrNotExist, if there is no eBPF program with the given id.
+func NewProgramFromID(id ProgramID) (*Program, error) {
+ fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("get program by id: %w", err)
+ }
+
+ return newProgramFromFD(fd)
+}
+
+func newProgramFromFD(fd *sys.FD) (*Program, error) {
+ info, err := newProgramInfoFromFd(fd)
+ if err != nil {
+ fd.Close()
+ return nil, fmt.Errorf("discover program type: %w", err)
+ }
+
+ return &Program{"", fd, info.Name, "", info.Type}, nil
+}
+
+func (p *Program) String() string {
+ if p.name != "" {
+ return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd)
+ }
+ return fmt.Sprintf("%s(%v)", p.typ, p.fd)
+}
+
+// Type returns the underlying type of the program.
+func (p *Program) Type() ProgramType {
+ return p.typ
+}
+
+// Info returns metadata about the program.
+//
+// Requires at least 4.10.
+func (p *Program) Info() (*ProgramInfo, error) {
+ return newProgramInfoFromFd(p.fd)
+}
+
+// Handle returns a reference to the program's type information in the kernel.
+//
+// Returns ErrNotSupported if the kernel has no BTF support, or if there is no
+// BTF associated with the program.
+func (p *Program) Handle() (*btf.Handle, error) {
+ info, err := p.Info()
+ if err != nil {
+ return nil, err
+ }
+
+ id, ok := info.BTFID()
+ if !ok {
+ return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported)
+ }
+
+ return btf.NewHandleFromID(id)
+}
+
+// FD gets the file descriptor of the Program.
+//
+// It is invalid to call this function after Close has been called.
+func (p *Program) FD() int {
+ return p.fd.Int()
+}
+
+// Clone creates a duplicate of the Program.
+//
+// Closing the duplicate does not affect the original, and vice versa.
+//
+// Cloning a nil Program returns nil.
+func (p *Program) Clone() (*Program, error) {
+ if p == nil {
+ return nil, nil
+ }
+
+ dup, err := p.fd.Dup()
+ if err != nil {
+ return nil, fmt.Errorf("can't clone program: %w", err)
+ }
+
+ return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil
+}
+
+// Pin persists the Program on the BPF virtual file system past the lifetime of
+// the process that created it.
+//
+// Calling Pin on a previously pinned program will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
+//
+// This requires bpffs to be mounted above fileName.
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
+func (p *Program) Pin(fileName string) error {
+ if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil {
+ return err
+ }
+ p.pinnedPath = fileName
+ return nil
+}
+
+// Unpin removes the persisted state for the Program from the BPF virtual filesystem.
+//
+// Failed calls to Unpin will not alter the state returned by IsPinned.
+//
+// Unpinning an unpinned Program returns nil.
+func (p *Program) Unpin() error {
+ if err := internal.Unpin(p.pinnedPath); err != nil {
+ return err
+ }
+ p.pinnedPath = ""
+ return nil
+}
+
+// IsPinned returns true if the Program has a non-empty pinned path.
+func (p *Program) IsPinned() bool {
+ return p.pinnedPath != ""
+}
+
+// Close the Program's underlying file descriptor, which could unload
+// the program from the kernel if it is not pinned or attached to a
+// kernel hook.
+func (p *Program) Close() error {
+ if p == nil {
+ return nil
+ }
+
+ return p.fd.Close()
+}
+
+// Various options for running a Program.
+type RunOptions struct {
+ // Program's data input. Required field.
+ //
+ // The kernel expects at least 14 bytes input for an ethernet header for
+ // XDP and SKB programs.
+ Data []byte
+ // Program's data after Program has run. Caller must allocate. Optional field.
+ DataOut []byte
+ // Program's context input. Optional field.
+ Context interface{}
+ // Program's context after Program has run. Must be a pointer or slice. Optional field.
+ ContextOut interface{}
+ // Minimum number of times to run Program. Optional field. Defaults to 1.
+ //
+ // The program may be executed more often than this due to interruptions, e.g.
+ // when runtime.AllThreadsSyscall is invoked.
+ Repeat uint32
+ // Optional flags.
+ Flags uint32
+ // CPU to run Program on. Optional field.
+ // Note not all program types support this field.
+ CPU uint32
+ // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer
+ // or similar. Typically used during benchmarking. Optional field.
+ //
+ // Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead.
+ Reset func()
+}
+
+// Test runs the Program in the kernel with the given input and returns the
+// value returned by the eBPF program.
+//
+// Note: the kernel expects at least 14 bytes input for an ethernet header for
+// XDP and SKB programs.
+//
+// This function requires at least Linux 4.12.
+func (p *Program) Test(in []byte) (uint32, []byte, error) {
+ // Older kernels ignore the dataSizeOut argument when copying to user space.
+ // Combined with things like bpf_xdp_adjust_head() we don't really know what the final
+ // size will be. Hence we allocate an output buffer which we hope will always be large
+ // enough, and panic if the kernel wrote past the end of the allocation.
+ // See https://patchwork.ozlabs.org/cover/1006822/
+ var out []byte
+ if len(in) > 0 {
+ out = make([]byte, len(in)+outputPad)
+ }
+
+ opts := RunOptions{
+ Data: in,
+ DataOut: out,
+ Repeat: 1,
+ }
+
+ ret, _, err := p.run(&opts)
+ if err != nil {
+ return ret, nil, fmt.Errorf("test program: %w", err)
+ }
+ return ret, opts.DataOut, nil
+}
+
+// Run runs the Program in kernel with given RunOptions.
+//
+// Note: the same restrictions from Test apply.
+func (p *Program) Run(opts *RunOptions) (uint32, error) {
+ ret, _, err := p.run(opts)
+ if err != nil {
+ return ret, fmt.Errorf("run program: %w", err)
+ }
+ return ret, nil
+}
+
+// Benchmark runs the Program with the given input for a number of times
+// and returns the time taken per iteration.
+//
+// Returns the result of the last execution of the program and the time per
+// run or an error. reset is called whenever the benchmark syscall is
+// interrupted, and should be set to testing.B.ResetTimer or similar.
+//
+// This function requires at least Linux 4.12.
+func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
+ if uint(repeat) > math.MaxUint32 {
+ return 0, 0, fmt.Errorf("repeat is too high")
+ }
+
+ opts := RunOptions{
+ Data: in,
+ Repeat: uint32(repeat),
+ Reset: reset,
+ }
+
+ ret, total, err := p.run(&opts)
+ if err != nil {
+ return ret, total, fmt.Errorf("benchmark program: %w", err)
+ }
+ return ret, total, nil
+}
+
+var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error {
+ prog, err := NewProgram(&ProgramSpec{
+ // SocketFilter does not require privileges on newer kernels.
+ Type: SocketFilter,
+ Instructions: asm.Instructions{
+ asm.LoadImm(asm.R0, 0, asm.DWord),
+ asm.Return(),
+ },
+ License: "MIT",
+ })
+ if err != nil {
+ // This may be because we lack sufficient permissions, etc.
+ return err
+ }
+ defer prog.Close()
+
+ in := internal.EmptyBPFContext
+ attr := sys.ProgRunAttr{
+ ProgFd: uint32(prog.FD()),
+ DataSizeIn: uint32(len(in)),
+ DataIn: sys.NewSlicePointer(in),
+ }
+
+ err = sys.ProgRun(&attr)
+ switch {
+ case errors.Is(err, unix.EINVAL):
+ // Check for EINVAL specifically, rather than err != nil since we
+ // otherwise misdetect due to insufficient permissions.
+ return internal.ErrNotSupported
+
+ case errors.Is(err, unix.EINTR):
+ // We know that PROG_TEST_RUN is supported if we get EINTR.
+ return nil
+
+ case errors.Is(err, sys.ENOTSUPP):
+ // The first PROG_TEST_RUN patches shipped in 4.12 didn't include
+ // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is
+ // supported, but not for the program type used in the probe.
+ return nil
+ }
+
+ return err
+})
+
+func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) {
+ if uint(len(opts.Data)) > math.MaxUint32 {
+ return 0, 0, fmt.Errorf("input is too long")
+ }
+
+ if err := haveProgRun(); err != nil {
+ return 0, 0, err
+ }
+
+ var ctxBytes []byte
+ if opts.Context != nil {
+ ctx := new(bytes.Buffer)
+ if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil {
+ return 0, 0, fmt.Errorf("cannot serialize context: %v", err)
+ }
+ ctxBytes = ctx.Bytes()
+ }
+
+ var ctxOut []byte
+ if opts.ContextOut != nil {
+ ctxOut = make([]byte, binary.Size(opts.ContextOut))
+ }
+
+ attr := sys.ProgRunAttr{
+ ProgFd: p.fd.Uint(),
+ DataSizeIn: uint32(len(opts.Data)),
+ DataSizeOut: uint32(len(opts.DataOut)),
+ DataIn: sys.NewSlicePointer(opts.Data),
+ DataOut: sys.NewSlicePointer(opts.DataOut),
+ Repeat: uint32(opts.Repeat),
+ CtxSizeIn: uint32(len(ctxBytes)),
+ CtxSizeOut: uint32(len(ctxOut)),
+ CtxIn: sys.NewSlicePointer(ctxBytes),
+ CtxOut: sys.NewSlicePointer(ctxOut),
+ Flags: opts.Flags,
+ Cpu: opts.CPU,
+ }
+
+retry:
+ for {
+ err := sys.ProgRun(&attr)
+ if err == nil {
+ break retry
+ }
+
+ if errors.Is(err, unix.EINTR) {
+ if attr.Repeat <= 1 {
+ // Older kernels check whether enough repetitions have been
+ // executed only after checking for pending signals.
+ //
+ // run signal? done? run ...
+ //
+ // As a result we can get EINTR for repeat==1 even though
+ // the program was run exactly once. Treat this as a
+ // successful run instead.
+ //
+ // Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code")
+ // the conditions are reversed:
+ // run done? signal? ...
+ break retry
+ }
+
+ if opts.Reset != nil {
+ opts.Reset()
+ }
+ continue retry
+ }
+
+ if errors.Is(err, sys.ENOTSUPP) {
+ return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported)
+ }
+
+ return 0, 0, err
+ }
+
+ if opts.DataOut != nil {
+ if int(attr.DataSizeOut) > cap(opts.DataOut) {
+ // Houston, we have a problem. The program created more data than we allocated,
+ // and the kernel wrote past the end of our buffer.
+ panic("kernel wrote past end of output buffer")
+ }
+ opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)]
+ }
+
+ if len(ctxOut) != 0 {
+ b := bytes.NewReader(ctxOut)
+ if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil {
+ return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err)
+ }
+ }
+
+ total := time.Duration(attr.Duration) * time.Nanosecond
+ return attr.Retval, total, nil
+}
+
+func unmarshalProgram(buf sysenc.Buffer) (*Program, error) {
+ var id uint32
+ if err := buf.Unmarshal(&id); err != nil {
+ return nil, err
+ }
+
+ // Looking up an entry in a nested map or prog array returns an id,
+ // not an fd.
+ return NewProgramFromID(ProgramID(id))
+}
+
+func marshalProgram(p *Program, length int) ([]byte, error) {
+ if length != 4 {
+ return nil, fmt.Errorf("can't marshal program to %d bytes", length)
+ }
+
+ buf := make([]byte, 4)
+ internal.NativeEndian.PutUint32(buf, p.fd.Uint())
+ return buf, nil
+}
+
+// LoadPinnedProgram loads a Program from a BPF file.
+//
+// Requires at least Linux 4.11.
+func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ info, err := newProgramInfoFromFd(fd)
+ if err != nil {
+ _ = fd.Close()
+ return nil, fmt.Errorf("info for %s: %w", fileName, err)
+ }
+
+ var progName string
+ if haveObjName() == nil {
+ progName = info.Name
+ } else {
+ progName = filepath.Base(fileName)
+ }
+
+ return &Program{"", fd, progName, fileName, info.Type}, nil
+}
+
+// SanitizeName replaces all invalid characters in name with replacement.
+// Passing a negative value for replacement will delete characters instead
+// of replacing them. Use this to automatically generate valid names for maps
+// and programs at runtime.
+//
+// The set of allowed characters depends on the running kernel version.
+// Dots are only allowed as of kernel 5.2.
+func SanitizeName(name string, replacement rune) string {
+ return strings.Map(func(char rune) rune {
+ if invalidBPFObjNameChar(char) {
+ return replacement
+ }
+ return char
+ }, name)
+}
+
+// ProgramGetNextID returns the ID of the next eBPF program.
+//
+// Returns ErrNotExist, if there is no next eBPF program.
+func ProgramGetNextID(startID ProgramID) (ProgramID, error) {
+ attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)}
+ return ProgramID(attr.NextId), sys.ProgGetNextId(attr)
+}
+
+// BindMap binds a map to the program; the map is only released once the program is released.
+//
+// This may be used in cases where metadata should be associated with the program
+// which otherwise does not contain any references to the map.
+func (p *Program) BindMap(m *Map) error {
+ attr := &sys.ProgBindMapAttr{
+ ProgFd: uint32(p.FD()),
+ MapFd: uint32(m.FD()),
+ }
+
+ return sys.ProgBindMap(attr)
+}
+
+var errUnrecognizedAttachType = errors.New("unrecognized attach type")
+
+// find an attach target type in the kernel.
+//
+// name, progType and attachType determine which type we need to attach to.
+//
+// The attach target may be in a loaded kernel module.
+// In that case the returned handle will be non-nil.
+// The caller is responsible for closing the handle.
+//
+// Returns errUnrecognizedAttachType if the combination of progType and attachType
+// is not recognised.
+func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType) (*btf.Handle, btf.TypeID, error) {
+ type match struct {
+ p ProgramType
+ a AttachType
+ }
+
+ var (
+ typeName, featureName string
+ target btf.Type
+ )
+
+ switch (match{progType, attachType}) {
+ case match{LSM, AttachLSMMac}:
+ typeName = "bpf_lsm_" + name
+ featureName = name + " LSM hook"
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachTraceIter}:
+ typeName = "bpf_iter_" + name
+ featureName = name + " iterator"
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachTraceFEntry}:
+ typeName = name
+ featureName = fmt.Sprintf("fentry %s", name)
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachTraceFExit}:
+ typeName = name
+ featureName = fmt.Sprintf("fexit %s", name)
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachModifyReturn}:
+ typeName = name
+ featureName = fmt.Sprintf("fmod_ret %s", name)
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachTraceRawTp}:
+ typeName = fmt.Sprintf("btf_trace_%s", name)
+ featureName = fmt.Sprintf("raw_tp %s", name)
+ target = (*btf.Typedef)(nil)
+ default:
+ return nil, 0, errUnrecognizedAttachType
+ }
+
+ spec, err := btf.LoadKernelSpec()
+ if err != nil {
+ return nil, 0, fmt.Errorf("load kernel spec: %w", err)
+ }
+
+ spec, module, err := findTargetInKernel(spec, typeName, &target)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil, 0, &internal.UnsupportedFeatureError{Name: featureName}
+ }
+ // See cilium/ebpf#894. Until we can disambiguate between equally-named kernel
+ // symbols, we should explicitly refuse program loads. They will not reliably
+ // do what the caller intended.
+ if errors.Is(err, btf.ErrMultipleMatches) {
+ return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err)
+ }
+ if err != nil {
+ return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err)
+ }
+
+ id, err := spec.TypeID(target)
+ if err != nil {
+ module.Close()
+ return nil, 0, err
+ }
+
+ return module, id, nil
+}
+
+// findTargetInKernel attempts to find a named type in the current kernel.
+//
+// target will point at the found type after a successful call. Searches both
+// vmlinux and any loaded modules.
+//
+// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound
+// if the type wasn't found at all.
+func findTargetInKernel(kernelSpec *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) {
+ err := kernelSpec.TypeByName(typeName, target)
+ if errors.Is(err, btf.ErrNotFound) {
+ spec, module, err := findTargetInModule(kernelSpec, typeName, target)
+ if err != nil {
+ return nil, nil, fmt.Errorf("find target in modules: %w", err)
+ }
+ return spec, module, nil
+ }
+ if err != nil {
+ return nil, nil, fmt.Errorf("find target in vmlinux: %w", err)
+ }
+ return kernelSpec, nil, err
+}
+
+// findTargetInModule attempts to find a named type in any loaded module.
+//
+// base must contain the kernel's types and is used to parse kmod BTF. Modules
+// are searched in the order they were loaded.
+//
+// Returns btf.ErrNotFound if the target can't be found in any module.
+func findTargetInModule(base *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) {
+ it := new(btf.HandleIterator)
+ defer it.Handle.Close()
+
+ for it.Next() {
+ info, err := it.Handle.Info()
+ if err != nil {
+ return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err)
+ }
+
+ if !info.IsModule() {
+ continue
+ }
+
+ spec, err := it.Handle.Spec(base)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err)
+ }
+
+ err = spec.TypeByName(typeName, target)
+ if errors.Is(err, btf.ErrNotFound) {
+ continue
+ }
+ if err != nil {
+ return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err)
+ }
+
+ return spec, it.Take(), nil
+ }
+ if err := it.Err(); err != nil {
+ return nil, nil, fmt.Errorf("iterate modules: %w", err)
+ }
+
+ return nil, nil, btf.ErrNotFound
+}
+
+// find an attach target type in a program.
+//
+// Returns errUnrecognizedAttachType.
+func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) {
+ type match struct {
+ p ProgramType
+ a AttachType
+ }
+
+ var typeName string
+ switch (match{progType, attachType}) {
+ case match{Extension, AttachNone},
+ match{Tracing, AttachTraceFEntry},
+ match{Tracing, AttachTraceFExit}:
+ typeName = name
+ default:
+ return 0, errUnrecognizedAttachType
+ }
+
+ btfHandle, err := prog.Handle()
+ if err != nil {
+ return 0, fmt.Errorf("load target BTF: %w", err)
+ }
+ defer btfHandle.Close()
+
+ spec, err := btfHandle.Spec(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ var targetFunc *btf.Func
+ err = spec.TypeByName(typeName, &targetFunc)
+ if err != nil {
+ return 0, fmt.Errorf("find target %s: %w", typeName, err)
+ }
+
+ return spec.TypeID(targetFunc)
+}
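
A small, self-contained sketch of loading and test-running a program with this API. It mirrors the trivial SocketFilter used by the haveProgRun feature probe above; error handling is abbreviated.

    package main

    import (
    	"fmt"

    	"github.com/cilium/ebpf"
    	"github.com/cilium/ebpf/asm"
    )

    func main() {
    	// Load a trivial socket filter that just returns 0.
    	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
    		Type: ebpf.SocketFilter,
    		Instructions: asm.Instructions{
    			asm.LoadImm(asm.R0, 0, asm.DWord),
    			asm.Return(),
    		},
    		License: "MIT",
    	})
    	if err != nil {
    		panic(err)
    	}
    	defer prog.Close()

    	// Run it once via BPF_PROG_TEST_RUN; SKB programs need at least
    	// 14 bytes of input for an Ethernet header.
    	ret, _, err := prog.Test(make([]byte, 14))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("program returned", ret)
    }
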
diff --git a/vendor/github.com/cilium/ebpf/ringbuf/doc.go b/vendor/github.com/cilium/ebpf/ringbuf/doc.go
new file mode 100644
index 000000000..9e4501218
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/ringbuf/doc.go
@@ -0,0 +1,6 @@
+// Package ringbuf allows interacting with the Linux BPF ring buffer.
+//
+// BPF allows submitting custom events to a BPF ring buffer map set up
+// by userspace. This is very useful to push things like packet samples
+// from BPF to a daemon running in user space.
+package ringbuf
diff --git a/vendor/github.com/cilium/ebpf/ringbuf/reader.go b/vendor/github.com/cilium/ebpf/ringbuf/reader.go
new file mode 100644
index 000000000..3d3ba0ecf
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/ringbuf/reader.go
@@ -0,0 +1,197 @@
+package ringbuf
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/epoll"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ ErrClosed = os.ErrClosed
+ ErrFlushed = epoll.ErrFlushed
+ errEOR = errors.New("end of ring")
+ errBusy = errors.New("sample not committed yet")
+)
+
+// ringbufHeader from 'struct bpf_ringbuf_hdr' in kernel/bpf/ringbuf.c
+type ringbufHeader struct {
+ Len uint32
+ _ uint32 // pg_off, only used by kernel internals
+}
+
+func (rh *ringbufHeader) isBusy() bool {
+ return rh.Len&unix.BPF_RINGBUF_BUSY_BIT != 0
+}
+
+func (rh *ringbufHeader) isDiscard() bool {
+ return rh.Len&unix.BPF_RINGBUF_DISCARD_BIT != 0
+}
+
+func (rh *ringbufHeader) dataLen() int {
+ return int(rh.Len & ^uint32(unix.BPF_RINGBUF_BUSY_BIT|unix.BPF_RINGBUF_DISCARD_BIT))
+}
+
+type Record struct {
+ RawSample []byte
+
+ // The minimum number of bytes remaining in the ring buffer after this Record has been read.
+ Remaining int
+}
+
+// Reader allows reading bpf_ringbuf_output
+// from user space.
+type Reader struct {
+ poller *epoll.Poller
+
+ // mu protects read/write access to the Reader structure
+ mu sync.Mutex
+ ring *ringbufEventRing
+ epollEvents []unix.EpollEvent
+ haveData bool
+ deadline time.Time
+ bufferSize int
+ pendingErr error
+}
+
+// NewReader creates a new BPF ringbuf reader.
+func NewReader(ringbufMap *ebpf.Map) (*Reader, error) {
+ if ringbufMap.Type() != ebpf.RingBuf {
+ return nil, fmt.Errorf("invalid Map type: %s", ringbufMap.Type())
+ }
+
+ maxEntries := int(ringbufMap.MaxEntries())
+ if maxEntries == 0 || (maxEntries&(maxEntries-1)) != 0 {
+ return nil, fmt.Errorf("ringbuffer map size %d is zero or not a power of two", maxEntries)
+ }
+
+ poller, err := epoll.New()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := poller.Add(ringbufMap.FD(), 0); err != nil {
+ poller.Close()
+ return nil, err
+ }
+
+ ring, err := newRingBufEventRing(ringbufMap.FD(), maxEntries)
+ if err != nil {
+ poller.Close()
+ return nil, fmt.Errorf("failed to create ringbuf ring: %w", err)
+ }
+
+ return &Reader{
+ poller: poller,
+ ring: ring,
+ epollEvents: make([]unix.EpollEvent, 1),
+ bufferSize: ring.size(),
+ }, nil
+}
+
+// Close frees resources used by the reader.
+//
+// It interrupts calls to Read.
+func (r *Reader) Close() error {
+ if err := r.poller.Close(); err != nil {
+ if errors.Is(err, os.ErrClosed) {
+ return nil
+ }
+ return err
+ }
+
+ // Acquire the lock. This ensures that Read isn't running.
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if r.ring != nil {
+ r.ring.Close()
+ r.ring = nil
+ }
+
+ return nil
+}
+
+// SetDeadline controls how long Read and ReadInto will block waiting for samples.
+//
+// Passing a zero time.Time will remove the deadline.
+func (r *Reader) SetDeadline(t time.Time) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.deadline = t
+}
+
+// Read the next record from the BPF ringbuf.
+//
+// Calling [Close] interrupts the method with [os.ErrClosed]. Calling [Flush]
+// makes it return all records currently in the ring buffer, followed by [ErrFlushed].
+//
+// Returns [os.ErrDeadlineExceeded] if a deadline was set and after all records
+// have been read from the ring.
+//
+// See [ReadInto] for a more efficient version of this method.
+func (r *Reader) Read() (Record, error) {
+ var rec Record
+ return rec, r.ReadInto(&rec)
+}
+
+// ReadInto is like Read except that it allows reusing Record and associated buffers.
+func (r *Reader) ReadInto(rec *Record) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if r.ring == nil {
+ return fmt.Errorf("ringbuffer: %w", ErrClosed)
+ }
+
+ for {
+ if !r.haveData {
+ if pe := r.pendingErr; pe != nil {
+ r.pendingErr = nil
+ return pe
+ }
+
+ _, err := r.poller.Wait(r.epollEvents[:cap(r.epollEvents)], r.deadline)
+ if errors.Is(err, os.ErrDeadlineExceeded) || errors.Is(err, ErrFlushed) {
+ // Ignoring this for reading a valid entry after timeout or flush.
+ // This can occur if the producer submitted to the ring buffer
+ // with BPF_RB_NO_WAKEUP.
+ r.pendingErr = err
+ } else if err != nil {
+ return err
+ }
+ r.haveData = true
+ }
+
+ for {
+ err := r.ring.readRecord(rec)
+ // Not using errors.Is which is quite a bit slower
+ // For a tight loop it might make a difference
+ if err == errBusy {
+ continue
+ }
+ if err == errEOR {
+ r.haveData = false
+ break
+ }
+ return err
+ }
+ }
+}
+
+// BufferSize returns the size in bytes of the ring buffer
+func (r *Reader) BufferSize() int {
+ return r.bufferSize
+}
+
+// Flush unblocks Read/ReadInto so that subsequent Read/ReadInto calls return the samples pending at this point,
+// until an ErrFlushed error is returned.
+func (r *Reader) Flush() error {
+ return r.poller.Flush()
+}
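
The reader above is the user-space half of the kernel's BPF ring buffer: NewReader validates the map type and size, registers the map file descriptor with an epoll instance, and mmaps the ring, while Read/ReadInto drain committed samples and block on epoll when the ring is empty. A minimal consumer sketch under stated assumptions: the flowsMap handle and the handleSample decoder are hypothetical and would come from the calling program.

package example

import (
	"errors"
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/ringbuf"
)

// consumeFlows drains committed samples from a RingBuf map until the
// reader is closed. The map and the sample layout are illustrative.
func consumeFlows(flowsMap *ebpf.Map, handleSample func([]byte)) error {
	rd, err := ringbuf.NewReader(flowsMap)
	if err != nil {
		return err
	}
	defer rd.Close()

	for {
		rec, err := rd.Read()
		if errors.Is(err, ringbuf.ErrClosed) {
			return nil // Close() was called, stop consuming
		}
		if err != nil {
			log.Printf("ringbuf read: %v", err)
			continue
		}
		// rec.RawSample is one committed sample, already copied out of
		// the mmap'ed ring by readRecord.
		handleSample(rec.RawSample)
	}
}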
diff --git a/vendor/github.com/cilium/ebpf/ringbuf/ring.go b/vendor/github.com/cilium/ebpf/ringbuf/ring.go
new file mode 100644
index 000000000..8f8f4bce3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/ringbuf/ring.go
@@ -0,0 +1,137 @@
+package ringbuf
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+type ringbufEventRing struct {
+ prod []byte
+ cons []byte
+ *ringReader
+}
+
+func newRingBufEventRing(mapFD, size int) (*ringbufEventRing, error) {
+ cons, err := unix.Mmap(mapFD, 0, os.Getpagesize(), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
+ if err != nil {
+ return nil, fmt.Errorf("can't mmap consumer page: %w", err)
+ }
+
+ prod, err := unix.Mmap(mapFD, (int64)(os.Getpagesize()), os.Getpagesize()+2*size, unix.PROT_READ, unix.MAP_SHARED)
+ if err != nil {
+ _ = unix.Munmap(cons)
+ return nil, fmt.Errorf("can't mmap data pages: %w", err)
+ }
+
+ cons_pos := (*uint64)(unsafe.Pointer(&cons[0]))
+ prod_pos := (*uint64)(unsafe.Pointer(&prod[0]))
+
+ ring := &ringbufEventRing{
+ prod: prod,
+ cons: cons,
+ ringReader: newRingReader(cons_pos, prod_pos, prod[os.Getpagesize():]),
+ }
+ runtime.SetFinalizer(ring, (*ringbufEventRing).Close)
+
+ return ring, nil
+}
+
+func (ring *ringbufEventRing) Close() {
+ runtime.SetFinalizer(ring, nil)
+
+ _ = unix.Munmap(ring.prod)
+ _ = unix.Munmap(ring.cons)
+
+ ring.prod = nil
+ ring.cons = nil
+}
+
+type ringReader struct {
+ // These point into mmap'ed memory and must be accessed atomically.
+ prod_pos, cons_pos *uint64
+ mask uint64
+ ring []byte
+}
+
+func newRingReader(cons_ptr, prod_ptr *uint64, ring []byte) *ringReader {
+ return &ringReader{
+ prod_pos: prod_ptr,
+ cons_pos: cons_ptr,
+ // cap is always a power of two
+ mask: uint64(cap(ring)/2 - 1),
+ ring: ring,
+ }
+}
+
+// To be able to wrap around data, data pages in ring buffers are mapped twice in
+// a single contiguous virtual region.
+// Therefore the returned usable size is half the size of the mmaped region.
+func (rr *ringReader) size() int {
+ return cap(rr.ring) / 2
+}
+
+// Read a record from an event ring.
+func (rr *ringReader) readRecord(rec *Record) error {
+ prod := atomic.LoadUint64(rr.prod_pos)
+ cons := atomic.LoadUint64(rr.cons_pos)
+
+ for {
+ if remaining := prod - cons; remaining == 0 {
+ return errEOR
+ } else if remaining < unix.BPF_RINGBUF_HDR_SZ {
+ return fmt.Errorf("read record header: %w", io.ErrUnexpectedEOF)
+ }
+
+ // read the len field of the header atomically to ensure a happens before
+ // relationship with the xchg in the kernel. Without this we may see len
+ // without BPF_RINGBUF_BUSY_BIT before the written data is visible.
+ // See https://github.com/torvalds/linux/blob/v6.8/kernel/bpf/ringbuf.c#L484
+ start := cons & rr.mask
+ len := atomic.LoadUint32((*uint32)((unsafe.Pointer)(&rr.ring[start])))
+ header := ringbufHeader{Len: len}
+
+ if header.isBusy() {
+ // the next sample in the ring is not committed yet so we
+ // exit without storing the reader/consumer position
+ // and start again from the same position.
+ return errBusy
+ }
+
+ cons += unix.BPF_RINGBUF_HDR_SZ
+
+ // Data is always padded to 8 byte alignment.
+ dataLenAligned := uint64(internal.Align(header.dataLen(), 8))
+ if remaining := prod - cons; remaining < dataLenAligned {
+ return fmt.Errorf("read sample data: %w", io.ErrUnexpectedEOF)
+ }
+
+ start = cons & rr.mask
+ cons += dataLenAligned
+
+ if header.isDiscard() {
+ // when the record header indicates that the data should be
+ // discarded, we skip it by just updating the consumer position
+ // to the next record.
+ atomic.StoreUint64(rr.cons_pos, cons)
+ continue
+ }
+
+ if n := header.dataLen(); cap(rec.RawSample) < n {
+ rec.RawSample = make([]byte, n)
+ } else {
+ rec.RawSample = rec.RawSample[:n]
+ }
+
+ copy(rec.RawSample, rr.ring[start:])
+ rec.Remaining = int(prod - cons)
+ atomic.StoreUint64(rr.cons_pos, cons)
+ return nil
+ }
+}
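
Two details of ring.go are worth spelling out: the producer and consumer positions are monotonically increasing 64-bit counters that are reduced to buffer offsets with a power-of-two mask, and the data pages are mapped twice back to back so a record that crosses the end of the buffer can still be read as one contiguous slice. A standalone sketch of the same arithmetic, independent of BPF; the buffer contents and sizes are illustrative.

package main

import "fmt"

func main() {
	const size = 8 // ring size, always a power of two
	mask := uint64(size - 1)

	// Positions only ever grow; the mask turns them into offsets.
	for _, pos := range []uint64{0, 5, 8, 13} {
		fmt.Printf("pos=%2d -> offset=%d\n", pos, pos&mask)
	}

	// Emulate the double mapping: appending a second copy of the data
	// lets a record that starts near the end be read without splitting.
	data := []byte("ABCDEFGH")
	doubled := append(append([]byte{}, data...), data...)
	start := uint64(6) & mask
	fmt.Printf("4 bytes at offset %d: %q\n", start, doubled[start:start+4])
}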
diff --git a/vendor/github.com/cilium/ebpf/rlimit/rlimit.go b/vendor/github.com/cilium/ebpf/rlimit/rlimit.go
new file mode 100644
index 000000000..2a6973744
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/rlimit/rlimit.go
@@ -0,0 +1,123 @@
+// Package rlimit allows raising RLIMIT_MEMLOCK if necessary for the use of BPF.
+package rlimit
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ unsupportedMemcgAccounting = &internal.UnsupportedFeatureError{
+ MinimumVersion: internal.Version{5, 11, 0},
+ Name: "memcg-based accounting for BPF memory",
+ }
+ haveMemcgAccounting error
+
+ rlimitMu sync.Mutex
+)
+
+func init() {
+ // We have to run this feature test at init, since it relies on changing
+ // RLIMIT_MEMLOCK. Doing so is not safe in a concurrent program. Instead,
+ // we rely on the initialization order guaranteed by the Go runtime to
+ // execute the test in a safe environment:
+ //
+ // the invocation of init functions happens in a single goroutine,
+ // sequentially, one package at a time.
+ //
+ // This is also the reason why RemoveMemlock is in its own package:
+ // we only want to run the initializer if RemoveMemlock is called
+ // from somewhere.
+ haveMemcgAccounting = detectMemcgAccounting()
+}
+
+func detectMemcgAccounting() error {
+ // Retrieve the original limit to prevent lowering Max, since
+ // doing so is a permanent operation when running unprivileged.
+ var oldLimit unix.Rlimit
+ if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, nil, &oldLimit); err != nil {
+ return fmt.Errorf("getting original memlock rlimit: %s", err)
+ }
+
+ // Drop the current limit to zero, maintaining the old Max value.
+ // This is always permitted by the kernel for unprivileged users.
+ // Retrieve a new copy of the old limit tuple to minimize the chances
+ // of failing the restore operation below.
+ zeroLimit := unix.Rlimit{Cur: 0, Max: oldLimit.Max}
+ if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &zeroLimit, &oldLimit); err != nil {
+ return fmt.Errorf("lowering memlock rlimit: %s", err)
+ }
+
+ attr := sys.MapCreateAttr{
+ MapType: 2, /* Array */
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ }
+
+ // Creating a map allocates shared (and locked) memory that counts against
+ // the rlimit on pre-5.11 kernels, but against the memory cgroup budget on
+ // kernels 5.11 and over. If this call succeeds with the process' memlock
+ // rlimit set to 0, we can reasonably assume memcg accounting is supported.
+ fd, mapErr := sys.MapCreate(&attr)
+
+ // Restore old limits regardless of what happened.
+ if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &oldLimit, nil); err != nil {
+ return fmt.Errorf("restoring old memlock rlimit: %s", err)
+ }
+
+ // Map creation successful, memcg accounting supported.
+ if mapErr == nil {
+ fd.Close()
+ return nil
+ }
+
+ // EPERM shows up when map creation would exceed the memory budget.
+ if errors.Is(mapErr, unix.EPERM) {
+ return unsupportedMemcgAccounting
+ }
+
+ // This shouldn't happen really.
+ return fmt.Errorf("unexpected error detecting memory cgroup accounting: %s", mapErr)
+}
+
+// RemoveMemlock removes the limit on the amount of memory the current
+// process can lock into RAM, if necessary.
+//
+// This is not required to load eBPF resources on kernel versions 5.11+
+// due to the introduction of cgroup-based memory accounting. On such kernels
+// the function is a no-op.
+//
+// Since the function may change global per-process limits it should be invoked
+// at program start up, in main() or init().
+//
+// This function exists as a convenience and should only be used when
+// permanently raising RLIMIT_MEMLOCK to infinite is appropriate. Consider
+// invoking prlimit(2) directly with a more reasonable limit if desired.
+//
+// Requires CAP_SYS_RESOURCE on kernels < 5.11.
+func RemoveMemlock() error {
+ if haveMemcgAccounting == nil {
+ return nil
+ }
+
+ if !errors.Is(haveMemcgAccounting, unsupportedMemcgAccounting) {
+ return haveMemcgAccounting
+ }
+
+ rlimitMu.Lock()
+ defer rlimitMu.Unlock()
+
+ // pid 0 affects the current process. Requires CAP_SYS_RESOURCE.
+ newLimit := unix.Rlimit{Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY}
+ if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &newLimit, nil); err != nil {
+ return fmt.Errorf("failed to set memlock rlimit: %w", err)
+ }
+
+ return nil
+}
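
The intended use of this package is a single call at program start, before any maps or programs are loaded: on kernels 5.11 and newer with memcg accounting the call is a no-op, while on older kernels it raises RLIMIT_MEMLOCK to infinity and therefore needs CAP_SYS_RESOURCE. A minimal sketch; the object-loading step is a placeholder.

package main

import (
	"log"

	"github.com/cilium/ebpf/rlimit"
)

func main() {
	// Run before loading eBPF objects; no-op on kernels >= 5.11.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatalf("removing memlock rlimit: %v", err)
	}

	// ... load maps and programs here (omitted).
}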
diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go
new file mode 100644
index 000000000..4aef7faeb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/syscalls.go
@@ -0,0 +1,337 @@
+package ebpf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "runtime"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ // pre-allocating these here since they may
+ // get called in hot code paths and cause
+ // unnecessary memory allocations
+ sysErrKeyNotExist = sys.Error(ErrKeyNotExist, unix.ENOENT)
+ sysErrKeyExist = sys.Error(ErrKeyExist, unix.EEXIST)
+ sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP)
+)
+
+// invalidBPFObjNameChar returns true if char may not appear in
+// a BPF object name.
+func invalidBPFObjNameChar(char rune) bool {
+ dotAllowed := objNameAllowsDot() == nil
+
+ switch {
+ case char >= 'A' && char <= 'Z':
+ return false
+ case char >= 'a' && char <= 'z':
+ return false
+ case char >= '0' && char <= '9':
+ return false
+ case dotAllowed && char == '.':
+ return false
+ case char == '_':
+ return false
+ default:
+ return true
+ }
+}
+
+func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) {
+ buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+ if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
+ return nil, err
+ }
+ bytecode := buf.Bytes()
+
+ return sys.ProgLoad(&sys.ProgLoadAttr{
+ ProgType: sys.ProgType(typ),
+ License: sys.NewStringPointer(license),
+ Insns: sys.NewSlicePointer(bytecode),
+ InsnCnt: uint32(len(bytecode) / asm.InstructionSize),
+ })
+}
+
+var haveNestedMaps = internal.NewFeatureTest("nested maps", "4.12", func() error {
+ _, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(ArrayOfMaps),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ // Invalid file descriptor.
+ InnerMapFd: ^uint32(0),
+ })
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
+
+var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", "5.2", func() error {
+ // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
+ // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_RDONLY_PROG,
+ })
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = m.Close()
+ return nil
+})
+
+var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", "5.5", func() error {
+ // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_MMAPABLE,
+ })
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = m.Close()
+ return nil
+})
+
+var haveInnerMaps = internal.NewFeatureTest("inner maps", "5.10", func() error {
+ // This checks BPF_F_INNER_MAP, which appeared in 5.10.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_INNER_MAP,
+ })
+
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = m.Close()
+ return nil
+})
+
+var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", "4.6", func() error {
+ // This checks BPF_F_NO_PREALLOC, which appeared in 4.6.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Hash),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_NO_PREALLOC,
+ })
+
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = m.Close()
+ return nil
+})
+
+func wrapMapError(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ if errors.Is(err, unix.ENOENT) {
+ return sysErrKeyNotExist
+ }
+
+ if errors.Is(err, unix.EEXIST) {
+ return sysErrKeyExist
+ }
+
+ if errors.Is(err, sys.ENOTSUPP) {
+ return sysErrNotSupported
+ }
+
+ if errors.Is(err, unix.E2BIG) {
+ return fmt.Errorf("key too big for map: %w", err)
+ }
+
+ return err
+}
+
+var haveObjName = internal.NewFeatureTest("object names", "4.15", func() error {
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapName: sys.NewObjName("feature_test"),
+ }
+
+ fd, err := sys.MapCreate(&attr)
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ _ = fd.Close()
+ return nil
+})
+
+var objNameAllowsDot = internal.NewFeatureTest("dot in object names", "5.2", func() error {
+ if err := haveObjName(); err != nil {
+ return err
+ }
+
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapName: sys.NewObjName(".test"),
+ }
+
+ fd, err := sys.MapCreate(&attr)
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ _ = fd.Close()
+ return nil
+})
+
+var haveBatchAPI = internal.NewFeatureTest("map batch api", "5.6", func() error {
+ var maxEntries uint32 = 2
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Hash),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: maxEntries,
+ }
+
+ fd, err := sys.MapCreate(&attr)
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ defer fd.Close()
+
+ keys := []uint32{1, 2}
+ values := []uint32{3, 4}
+ kp, _ := marshalMapSyscallInput(keys, 8)
+ vp, _ := marshalMapSyscallInput(values, 8)
+
+ err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{
+ MapFd: fd.Uint(),
+ Keys: kp,
+ Values: vp,
+ Count: maxEntries,
+ })
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ return nil
+})
+
+var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", "5.5", func() error {
+ insns := asm.Instructions{
+ asm.Mov.Reg(asm.R1, asm.R10),
+ asm.Add.Imm(asm.R1, -8),
+ asm.Mov.Imm(asm.R2, 8),
+ asm.Mov.Imm(asm.R3, 0),
+ asm.FnProbeReadKernel.Call(),
+ asm.Return(),
+ }
+
+ fd, err := progLoad(insns, Kprobe, "GPL")
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = fd.Close()
+ return nil
+})
+
+var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func() error {
+ insns := asm.Instructions{
+ asm.Call.Label("prog2").WithSymbol("prog1"),
+ asm.Return(),
+ asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"),
+ asm.Return(),
+ }
+
+ fd, err := progLoad(insns, SocketFilter, "MIT")
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = fd.Close()
+ return nil
+})
+
+var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func() error {
+ prefix := internal.PlatformPrefix()
+ if prefix == "" {
+ return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH)
+ }
+
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Kprobe,
+ Symbol: prefix + "sys_bpf",
+ Pid: -1,
+ }
+
+ var err error
+ args.Group, err = tracefs.RandomGroup("ebpf_probe")
+ if err != nil {
+ return err
+ }
+
+ evt, err := tracefs.NewEvent(args)
+ if errors.Is(err, os.ErrNotExist) {
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+
+ return evt.Close()
+})
+
+var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", func() error {
+ insns := asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+ if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
+ return err
+ }
+ bytecode := buf.Bytes()
+
+ _, err := sys.ProgLoad(&sys.ProgLoadAttr{
+ ProgType: sys.ProgType(SocketFilter),
+ License: sys.NewStringPointer("MIT"),
+ Insns: sys.NewSlicePointer(bytecode),
+ InsnCnt: uint32(len(bytecode) / asm.InstructionSize),
+ FuncInfoCnt: 1,
+ ProgBtfFd: math.MaxUint32,
+ })
+
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+
+ if errors.Is(err, unix.E2BIG) {
+ return ErrNotSupported
+ }
+
+ return err
+})
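
The feature probes above are evaluated lazily by the rest of the package, and wrapMapError is what turns raw errno values into the sentinel errors callers are expected to compare against. A hedged sketch of how those sentinels surface through the public Map API; the key and value types are illustrative, and creating a map needs the usual BPF privileges.

package main

import (
	"errors"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 8,
	})
	if err != nil {
		log.Fatalf("creating map: %v", err)
	}
	defer m.Close()

	key, value := uint32(42), uint32(0)
	// ENOENT from the lookup syscall is wrapped into ErrKeyNotExist.
	if err := m.Lookup(&key, &value); errors.Is(err, ebpf.ErrKeyNotExist) {
		log.Printf("key %d not present yet", key)
	}
}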
diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go
new file mode 100644
index 000000000..542c2397c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/types.go
@@ -0,0 +1,299 @@
+package ebpf
+
+import (
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output types_string.go -type=MapType,ProgramType,PinType
+
+// MapType indicates the type of map structure
+// that will be initialized in the kernel.
+type MapType uint32
+
+// All the various map types that can be created
+const (
+ UnspecifiedMap MapType = iota
+ // Hash is a hash map
+ Hash
+ // Array is an array map
+ Array
+ // ProgramArray - A program array map is a special kind of array map whose map
+ // values contain only file descriptors referring to other eBPF
+ // programs. Thus, both the key_size and value_size must be
+ // exactly four bytes. This map is used in conjunction with the
+ // TailCall helper.
+ ProgramArray
+ // PerfEventArray - A perf event array is used in conjunction with PerfEventRead
+ // and PerfEventOutput calls, to read the raw bpf_perf_data from the registers.
+ PerfEventArray
+ // PerCPUHash - This data structure is useful for people who have high performance
+ // network needs and can reconcile adds at the end of some cycle, so that
+ // hashes can be lock free without the use of XAdd, which can be costly.
+ PerCPUHash
+ // PerCPUArray - This data structure is useful for people who have high performance
+ // network needs and can reconcile adds at the end of some cycle, so that
+ // hashes can be lock free without the use of XAdd, which can be costly.
+ // Each CPU gets a copy of this hash, the contents of all of which can be reconciled
+ // later.
+ PerCPUArray
+ // StackTrace - This holds whole user and kernel stack traces, it can be retrieved with
+ // GetStackID
+ StackTrace
+ // CGroupArray - This is a very niche structure used to help SKBInCGroup determine
+ // if an skb is from a socket belonging to a specific cgroup
+ CGroupArray
+ // LRUHash - This allows you to create a small hash structure that will purge the
+ // least recently used items rather than throw an error when you run out of memory
+ LRUHash
+ // LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs,
+ // it has more to do with including the CPU id with the LRU calculation so that if a
+ // particular CPU is using a value over-and-over again, then it will be saved, but if
+ // a value is being retrieved a lot but sparsely across CPUs it is not as important, basically
+ // giving weight to CPU locality over overall usage.
+ LRUCPUHash
+ // LPMTrie - This is an implementation of Longest-Prefix-Match Trie structure. It is useful,
+ // for storing things like IP addresses which can be bit masked allowing for keys of differing
+ // values to refer to the same reference based on their masks. See wikipedia for more details.
+ LPMTrie
+ // ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps
+ // itself.
+ ArrayOfMaps
+ // HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps
+ // itself.
+ HashOfMaps
+ // DevMap - Specialized map to store references to network devices.
+ DevMap
+ // SockMap - Specialized map to store references to sockets.
+ SockMap
+ // CPUMap - Specialized map to store references to CPUs.
+ CPUMap
+ // XSKMap - Specialized map for XDP programs to store references to open sockets.
+ XSKMap
+ // SockHash - Specialized hash to store references to sockets.
+ SockHash
+ // CGroupStorage - Special map for CGroups.
+ CGroupStorage
+ // ReusePortSockArray - Specialized map to store references to sockets that can be reused.
+ ReusePortSockArray
+ // PerCPUCGroupStorage - Special per CPU map for CGroups.
+ PerCPUCGroupStorage
+ // Queue - FIFO storage for BPF programs.
+ Queue
+ // Stack - LIFO storage for BPF programs.
+ Stack
+ // SkStorage - Specialized map for local storage at SK for BPF programs.
+ SkStorage
+ // DevMapHash - Hash-based indexing scheme for references to network devices.
+ DevMapHash
+ // StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF
+ // program.
+ StructOpsMap
+ // RingBuf - Similar to PerfEventArray, but shared across all CPUs.
+ RingBuf
+ // InodeStorage - Specialized local storage map for inodes.
+ InodeStorage
+ // TaskStorage - Specialized local storage map for task_struct.
+ TaskStorage
+)
+
+// hasPerCPUValue returns true if the Map stores a value per CPU.
+func (mt MapType) hasPerCPUValue() bool {
+ return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage
+}
+
+// canStoreMapOrProgram returns true if the Map stores references to another Map
+// or Program.
+func (mt MapType) canStoreMapOrProgram() bool {
+ return mt.canStoreMap() || mt.canStoreProgram()
+}
+
+// canStoreMap returns true if the map type accepts a map fd
+// for update and returns a map id for lookup.
+func (mt MapType) canStoreMap() bool {
+ return mt == ArrayOfMaps || mt == HashOfMaps
+}
+
+// canStoreProgram returns true if the map type accepts a program fd
+// for update and returns a program id for lookup.
+func (mt MapType) canStoreProgram() bool {
+ return mt == ProgramArray
+}
+
+// ProgramType of the eBPF program
+type ProgramType uint32
+
+// eBPF program types
+const (
+ UnspecifiedProgram = ProgramType(sys.BPF_PROG_TYPE_UNSPEC)
+ SocketFilter = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER)
+ Kprobe = ProgramType(sys.BPF_PROG_TYPE_KPROBE)
+ SchedCLS = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS)
+ SchedACT = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT)
+ TracePoint = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT)
+ XDP = ProgramType(sys.BPF_PROG_TYPE_XDP)
+ PerfEvent = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT)
+ CGroupSKB = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB)
+ CGroupSock = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK)
+ LWTIn = ProgramType(sys.BPF_PROG_TYPE_LWT_IN)
+ LWTOut = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT)
+ LWTXmit = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT)
+ SockOps = ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS)
+ SkSKB = ProgramType(sys.BPF_PROG_TYPE_SK_SKB)
+ CGroupDevice = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE)
+ SkMsg = ProgramType(sys.BPF_PROG_TYPE_SK_MSG)
+ RawTracepoint = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT)
+ CGroupSockAddr = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR)
+ LWTSeg6Local = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL)
+ LircMode2 = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2)
+ SkReuseport = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT)
+ FlowDissector = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR)
+ CGroupSysctl = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL)
+ RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE)
+ CGroupSockopt = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT)
+ Tracing = ProgramType(sys.BPF_PROG_TYPE_TRACING)
+ StructOps = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS)
+ Extension = ProgramType(sys.BPF_PROG_TYPE_EXT)
+ LSM = ProgramType(sys.BPF_PROG_TYPE_LSM)
+ SkLookup = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP)
+ Syscall = ProgramType(sys.BPF_PROG_TYPE_SYSCALL)
+ Netfilter = ProgramType(sys.BPF_PROG_TYPE_NETFILTER)
+)
+
+// AttachType of the eBPF program, needed to differentiate allowed context accesses in
+// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required.
+// Will cause invalid argument (EINVAL) at program load time if set incorrectly.
+type AttachType uint32
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -type AttachType -trimprefix Attach
+
+// AttachNone is an alias for AttachCGroupInetIngress for readability reasons.
+const AttachNone AttachType = 0
+
+const (
+ AttachCGroupInetIngress = AttachType(sys.BPF_CGROUP_INET_INGRESS)
+ AttachCGroupInetEgress = AttachType(sys.BPF_CGROUP_INET_EGRESS)
+ AttachCGroupInetSockCreate = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE)
+ AttachCGroupSockOps = AttachType(sys.BPF_CGROUP_SOCK_OPS)
+ AttachSkSKBStreamParser = AttachType(sys.BPF_SK_SKB_STREAM_PARSER)
+ AttachSkSKBStreamVerdict = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT)
+ AttachCGroupDevice = AttachType(sys.BPF_CGROUP_DEVICE)
+ AttachSkMsgVerdict = AttachType(sys.BPF_SK_MSG_VERDICT)
+ AttachCGroupInet4Bind = AttachType(sys.BPF_CGROUP_INET4_BIND)
+ AttachCGroupInet6Bind = AttachType(sys.BPF_CGROUP_INET6_BIND)
+ AttachCGroupInet4Connect = AttachType(sys.BPF_CGROUP_INET4_CONNECT)
+ AttachCGroupInet6Connect = AttachType(sys.BPF_CGROUP_INET6_CONNECT)
+ AttachCGroupInet4PostBind = AttachType(sys.BPF_CGROUP_INET4_POST_BIND)
+ AttachCGroupInet6PostBind = AttachType(sys.BPF_CGROUP_INET6_POST_BIND)
+ AttachCGroupUDP4Sendmsg = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG)
+ AttachCGroupUDP6Sendmsg = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG)
+ AttachLircMode2 = AttachType(sys.BPF_LIRC_MODE2)
+ AttachFlowDissector = AttachType(sys.BPF_FLOW_DISSECTOR)
+ AttachCGroupSysctl = AttachType(sys.BPF_CGROUP_SYSCTL)
+ AttachCGroupUDP4Recvmsg = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG)
+ AttachCGroupUDP6Recvmsg = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG)
+ AttachCGroupGetsockopt = AttachType(sys.BPF_CGROUP_GETSOCKOPT)
+ AttachCGroupSetsockopt = AttachType(sys.BPF_CGROUP_SETSOCKOPT)
+ AttachTraceRawTp = AttachType(sys.BPF_TRACE_RAW_TP)
+ AttachTraceFEntry = AttachType(sys.BPF_TRACE_FENTRY)
+ AttachTraceFExit = AttachType(sys.BPF_TRACE_FEXIT)
+ AttachModifyReturn = AttachType(sys.BPF_MODIFY_RETURN)
+ AttachLSMMac = AttachType(sys.BPF_LSM_MAC)
+ AttachTraceIter = AttachType(sys.BPF_TRACE_ITER)
+ AttachCgroupInet4GetPeername = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME)
+ AttachCgroupInet6GetPeername = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME)
+ AttachCgroupInet4GetSockname = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME)
+ AttachCgroupInet6GetSockname = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME)
+ AttachXDPDevMap = AttachType(sys.BPF_XDP_DEVMAP)
+ AttachCgroupInetSockRelease = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE)
+ AttachXDPCPUMap = AttachType(sys.BPF_XDP_CPUMAP)
+ AttachSkLookup = AttachType(sys.BPF_SK_LOOKUP)
+ AttachXDP = AttachType(sys.BPF_XDP)
+ AttachSkSKBVerdict = AttachType(sys.BPF_SK_SKB_VERDICT)
+ AttachSkReuseportSelect = AttachType(sys.BPF_SK_REUSEPORT_SELECT)
+ AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)
+ AttachPerfEvent = AttachType(sys.BPF_PERF_EVENT)
+ AttachTraceKprobeMulti = AttachType(sys.BPF_TRACE_KPROBE_MULTI)
+ AttachLSMCgroup = AttachType(sys.BPF_LSM_CGROUP)
+ AttachStructOps = AttachType(sys.BPF_STRUCT_OPS)
+ AttachNetfilter = AttachType(sys.BPF_NETFILTER)
+ AttachTCXIngress = AttachType(sys.BPF_TCX_INGRESS)
+ AttachTCXEgress = AttachType(sys.BPF_TCX_EGRESS)
+ AttachTraceUprobeMulti = AttachType(sys.BPF_TRACE_UPROBE_MULTI)
+ AttachCgroupUnixConnect = AttachType(sys.BPF_CGROUP_UNIX_CONNECT)
+ AttachCgroupUnixSendmsg = AttachType(sys.BPF_CGROUP_UNIX_SENDMSG)
+ AttachCgroupUnixRecvmsg = AttachType(sys.BPF_CGROUP_UNIX_RECVMSG)
+ AttachCgroupUnixGetpeername = AttachType(sys.BPF_CGROUP_UNIX_GETPEERNAME)
+ AttachCgroupUnixGetsockname = AttachType(sys.BPF_CGROUP_UNIX_GETSOCKNAME)
+ AttachNetkitPrimary = AttachType(sys.BPF_NETKIT_PRIMARY)
+ AttachNetkitPeer = AttachType(sys.BPF_NETKIT_PEER)
+)
+
+// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
+type AttachFlags uint32
+
+// PinType determines whether a map is pinned into a BPFFS.
+type PinType uint32
+
+// Valid pin types.
+//
+// Mirrors enum libbpf_pin_type.
+const (
+ PinNone PinType = iota
+ // Pin an object by using its name as the filename.
+ PinByName
+)
+
+// LoadPinOptions control how a pinned object is loaded.
+type LoadPinOptions struct {
+ // Request a read-only or write-only object. The default is a read-write
+ // object. Only one of the flags may be set.
+ ReadOnly bool
+ WriteOnly bool
+
+ // Raw flags for the syscall. Other fields of this struct take precedence.
+ Flags uint32
+}
+
+// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter.
+func (lpo *LoadPinOptions) Marshal() uint32 {
+ if lpo == nil {
+ return 0
+ }
+
+ flags := lpo.Flags
+ if lpo.ReadOnly {
+ flags |= unix.BPF_F_RDONLY
+ }
+ if lpo.WriteOnly {
+ flags |= unix.BPF_F_WRONLY
+ }
+ return flags
+}
+
+// BatchOptions holds options for batch map operations.
+//
+// Mirrors libbpf struct bpf_map_batch_opts
+// Currently BPF_F_FLAG is the only supported
+// flag (for ElemFlags).
+type BatchOptions struct {
+ ElemFlags uint64
+ Flags uint64
+}
+
+// LogLevel controls the verbosity of the kernel's eBPF program verifier.
+// These constants can be used for the ProgramOptions.LogLevel field.
+type LogLevel = sys.LogLevel
+
+const (
+ // Print verifier state at branch points.
+ LogLevelBranch = sys.BPF_LOG_LEVEL1
+
+ // Print verifier state for every instruction.
+ // Available since Linux v5.2.
+ LogLevelInstruction = sys.BPF_LOG_LEVEL2
+
+ // Print verifier errors and stats at the end of the verification process.
+ // Available since Linux v5.2.
+ LogLevelStats = sys.BPF_LOG_STATS
+)
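
Of the types above, LoadPinOptions is the one most often seen in calling code: its Marshal result feeds the file_flags field of BPF_OBJ_GET, which is how read-only or write-only opens of pinned objects are expressed. A short sketch of opening a pinned map read-only; the pin path is illustrative and assumes a mounted bpffs.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	const pinPath = "/sys/fs/bpf/example_map" // hypothetical pin path

	m, err := ebpf.LoadPinnedMap(pinPath, &ebpf.LoadPinOptions{
		ReadOnly: true, // marshalled into BPF_F_RDONLY for BPF_OBJ_GET
	})
	if err != nil {
		log.Fatalf("opening pinned map: %v", err)
	}
	defer m.Close()

	log.Printf("opened %s map with %d max entries", m.Type(), m.MaxEntries())
}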
diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go
new file mode 100644
index 000000000..ee60b5be5
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/types_string.go
@@ -0,0 +1,119 @@
+// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT.
+
+package ebpf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[UnspecifiedMap-0]
+ _ = x[Hash-1]
+ _ = x[Array-2]
+ _ = x[ProgramArray-3]
+ _ = x[PerfEventArray-4]
+ _ = x[PerCPUHash-5]
+ _ = x[PerCPUArray-6]
+ _ = x[StackTrace-7]
+ _ = x[CGroupArray-8]
+ _ = x[LRUHash-9]
+ _ = x[LRUCPUHash-10]
+ _ = x[LPMTrie-11]
+ _ = x[ArrayOfMaps-12]
+ _ = x[HashOfMaps-13]
+ _ = x[DevMap-14]
+ _ = x[SockMap-15]
+ _ = x[CPUMap-16]
+ _ = x[XSKMap-17]
+ _ = x[SockHash-18]
+ _ = x[CGroupStorage-19]
+ _ = x[ReusePortSockArray-20]
+ _ = x[PerCPUCGroupStorage-21]
+ _ = x[Queue-22]
+ _ = x[Stack-23]
+ _ = x[SkStorage-24]
+ _ = x[DevMapHash-25]
+ _ = x[StructOpsMap-26]
+ _ = x[RingBuf-27]
+ _ = x[InodeStorage-28]
+ _ = x[TaskStorage-29]
+}
+
+const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorage"
+
+var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290}
+
+func (i MapType) String() string {
+ if i >= MapType(len(_MapType_index)-1) {
+ return "MapType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _MapType_name[_MapType_index[i]:_MapType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[UnspecifiedProgram-0]
+ _ = x[SocketFilter-1]
+ _ = x[Kprobe-2]
+ _ = x[SchedCLS-3]
+ _ = x[SchedACT-4]
+ _ = x[TracePoint-5]
+ _ = x[XDP-6]
+ _ = x[PerfEvent-7]
+ _ = x[CGroupSKB-8]
+ _ = x[CGroupSock-9]
+ _ = x[LWTIn-10]
+ _ = x[LWTOut-11]
+ _ = x[LWTXmit-12]
+ _ = x[SockOps-13]
+ _ = x[SkSKB-14]
+ _ = x[CGroupDevice-15]
+ _ = x[SkMsg-16]
+ _ = x[RawTracepoint-17]
+ _ = x[CGroupSockAddr-18]
+ _ = x[LWTSeg6Local-19]
+ _ = x[LircMode2-20]
+ _ = x[SkReuseport-21]
+ _ = x[FlowDissector-22]
+ _ = x[CGroupSysctl-23]
+ _ = x[RawTracepointWritable-24]
+ _ = x[CGroupSockopt-25]
+ _ = x[Tracing-26]
+ _ = x[StructOps-27]
+ _ = x[Extension-28]
+ _ = x[LSM-29]
+ _ = x[SkLookup-30]
+ _ = x[Syscall-31]
+ _ = x[Netfilter-32]
+}
+
+const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter"
+
+var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310}
+
+func (i ProgramType) String() string {
+ if i >= ProgramType(len(_ProgramType_index)-1) {
+ return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[PinNone-0]
+ _ = x[PinByName-1]
+}
+
+const _PinType_name = "PinNonePinByName"
+
+var _PinType_index = [...]uint8{0, 7, 16}
+
+func (i PinType) String() string {
+ if i >= PinType(len(_PinType_index)-1) {
+ return "PinType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _PinType_name[_PinType_index[i]:_PinType_index[i+1]]
+}
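
The generated String methods make these enum values readable in logs and error messages; unknown values fall back to the numeric form. A trivial sketch:

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	fmt.Println(ebpf.RingBuf)      // RingBuf
	fmt.Println(ebpf.Kprobe)       // Kprobe
	fmt.Println(ebpf.PinByName)    // PinByName
	fmt.Println(ebpf.MapType(255)) // MapType(255)
}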
diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE
new file mode 100644
index 000000000..8f71f43fe
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
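
The file vendored next, libcni's api.go, is the client side of CNI: runtimes build a CNIConfig pointing at the plugin binaries and then drive ADD, CHECK, and DEL through it. A hedged sketch of the typical ADD flow with that API; the plugin path, network name, config JSON, and namespace path below are all illustrative, and ConfListFromBytes comes from the package's conf.go, which is not shown here.

package main

import (
	"context"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	// Search path for plugin binaries; a nil exec means the default handler.
	cni := libcni.NewCNIConfig([]string{"/opt/cni/bin"}, nil)

	confList, err := libcni.ConfListFromBytes([]byte(`{
		"cniVersion": "0.4.0",
		"name": "example-net",
		"plugins": [{ "type": "bridge" }]
	}`))
	if err != nil {
		log.Fatalf("parsing config list: %v", err)
	}

	rt := &libcni.RuntimeConf{
		ContainerID: "example-container",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
	}

	result, err := cni.AddNetworkList(context.Background(), confList, rt)
	if err != nil {
		log.Fatalf("ADD failed: %v", err)
	}
	log.Printf("ADD result: %v", result)
}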
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
new file mode 100644
index 000000000..0d82a2dd3
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -0,0 +1,679 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package libcni
+
+// Note this is the actual implementation of the CNI specification, which
+// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file.
+// It is typically bundled into runtime providers (for example, containerd or cri-o would use this
+// before calling runc or hcsshim). It is also bundled into CNI providers, for example,
+// to add an IP to a container, to parse the configuration of the CNI and so on.
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containernetworking/cni/pkg/invoke"
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/types/create"
+ "github.com/containernetworking/cni/pkg/utils"
+ "github.com/containernetworking/cni/pkg/version"
+)
+
+var (
+ CacheDir = "/var/lib/cni"
+)
+
+const (
+ CNICacheV1 = "cniCacheV1"
+)
+
+// A RuntimeConf holds the arguments to one invocation of a CNI plugin
+// excepting the network configuration, with the nested exception that
+// the `runtimeConfig` from the network configuration is included
+// here.
+type RuntimeConf struct {
+ ContainerID string
+ NetNS string
+ IfName string
+ Args [][2]string
+ // A dictionary of capability-specific data passed by the runtime
+ // to plugins as top-level keys in the 'runtimeConfig' dictionary
+ // of the plugin's stdin data. libcni will ensure that only keys
+ // in this map which match the capabilities of the plugin are passed
+ // to the plugin
+ CapabilityArgs map[string]interface{}
+
+ // DEPRECATED. Will be removed in a future release.
+ CacheDir string
+}
+
+type NetworkConfig struct {
+ Network *types.NetConf
+ Bytes []byte
+}
+
+type NetworkConfigList struct {
+ Name string
+ CNIVersion string
+ DisableCheck bool
+ Plugins []*NetworkConfig
+ Bytes []byte
+}
+
+type CNI interface {
+ AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+ CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+ DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+ GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+ GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
+
+ AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+ CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+ DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+ GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+ GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
+
+ ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
+ ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
+}
+
+type CNIConfig struct {
+ Path []string
+ exec invoke.Exec
+ cacheDir string
+}
+
+// CNIConfig implements the CNI interface
+var _ CNI = &CNIConfig{}
+
+// NewCNIConfig returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig {
+ return NewCNIConfigWithCacheDir(path, "", exec)
+}
+
+// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+// The given cache directory will be used for temporary data storage when needed.
+func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig {
+ return &CNIConfig{
+ Path: path,
+ cacheDir: cacheDir,
+ exec: exec,
+ }
+}
+
+func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) {
+ var err error
+
+ inject := map[string]interface{}{
+ "name": name,
+ "cniVersion": cniVersion,
+ }
+ // Add previous plugin result
+ if prevResult != nil {
+ inject["prevResult"] = prevResult
+ }
+
+ // Ensure every config uses the same name and version
+ orig, err = InjectConf(orig, inject)
+ if err != nil {
+ return nil, err
+ }
+
+ return injectRuntimeConfig(orig, rt)
+}
+
+// This function takes a libcni RuntimeConf structure and injects values into
+// a "runtimeConfig" dictionary in the CNI network configuration JSON that
+// will be passed to the plugin on stdin.
+//
+// Only "capabilities arguments" passed by the runtime are currently injected.
+// These capabilities arguments are filtered through the plugin's advertised
+// capabilities from its config JSON, and any keys in the CapabilityArgs
+// matching plugin capabilities are added to the "runtimeConfig" dictionary
+// sent to the plugin via JSON on stdin. For example, if the plugin's
+// capabilities include "portMappings", and the CapabilityArgs map includes a
+// "portMappings" key, that key and its value are added to the "runtimeConfig"
+// dictionary to be passed to the plugin's stdin.
+func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) {
+ var err error
+
+ rc := make(map[string]interface{})
+ for capability, supported := range orig.Network.Capabilities {
+ if !supported {
+ continue
+ }
+ if data, ok := rt.CapabilityArgs[capability]; ok {
+ rc[capability] = data
+ }
+ }
+
+ if len(rc) > 0 {
+ orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc})
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return orig, nil
+}
+
+// ensure we have a usable exec if the CNIConfig was not given one
+func (c *CNIConfig) ensureExec() invoke.Exec {
+ if c.exec == nil {
+ c.exec = &invoke.DefaultExec{
+ RawExec: &invoke.RawExec{Stderr: os.Stderr},
+ PluginDecoder: version.PluginDecoder{},
+ }
+ }
+ return c.exec
+}
+
+type cachedInfo struct {
+ Kind string `json:"kind"`
+ ContainerID string `json:"containerId"`
+ Config []byte `json:"config"`
+ IfName string `json:"ifName"`
+ NetworkName string `json:"networkName"`
+ CniArgs [][2]string `json:"cniArgs,omitempty"`
+ CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"`
+ RawResult map[string]interface{} `json:"result,omitempty"`
+ Result types.Result `json:"-"`
+}
+
+// getCacheDir returns the cache directory in this order:
+// 1) global cacheDir from CNIConfig object
+// 2) deprecated cacheDir from RuntimeConf object
+// 3) fall back to default cache directory
+func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string {
+ if c.cacheDir != "" {
+ return c.cacheDir
+ }
+ if rt.CacheDir != "" {
+ return rt.CacheDir
+ }
+ return CacheDir
+}
+
+func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) {
+ if netName == "" || rt.ContainerID == "" || rt.IfName == "" {
+ return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName)
+ }
+ return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil
+}
+
+func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error {
+ cached := cachedInfo{
+ Kind: CNICacheV1,
+ ContainerID: rt.ContainerID,
+ Config: config,
+ IfName: rt.IfName,
+ NetworkName: netName,
+ CniArgs: rt.Args,
+ CapabilityArgs: rt.CapabilityArgs,
+ }
+
+ // We need to get type.Result into cachedInfo as JSON map
+ // Marshal to []byte, then Unmarshal into cached.RawResult
+ data, err := json.Marshal(result)
+ if err != nil {
+ return err
+ }
+
+ err = json.Unmarshal(data, &cached.RawResult)
+ if err != nil {
+ return err
+ }
+
+ newBytes, err := json.Marshal(&cached)
+ if err != nil {
+ return err
+ }
+
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return err
+ }
+ if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(fname, newBytes, 0600)
+}
+
+func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ // Ignore error
+ return nil
+ }
+ return os.Remove(fname)
+}
+
+func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ var bytes []byte
+
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, nil, err
+ }
+ bytes, err = ioutil.ReadFile(fname)
+ if err != nil {
+ // Ignore read errors; the cached result may not exist on-disk
+ return nil, nil, nil
+ }
+
+ unmarshaled := cachedInfo{}
+ if err := json.Unmarshal(bytes, &unmarshaled); err != nil {
+ return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err)
+ }
+ if unmarshaled.Kind != CNICacheV1 {
+ return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind)
+ }
+
+ newRt := *rt
+ if unmarshaled.CniArgs != nil {
+ newRt.Args = unmarshaled.CniArgs
+ }
+ newRt.CapabilityArgs = unmarshaled.CapabilityArgs
+
+ return unmarshaled.Config, &newRt, nil
+}
+
+func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, err
+ }
+ data, err := ioutil.ReadFile(fname)
+ if err != nil {
+ // Ignore read errors; the cached result may not exist on-disk
+ return nil, nil
+ }
+
+ // Load the cached result
+ result, err := create.CreateFromBytes(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert to the config version to ensure plugins get prevResult
+ // in the same version as the config. The cached result version
+ // should match the config version unless the config was changed
+ // while the container was running.
+ result, err = result.GetAsVersion(cniVersion)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err)
+ }
+ return result, nil
+}
+
+func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, err
+ }
+ fdata, err := ioutil.ReadFile(fname)
+ if err != nil {
+ // Ignore read errors; the cached result may not exist on-disk
+ return nil, nil
+ }
+
+ cachedInfo := cachedInfo{}
+ if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 {
+ return c.getLegacyCachedResult(netName, cniVersion, rt)
+ }
+
+ newBytes, err := json.Marshal(&cachedInfo.RawResult)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err)
+ }
+
+ // Load the cached result
+ result, err := create.CreateFromBytes(newBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert to the config version to ensure plugins get prevResult
+ // in the same version as the config. The cached result version
+ // should match the config version unless the config was changed
+ // while the container was running.
+ result, err = result.GetAsVersion(cniVersion)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err)
+ }
+ return result, nil
+}
+
+// GetNetworkListCachedResult returns the cached Result of the previous
+// AddNetworkList() operation for a network list, or an error.
+func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+ return c.getCachedResult(list.Name, list.CNIVersion, rt)
+}
+
+// GetNetworkCachedResult returns the cached Result of the previous
+// AddNetwork() operation for a network, or an error.
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
+ return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+}
+
+// GetNetworkListCachedConfig copies the input RuntimeConf to output
+// RuntimeConf with fields updated with info from the cached Config.
+func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ return c.getCachedConfig(list.Name, rt)
+}
+
+// GetNetworkCachedConfig copies the input RuntimeConf to output
+// RuntimeConf with fields updated with info from the cached Config.
+func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ return c.getCachedConfig(net.Network.Name, rt)
+}
+
+func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return nil, err
+ }
+ if err := utils.ValidateContainerID(rt.ContainerID); err != nil {
+ return nil, err
+ }
+ if err := utils.ValidateNetworkName(name); err != nil {
+ return nil, err
+ }
+ if err := utils.ValidateInterfaceName(rt.IfName); err != nil {
+ return nil, err
+ }
+
+ newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+ if err != nil {
+ return nil, err
+ }
+
+ return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec)
+}
+
+// AddNetworkList executes a sequence of plugins with the ADD command
+func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+ var err error
+ var result types.Result
+ for _, net := range list.Plugins {
+ result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt)
+ if err != nil {
+ return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err)
+ }
+ }
+
+ if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil {
+ return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err)
+ }
+
+ return result, nil
+}
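+// For example, a container runtime could drive the ADD flow roughly as
+// follows. This is only a sketch: the paths, network name, and runtime values
+// are illustrative, and ctx is assumed to be an existing context.Context.
+//
+//	cniConfig := libcni.NewCNIConfig([]string{"/opt/cni/bin"}, nil)
+//	netList, err := libcni.LoadConfList("/etc/cni/net.d", "mynet")
+//	if err != nil {
+//		return err
+//	}
+//	rt := &libcni.RuntimeConf{
+//		ContainerID: "abc123",
+//		NetNS:       "/var/run/netns/abc123",
+//		IfName:      "eth0",
+//	}
+//	result, err := cniConfig.AddNetworkList(ctx, netList, rt)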
+
+func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return err
+ }
+
+ newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+ if err != nil {
+ return err
+ }
+
+ return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec)
+}
+
+// CheckNetworkList executes a sequence of plugins with the CHECK command
+func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
+ // CHECK was added in CNI spec version 0.4.0 and higher
+ if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
+ return err
+ } else if !gtet {
+ return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion)
+ }
+
+ if list.DisableCheck {
+ return nil
+ }
+
+ cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt)
+ if err != nil {
+ return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err)
+ }
+
+ for _, net := range list.Plugins {
+ if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return err
+ }
+
+ newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+ if err != nil {
+ return err
+ }
+
+ return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec)
+}
+
+// DelNetworkList executes a sequence of plugins with the DEL command
+func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
+ var cachedResult types.Result
+
+ // Cached result on DEL was added in CNI spec version 0.4.0 and higher
+ if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
+ return err
+ } else if gtet {
+ cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt)
+ if err != nil {
+ return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err)
+ }
+ }
+
+ for i := len(list.Plugins) - 1; i >= 0; i-- {
+ net := list.Plugins[i]
+ if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
+ return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err)
+ }
+ }
+ _ = c.cacheDel(list.Name, rt)
+
+ return nil
+}
+
+func pluginDescription(net *types.NetConf) string {
+ if net == nil {
+ return ""
+ }
+ pluginType := net.Type
+ out := fmt.Sprintf("type=%q", pluginType)
+ name := net.Name
+ if name != "" {
+ out += fmt.Sprintf(" name=%q", name)
+ }
+ return out
+}
+
+// AddNetwork executes the plugin with the ADD command
+func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
+ result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil {
+ return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err)
+ }
+
+ return result, nil
+}
+
+// CheckNetwork executes the plugin with the CHECK command
+func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
+ // CHECK was added in CNI spec version 0.4.0 and higher
+ if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
+ return err
+ } else if !gtet {
+ return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion)
+ }
+
+ cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+ if err != nil {
+ return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err)
+ }
+ return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt)
+}
+
+// DelNetwork executes the plugin with the DEL command
+func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
+ var cachedResult types.Result
+
+ // Cached result on DEL was added in CNI spec version 0.4.0 and higher
+ if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
+ return err
+ } else if gtet {
+ cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+ if err != nil {
+ return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err)
+ }
+ }
+
+ if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil {
+ return err
+ }
+ _ = c.cacheDel(net.Network.Name, rt)
+ return nil
+}
+
+// ValidateNetworkList checks that a configuration is reasonably valid.
+// - all the specified plugins exist on disk
+// - every plugin supports the desired version.
+//
+// Returns a list of all capabilities supported by the configuration, or an error.
+func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) {
+ version := list.CNIVersion
+
+ // holding map for seen caps (in case of duplicates)
+ caps := map[string]interface{}{}
+
+ errs := []error{}
+ for _, net := range list.Plugins {
+ if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil {
+ errs = append(errs, err)
+ }
+ for c, enabled := range net.Network.Capabilities {
+ if !enabled {
+ continue
+ }
+ caps[c] = struct{}{}
+ }
+ }
+
+ if len(errs) > 0 {
+ return nil, fmt.Errorf("%v", errs)
+ }
+
+ // make caps list
+ cc := make([]string, 0, len(caps))
+ for c := range caps {
+ cc = append(cc, c)
+ }
+
+ return cc, nil
+}
+
+// ValidateNetwork checks that a configuration is reasonably valid.
+// It uses the same logic as ValidateNetworkList.
+// Returns a list of capabilities.
+func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) {
+ caps := []string{}
+ for c, ok := range net.Network.Capabilities {
+ if ok {
+ caps = append(caps, c)
+ }
+ }
+ if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil {
+ return nil, err
+ }
+ return caps, nil
+}
+
+// validatePlugin checks that an individual plugin's configuration is sane
+func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(pluginName, c.Path)
+ if err != nil {
+ return err
+ }
+ if expectedVersion == "" {
+ expectedVersion = "0.1.0"
+ }
+
+ vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+ if err != nil {
+ return err
+ }
+ for _, vers := range vi.SupportedVersions() {
+ if vers == expectedVersion {
+ return nil
+ }
+ }
+ return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion)
+}
+
+// GetVersionInfo reports which versions of the CNI spec are supported by
+// the given plugin.
+func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(pluginType, c.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ return invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+}
+
+// =====
+func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args {
+ return &invoke.Args{
+ Command: action,
+ ContainerID: rt.ContainerID,
+ NetNS: rt.NetNS,
+ PluginArgs: rt.Args,
+ IfName: rt.IfName,
+ Path: strings.Join(c.Path, string(os.PathListSeparator)),
+ }
+}
diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go
new file mode 100644
index 000000000..3cd6a59d1
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/libcni/conf.go
@@ -0,0 +1,270 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package libcni
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+type NotFoundError struct {
+ Dir string
+ Name string
+}
+
+func (e NotFoundError) Error() string {
+ return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir)
+}
+
+type NoConfigsFoundError struct {
+ Dir string
+}
+
+func (e NoConfigsFoundError) Error() string {
+ return fmt.Sprintf(`no net configurations found in %s`, e.Dir)
+}
+
+func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
+ conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}}
+ if err := json.Unmarshal(bytes, conf.Network); err != nil {
+ return nil, fmt.Errorf("error parsing configuration: %w", err)
+ }
+ if conf.Network.Type == "" {
+ return nil, fmt.Errorf("error parsing configuration: missing 'type'")
+ }
+ return conf, nil
+}
+
+func ConfFromFile(filename string) (*NetworkConfig, error) {
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("error reading %s: %w", filename, err)
+ }
+ return ConfFromBytes(bytes)
+}
+
+func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
+ rawList := make(map[string]interface{})
+ if err := json.Unmarshal(bytes, &rawList); err != nil {
+ return nil, fmt.Errorf("error parsing configuration list: %w", err)
+ }
+
+ rawName, ok := rawList["name"]
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: no name")
+ }
+ name, ok := rawName.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName)
+ }
+
+ var cniVersion string
+ rawVersion, ok := rawList["cniVersion"]
+ if ok {
+ cniVersion, ok = rawVersion.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion)
+ }
+ }
+
+ disableCheck := false
+ if rawDisableCheck, ok := rawList["disableCheck"]; ok {
+ disableCheck, ok = rawDisableCheck.(bool)
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck)
+ }
+ }
+
+ list := &NetworkConfigList{
+ Name: name,
+ DisableCheck: disableCheck,
+ CNIVersion: cniVersion,
+ Bytes: bytes,
+ }
+
+ var plugins []interface{}
+ plug, ok := rawList["plugins"]
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key")
+ }
+ plugins, ok = plug.([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug)
+ }
+ if len(plugins) == 0 {
+ return nil, fmt.Errorf("error parsing configuration list: no plugins in list")
+ }
+
+ for i, conf := range plugins {
+ newBytes, err := json.Marshal(conf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err)
+ }
+ netConf, err := ConfFromBytes(newBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err)
+ }
+ list.Plugins = append(list.Plugins, netConf)
+ }
+
+ return list, nil
+}
+
+func ConfListFromFile(filename string) (*NetworkConfigList, error) {
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("error reading %s: %w", filename, err)
+ }
+ return ConfListFromBytes(bytes)
+}
+
+func ConfFiles(dir string, extensions []string) ([]string, error) {
+ // In part, adapted from rkt/networking/podenv.go#listFiles
+ files, err := ioutil.ReadDir(dir)
+ switch {
+ case err == nil: // break
+ case os.IsNotExist(err):
+ return nil, nil
+ default:
+ return nil, err
+ }
+
+ confFiles := []string{}
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+ fileExt := filepath.Ext(f.Name())
+ for _, ext := range extensions {
+ if fileExt == ext {
+ confFiles = append(confFiles, filepath.Join(dir, f.Name()))
+ }
+ }
+ }
+ return confFiles, nil
+}
+
+func LoadConf(dir, name string) (*NetworkConfig, error) {
+ files, err := ConfFiles(dir, []string{".conf", ".json"})
+ switch {
+ case err != nil:
+ return nil, err
+ case len(files) == 0:
+ return nil, NoConfigsFoundError{Dir: dir}
+ }
+ sort.Strings(files)
+
+ for _, confFile := range files {
+ conf, err := ConfFromFile(confFile)
+ if err != nil {
+ return nil, err
+ }
+ if conf.Network.Name == name {
+ return conf, nil
+ }
+ }
+ return nil, NotFoundError{dir, name}
+}
+
+func LoadConfList(dir, name string) (*NetworkConfigList, error) {
+ files, err := ConfFiles(dir, []string{".conflist"})
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(files)
+
+ for _, confFile := range files {
+ conf, err := ConfListFromFile(confFile)
+ if err != nil {
+ return nil, err
+ }
+ if conf.Name == name {
+ return conf, nil
+ }
+ }
+
+ // Try to load a network configuration file (instead of a list)
+ // with the same name, then upconvert.
+ singleConf, err := LoadConf(dir, name)
+ if err != nil {
+ // A little extra logic so the error makes sense
+ if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok {
+ // Config lists found but no config files found
+ return nil, NotFoundError{dir, name}
+ }
+
+ return nil, err
+ }
+ return ConfListFromConf(singleConf)
+}
+
+func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) {
+ config := make(map[string]interface{})
+ err := json.Unmarshal(original.Bytes, &config)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshal existing network bytes: %w", err)
+ }
+
+ for key, value := range newValues {
+ if key == "" {
+ return nil, fmt.Errorf("keys cannot be empty")
+ }
+
+ if value == nil {
+ return nil, fmt.Errorf("key '%s' value must not be nil", key)
+ }
+
+ config[key] = value
+ }
+
+ newBytes, err := json.Marshal(config)
+ if err != nil {
+ return nil, err
+ }
+
+ return ConfFromBytes(newBytes)
+}
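+// For example, a caller could override the CNI version declared in a single
+// network config; the value here is illustrative:
+//
+//	newConf, err := libcni.InjectConf(conf, map[string]interface{}{
+//		"cniVersion": "0.4.0",
+//	})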
+
+// ConfListFromConf "upconverts" a network config in to a NetworkConfigList,
+// with the single network as the only entry in the list.
+func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) {
+ // Re-deserialize the config's json, then make a raw map configlist.
+ // This may seem a bit strange, but it's to make the Bytes fields
+ // actually make sense. Otherwise, the generated json is littered with
+ // golang default values.
+
+ rawConfig := make(map[string]interface{})
+ if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil {
+ return nil, err
+ }
+
+ rawConfigList := map[string]interface{}{
+ "name": original.Network.Name,
+ "cniVersion": original.Network.CNIVersion,
+ "plugins": []interface{}{rawConfig},
+ }
+
+ b, err := json.Marshal(rawConfigList)
+ if err != nil {
+ return nil, err
+ }
+ return ConfListFromBytes(b)
+}
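+// For example, a single config such as
+//
+//	{"cniVersion": "0.4.0", "name": "mynet", "type": "bridge"}
+//
+// is upconverted to, roughly:
+//
+//	{"cniVersion": "0.4.0", "name": "mynet", "plugins": [{"cniVersion": "0.4.0", "name": "mynet", "type": "bridge"}]}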
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
new file mode 100644
index 000000000..3cdb4bc8d
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
@@ -0,0 +1,128 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+type CNIArgs interface {
+ // For use with os/exec; i.e., return nil to inherit the
+ // environment from this process
+ // For use in delegation; inherit the environment from this
+ // process and allow overrides
+ AsEnv() []string
+}
+
+type inherited struct{}
+
+var inheritArgsFromEnv inherited
+
+func (*inherited) AsEnv() []string {
+ return nil
+}
+
+func ArgsFromEnv() CNIArgs {
+ return &inheritArgsFromEnv
+}
+
+type Args struct {
+ Command string
+ ContainerID string
+ NetNS string
+ PluginArgs [][2]string
+ PluginArgsStr string
+ IfName string
+ Path string
+}
+
+// Args implements the CNIArgs interface
+var _ CNIArgs = &Args{}
+
+func (args *Args) AsEnv() []string {
+ env := os.Environ()
+ pluginArgsStr := args.PluginArgsStr
+ if pluginArgsStr == "" {
+ pluginArgsStr = stringify(args.PluginArgs)
+ }
+
+ // Duplicated keys resolve in favor of later values, so the custom values
+ // must come last to avoid being overridden by the process environment.
+ env = append(env,
+ "CNI_COMMAND="+args.Command,
+ "CNI_CONTAINERID="+args.ContainerID,
+ "CNI_NETNS="+args.NetNS,
+ "CNI_ARGS="+pluginArgsStr,
+ "CNI_IFNAME="+args.IfName,
+ "CNI_PATH="+args.Path,
+ )
+ return dedupEnv(env)
+}
+
+// taken from rkt/networking/net_plugin.go
+func stringify(pluginArgs [][2]string) string {
+ entries := make([]string, len(pluginArgs))
+
+ for i, kv := range pluginArgs {
+ entries[i] = strings.Join(kv[:], "=")
+ }
+
+ return strings.Join(entries, ";")
+}
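+// For example, stringify([][2]string{{"FOO", "BAR"}, {"IgnoreUnknown", "1"}})
+// yields "FOO=BAR;IgnoreUnknown=1", which is the format passed to plugins in
+// the CNI_ARGS environment variable. The key names are illustrative.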
+
+// DelegateArgs implements the CNIArgs interface
+// used for delegation to inherit from environments
+// and allow some overrides like CNI_COMMAND
+var _ CNIArgs = &DelegateArgs{}
+
+type DelegateArgs struct {
+ Command string
+}
+
+func (d *DelegateArgs) AsEnv() []string {
+ env := os.Environ()
+
+ // The custom values must come last so that they override any existing
+ // process environment variables with the same key.
+ env = append(env,
+ "CNI_COMMAND="+d.Command,
+ )
+ return dedupEnv(env)
+}
+
+// dedupEnv returns a copy of env with any duplicates removed, in favor of later values.
+// Items not of the normal environment "key=value" form are preserved unchanged.
+func dedupEnv(env []string) []string {
+ out := make([]string, 0, len(env))
+ envMap := map[string]string{}
+
+ for _, kv := range env {
+ // find the first "=" in the entry; if there is none, keep the entry as-is
+ eq := strings.Index(kv, "=")
+ if eq < 0 {
+ out = append(out, kv)
+ continue
+ }
+ envMap[kv[:eq]] = kv[eq+1:]
+ }
+
+ for k, v := range envMap {
+ out = append(out, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ return out
+}
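+// For example, dedupEnv([]string{"CNI_COMMAND=VERSION", "CNI_COMMAND=ADD"})
+// returns only "CNI_COMMAND=ADD": the later value wins. The order of the
+// returned slice is not guaranteed, since it is rebuilt from a map.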
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
new file mode 100644
index 000000000..8defe4dd3
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
@@ -0,0 +1,80 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) {
+ if exec == nil {
+ exec = defaultExec
+ }
+
+ paths := filepath.SplitList(os.Getenv("CNI_PATH"))
+ pluginPath, err := exec.FindInPath(delegatePlugin, paths)
+ if err != nil {
+ return "", nil, err
+ }
+
+ return pluginPath, exec, nil
+}
+
+// DelegateAdd calls the given delegate plugin with the CNI ADD action and
+// JSON configuration
+func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
+ pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
+ if err != nil {
+ return nil, err
+ }
+
+ // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD
+ return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec)
+}
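+// For example, a plugin that delegates IPAM to another plugin could call the
+// following (the plugin name is illustrative, and CNI_PATH must be set in the
+// environment for the delegate to be found):
+//
+//	result, err := invoke.DelegateAdd(ctx, "host-local", ipamConfBytes, nil)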
+
+// DelegateCheck calls the given delegate plugin with the CNI CHECK action and
+// JSON configuration
+func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+ pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
+ if err != nil {
+ return err
+ }
+
+ // DelegateCheck will override the original CNI_COMMAND env from process with CHECK
+ return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec)
+}
+
+// DelegateDel calls the given delegate plugin with the CNI DEL action and
+// JSON configuration
+func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+ pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
+ if err != nil {
+ return err
+ }
+
+ // DelegateDel will override the original CNI_COMMAND env from process with DEL
+ return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec)
+}
+
+// return CNIArgs used by delegation
+func delegateArgs(action string) *DelegateArgs {
+ return &DelegateArgs{
+ Command: action,
+ }
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
new file mode 100644
index 000000000..3ad07aa8f
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
@@ -0,0 +1,187 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/types/create"
+ "github.com/containernetworking/cni/pkg/version"
+)
+
+// Exec is an interface that encapsulates all operations that deal with finding
+// and executing a CNI plugin. Tests may provide a fake implementation
+// to avoid writing fake plugins to temporary directories during the test.
+type Exec interface {
+ ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error)
+ FindInPath(plugin string, paths []string) (string, error)
+ Decode(jsonBytes []byte) (version.PluginInfo, error)
+}
+
+// A plugin must return its result in the same version as specified in the
+// netconf; but for backwards-compatibility reasons, if the result version is
+// empty, use the config version (rather than the technically correct 0.1.0).
+// https://github.com/containernetworking/cni/issues/895
+func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
+ versionDecoder := &version.ConfigDecoder{}
+ confVersion, err := versionDecoder.Decode(netconf)
+ if err != nil {
+ return "", nil, err
+ }
+
+ var rawResult map[string]interface{}
+ if err := json.Unmarshal(result, &rawResult); err != nil {
+ return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err)
+ }
+
+ // plugin output of "null" is successfully unmarshalled, but results in a nil
+ // map which causes a panic when the confVersion is assigned below.
+ if rawResult == nil {
+ rawResult = make(map[string]interface{})
+ }
+
+ // Manually decode Result version; we need to know whether its cniVersion
+ // is empty, while built-in decoders (correctly) substitute 0.1.0 for an
+ // empty version per the CNI spec.
+ if resultVerRaw, ok := rawResult["cniVersion"]; ok {
+ resultVer, ok := resultVerRaw.(string)
+ if ok && resultVer != "" {
+ return resultVer, result, nil
+ }
+ }
+
+ // If the cniVersion is not present or empty, assume the result is
+ // the same CNI spec version as the config
+ rawResult["cniVersion"] = confVersion
+ newBytes, err := json.Marshal(rawResult)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err)
+ }
+
+ return confVersion, newBytes, nil
+}
+
+// For example, a testcase could pass an instance of the following fakeExec
+// object to ExecPluginWithResult() to verify the incoming stdin and environment
+// and provide a tailored response:
+//
+//import (
+// "encoding/json"
+// "path"
+// "strings"
+//)
+//
+//type fakeExec struct {
+// version.PluginDecoder
+//}
+//
+//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+// net := &types.NetConf{}
+// err := json.Unmarshal(stdinData, net)
+// if err != nil {
+// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err)
+// }
+// pluginName := path.Base(pluginPath)
+// if pluginName != net.Type {
+// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type)
+// }
+// for _, e := range environ {
+// // Check environment for forced failure request
+// parts := strings.Split(e, "=")
+// if len(parts) > 0 && parts[0] == "FAIL" {
+// return nil, fmt.Errorf("failed to execute plugin %s", pluginName)
+// }
+// }
+// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil
+//}
+//
+//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) {
+// if len(paths) > 0 {
+// return path.Join(paths[0], plugin), nil
+// }
+// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths)
+//}
+
+func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) {
+ if exec == nil {
+ exec = defaultExec
+ }
+
+ stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
+ if err != nil {
+ return nil, err
+ }
+
+ resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return create.Create(resultVersion, fixedBytes)
+}
+
+func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
+ if exec == nil {
+ exec = defaultExec
+ }
+ _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
+ return err
+}
+
+// GetVersionInfo returns the version information available about the plugin.
+// For recent-enough plugins, it uses the information returned by the VERSION
+// command. For older plugins which do not recognize that command, it reports
+// version 0.1.0
+func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) {
+ if exec == nil {
+ exec = defaultExec
+ }
+ args := &Args{
+ Command: "VERSION",
+
+ // set fake values required by plugins built against an older version of skel
+ NetNS: "dummy",
+ IfName: "dummy",
+ Path: "dummy",
+ }
+ stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current()))
+ stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv())
+ if err != nil {
+ if err.Error() == "unknown CNI_COMMAND: VERSION" {
+ return version.PluginSupports("0.1.0"), nil
+ }
+ return nil, err
+ }
+
+ return exec.Decode(stdoutBytes)
+}
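+// For example (the plugin path is illustrative):
+//
+//	info, err := invoke.GetVersionInfo(ctx, "/opt/cni/bin/bridge", nil)
+//	if err == nil {
+//		fmt.Println(info.SupportedVersions())
+//	}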
+
+// DefaultExec is an object that implements the Exec interface which looks
+// for and executes plugins from disk.
+type DefaultExec struct {
+ *RawExec
+ version.PluginDecoder
+}
+
+// DefaultExec implements the Exec interface
+var _ Exec = &DefaultExec{}
+
+var defaultExec = &DefaultExec{
+ RawExec: &RawExec{Stderr: os.Stderr},
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go
new file mode 100644
index 000000000..e62029eb7
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go
@@ -0,0 +1,48 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// FindInPath returns the full path of the plugin by searching in the provided paths
+func FindInPath(plugin string, paths []string) (string, error) {
+ if plugin == "" {
+ return "", fmt.Errorf("no plugin name provided")
+ }
+
+ if strings.ContainsRune(plugin, os.PathSeparator) {
+ return "", fmt.Errorf("invalid plugin name: %s", plugin)
+ }
+
+ if len(paths) == 0 {
+ return "", fmt.Errorf("no paths provided")
+ }
+
+ for _, path := range paths {
+ for _, fe := range ExecutableFileExtensions {
+ fullpath := filepath.Join(path, plugin) + fe
+ if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
+ return fullpath, nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths)
+}
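+// For example (paths are illustrative), on Linux
+//
+//	path, err := invoke.FindInPath("bridge", []string{"/opt/cni/bin"})
+//
+// returns "/opt/cni/bin/bridge" if that file exists and is a regular file.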
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
new file mode 100644
index 000000000..9bcfb4553
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
@@ -0,0 +1,20 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package invoke
+
+// Valid file extensions for plugin executables.
+var ExecutableFileExtensions = []string{""}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go
new file mode 100644
index 000000000..7665125b1
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+// Valid file extensions for plugin executables.
+var ExecutableFileExtensions = []string{".exe", ""}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
new file mode 100644
index 000000000..5ab5cc885
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
@@ -0,0 +1,88 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+type RawExec struct {
+ Stderr io.Writer
+}
+
+func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ c := exec.CommandContext(ctx, pluginPath)
+ c.Env = environ
+ c.Stdin = bytes.NewBuffer(stdinData)
+ c.Stdout = stdout
+ c.Stderr = stderr
+
+ // Retry the command on "text file busy" errors
+ for i := 0; i <= 5; i++ {
+ err := c.Run()
+
+ // Command succeeded
+ if err == nil {
+ break
+ }
+
+ // If the plugin binary is currently being written to disk, wait a
+ // second and try again
+ if strings.Contains(err.Error(), "text file busy") {
+ time.Sleep(time.Second)
+ continue
+ }
+
+ // Return all other errors to the caller
+ return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes())
+ }
+
+ // Copy stderr to caller's buffer in case plugin printed to both
+ // stdout and stderr for some reason. Ignore failures as stderr is
+ // only informational.
+ if e.Stderr != nil && stderr.Len() > 0 {
+ _, _ = stderr.WriteTo(e.Stderr)
+ }
+ return stdout.Bytes(), nil
+}
+
+func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error {
+ emsg := types.Error{}
+ if len(stdout) == 0 {
+ if len(stderr) == 0 {
+ emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err)
+ } else {
+ emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr))
+ }
+ } else if perr := json.Unmarshal(stdout, &emsg); perr != nil {
+ emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr)
+ }
+ return &emsg
+}
+
+func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) {
+ return FindInPath(plugin, paths)
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
new file mode 100644
index 000000000..99b151ff2
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
@@ -0,0 +1,189 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types020
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "os"
+
+ "github.com/containernetworking/cni/pkg/types"
+ convert "github.com/containernetworking/cni/pkg/types/internal"
+)
+
+const ImplementedSpecVersion string = "0.2.0"
+
+var supportedVersions = []string{"", "0.1.0", ImplementedSpecVersion}
+
+// Register converters for all versions less than the implemented spec version
+func init() {
+ convert.RegisterConverter("0.1.0", []string{ImplementedSpecVersion}, convertFrom010)
+ convert.RegisterConverter(ImplementedSpecVersion, []string{"0.1.0"}, convertTo010)
+
+ // Creator
+ convert.RegisterCreator(supportedVersions, NewResult)
+}
+
+// Compatibility types for CNI version 0.1.0 and 0.2.0
+
+// NewResult creates a new Result object from JSON data. The JSON data
+// must be compatible with the CNI versions implemented by this type.
+func NewResult(data []byte) (types.Result, error) {
+ result := &Result{}
+ if err := json.Unmarshal(data, result); err != nil {
+ return nil, err
+ }
+ for _, v := range supportedVersions {
+ if result.CNIVersion == v {
+ if result.CNIVersion == "" {
+ result.CNIVersion = "0.1.0"
+ }
+ return result, nil
+ }
+ }
+ return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q",
+ supportedVersions, result.CNIVersion)
+}
+
+// GetResult converts the given Result object to the ImplementedSpecVersion
+// and returns the concrete type or an error
+func GetResult(r types.Result) (*Result, error) {
+ result020, err := convert.Convert(r, ImplementedSpecVersion)
+ if err != nil {
+ return nil, err
+ }
+ result, ok := result020.(*Result)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert result")
+ }
+ return result, nil
+}
+
+func convertFrom010(from types.Result, toVersion string) (types.Result, error) {
+ if toVersion != "0.2.0" {
+ panic("only converts to version 0.2.0")
+ }
+ fromResult := from.(*Result)
+ return &Result{
+ CNIVersion: ImplementedSpecVersion,
+ IP4: fromResult.IP4.Copy(),
+ IP6: fromResult.IP6.Copy(),
+ DNS: *fromResult.DNS.Copy(),
+ }, nil
+}
+
+func convertTo010(from types.Result, toVersion string) (types.Result, error) {
+ if toVersion != "0.1.0" {
+ panic("only converts to version 0.1.0")
+ }
+ fromResult := from.(*Result)
+ return &Result{
+ CNIVersion: "0.1.0",
+ IP4: fromResult.IP4.Copy(),
+ IP6: fromResult.IP6.Copy(),
+ DNS: *fromResult.DNS.Copy(),
+ }, nil
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+ IP4 *IPConfig `json:"ip4,omitempty"`
+ IP6 *IPConfig `json:"ip6,omitempty"`
+ DNS types.DNS `json:"dns,omitempty"`
+}
+
+func (r *Result) Version() string {
+ return r.CNIVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+ // If the creator of the result did not set the CNIVersion, assume it
+ // should be the highest spec version implemented by this Result
+ if r.CNIVersion == "" {
+ r.CNIVersion = ImplementedSpecVersion
+ }
+ return convert.Convert(r, version)
+}
+
+func (r *Result) Print() error {
+ return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
+ data, err := json.MarshalIndent(r, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = writer.Write(data)
+ return err
+}
+
+// IPConfig contains values necessary to configure an interface
+type IPConfig struct {
+ IP net.IPNet
+ Gateway net.IP
+ Routes []types.Route
+}
+
+func (i *IPConfig) Copy() *IPConfig {
+ if i == nil {
+ return nil
+ }
+
+ var routes []types.Route
+ for _, fromRoute := range i.Routes {
+ routes = append(routes, *fromRoute.Copy())
+ }
+ return &IPConfig{
+ IP: i.IP,
+ Gateway: i.Gateway,
+ Routes: routes,
+ }
+}
+
+// net.IPNet is not JSON (un)marshallable so this duality is needed
+// for our custom IPNet type
+
+// JSON (un)marshallable types
+type ipConfig struct {
+ IP types.IPNet `json:"ip"`
+ Gateway net.IP `json:"gateway,omitempty"`
+ Routes []types.Route `json:"routes,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+ ipc := ipConfig{
+ IP: types.IPNet(c.IP),
+ Gateway: c.Gateway,
+ Routes: c.Routes,
+ }
+
+ return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+ ipc := ipConfig{}
+ if err := json.Unmarshal(data, &ipc); err != nil {
+ return err
+ }
+
+ c.IP = net.IPNet(ipc.IP)
+ c.Gateway = ipc.Gateway
+ c.Routes = ipc.Routes
+ return nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/040/types.go b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go
new file mode 100644
index 000000000..3633b0eaa
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go
@@ -0,0 +1,306 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types040
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "os"
+
+ "github.com/containernetworking/cni/pkg/types"
+ types020 "github.com/containernetworking/cni/pkg/types/020"
+ convert "github.com/containernetworking/cni/pkg/types/internal"
+)
+
+const ImplementedSpecVersion string = "0.4.0"
+
+var supportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion}
+
+// Register converters for all versions less than the implemented spec version
+func init() {
+ // Up-converters
+ convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x)
+ convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x)
+ convert.RegisterConverter("0.3.0", supportedVersions, convertInternal)
+ convert.RegisterConverter("0.3.1", supportedVersions, convertInternal)
+
+ // Down-converters
+ convert.RegisterConverter("0.4.0", []string{"0.3.0", "0.3.1"}, convertInternal)
+ convert.RegisterConverter("0.4.0", []string{"0.1.0", "0.2.0"}, convertTo02x)
+ convert.RegisterConverter("0.3.1", []string{"0.1.0", "0.2.0"}, convertTo02x)
+ convert.RegisterConverter("0.3.0", []string{"0.1.0", "0.2.0"}, convertTo02x)
+
+ // Creator
+ convert.RegisterCreator(supportedVersions, NewResult)
+}
+
+func NewResult(data []byte) (types.Result, error) {
+ result := &Result{}
+ if err := json.Unmarshal(data, result); err != nil {
+ return nil, err
+ }
+ for _, v := range supportedVersions {
+ if result.CNIVersion == v {
+ return result, nil
+ }
+ }
+ return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q",
+ supportedVersions, result.CNIVersion)
+}
+
+func GetResult(r types.Result) (*Result, error) {
+ resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion)
+ if err != nil {
+ return nil, err
+ }
+ result, ok := resultCurrent.(*Result)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert result")
+ }
+ return result, nil
+}
+
+func NewResultFromResult(result types.Result) (*Result, error) {
+ newResult, err := convert.Convert(result, ImplementedSpecVersion)
+ if err != nil {
+ return nil, err
+ }
+ return newResult.(*Result), nil
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+ Interfaces []*Interface `json:"interfaces,omitempty"`
+ IPs []*IPConfig `json:"ips,omitempty"`
+ Routes []*types.Route `json:"routes,omitempty"`
+ DNS types.DNS `json:"dns,omitempty"`
+}
+
+func convert020IPConfig(from *types020.IPConfig, ipVersion string) *IPConfig {
+ return &IPConfig{
+ Version: ipVersion,
+ Address: from.IP,
+ Gateway: from.Gateway,
+ }
+}
+
+func convertFrom02x(from types.Result, toVersion string) (types.Result, error) {
+ fromResult := from.(*types020.Result)
+ toResult := &Result{
+ CNIVersion: toVersion,
+ DNS: *fromResult.DNS.Copy(),
+ Routes: []*types.Route{},
+ }
+ if fromResult.IP4 != nil {
+ toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP4, "4"))
+ for _, fromRoute := range fromResult.IP4.Routes {
+ toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+ }
+ }
+
+ if fromResult.IP6 != nil {
+ toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP6, "6"))
+ for _, fromRoute := range fromResult.IP6.Routes {
+ toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+ }
+ }
+
+ return toResult, nil
+}
+
+func convertInternal(from types.Result, toVersion string) (types.Result, error) {
+ fromResult := from.(*Result)
+ toResult := &Result{
+ CNIVersion: toVersion,
+ DNS: *fromResult.DNS.Copy(),
+ Routes: []*types.Route{},
+ }
+ for _, fromIntf := range fromResult.Interfaces {
+ toResult.Interfaces = append(toResult.Interfaces, fromIntf.Copy())
+ }
+ for _, fromIPC := range fromResult.IPs {
+ toResult.IPs = append(toResult.IPs, fromIPC.Copy())
+ }
+ for _, fromRoute := range fromResult.Routes {
+ toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+ }
+ return toResult, nil
+}
+
+func convertTo02x(from types.Result, toVersion string) (types.Result, error) {
+ fromResult := from.(*Result)
+ toResult := &types020.Result{
+ CNIVersion: toVersion,
+ DNS: *fromResult.DNS.Copy(),
+ }
+
+ for _, fromIP := range fromResult.IPs {
+ // Only convert the first IP address of each version as 0.2.0
+ // and earlier cannot handle multiple IP addresses
+ if fromIP.Version == "4" && toResult.IP4 == nil {
+ toResult.IP4 = &types020.IPConfig{
+ IP: fromIP.Address,
+ Gateway: fromIP.Gateway,
+ }
+ } else if fromIP.Version == "6" && toResult.IP6 == nil {
+ toResult.IP6 = &types020.IPConfig{
+ IP: fromIP.Address,
+ Gateway: fromIP.Gateway,
+ }
+ }
+ if toResult.IP4 != nil && toResult.IP6 != nil {
+ break
+ }
+ }
+
+ for _, fromRoute := range fromResult.Routes {
+ is4 := fromRoute.Dst.IP.To4() != nil
+ if is4 && toResult.IP4 != nil {
+ toResult.IP4.Routes = append(toResult.IP4.Routes, types.Route{
+ Dst: fromRoute.Dst,
+ GW: fromRoute.GW,
+ })
+ } else if !is4 && toResult.IP6 != nil {
+ toResult.IP6.Routes = append(toResult.IP6.Routes, types.Route{
+ Dst: fromRoute.Dst,
+ GW: fromRoute.GW,
+ })
+ }
+ }
+
+ // 0.2.0 and earlier require at least one IP address in the Result
+ if toResult.IP4 == nil && toResult.IP6 == nil {
+ return nil, fmt.Errorf("cannot convert: no valid IP addresses")
+ }
+
+ return toResult, nil
+}
+
+func (r *Result) Version() string {
+ return r.CNIVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+ // If the creator of the result did not set the CNIVersion, assume it
+ // should be the highest spec version implemented by this Result
+ if r.CNIVersion == "" {
+ r.CNIVersion = ImplementedSpecVersion
+ }
+ return convert.Convert(r, version)
+}
+
+func (r *Result) Print() error {
+ return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
+ data, err := json.MarshalIndent(r, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = writer.Write(data)
+ return err
+}
+
+// Interface contains values about the created interfaces
+type Interface struct {
+ Name string `json:"name"`
+ Mac string `json:"mac,omitempty"`
+ Sandbox string `json:"sandbox,omitempty"`
+}
+
+func (i *Interface) String() string {
+ return fmt.Sprintf("%+v", *i)
+}
+
+func (i *Interface) Copy() *Interface {
+ if i == nil {
+ return nil
+ }
+ newIntf := *i
+ return &newIntf
+}
+
+// Int returns a pointer to the int value passed in. Used to
+// set the IPConfig.Interface field.
+func Int(v int) *int {
+ return &v
+}
+
+// IPConfig contains values necessary to configure an IP address on an interface
+type IPConfig struct {
+ // IP version, either "4" or "6"
+ Version string
+ // Index into the Result struct's Interfaces list
+ Interface *int
+ Address net.IPNet
+ Gateway net.IP
+}
+
+func (i *IPConfig) String() string {
+ return fmt.Sprintf("%+v", *i)
+}
+
+func (i *IPConfig) Copy() *IPConfig {
+ if i == nil {
+ return nil
+ }
+
+ ipc := &IPConfig{
+ Version: i.Version,
+ Address: i.Address,
+ Gateway: i.Gateway,
+ }
+ if i.Interface != nil {
+ intf := *i.Interface
+ ipc.Interface = &intf
+ }
+ return ipc
+}
+
+// JSON (un)marshallable types
+type ipConfig struct {
+ Version string `json:"version"`
+ Interface *int `json:"interface,omitempty"`
+ Address types.IPNet `json:"address"`
+ Gateway net.IP `json:"gateway,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+ ipc := ipConfig{
+ Version: c.Version,
+ Interface: c.Interface,
+ Address: types.IPNet(c.Address),
+ Gateway: c.Gateway,
+ }
+
+ return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+ ipc := ipConfig{}
+ if err := json.Unmarshal(data, &ipc); err != nil {
+ return err
+ }
+
+ c.Version = ipc.Version
+ c.Interface = ipc.Interface
+ c.Address = net.IPNet(ipc.Address)
+ c.Gateway = ipc.Gateway
+ return nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go
new file mode 100644
index 000000000..0e1e8b857
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go
@@ -0,0 +1,307 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types100
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "os"
+
+ "github.com/containernetworking/cni/pkg/types"
+ types040 "github.com/containernetworking/cni/pkg/types/040"
+ convert "github.com/containernetworking/cni/pkg/types/internal"
+)
+
+const ImplementedSpecVersion string = "1.0.0"
+
+var supportedVersions = []string{ImplementedSpecVersion}
+
+// Register converters for all versions less than the implemented spec version
+func init() {
+ // Up-converters
+ convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x)
+ convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x)
+ convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x)
+ convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x)
+ convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x)
+
+ // Down-converters
+ convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x)
+ convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x)
+
+ // Creator
+ convert.RegisterCreator(supportedVersions, NewResult)
+}
+
+func NewResult(data []byte) (types.Result, error) {
+ result := &Result{}
+ if err := json.Unmarshal(data, result); err != nil {
+ return nil, err
+ }
+ for _, v := range supportedVersions {
+ if result.CNIVersion == v {
+ return result, nil
+ }
+ }
+ return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q",
+ supportedVersions, result.CNIVersion)
+}
+
+func GetResult(r types.Result) (*Result, error) {
+ resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion)
+ if err != nil {
+ return nil, err
+ }
+ result, ok := resultCurrent.(*Result)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert result")
+ }
+ return result, nil
+}
+
+func NewResultFromResult(result types.Result) (*Result, error) {
+ newResult, err := convert.Convert(result, ImplementedSpecVersion)
+ if err != nil {
+ return nil, err
+ }
+ return newResult.(*Result), nil
+}
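+// For example, a caller holding a generic types.Result (such as a prevResult)
+// can obtain the concrete 1.0.0 representation roughly as follows (a sketch):
+//
+//	res100, err := types100.NewResultFromResult(prevResult)
+//	if err != nil {
+//		return err
+//	}
+//	for _, ip := range res100.IPs {
+//		fmt.Println(ip.Address.String())
+//	}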
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+ Interfaces []*Interface `json:"interfaces,omitempty"`
+ IPs []*IPConfig `json:"ips,omitempty"`
+ Routes []*types.Route `json:"routes,omitempty"`
+ DNS types.DNS `json:"dns,omitempty"`
+}
+
+func convertFrom02x(from types.Result, toVersion string) (types.Result, error) {
+ result040, err := convert.Convert(from, "0.4.0")
+ if err != nil {
+ return nil, err
+ }
+ result100, err := convertFrom04x(result040, ImplementedSpecVersion)
+ if err != nil {
+ return nil, err
+ }
+ return result100, nil
+}
+
+func convertIPConfigFrom040(from *types040.IPConfig) *IPConfig {
+ to := &IPConfig{
+ Address: from.Address,
+ Gateway: from.Gateway,
+ }
+ if from.Interface != nil {
+ intf := *from.Interface
+ to.Interface = &intf
+ }
+ return to
+}
+
+func convertInterfaceFrom040(from *types040.Interface) *Interface {
+ return &Interface{
+ Name: from.Name,
+ Mac: from.Mac,
+ Sandbox: from.Sandbox,
+ }
+}
+
+func convertFrom04x(from types.Result, toVersion string) (types.Result, error) {
+ fromResult := from.(*types040.Result)
+ toResult := &Result{
+ CNIVersion: toVersion,
+ DNS: *fromResult.DNS.Copy(),
+ Routes: []*types.Route{},
+ }
+ for _, fromIntf := range fromResult.Interfaces {
+ toResult.Interfaces = append(toResult.Interfaces, convertInterfaceFrom040(fromIntf))
+ }
+ for _, fromIPC := range fromResult.IPs {
+ toResult.IPs = append(toResult.IPs, convertIPConfigFrom040(fromIPC))
+ }
+ for _, fromRoute := range fromResult.Routes {
+ toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+ }
+ return toResult, nil
+}
+
+func convertIPConfigTo040(from *IPConfig) *types040.IPConfig {
+ version := "6"
+ if from.Address.IP.To4() != nil {
+ version = "4"
+ }
+ to := &types040.IPConfig{
+ Version: version,
+ Address: from.Address,
+ Gateway: from.Gateway,
+ }
+ if from.Interface != nil {
+ intf := *from.Interface
+ to.Interface = &intf
+ }
+ return to
+}
+
+func convertInterfaceTo040(from *Interface) *types040.Interface {
+ return &types040.Interface{
+ Name: from.Name,
+ Mac: from.Mac,
+ Sandbox: from.Sandbox,
+ }
+}
+
+func convertTo04x(from types.Result, toVersion string) (types.Result, error) {
+ fromResult := from.(*Result)
+ toResult := &types040.Result{
+ CNIVersion: toVersion,
+ DNS: *fromResult.DNS.Copy(),
+ Routes: []*types.Route{},
+ }
+ for _, fromIntf := range fromResult.Interfaces {
+ toResult.Interfaces = append(toResult.Interfaces, convertInterfaceTo040(fromIntf))
+ }
+ for _, fromIPC := range fromResult.IPs {
+ toResult.IPs = append(toResult.IPs, convertIPConfigTo040(fromIPC))
+ }
+ for _, fromRoute := range fromResult.Routes {
+ toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+ }
+ return toResult, nil
+}
+
+func convertTo02x(from types.Result, toVersion string) (types.Result, error) {
+ // First convert to 0.4.0
+ result040, err := convertTo04x(from, "0.4.0")
+ if err != nil {
+ return nil, err
+ }
+ result02x, err := convert.Convert(result040, toVersion)
+ if err != nil {
+ return nil, err
+ }
+ return result02x, nil
+}
+
+func (r *Result) Version() string {
+ return r.CNIVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+ // If the creator of the result did not set the CNIVersion, assume it
+ // should be the highest spec version implemented by this Result
+ if r.CNIVersion == "" {
+ r.CNIVersion = ImplementedSpecVersion
+ }
+ return convert.Convert(r, version)
+}
+
+func (r *Result) Print() error {
+ return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
+ data, err := json.MarshalIndent(r, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = writer.Write(data)
+ return err
+}
+
+// Interface contains values about the created interfaces
+type Interface struct {
+ Name string `json:"name"`
+ Mac string `json:"mac,omitempty"`
+ Sandbox string `json:"sandbox,omitempty"`
+}
+
+func (i *Interface) String() string {
+ return fmt.Sprintf("%+v", *i)
+}
+
+func (i *Interface) Copy() *Interface {
+ if i == nil {
+ return nil
+ }
+ newIntf := *i
+ return &newIntf
+}
+
+// Int returns a pointer to the int value passed in. Used to
+// set the IPConfig.Interface field.
+func Int(v int) *int {
+ return &v
+}
+
+// IPConfig contains values necessary to configure an IP address on an interface
+type IPConfig struct {
+ // Index into Result structs Interfaces list
+ Interface *int
+ Address net.IPNet
+ Gateway net.IP
+}
+
+func (i *IPConfig) String() string {
+ return fmt.Sprintf("%+v", *i)
+}
+
+func (i *IPConfig) Copy() *IPConfig {
+ if i == nil {
+ return nil
+ }
+
+ ipc := &IPConfig{
+ Address: i.Address,
+ Gateway: i.Gateway,
+ }
+ if i.Interface != nil {
+ intf := *i.Interface
+ ipc.Interface = &intf
+ }
+ return ipc
+}
+
+// JSON (un)marshallable types
+type ipConfig struct {
+ Interface *int `json:"interface,omitempty"`
+ Address types.IPNet `json:"address"`
+ Gateway net.IP `json:"gateway,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+ ipc := ipConfig{
+ Interface: c.Interface,
+ Address: types.IPNet(c.Address),
+ Gateway: c.Gateway,
+ }
+
+ return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+ ipc := ipConfig{}
+ if err := json.Unmarshal(data, &ipc); err != nil {
+ return err
+ }
+
+ c.Interface = ipc.Interface
+ c.Address = net.IPNet(ipc.Address)
+ c.Gateway = ipc.Gateway
+ return nil
+}
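For orientation only, here is a minimal, illustrative sketch of how a caller might build a 1.0.0 Result from this package and down-convert it. It is not part of the vendored file, and the interface name, netns path, and address are made-up values.

package main

import (
	"net"
	"os"

	types100 "github.com/containernetworking/cni/pkg/types/100"
)

func main() {
	// Made-up example values.
	ip, ipn, _ := net.ParseCIDR("10.1.2.3/24")
	ipn.IP = ip // keep the host address, as types.ParseCIDR does

	res := &types100.Result{
		CNIVersion: types100.ImplementedSpecVersion,
		Interfaces: []*types100.Interface{{Name: "eth0", Sandbox: "/var/run/netns/example"}},
		IPs:        []*types100.IPConfig{{Interface: types100.Int(0), Address: *ipn}},
	}

	// Down-convert to the 0.4.0 representation via the registered
	// converters and print it as JSON.
	old, err := res.GetAsVersion("0.4.0")
	if err != nil {
		panic(err)
	}
	_ = old.PrintTo(os.Stdout)
}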
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go
new file mode 100644
index 000000000..7516f03ef
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go
@@ -0,0 +1,122 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// UnmarshallableBool typedef for builtin bool
+// because builtin type's methods can't be declared
+type UnmarshallableBool bool
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Returns boolean true if the string is "1" or "[Tt]rue"
+// Returns boolean false if the string is "0" or "[Ff]alse"
+func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
+ s := strings.ToLower(string(data))
+ switch s {
+ case "1", "true":
+ *b = true
+ case "0", "false":
+ *b = false
+ default:
+ return fmt.Errorf("boolean unmarshal error: invalid input %s", s)
+ }
+ return nil
+}
+
+// UnmarshallableString typedef for builtin string
+type UnmarshallableString string
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Returns the string
+func (s *UnmarshallableString) UnmarshalText(data []byte) error {
+ *s = UnmarshallableString(data)
+ return nil
+}
+
+// CommonArgs contains the IgnoreUnknown argument
+// and must be embedded by all Arg structs
+type CommonArgs struct {
+ IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"`
+}
+
+// GetKeyField is a helper function to retrieve the value of the named field
+// from a reflect.Value that represents a pointer to a struct
+func GetKeyField(keyString string, v reflect.Value) reflect.Value {
+ return v.Elem().FieldByName(keyString)
+}
+
+// UnmarshalableArgsError is used to indicate error unmarshalling args
+// from the args-string in the form "K=V;K2=V2;..."
+type UnmarshalableArgsError struct {
+ error
+}
+
+// LoadArgs parses args from a string in the form "K=V;K2=V2;..."
+func LoadArgs(args string, container interface{}) error {
+ if args == "" {
+ return nil
+ }
+
+ containerValue := reflect.ValueOf(container)
+
+ pairs := strings.Split(args, ";")
+ unknownArgs := []string{}
+ for _, pair := range pairs {
+ kv := strings.Split(pair, "=")
+ if len(kv) != 2 {
+ return fmt.Errorf("ARGS: invalid pair %q", pair)
+ }
+ keyString := kv[0]
+ valueString := kv[1]
+ keyField := GetKeyField(keyString, containerValue)
+ if !keyField.IsValid() {
+ unknownArgs = append(unknownArgs, pair)
+ continue
+ }
+
+ var keyFieldInterface interface{}
+ switch {
+ case keyField.Kind() == reflect.Ptr:
+ keyField.Set(reflect.New(keyField.Type().Elem()))
+ keyFieldInterface = keyField.Interface()
+ case keyField.CanAddr() && keyField.Addr().CanInterface():
+ keyFieldInterface = keyField.Addr().Interface()
+ default:
+ return UnmarshalableArgsError{fmt.Errorf("field '%s' has no valid interface", keyString)}
+ }
+ u, ok := keyFieldInterface.(encoding.TextUnmarshaler)
+ if !ok {
+ return UnmarshalableArgsError{fmt.Errorf(
+ "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler",
+ keyString, reflect.TypeOf(keyFieldInterface))}
+ }
+ err := u.UnmarshalText([]byte(valueString))
+ if err != nil {
+ return fmt.Errorf("ARGS: error parsing value of pair %q: %w", pair, err)
+ }
+ }
+
+ isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool()
+ if len(unknownArgs) > 0 && !isIgnoreUnknown {
+ return fmt.Errorf("ARGS: unknown args %q", unknownArgs)
+ }
+ return nil
+}
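A rough sketch of the CNI_ARGS contract that LoadArgs implements. This is not part of the vendored file; the exampleArgs struct and the key/value pairs are invented for illustration.

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/types"
)

// exampleArgs embeds CommonArgs so the IgnoreUnknown flag is honoured.
type exampleArgs struct {
	types.CommonArgs
	IP types.UnmarshallableString
}

func main() {
	args := &exampleArgs{}
	// Keys map to exported field names; FOO is unknown but tolerated
	// because IgnoreUnknown=true is part of the string.
	if err := types.LoadArgs("IgnoreUnknown=true;IP=10.0.0.5;FOO=bar", args); err != nil {
		panic(err)
	}
	fmt.Println(args.IP) // 10.0.0.5
}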
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go
new file mode 100644
index 000000000..ed28b33e8
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go
@@ -0,0 +1,56 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package create
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/containernetworking/cni/pkg/types"
+ convert "github.com/containernetworking/cni/pkg/types/internal"
+)
+
+// DecodeVersion returns the CNI version from CNI configuration or result JSON,
+// or an error if the operation could not be performed.
+func DecodeVersion(jsonBytes []byte) (string, error) {
+ var conf struct {
+ CNIVersion string `json:"cniVersion"`
+ }
+ err := json.Unmarshal(jsonBytes, &conf)
+ if err != nil {
+ return "", fmt.Errorf("decoding version from network config: %w", err)
+ }
+ if conf.CNIVersion == "" {
+ return "0.1.0", nil
+ }
+ return conf.CNIVersion, nil
+}
+
+// Create creates a CNI Result using the given JSON with the expected
+// version, or an error if the creation could not be performed
+func Create(version string, bytes []byte) (types.Result, error) {
+ return convert.Create(version, bytes)
+}
+
+// CreateFromBytes creates a CNI Result from the given JSON, automatically
+// detecting the CNI spec version of the result. An error is returned if the
+// operation could not be performed.
+func CreateFromBytes(bytes []byte) (types.Result, error) {
+ version, err := DecodeVersion(bytes)
+ if err != nil {
+ return nil, err
+ }
+ return convert.Create(version, bytes)
+}
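A small sketch, not part of the vendored file, of re-hydrating a result whose spec version is only known from the JSON itself. The JSON payload is invented, and the blank import is needed so the 1.0.0 Result type registers its creator.

package main

import (
	"fmt"

	// Blank import so the 1.0.0 Result type registers itself with the
	// internal creator registry used by this package.
	_ "github.com/containernetworking/cni/pkg/types/100"
	"github.com/containernetworking/cni/pkg/types/create"
)

func main() {
	raw := []byte(`{"cniVersion":"1.0.0","ips":[{"address":"10.1.2.3/24"}]}`)
	res, err := create.CreateFromBytes(raw) // version detected from cniVersion
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Version()) // 1.0.0
}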
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go
new file mode 100644
index 000000000..bdbe4b0a5
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go
@@ -0,0 +1,92 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package convert
+
+import (
+ "fmt"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+// ConvertFn should convert from the given arbitrary Result type into a
+// Result implementing CNI specification version passed in toVersion.
+// The function is guaranteed to be passed a Result type matching the
+// fromVersion it was registered with, and is guaranteed to be
+// passed a toVersion matching one of the toVersions it was registered with.
+type ConvertFn func(from types.Result, toVersion string) (types.Result, error)
+
+type converter struct {
+ // fromVersion is the CNI Result spec version that convertFn accepts
+ fromVersion string
+ // toVersions is a list of versions that convertFn can convert to
+ toVersions []string
+ convertFn ConvertFn
+}
+
+var converters []*converter
+
+func findConverter(fromVersion, toVersion string) *converter {
+ for _, c := range converters {
+ if c.fromVersion == fromVersion {
+ for _, v := range c.toVersions {
+ if v == toVersion {
+ return c
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Convert converts a CNI Result to the requested CNI specification version,
+// or returns an error if the conversion could not be performed or failed
+func Convert(from types.Result, toVersion string) (types.Result, error) {
+ if toVersion == "" {
+ toVersion = "0.1.0"
+ }
+
+ fromVersion := from.Version()
+
+ // Shortcut for same version
+ if fromVersion == toVersion {
+ return from, nil
+ }
+
+ // Otherwise find the right converter
+ c := findConverter(fromVersion, toVersion)
+ if c == nil {
+ return nil, fmt.Errorf("no converter for CNI result version %s to %s",
+ fromVersion, toVersion)
+ }
+ return c.convertFn(from, toVersion)
+}
+
+// RegisterConverter registers a CNI Result converter. SHOULD NOT BE CALLED
+// EXCEPT FROM CNI ITSELF.
+func RegisterConverter(fromVersion string, toVersions []string, convertFn ConvertFn) {
+ // Make sure there is no converter already registered for these
+ // from and to versions
+ for _, v := range toVersions {
+ if findConverter(fromVersion, v) != nil {
+ panic(fmt.Sprintf("converter already registered for %s to %s",
+ fromVersion, v))
+ }
+ }
+ converters = append(converters, &converter{
+ fromVersion: fromVersion,
+ toVersions: toVersions,
+ convertFn: convertFn,
+ })
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go
new file mode 100644
index 000000000..963630912
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go
@@ -0,0 +1,66 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package convert
+
+import (
+ "fmt"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+type ResultFactoryFunc func([]byte) (types.Result, error)
+
+type creator struct {
+ // CNI Result spec versions that createFn can create a Result for
+ versions []string
+ createFn ResultFactoryFunc
+}
+
+var creators []*creator
+
+func findCreator(version string) *creator {
+ for _, c := range creators {
+ for _, v := range c.versions {
+ if v == version {
+ return c
+ }
+ }
+ }
+ return nil
+}
+
+// Create creates a CNI Result using the given JSON, or an error if the creation
+// could not be performed
+func Create(version string, bytes []byte) (types.Result, error) {
+ if c := findCreator(version); c != nil {
+ return c.createFn(bytes)
+ }
+ return nil, fmt.Errorf("unsupported CNI result version %q", version)
+}
+
+// RegisterCreator registers a CNI Result creator. SHOULD NOT BE CALLED
+// EXCEPT FROM CNI ITSELF.
+func RegisterCreator(versions []string, createFn ResultFactoryFunc) {
+ // Make sure there is no creator already registered for these versions
+ for _, v := range versions {
+ if findCreator(v) != nil {
+ panic(fmt.Sprintf("creator already registered for %s", v))
+ }
+ }
+ creators = append(creators, &creator{
+ versions: versions,
+ createFn: createFn,
+ })
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go
new file mode 100644
index 000000000..fba17dfc0
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go
@@ -0,0 +1,234 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "os"
+)
+
+// like net.IPNet but adds JSON marshalling and unmarshalling
+type IPNet net.IPNet
+
+// ParseCIDR takes a string like "10.2.3.1/24" and
+// returns an IPNet with the IP "10.2.3.1" and the /24 mask
+func ParseCIDR(s string) (*net.IPNet, error) {
+ ip, ipn, err := net.ParseCIDR(s)
+ if err != nil {
+ return nil, err
+ }
+
+ ipn.IP = ip
+ return ipn, nil
+}
+
+func (n IPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(&n).String())
+}
+
+func (n *IPNet) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+
+ tmp, err := ParseCIDR(s)
+ if err != nil {
+ return err
+ }
+
+ *n = IPNet(*tmp)
+ return nil
+}
+
+// NetConf describes a network.
+type NetConf struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+
+ Name string `json:"name,omitempty"`
+ Type string `json:"type,omitempty"`
+ Capabilities map[string]bool `json:"capabilities,omitempty"`
+ IPAM IPAM `json:"ipam,omitempty"`
+ DNS DNS `json:"dns"`
+
+ RawPrevResult map[string]interface{} `json:"prevResult,omitempty"`
+ PrevResult Result `json:"-"`
+}
+
+type IPAM struct {
+ Type string `json:"type,omitempty"`
+}
+
+// NetConfList describes an ordered list of networks.
+type NetConfList struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+
+ Name string `json:"name,omitempty"`
+ DisableCheck bool `json:"disableCheck,omitempty"`
+ Plugins []*NetConf `json:"plugins,omitempty"`
+}
+
+// Result is an interface that provides the result of plugin execution
+type Result interface {
+ // The highest CNI specification result version the result supports
+ // without having to convert
+ Version() string
+
+ // Returns the result converted into the requested CNI specification
+ // result version, or an error if conversion failed
+ GetAsVersion(version string) (Result, error)
+
+ // Prints the result in JSON format to stdout
+ Print() error
+
+ // Prints the result in JSON format to provided writer
+ PrintTo(writer io.Writer) error
+}
+
+func PrintResult(result Result, version string) error {
+ newResult, err := result.GetAsVersion(version)
+ if err != nil {
+ return err
+ }
+ return newResult.Print()
+}
+
+// DNS contains values interesting for DNS resolvers
+type DNS struct {
+ Nameservers []string `json:"nameservers,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ Search []string `json:"search,omitempty"`
+ Options []string `json:"options,omitempty"`
+}
+
+func (d *DNS) Copy() *DNS {
+ if d == nil {
+ return nil
+ }
+
+ to := &DNS{Domain: d.Domain}
+ for _, ns := range d.Nameservers {
+ to.Nameservers = append(to.Nameservers, ns)
+ }
+ for _, s := range d.Search {
+ to.Search = append(to.Search, s)
+ }
+ for _, o := range d.Options {
+ to.Options = append(to.Options, o)
+ }
+ return to
+}
+
+type Route struct {
+ Dst net.IPNet
+ GW net.IP
+}
+
+func (r *Route) String() string {
+ return fmt.Sprintf("%+v", *r)
+}
+
+func (r *Route) Copy() *Route {
+ if r == nil {
+ return nil
+ }
+
+ return &Route{
+ Dst: r.Dst,
+ GW: r.GW,
+ }
+}
+
+// Well known error codes
+// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes
+const (
+ ErrUnknown uint = iota // 0
+ ErrIncompatibleCNIVersion // 1
+ ErrUnsupportedField // 2
+ ErrUnknownContainer // 3
+ ErrInvalidEnvironmentVariables // 4
+ ErrIOFailure // 5
+ ErrDecodingFailure // 6
+ ErrInvalidNetworkConfig // 7
+ ErrTryAgainLater uint = 11
+ ErrInternal uint = 999
+)
+
+type Error struct {
+ Code uint `json:"code"`
+ Msg string `json:"msg"`
+ Details string `json:"details,omitempty"`
+}
+
+func NewError(code uint, msg, details string) *Error {
+ return &Error{
+ Code: code,
+ Msg: msg,
+ Details: details,
+ }
+}
+
+func (e *Error) Error() string {
+ details := ""
+ if e.Details != "" {
+ details = fmt.Sprintf("; %v", e.Details)
+ }
+ return fmt.Sprintf("%v%v", e.Msg, details)
+}
+
+func (e *Error) Print() error {
+ return prettyPrint(e)
+}
+
+// net.IPNet is not JSON (un)marshallable so this duality is needed
+// for our custom IPNet type
+
+// JSON (un)marshallable types
+type route struct {
+ Dst IPNet `json:"dst"`
+ GW net.IP `json:"gw,omitempty"`
+}
+
+func (r *Route) UnmarshalJSON(data []byte) error {
+ rt := route{}
+ if err := json.Unmarshal(data, &rt); err != nil {
+ return err
+ }
+
+ r.Dst = net.IPNet(rt.Dst)
+ r.GW = rt.GW
+ return nil
+}
+
+func (r Route) MarshalJSON() ([]byte, error) {
+ rt := route{
+ Dst: IPNet(r.Dst),
+ GW: r.GW,
+ }
+
+ return json.Marshal(rt)
+}
+
+func prettyPrint(obj interface{}) error {
+ data, err := json.MarshalIndent(obj, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = os.Stdout.Write(data)
+ return err
+}
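As an illustration of the error envelope defined above (not part of the vendored file; the message and details are made up), a plugin would typically report a well-known error code on stdout like this:

package main

import "github.com/containernetworking/cni/pkg/types"

func main() {
	// Prints {"code":7,"msg":"missing IPAM section",...} as indented JSON.
	e := types.NewError(types.ErrInvalidNetworkConfig, "missing IPAM section", "example config omitted the ipam block")
	_ = e.Print()
}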
diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
new file mode 100644
index 000000000..b8ec38874
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
@@ -0,0 +1,84 @@
+// Copyright 2019 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "unicode"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+const (
+ // cniValidNameChars is the regexp used to validate valid characters in
+ // containerID and networkName
+ cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]`
+
+	// maxInterfaceNameLength is the maximum length of a valid interface name
+ maxInterfaceNameLength = 15
+)
+
+var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`)
+
+// ValidateContainerID will validate that the supplied containerID is not empty and does not contain invalid characters
+func ValidateContainerID(containerID string) *types.Error {
+
+ if containerID == "" {
+ return types.NewError(types.ErrUnknownContainer, "missing containerID", "")
+ }
+ if !cniReg.MatchString(containerID) {
+ return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID)
+ }
+ return nil
+}
+
+// ValidateNetworkName will validate that the supplied networkName is not empty and does not contain invalid characters
+func ValidateNetworkName(networkName string) *types.Error {
+
+ if networkName == "" {
+ return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "")
+ }
+ if !cniReg.MatchString(networkName) {
+ return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName)
+ }
+ return nil
+}
+
+// ValidateInterfaceName will validate the interface name based on the four rules below
+// 1. The name must not be empty
+// 2. The name must be less than 16 characters
+// 3. The name must not be "." or ".."
+// 4. The name must not contain / or : or any whitespace characters
+// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024
+func ValidateInterfaceName(ifName string) *types.Error {
+ if len(ifName) == 0 {
+ return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "")
+ }
+ if len(ifName) > maxInterfaceNameLength {
+ return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1))
+ }
+ if ifName == "." || ifName == ".." {
+ return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . or ..", "")
+ }
+ for _, r := range bytes.Runes([]byte(ifName)) {
+ if r == '/' || r == ':' || unicode.IsSpace(r) {
+ return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "")
+ }
+ }
+
+ return nil
+}
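A quick, illustrative sketch of the validators above (not part of the vendored file; the interface names are arbitrary test strings):

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/utils"
)

func main() {
	for _, name := range []string{"eth0", "this-name-is-far-too-long", "bad/name"} {
		if err := utils.ValidateInterfaceName(name); err != nil {
			fmt.Printf("%q rejected: %v\n", name, err)
			continue
		}
		fmt.Printf("%q accepted\n", name)
	}
}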
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go
new file mode 100644
index 000000000..808c33b83
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/conf.go
@@ -0,0 +1,26 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "github.com/containernetworking/cni/pkg/types/create"
+)
+
+// ConfigDecoder can decode the CNI version available in network config data
+type ConfigDecoder struct{}
+
+func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) {
+ return create.DecodeVersion(jsonBytes)
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
new file mode 100644
index 000000000..17b22b6b0
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
@@ -0,0 +1,144 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// PluginInfo reports information about CNI versioning
+type PluginInfo interface {
+ // SupportedVersions returns one or more CNI spec versions that the plugin
+ // supports. If input is provided in one of these versions, then the plugin
+ // promises to use the same CNI version in its response
+ SupportedVersions() []string
+
+ // Encode writes this CNI version information as JSON to the given Writer
+ Encode(io.Writer) error
+}
+
+type pluginInfo struct {
+ CNIVersion_ string `json:"cniVersion"`
+ SupportedVersions_ []string `json:"supportedVersions,omitempty"`
+}
+
+// pluginInfo implements the PluginInfo interface
+var _ PluginInfo = &pluginInfo{}
+
+func (p *pluginInfo) Encode(w io.Writer) error {
+ return json.NewEncoder(w).Encode(p)
+}
+
+func (p *pluginInfo) SupportedVersions() []string {
+ return p.SupportedVersions_
+}
+
+// PluginSupports returns a new PluginInfo that will report the given versions
+// as supported
+func PluginSupports(supportedVersions ...string) PluginInfo {
+ if len(supportedVersions) < 1 {
+ panic("programmer error: you must support at least one version")
+ }
+ return &pluginInfo{
+ CNIVersion_: Current(),
+ SupportedVersions_: supportedVersions,
+ }
+}
+
+// PluginDecoder can decode the response returned by a plugin's VERSION command
+type PluginDecoder struct{}
+
+func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) {
+ var info pluginInfo
+ err := json.Unmarshal(jsonBytes, &info)
+ if err != nil {
+ return nil, fmt.Errorf("decoding version info: %w", err)
+ }
+ if info.CNIVersion_ == "" {
+ return nil, fmt.Errorf("decoding version info: missing field cniVersion")
+ }
+ if len(info.SupportedVersions_) == 0 {
+ if info.CNIVersion_ == "0.2.0" {
+ return PluginSupports("0.1.0", "0.2.0"), nil
+ }
+ return nil, fmt.Errorf("decoding version info: missing field supportedVersions")
+ }
+ return &info, nil
+}
+
+// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major,
+// minor, and micro numbers or returns an error
+func ParseVersion(version string) (int, int, int, error) {
+ var major, minor, micro int
+ if version == "" { // special case: no version declared == v0.1.0
+ return 0, 1, 0, nil
+ }
+
+ parts := strings.Split(version, ".")
+ if len(parts) >= 4 {
+ return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version)
+ }
+
+ major, err := strconv.Atoi(parts[0])
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %w", parts[0], err)
+ }
+
+ if len(parts) >= 2 {
+ minor, err = strconv.Atoi(parts[1])
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %w", parts[1], err)
+ }
+ }
+
+ if len(parts) >= 3 {
+ micro, err = strconv.Atoi(parts[2])
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %w", parts[2], err)
+ }
+ }
+
+ return major, minor, micro, nil
+}
+
+// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro
+// numbers, and compares them to determine whether the first version is greater
+// than or equal to the second
+func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) {
+ firstMajor, firstMinor, firstMicro, err := ParseVersion(version)
+ if err != nil {
+ return false, err
+ }
+
+ secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion)
+ if err != nil {
+ return false, err
+ }
+
+ if firstMajor > secondMajor {
+ return true, nil
+ } else if firstMajor == secondMajor {
+ if firstMinor > secondMinor {
+ return true, nil
+ } else if firstMinor == secondMinor && firstMicro >= secondMicro {
+ return true, nil
+ }
+ }
+ return false, nil
+}
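A minimal sketch of the version-comparison helpers above (not part of the vendored file); note that an empty version string is treated as 0.1.0, per ParseVersion:

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	for _, v := range []string{"", "0.4.0", "1.0.0"} {
		ok, err := version.GreaterThanOrEqualTo(v, "0.4.0")
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q >= 0.4.0: %v\n", v, ok) // false, true, true
	}
}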
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go
new file mode 100644
index 000000000..25c3810b2
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go
@@ -0,0 +1,49 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import "fmt"
+
+type ErrorIncompatible struct {
+ Config string
+ Supported []string
+}
+
+func (e *ErrorIncompatible) Details() string {
+ return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported)
+}
+
+func (e *ErrorIncompatible) Error() string {
+ return fmt.Sprintf("incompatible CNI versions: %s", e.Details())
+}
+
+type Reconciler struct{}
+
+func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible {
+ return r.CheckRaw(configVersion, pluginInfo.SupportedVersions())
+}
+
+func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible {
+ for _, supportedVersion := range supportedVersions {
+ if configVersion == supportedVersion {
+ return nil
+ }
+ }
+
+ return &ErrorIncompatible{
+ Config: configVersion,
+ Supported: supportedVersions,
+ }
+}
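For illustration (not part of the vendored file), the Reconciler simply answers whether a config version appears in a plugin's supported list:

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	r := &version.Reconciler{}
	fmt.Println(r.CheckRaw("0.4.0", []string{"0.3.1", "0.4.0"})) // <nil>
	fmt.Println(r.CheckRaw("1.0.0", []string{"0.3.1", "0.4.0"})) // incompatible CNI versions: ...
}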
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go
new file mode 100644
index 000000000..1326f8038
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go
@@ -0,0 +1,89 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/containernetworking/cni/pkg/types"
+ types100 "github.com/containernetworking/cni/pkg/types/100"
+ "github.com/containernetworking/cni/pkg/types/create"
+)
+
+// Current reports the version of the CNI spec implemented by this library
+func Current() string {
+ return types100.ImplementedSpecVersion
+}
+
+// Legacy PluginInfo describes a plugin that is backwards compatible with the
+// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0
+// library ought to work correctly with a plugin that reports support for
+// Legacy versions.
+//
+// Any future CNI spec versions which meet this definition should be added to
+// this list.
+var Legacy = PluginSupports("0.1.0", "0.2.0")
+var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0")
+
+// VersionsStartingFrom returns a list of versions starting from min, inclusive
+func VersionsStartingFrom(min string) PluginInfo {
+ out := []string{}
+ // cheat, just assume ordered
+ ok := false
+ for _, v := range All.SupportedVersions() {
+ if !ok && v == min {
+ ok = true
+ }
+ if ok {
+ out = append(out, v)
+ }
+ }
+ return PluginSupports(out...)
+}
+
+// NewResult finds a Result object matching the requested version (if any) and asks
+// that object to parse the plugin result, returning an error if parsing failed.
+func NewResult(version string, resultBytes []byte) (types.Result, error) {
+ return create.Create(version, resultBytes)
+}
+
+// ParsePrevResult parses a prevResult in a NetConf structure and sets
+// the NetConf's PrevResult member to the parsed Result object.
+func ParsePrevResult(conf *types.NetConf) error {
+ if conf.RawPrevResult == nil {
+ return nil
+ }
+
+ // Prior to 1.0.0, Result types may not marshal a CNIVersion. Since the
+ // result version must match the config version, if the Result's version
+ // is empty, inject the config version.
+ if ver, ok := conf.RawPrevResult["CNIVersion"]; !ok || ver == "" {
+ conf.RawPrevResult["CNIVersion"] = conf.CNIVersion
+ }
+
+ resultBytes, err := json.Marshal(conf.RawPrevResult)
+ if err != nil {
+ return fmt.Errorf("could not serialize prevResult: %w", err)
+ }
+
+ conf.RawPrevResult = nil
+ conf.PrevResult, err = create.Create(conf.CNIVersion, resultBytes)
+ if err != nil {
+ return fmt.Errorf("could not parse prevResult: %w", err)
+ }
+
+ return nil
+}
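A sketch of ParsePrevResult on an invented chained-plugin config (not part of the vendored file); the prevResult below omits its own version, which is exactly the case the injection above handles:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containernetworking/cni/pkg/types"
	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	raw := []byte(`{
		"cniVersion": "1.0.0",
		"name": "example-net",
		"type": "example-plugin",
		"prevResult": {"ips": [{"address": "10.1.2.3/24"}]}
	}`)
	conf := &types.NetConf{}
	if err := json.Unmarshal(raw, conf); err != nil {
		panic(err)
	}
	// Replaces RawPrevResult with a typed PrevResult matching cniVersion.
	if err := version.ParsePrevResult(conf); err != nil {
		panic(err)
	}
	fmt.Println(conf.PrevResult.Version()) // 1.0.0
}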
diff --git a/vendor/github.com/containernetworking/plugins/LICENSE b/vendor/github.com/containernetworking/plugins/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go
new file mode 100644
index 000000000..b4db50b9a
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go
@@ -0,0 +1,68 @@
+// Copyright 2017 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+
+ "github.com/vishvananda/netlink"
+)
+
+const SETTLE_INTERVAL = 50 * time.Millisecond
+
+// SettleAddresses waits for all addresses on a link to leave tentative state.
+// This is particularly useful for ipv6, where all addresses need to do DAD.
+// There is no easy way to wait for this as an event, so just loop until the
+// addresses are no longer tentative.
+// If any addresses are still tentative after timeout seconds, then error.
+func SettleAddresses(ifName string, timeout int) error {
+ link, err := netlink.LinkByName(ifName)
+ if err != nil {
+ return fmt.Errorf("failed to retrieve link: %v", err)
+ }
+
+ deadline := time.Now().Add(time.Duration(timeout) * time.Second)
+ for {
+ addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL)
+ if err != nil {
+ return fmt.Errorf("could not list addresses: %v", err)
+ }
+
+ if len(addrs) == 0 {
+ return nil
+ }
+
+ ok := true
+ for _, addr := range addrs {
+ if addr.Flags&(syscall.IFA_F_TENTATIVE|syscall.IFA_F_DADFAILED) > 0 {
+ ok = false
+ break // Break out of the `range addrs`, not the `for`
+ }
+ }
+
+ if ok {
+ return nil
+ }
+ if time.Now().After(deadline) {
+ return fmt.Errorf("link %s still has tentative addresses after %d seconds",
+ ifName,
+ timeout)
+ }
+
+ time.Sleep(SETTLE_INTERVAL)
+ }
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go b/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go
new file mode 100644
index 000000000..8b380fc74
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go
@@ -0,0 +1,105 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+ "math/big"
+ "net"
+)
+
+// NextIP returns the IP incremented by 1; if the IP is invalid, it returns nil
+func NextIP(ip net.IP) net.IP {
+ normalizedIP := normalizeIP(ip)
+ if normalizedIP == nil {
+ return nil
+ }
+
+ i := ipToInt(normalizedIP)
+ return intToIP(i.Add(i, big.NewInt(1)), len(normalizedIP) == net.IPv6len)
+}
+
+// PrevIP returns the IP decremented by 1; if the IP is invalid, it returns nil
+func PrevIP(ip net.IP) net.IP {
+ normalizedIP := normalizeIP(ip)
+ if normalizedIP == nil {
+ return nil
+ }
+
+ i := ipToInt(normalizedIP)
+ return intToIP(i.Sub(i, big.NewInt(1)), len(normalizedIP) == net.IPv6len)
+}
+
+// Cmp compares two IPs, returning the usual ordering:
+// a < b : -1
+// a == b : 0
+// a > b : 1
+// incomparable : -2
+func Cmp(a, b net.IP) int {
+ normalizedA := normalizeIP(a)
+ normalizedB := normalizeIP(b)
+
+ if len(normalizedA) == len(normalizedB) && len(normalizedA) != 0 {
+ return ipToInt(normalizedA).Cmp(ipToInt(normalizedB))
+ }
+
+ return -2
+}
+
+func ipToInt(ip net.IP) *big.Int {
+ return big.NewInt(0).SetBytes(ip)
+}
+
+func intToIP(i *big.Int, isIPv6 bool) net.IP {
+ intBytes := i.Bytes()
+
+ if len(intBytes) == net.IPv4len || len(intBytes) == net.IPv6len {
+ return intBytes
+ }
+
+ if isIPv6 {
+ return append(make([]byte, net.IPv6len-len(intBytes)), intBytes...)
+ }
+
+ return append(make([]byte, net.IPv4len-len(intBytes)), intBytes...)
+}
+
+// normalizeIP will normalize IP by family,
+// IPv4 : 4-byte form
+// IPv6 : 16-byte form
+// others : nil
+func normalizeIP(ip net.IP) net.IP {
+ if ipTo4 := ip.To4(); ipTo4 != nil {
+ return ipTo4
+ }
+ return ip.To16()
+}
+
+// Network masks off the host portion of the IP; if the IPNet is invalid,
+// it returns nil
+func Network(ipn *net.IPNet) *net.IPNet {
+ if ipn == nil {
+ return nil
+ }
+
+ maskedIP := ipn.IP.Mask(ipn.Mask)
+ if maskedIP == nil {
+ return nil
+ }
+
+ return &net.IPNet{
+ IP: maskedIP,
+ Mask: ipn.Mask,
+ }
+}
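A tiny illustrative sketch of the address arithmetic above (not part of the vendored file):

package main

import (
	"fmt"
	"net"

	"github.com/containernetworking/plugins/pkg/ip"
)

func main() {
	a := net.ParseIP("10.0.0.255")
	b := ip.NextIP(a)            // carries into the next octet: 10.0.1.0
	fmt.Println(b, ip.Cmp(a, b)) // 10.0.1.0 -1
}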
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go
new file mode 100644
index 000000000..4469e1b5d
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go
@@ -0,0 +1,105 @@
+// Copyright 2021 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IP is a CNI maintained type inherited from net.IPNet which can
+// represent a single IP address with or without prefix.
+type IP struct {
+ net.IPNet
+}
+
+// newIP will create an IP with net.IP and net.IPMask
+func newIP(ip net.IP, mask net.IPMask) *IP {
+ return &IP{
+ IPNet: net.IPNet{
+ IP: ip,
+ Mask: mask,
+ },
+ }
+}
+
+// ParseIP will parse string s as an IP, and return it.
+// The string s must be formed like <ip>[/<prefix>].
+// If s is not a valid textual representation of an IP,
+// will return nil.
+func ParseIP(s string) *IP {
+ if strings.ContainsAny(s, "/") {
+ ip, ipNet, err := net.ParseCIDR(s)
+ if err != nil {
+ return nil
+ }
+ return newIP(ip, ipNet.Mask)
+ } else {
+ ip := net.ParseIP(s)
+ if ip == nil {
+ return nil
+ }
+ return newIP(ip, nil)
+ }
+}
+
+// ToIP will return a net.IP in standard form from this IP.
+// If this IP can not be converted to a valid net.IP, will return nil.
+func (i *IP) ToIP() net.IP {
+ switch {
+ case i.IP.To4() != nil:
+ return i.IP.To4()
+ case i.IP.To16() != nil:
+ return i.IP.To16()
+ default:
+ return nil
+ }
+}
+
+// String returns the string form of this IP.
+func (i *IP) String() string {
+ if len(i.Mask) > 0 {
+ return i.IPNet.String()
+ }
+ return i.IP.String()
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String,
+// But when len(ip) is zero, will return an empty slice.
+func (i *IP) MarshalText() ([]byte, error) {
+ if len(i.IP) == 0 {
+ return []byte{}, nil
+ }
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The textual bytes are expected in a form accepted by Parse,
+// But when len(b) is zero, will return an empty IP.
+func (i *IP) UnmarshalText(b []byte) error {
+ if len(b) == 0 {
+ *i = IP{}
+ return nil
+ }
+
+ ip := ParseIP(string(b))
+ if ip == nil {
+ return fmt.Errorf("invalid IP address %s", string(b))
+ }
+ *i = *ip
+ return nil
+}
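An illustrative sketch of ParseIP with and without a prefix (not part of the vendored file; the addresses are documentation examples):

package main

import (
	"fmt"

	"github.com/containernetworking/plugins/pkg/ip"
)

func main() {
	fmt.Println(ip.ParseIP("192.0.2.1/24")) // 192.0.2.1/24, mask kept
	fmt.Println(ip.ParseIP("192.0.2.1"))    // 192.0.2.1, no mask
	fmt.Println(ip.ParseIP("not-an-ip"))    // <nil>
}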
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go
new file mode 100644
index 000000000..0e8b6b691
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go
@@ -0,0 +1,62 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+ "bytes"
+ "os"
+
+ current "github.com/containernetworking/cni/pkg/types/100"
+)
+
+func EnableIP4Forward() error {
+ return echo1("/proc/sys/net/ipv4/ip_forward")
+}
+
+func EnableIP6Forward() error {
+ return echo1("/proc/sys/net/ipv6/conf/all/forwarding")
+}
+
+// EnableForward will enable forwarding for all configured
+// address families
+func EnableForward(ips []*current.IPConfig) error {
+ v4 := false
+ v6 := false
+
+ for _, ip := range ips {
+ isV4 := ip.Address.IP.To4() != nil
+ if isV4 && !v4 {
+ if err := EnableIP4Forward(); err != nil {
+ return err
+ }
+ v4 = true
+ } else if !isV4 && !v6 {
+ if err := EnableIP6Forward(); err != nil {
+ return err
+ }
+ v6 = true
+ }
+ }
+ return nil
+}
+
+func echo1(f string) error {
+ if content, err := os.ReadFile(f); err == nil {
+ if bytes.Equal(bytes.TrimSpace(content), []byte("1")) {
+ return nil
+ }
+ }
+ return os.WriteFile(f, []byte("1"), 0644)
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go
new file mode 100644
index 000000000..cc640a605
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go
@@ -0,0 +1,126 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/coreos/go-iptables/iptables"
+)
+
+// SetupIPMasq installs iptables rules to masquerade traffic
+// coming from the IP of ipn and going outside of ipn
+func SetupIPMasq(ipn *net.IPNet, chain string, comment string) error {
+ isV6 := ipn.IP.To4() == nil
+
+ var ipt *iptables.IPTables
+ var err error
+ var multicastNet string
+
+ if isV6 {
+ ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6)
+ multicastNet = "ff00::/8"
+ } else {
+ ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4)
+ multicastNet = "224.0.0.0/4"
+ }
+ if err != nil {
+ return fmt.Errorf("failed to locate iptables: %v", err)
+ }
+
+ // Create chain if doesn't exist
+ exists := false
+ chains, err := ipt.ListChains("nat")
+ if err != nil {
+ return fmt.Errorf("failed to list chains: %v", err)
+ }
+ for _, ch := range chains {
+ if ch == chain {
+ exists = true
+ break
+ }
+ }
+ if !exists {
+ if err = ipt.NewChain("nat", chain); err != nil {
+ return err
+ }
+ }
+
+ // Packets to this network should not be touched
+ if err := ipt.AppendUnique("nat", chain, "-d", ipn.String(), "-j", "ACCEPT", "-m", "comment", "--comment", comment); err != nil {
+ return err
+ }
+
+ // Don't masquerade multicast - pods should be able to talk to other pods
+ // on the local network via multicast.
+ if err := ipt.AppendUnique("nat", chain, "!", "-d", multicastNet, "-j", "MASQUERADE", "-m", "comment", "--comment", comment); err != nil {
+ return err
+ }
+
+ // Packets from the specific IP of this network will hit the chain
+ return ipt.AppendUnique("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment)
+}
+
+// TeardownIPMasq undoes the effects of SetupIPMasq
+func TeardownIPMasq(ipn *net.IPNet, chain string, comment string) error {
+ isV6 := ipn.IP.To4() == nil
+
+ var ipt *iptables.IPTables
+ var err error
+
+ if isV6 {
+ ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6)
+ } else {
+ ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4)
+ }
+ if err != nil {
+ return fmt.Errorf("failed to locate iptables: %v", err)
+ }
+
+ err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment)
+ if err != nil && !isNotExist(err) {
+ return err
+ }
+
+ // for backward compatibility
+ err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.String(), "-j", chain, "-m", "comment", "--comment", comment)
+ if err != nil && !isNotExist(err) {
+ return err
+ }
+
+ err = ipt.ClearChain("nat", chain)
+ if err != nil && !isNotExist(err) {
+ return err
+ }
+
+ err = ipt.DeleteChain("nat", chain)
+ if err != nil && !isNotExist(err) {
+ return err
+ }
+
+ return nil
+}
+
+// isNotExist returns true if the error is from iptables indicating
+// that the target does not exist.
+func isNotExist(err error) bool {
+ e, ok := err.(*iptables.Error)
+ if !ok {
+ return false
+ }
+ return e.IsNotExist()
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go
new file mode 100644
index 000000000..91f931b57
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go
@@ -0,0 +1,261 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+
+ "github.com/safchain/ethtool"
+ "github.com/vishvananda/netlink"
+
+ "github.com/containernetworking/plugins/pkg/ns"
+ "github.com/containernetworking/plugins/pkg/utils/sysctl"
+)
+
+var (
+ ErrLinkNotFound = errors.New("link not found")
+)
+
+// makeVethPair is called from within the container's network namespace
+func makeVethPair(name, peer string, mtu int, mac string, hostNS ns.NetNS) (netlink.Link, error) {
+ veth := &netlink.Veth{
+ LinkAttrs: netlink.LinkAttrs{
+ Name: name,
+ MTU: mtu,
+ },
+ PeerName: peer,
+ PeerNamespace: netlink.NsFd(int(hostNS.Fd())),
+ }
+ if mac != "" {
+ m, err := net.ParseMAC(mac)
+ if err != nil {
+ return nil, err
+ }
+ veth.LinkAttrs.HardwareAddr = m
+ }
+ if err := netlink.LinkAdd(veth); err != nil {
+ return nil, err
+ }
+ // Re-fetch the container link to get its creation-time parameters, e.g. index and mac
+ veth2, err := netlink.LinkByName(name)
+ if err != nil {
+ netlink.LinkDel(veth) // try and clean up the link if possible.
+ return nil, err
+ }
+
+ return veth2, nil
+}
+
+func peerExists(name string) bool {
+ if _, err := netlink.LinkByName(name); err != nil {
+ return false
+ }
+ return true
+}
+
+func makeVeth(name, vethPeerName string, mtu int, mac string, hostNS ns.NetNS) (peerName string, veth netlink.Link, err error) {
+ for i := 0; i < 10; i++ {
+ if vethPeerName != "" {
+ peerName = vethPeerName
+ } else {
+ peerName, err = RandomVethName()
+ if err != nil {
+ return
+ }
+ }
+
+ veth, err = makeVethPair(name, peerName, mtu, mac, hostNS)
+ switch {
+ case err == nil:
+ return
+
+ case os.IsExist(err):
+ if peerExists(peerName) && vethPeerName == "" {
+ continue
+ }
+ err = fmt.Errorf("container veth name provided (%v) already exists", name)
+ return
+
+ default:
+ err = fmt.Errorf("failed to make veth pair: %v", err)
+ return
+ }
+ }
+
+ // should really never be hit
+ err = fmt.Errorf("failed to find a unique veth name")
+ return
+}
+
+// RandomVethName returns the string "veth" followed by a random suffix (generated from entropy)
+func RandomVethName() (string, error) {
+ entropy := make([]byte, 4)
+ _, err := rand.Read(entropy)
+ if err != nil {
+ return "", fmt.Errorf("failed to generate random veth name: %v", err)
+ }
+
+ // NetworkManager (recent versions) will ignore veth devices that start with "veth"
+ return fmt.Sprintf("veth%x", entropy), nil
+}
+
+func RenameLink(curName, newName string) error {
+ link, err := netlink.LinkByName(curName)
+ if err == nil {
+ err = netlink.LinkSetName(link, newName)
+ }
+ return err
+}
+
+func ifaceFromNetlinkLink(l netlink.Link) net.Interface {
+ a := l.Attrs()
+ return net.Interface{
+ Index: a.Index,
+ MTU: a.MTU,
+ Name: a.Name,
+ HardwareAddr: a.HardwareAddr,
+ Flags: a.Flags,
+ }
+}
+
+// SetupVethWithName sets up a pair of virtual ethernet devices.
+// Call SetupVethWithName from inside the container netns. It will create both veth
+// devices and move the host-side veth into the provided hostNS namespace.
+// hostVethName: if hostVethName is not specified, a random name is used for the host-side veth.
+// On success, SetupVethWithName returns (hostVeth, containerVeth, nil)
+func SetupVethWithName(contVethName, hostVethName string, mtu int, contVethMac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) {
+ hostVethName, contVeth, err := makeVeth(contVethName, hostVethName, mtu, contVethMac, hostNS)
+ if err != nil {
+ return net.Interface{}, net.Interface{}, err
+ }
+
+ var hostVeth netlink.Link
+ err = hostNS.Do(func(_ ns.NetNS) error {
+ hostVeth, err = netlink.LinkByName(hostVethName)
+ if err != nil {
+ return fmt.Errorf("failed to lookup %q in %q: %v", hostVethName, hostNS.Path(), err)
+ }
+
+ if err = netlink.LinkSetUp(hostVeth); err != nil {
+ return fmt.Errorf("failed to set %q up: %v", hostVethName, err)
+ }
+
+ // we want to own the routes for this interface
+ _, _ = sysctl.Sysctl(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", hostVethName), "0")
+ return nil
+ })
+ if err != nil {
+ return net.Interface{}, net.Interface{}, err
+ }
+ return ifaceFromNetlinkLink(hostVeth), ifaceFromNetlinkLink(contVeth), nil
+}
+
+// SetupVeth sets up a pair of virtual ethernet devices.
+// Call SetupVeth from inside the container netns. It will create both veth
+// devices and move the host-side veth into the provided hostNS namespace.
+// On success, SetupVeth returns (hostVeth, containerVeth, nil)
+func SetupVeth(contVethName string, mtu int, contVethMac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) {
+ return SetupVethWithName(contVethName, "", mtu, contVethMac, hostNS)
+}
+
+// DelLinkByName removes an interface link.
+func DelLinkByName(ifName string) error {
+ iface, err := netlink.LinkByName(ifName)
+ if err != nil {
+ if _, ok := err.(netlink.LinkNotFoundError); ok {
+ return ErrLinkNotFound
+ }
+ return fmt.Errorf("failed to lookup %q: %v", ifName, err)
+ }
+
+ if err = netlink.LinkDel(iface); err != nil {
+ return fmt.Errorf("failed to delete %q: %v", ifName, err)
+ }
+
+ return nil
+}
+
+// DelLinkByNameAddr removes an interface and returns its addresses
+func DelLinkByNameAddr(ifName string) ([]*net.IPNet, error) {
+ iface, err := netlink.LinkByName(ifName)
+ if err != nil {
+ if _, ok := err.(netlink.LinkNotFoundError); ok {
+ return nil, ErrLinkNotFound
+ }
+ return nil, fmt.Errorf("failed to lookup %q: %v", ifName, err)
+ }
+
+ addrs, err := netlink.AddrList(iface, netlink.FAMILY_ALL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get IP addresses for %q: %v", ifName, err)
+ }
+
+ if err = netlink.LinkDel(iface); err != nil {
+ return nil, fmt.Errorf("failed to delete %q: %v", ifName, err)
+ }
+
+ out := []*net.IPNet{}
+ for _, addr := range addrs {
+ if addr.IP.IsGlobalUnicast() {
+ out = append(out, addr.IPNet)
+ }
+ }
+
+ return out, nil
+}
+
+// GetVethPeerIfindex returns the veth link object, the peer ifindex of the
+// veth, or an error. This peer ifindex will only be valid in the peer's
+// network namespace.
+func GetVethPeerIfindex(ifName string) (netlink.Link, int, error) {
+ link, err := netlink.LinkByName(ifName)
+ if err != nil {
+ return nil, -1, fmt.Errorf("could not look up %q: %v", ifName, err)
+ }
+ if _, ok := link.(*netlink.Veth); !ok {
+ return nil, -1, fmt.Errorf("interface %q was not a veth interface", ifName)
+ }
+
+ // veth supports IFLA_LINK (what vishvananda/netlink calls ParentIndex)
+ // on 4.1 and higher kernels
+ peerIndex := link.Attrs().ParentIndex
+ if peerIndex <= 0 {
+ // Fall back to ethtool for 4.0 and earlier kernels
+ e, err := ethtool.NewEthtool()
+ if err != nil {
+ return nil, -1, fmt.Errorf("failed to initialize ethtool: %v", err)
+ }
+ defer e.Close()
+
+ stats, err := e.Stats(link.Attrs().Name)
+ if err != nil {
+ return nil, -1, fmt.Errorf("failed to request ethtool stats: %v", err)
+ }
+ n, ok := stats["peer_ifindex"]
+ if !ok {
+ return nil, -1, fmt.Errorf("failed to find 'peer_ifindex' in ethtool stats")
+ }
+ if n > 32767 || n == 0 {
+ return nil, -1, fmt.Errorf("invalid 'peer_ifindex' %d", n)
+ }
+ peerIndex = int(n)
+ }
+
+ return link, peerIndex, nil
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go
new file mode 100644
index 000000000..e92b6c53e
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go
@@ -0,0 +1,52 @@
+// Copyright 2015-2017 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+ "net"
+
+ "github.com/vishvananda/netlink"
+)
+
+// AddRoute adds a universally-scoped route to a device.
+func AddRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error {
+ return netlink.RouteAdd(&netlink.Route{
+ LinkIndex: dev.Attrs().Index,
+ Scope: netlink.SCOPE_UNIVERSE,
+ Dst: ipn,
+ Gw: gw,
+ })
+}
+
+// AddHostRoute adds a host-scoped route to a device.
+func AddHostRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error {
+ return netlink.RouteAdd(&netlink.Route{
+ LinkIndex: dev.Attrs().Index,
+ Scope: netlink.SCOPE_HOST,
+ Dst: ipn,
+ Gw: gw,
+ })
+}
+
+// AddDefaultRoute sets the default route on the given gateway.
+func AddDefaultRoute(gw net.IP, dev netlink.Link) error {
+ var defNet *net.IPNet
+ if gw.To4() != nil {
+ _, defNet, _ = net.ParseCIDR("0.0.0.0/0")
+ } else {
+ _, defNet, _ = net.ParseCIDR("::/0")
+ }
+ return AddRoute(defNet, gw, dev)
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go
new file mode 100644
index 000000000..943117e18
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go
@@ -0,0 +1,116 @@
+//go:build linux
+// +build linux
+
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/containernetworking/cni/pkg/types"
+ current "github.com/containernetworking/cni/pkg/types/100"
+ "github.com/vishvananda/netlink"
+)
+
+func ValidateExpectedInterfaceIPs(ifName string, resultIPs []*current.IPConfig) error {
+
+ // Ensure ips
+ for _, ips := range resultIPs {
+ ourAddr := netlink.Addr{IPNet: &ips.Address}
+ match := false
+
+ link, err := netlink.LinkByName(ifName)
+ if err != nil {
+ return fmt.Errorf("Cannot find container link %v", ifName)
+ }
+
+ addrList, err := netlink.AddrList(link, netlink.FAMILY_ALL)
+ if err != nil {
+ return fmt.Errorf("Cannot obtain List of IP Addresses")
+ }
+
+ for _, addr := range addrList {
+ if addr.Equal(ourAddr) {
+ match = true
+ break
+ }
+ }
+ if !match {
+ return fmt.Errorf("Failed to match addr %v on interface %v", ourAddr, ifName)
+ }
+
+ // Convert the host/prefixlen to just prefix for route lookup.
+ _, ourPrefix, err := net.ParseCIDR(ourAddr.String())
+
+ findGwy := &netlink.Route{Dst: ourPrefix}
+ routeFilter := netlink.RT_FILTER_DST
+
+ family := netlink.FAMILY_V6
+ if ips.Address.IP.To4() != nil {
+ family = netlink.FAMILY_V4
+ }
+
+ gwy, err := netlink.RouteListFiltered(family, findGwy, routeFilter)
+ if err != nil {
+ return fmt.Errorf("Error %v trying to find Gateway %v for interface %v", err, ips.Gateway, ifName)
+ }
+ if gwy == nil {
+ return fmt.Errorf("Failed to find Gateway %v for interface %v", ips.Gateway, ifName)
+ }
+ }
+
+ return nil
+}
+
+func ValidateExpectedRoute(resultRoutes []*types.Route) error {
+
+ // Ensure that each static route in prevResults is found in the routing table
+ for _, route := range resultRoutes {
+ find := &netlink.Route{Dst: &route.Dst, Gw: route.GW}
+ routeFilter := netlink.RT_FILTER_DST | netlink.RT_FILTER_GW
+ var family int
+
+ switch {
+ case route.Dst.IP.To4() != nil:
+ family = netlink.FAMILY_V4
+ // Default route needs Dst set to nil
+ if route.Dst.String() == "0.0.0.0/0" {
+ find = &netlink.Route{Dst: nil, Gw: route.GW}
+ routeFilter = netlink.RT_FILTER_DST
+ }
+ case len(route.Dst.IP) == net.IPv6len:
+ family = netlink.FAMILY_V6
+ // Default route needs Dst set to nil
+ if route.Dst.String() == "::/0" {
+ find = &netlink.Route{Dst: nil, Gw: route.GW}
+ routeFilter = netlink.RT_FILTER_DST
+ }
+ default:
+ return fmt.Errorf("Invalid static route found %v", route)
+ }
+
+ wasFound, err := netlink.RouteListFiltered(family, find, routeFilter)
+ if err != nil {
+ return fmt.Errorf("Expected Route %v not found; route table lookup error %v", route, err)
+ }
+ if wasFound == nil {
+ return fmt.Errorf("Expected Route %v not found in routing table", route)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
new file mode 100644
index 000000000..1e265c7a0
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
@@ -0,0 +1,41 @@
+### Namespaces, Threads, and Go
+On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code.
+
+### Namespace Switching
+Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads.
+
+Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in.
+
+For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly.
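+
+For illustration, here is a minimal sketch of that manual `LockOSThread()` pattern using this package's `GetCurrentNS()`, `GetNS()` and `Set()` (the namespace path and the helper name `pinAndSwitch` are purely illustrative; `ns.Do()`, described below, is the recommended approach):
+
+```go
+// pinAndSwitch runs fn inside the network namespace at nsPath while keeping
+// the goroutine pinned to a single OS thread, then switches back.
+func pinAndSwitch(nsPath string, fn func() error) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	origNS, err := ns.GetCurrentNS()
+	if err != nil {
+		return err
+	}
+	defer origNS.Close()
+
+	targetNS, err := ns.GetNS(nsPath)
+	if err != nil {
+		return err
+	}
+	defer targetNS.Close()
+
+	if err := targetNS.Set(); err != nil {
+		return err
+	}
+	// Switch back before the thread is unlocked; the error is ignored here
+	// only to keep the sketch short.
+	defer func() { _ = origNS.Set() }()
+
+	return fn()
+}
+```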
+
+### Do() The Recommended Thing
+The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example:
+
+```go
+err = targetNs.Do(func(hostNs ns.NetNS) error {
+ dummy := &netlink.Dummy{
+ LinkAttrs: netlink.LinkAttrs{
+ Name: "dummy0",
+ },
+ }
+ return netlink.LinkAdd(dummy)
+})
+```
+
+Note that this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. Goroutines spawned from within `ns.Do()` do not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()`, which helps to minimize the problem.
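+
+A similar sketch using the package's `WithNetNSPath()` helper, which combines `GetNS()` and `Do()` for the common case where only a namespace path is known (the path and the `netlink.LinkList()` call are illustrative):
+
+```go
+err := ns.WithNetNSPath("/var/run/netns/example", func(hostNS ns.NetNS) error {
+	// Code in this closure runs in the target namespace; goroutines
+	// spawned from here are not guaranteed to stay in it.
+	links, err := netlink.LinkList()
+	if err != nil {
+		return err
+	}
+	fmt.Printf("found %d links in the target namespace\n", len(links))
+	return nil
+})
+```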
+
+When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of Go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled.
+
+In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use Go 1.10 or greater.
+
+
+### Creating network namespaces
+Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration.
+
+
+### Further Reading
+ - https://github.com/golang/go/wiki/LockOSThread
+ - http://morsmachine.dk/go-scheduler
+ - https://github.com/containernetworking/cni/issues/262
+ - https://golang.org/pkg/runtime/
+ - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
new file mode 100644
index 000000000..f260f2813
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
@@ -0,0 +1,234 @@
+// Copyright 2015-2017 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ns
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "sync"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// Returns an object representing the current OS thread's network namespace
+func GetCurrentNS() (NetNS, error) {
+ // Lock the thread in case other goroutine executes in it and changes its
+ // network namespace after getCurrentThreadNetNSPath(), otherwise it might
+ // return an unexpected network namespace.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ return GetNS(getCurrentThreadNetNSPath())
+}
+
+func getCurrentThreadNetNSPath() string {
+ // /proc/self/ns/net returns the namespace of the main thread, not
+ // of whatever thread this goroutine is running on. Make sure we
+ // use the thread's net namespace since the thread is switching around
+ return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
+}
+
+func (ns *netNS) Close() error {
+ if err := ns.errorIfClosed(); err != nil {
+ return err
+ }
+
+ if err := ns.file.Close(); err != nil {
+ return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err)
+ }
+ ns.closed = true
+
+ return nil
+}
+
+func (ns *netNS) Set() error {
+ if err := ns.errorIfClosed(); err != nil {
+ return err
+ }
+
+ if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil {
+ return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err)
+ }
+
+ return nil
+}
+
+type NetNS interface {
+ // Executes the passed closure in this object's network namespace,
+ // attempting to restore the original namespace before returning.
+ // However, since each OS thread can have a different network namespace,
+ // and Go's thread scheduling is highly variable, callers cannot
+ // guarantee any specific namespace is set unless operations that
+ // require that namespace are wrapped with Do(). Also, no code called
+ // from Do() should call runtime.UnlockOSThread(), or the risk
+ // of executing code in an incorrect namespace will be greater. See
+ // https://github.com/golang/go/wiki/LockOSThread for further details.
+ Do(toRun func(NetNS) error) error
+
+ // Sets the current network namespace to this object's network namespace.
+ // Note that since Go's thread scheduling is highly variable, callers
+ // cannot guarantee the requested namespace will be the current namespace
+ // after this function is called; to ensure this wrap operations that
+ // require the namespace with Do() instead.
+ Set() error
+
+ // Returns the filesystem path representing this object's network namespace
+ Path() string
+
+ // Returns a file descriptor representing this object's network namespace
+ Fd() uintptr
+
+ // Cleans up this instance of the network namespace; if this instance
+ // is the last user the namespace will be destroyed
+ Close() error
+}
+
+type netNS struct {
+ file *os.File
+ closed bool
+}
+
+// netNS implements the NetNS interface
+var _ NetNS = &netNS{}
+
+const (
+ // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h
+ NSFS_MAGIC = unix.NSFS_MAGIC
+ PROCFS_MAGIC = unix.PROC_SUPER_MAGIC
+)
+
+type NSPathNotExistErr struct{ msg string }
+
+func (e NSPathNotExistErr) Error() string { return e.msg }
+
+type NSPathNotNSErr struct{ msg string }
+
+func (e NSPathNotNSErr) Error() string { return e.msg }
+
+func IsNSorErr(nspath string) error {
+ stat := syscall.Statfs_t{}
+ if err := syscall.Statfs(nspath, &stat); err != nil {
+ if os.IsNotExist(err) {
+ err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)}
+ } else {
+ err = fmt.Errorf("failed to Statfs %q: %v", nspath, err)
+ }
+ return err
+ }
+
+ switch stat.Type {
+ case PROCFS_MAGIC, NSFS_MAGIC:
+ return nil
+ default:
+ return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)}
+ }
+}
+
+// Returns an object representing the namespace referred to by @path
+func GetNS(nspath string) (NetNS, error) {
+ err := IsNSorErr(nspath)
+ if err != nil {
+ return nil, err
+ }
+
+ fd, err := os.Open(nspath)
+ if err != nil {
+ return nil, err
+ }
+
+ return &netNS{file: fd}, nil
+}
+
+func (ns *netNS) Path() string {
+ return ns.file.Name()
+}
+
+func (ns *netNS) Fd() uintptr {
+ return ns.file.Fd()
+}
+
+func (ns *netNS) errorIfClosed() error {
+ if ns.closed {
+ return fmt.Errorf("%q has already been closed", ns.file.Name())
+ }
+ return nil
+}
+
+func (ns *netNS) Do(toRun func(NetNS) error) error {
+ if err := ns.errorIfClosed(); err != nil {
+ return err
+ }
+
+ containedCall := func(hostNS NetNS) error {
+ threadNS, err := GetCurrentNS()
+ if err != nil {
+ return fmt.Errorf("failed to open current netns: %v", err)
+ }
+ defer threadNS.Close()
+
+ // switch to target namespace
+ if err = ns.Set(); err != nil {
+ return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
+ }
+ defer func() {
+ err := threadNS.Set() // switch back
+ if err == nil {
+ // Unlock the current thread only when we successfully switched back
+ // to the original namespace; otherwise leave the thread locked which
+ // will force the runtime to scrap the current thread, that is maybe
+ // not as optimal but at least always safe to do.
+ runtime.UnlockOSThread()
+ }
+ }()
+
+ return toRun(hostNS)
+ }
+
+ // save a handle to current network namespace
+ hostNS, err := GetCurrentNS()
+ if err != nil {
+ return fmt.Errorf("Failed to open current namespace: %v", err)
+ }
+ defer hostNS.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ // Start the callback in a new green thread so that if we later fail
+ // to switch the namespace back to the original one, we can safely
+ // leave the thread locked to die without a risk of the current thread
+ // left lingering with incorrect namespace.
+ var innerError error
+ go func() {
+ defer wg.Done()
+ runtime.LockOSThread()
+ innerError = containedCall(hostNS)
+ }()
+ wg.Wait()
+
+ return innerError
+}
+
+// WithNetNSPath executes the passed closure under the given network
+// namespace, restoring the original namespace afterwards.
+func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
+ ns, err := GetNS(nspath)
+ if err != nil {
+ return err
+ }
+ defer ns.Close()
+ return ns.Do(toRun)
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go
new file mode 100644
index 000000000..469e9be9e
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go
@@ -0,0 +1,78 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sysctl
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// Sysctl provides a method to set/get values under /proc/sys on Linux systems,
+// the interface that replaced the sysctl syscall for reading and setting kernel variables.
+// If the optional `params` argument contains exactly one string value, this function
+// writes that value into the corresponding sysctl variable.
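+//
+// Example (illustrative):
+//
+//	val, err := Sysctl("net/ipv4/ip_forward")      // read the current value
+//	val, err := Sysctl("net/ipv4/ip_forward", "1") // write "1", then read it back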
+func Sysctl(name string, params ...string) (string, error) {
+ if len(params) > 1 {
+ return "", fmt.Errorf("unexpected additional parameters")
+ } else if len(params) == 1 {
+ return setSysctl(name, params[0])
+ }
+ return getSysctl(name)
+}
+
+func getSysctl(name string) (string, error) {
+ fullName := filepath.Join("/proc/sys", toNormalName(name))
+ data, err := os.ReadFile(fullName)
+ if err != nil {
+ return "", err
+ }
+
+ return string(data[:len(data)-1]), nil
+}
+
+func setSysctl(name, value string) (string, error) {
+ fullName := filepath.Join("/proc/sys", toNormalName(name))
+ if err := os.WriteFile(fullName, []byte(value), 0644); err != nil {
+ return "", err
+ }
+
+ return getSysctl(name)
+}
+
+// Normalize names by using slash as separator
+// Sysctl names can use dots or slashes as separator:
+// - if dots are used, dots and slashes are interchanged.
+// - if slashes are used, slashes and dots are left intact.
+// Separator in use is determined by first occurrence.
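+// For example: "net.ipv4.ip_forward" becomes "net/ipv4/ip_forward",
+// while "net/ipv4/ip_forward" is left unchanged.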
+func toNormalName(name string) string {
+ interchange := false
+ for _, c := range name {
+ if c == '.' {
+ interchange = true
+ break
+ }
+ if c == '/' {
+ break
+ }
+ }
+
+ if interchange {
+ r := strings.NewReplacer(".", "/", "/", ".")
+ return r.Replace(name)
+ }
+ return name
+}
diff --git a/vendor/github.com/coreos/go-iptables/LICENSE b/vendor/github.com/coreos/go-iptables/LICENSE
new file mode 100644
index 000000000..37ec93a14
--- /dev/null
+++ b/vendor/github.com/coreos/go-iptables/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/coreos/go-iptables/NOTICE b/vendor/github.com/coreos/go-iptables/NOTICE
new file mode 100644
index 000000000..23a0ada2f
--- /dev/null
+++ b/vendor/github.com/coreos/go-iptables/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2018 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go
new file mode 100644
index 000000000..85047e59d
--- /dev/null
+++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go
@@ -0,0 +1,680 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package iptables
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// Adds the output of stderr to exec.ExitError
+type Error struct {
+ exec.ExitError
+ cmd exec.Cmd
+ msg string
+ exitStatus *int //for overriding
+}
+
+func (e *Error) ExitStatus() int {
+ if e.exitStatus != nil {
+ return *e.exitStatus
+ }
+ return e.Sys().(syscall.WaitStatus).ExitStatus()
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg)
+}
+
+// IsNotExist returns true if the error is due to the chain or rule not existing
+func (e *Error) IsNotExist() bool {
+ if e.ExitStatus() != 1 {
+ return false
+ }
+ msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n"
+ msgNoChainExist := "No chain/target/match by that name.\n"
+ return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist)
+}
+
+// Protocol to differentiate between IPv4 and IPv6
+type Protocol byte
+
+const (
+ ProtocolIPv4 Protocol = iota
+ ProtocolIPv6
+)
+
+type IPTables struct {
+ path string
+ proto Protocol
+ hasCheck bool
+ hasWait bool
+ waitSupportSecond bool
+ hasRandomFully bool
+ v1 int
+ v2 int
+ v3 int
+ mode string // the underlying iptables operating mode, e.g. nf_tables
+ timeout int // time to wait for the iptables lock, default waits forever
+}
+
+// Stat represents a structured statistic entry.
+type Stat struct {
+ Packets uint64 `json:"pkts"`
+ Bytes uint64 `json:"bytes"`
+ Target string `json:"target"`
+ Protocol string `json:"prot"`
+ Opt string `json:"opt"`
+ Input string `json:"in"`
+ Output string `json:"out"`
+ Source *net.IPNet `json:"source"`
+ Destination *net.IPNet `json:"destination"`
+ Options string `json:"options"`
+}
+
+type option func(*IPTables)
+
+func IPFamily(proto Protocol) option {
+ return func(ipt *IPTables) {
+ ipt.proto = proto
+ }
+}
+
+func Timeout(timeout int) option {
+ return func(ipt *IPTables) {
+ ipt.timeout = timeout
+ }
+}
+
+// New creates a new IPTables configured with the options passed as parameters.
+// For backwards compatibility, it defaults to IPv4 and a timeout of 0.
+// For example, you can create an IPv6 IPTables with a 5-second timeout by passing
+// the IPFamily and Timeout options as follows:
+// ip6t := New(IPFamily(ProtocolIPv6), Timeout(5))
+func New(opts ...option) (*IPTables, error) {
+
+ ipt := &IPTables{
+ proto: ProtocolIPv4,
+ timeout: 0,
+ }
+
+ for _, opt := range opts {
+ opt(ipt)
+ }
+
+ path, err := exec.LookPath(getIptablesCommand(ipt.proto))
+ if err != nil {
+ return nil, err
+ }
+ ipt.path = path
+
+ vstring, err := getIptablesVersionString(path)
+ if err != nil {
+ return nil, fmt.Errorf("could not get iptables version: %v", err)
+ }
+ v1, v2, v3, mode, err := extractIptablesVersion(vstring)
+ if err != nil {
+ return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err)
+ }
+ ipt.v1 = v1
+ ipt.v2 = v2
+ ipt.v3 = v3
+ ipt.mode = mode
+
+ checkPresent, waitPresent, waitSupportSecond, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3)
+ ipt.hasCheck = checkPresent
+ ipt.hasWait = waitPresent
+ ipt.waitSupportSecond = waitSupportSecond
+ ipt.hasRandomFully = randomFullyPresent
+
+ return ipt, nil
+}
+
+// NewWithProtocol creates a new IPTables for the given proto.
+// The proto will determine which command is used, either "iptables" or "ip6tables".
+func NewWithProtocol(proto Protocol) (*IPTables, error) {
+ return New(IPFamily(proto), Timeout(0))
+}
+
+// Proto returns the protocol used by this IPTables.
+func (ipt *IPTables) Proto() Protocol {
+ return ipt.proto
+}
+
+// Exists checks if given rulespec in specified table/chain exists
+func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) {
+ if !ipt.hasCheck {
+ return ipt.existsForOldIptables(table, chain, rulespec)
+ }
+ cmd := append([]string{"-t", table, "-C", chain}, rulespec...)
+ err := ipt.run(cmd...)
+ eerr, eok := err.(*Error)
+ switch {
+ case err == nil:
+ return true, nil
+ case eok && eerr.ExitStatus() == 1:
+ return false, nil
+ default:
+ return false, err
+ }
+}
+
+// Insert inserts rulespec to specified table/chain (in specified pos)
+func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error {
+ cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...)
+ return ipt.run(cmd...)
+}
+
+// Append appends rulespec to specified table/chain
+func (ipt *IPTables) Append(table, chain string, rulespec ...string) error {
+ cmd := append([]string{"-t", table, "-A", chain}, rulespec...)
+ return ipt.run(cmd...)
+}
+
+// AppendUnique acts like Append except that it won't add a duplicate
+func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error {
+ exists, err := ipt.Exists(table, chain, rulespec...)
+ if err != nil {
+ return err
+ }
+
+ if !exists {
+ return ipt.Append(table, chain, rulespec...)
+ }
+
+ return nil
+}
+
+// Delete removes rulespec in specified table/chain
+func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error {
+ cmd := append([]string{"-t", table, "-D", chain}, rulespec...)
+ return ipt.run(cmd...)
+}
+
+func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) error {
+ exists, err := ipt.Exists(table, chain, rulespec...)
+ if err == nil && exists {
+ err = ipt.Delete(table, chain, rulespec...)
+ }
+ return err
+}
+
+// List rules in specified table/chain
+func (ipt *IPTables) List(table, chain string) ([]string, error) {
+ args := []string{"-t", table, "-S", chain}
+ return ipt.executeList(args)
+}
+
+// List rules (with counters) in specified table/chain
+func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) {
+ args := []string{"-t", table, "-v", "-S", chain}
+ return ipt.executeList(args)
+}
+
+// ListChains returns a slice containing the name of each chain in the specified table.
+func (ipt *IPTables) ListChains(table string) ([]string, error) {
+ args := []string{"-t", table, "-S"}
+
+ result, err := ipt.executeList(args)
+ if err != nil {
+ return nil, err
+ }
+
+ // Iterate over rules to find all default (-P) and user-specified (-N) chains.
+ // Chain definitions always come before rules.
+ // Format is the following:
+ // -P OUTPUT ACCEPT
+ // -N Custom
+ var chains []string
+ for _, val := range result {
+ if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") {
+ chains = append(chains, strings.Fields(val)[1])
+ } else {
+ break
+ }
+ }
+ return chains, nil
+}
+
+// ChainExists checks whether the given chain exists in the specified table.
+// '-S' accepts a non-existent rule index as long as the chain exists,
+// so index 1 is passed to reduce overhead for large chains
+func (ipt *IPTables) ChainExists(table, chain string) (bool, error) {
+ err := ipt.run("-t", table, "-S", chain, "1")
+ eerr, eok := err.(*Error)
+ switch {
+ case err == nil:
+ return true, nil
+ case eok && eerr.ExitStatus() == 1:
+ return false, nil
+ default:
+ return false, err
+ }
+}
+
+// Stats lists rules including the byte and packet counts
+func (ipt *IPTables) Stats(table, chain string) ([][]string, error) {
+ args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"}
+ lines, err := ipt.executeList(args)
+ if err != nil {
+ return nil, err
+ }
+
+ appendSubnet := func(addr string) string {
+ if strings.IndexByte(addr, byte('/')) < 0 {
+ if strings.IndexByte(addr, '.') < 0 {
+ return addr + "/128"
+ }
+ return addr + "/32"
+ }
+ return addr
+ }
+
+ ipv6 := ipt.proto == ProtocolIPv6
+
+ rows := [][]string{}
+ for i, line := range lines {
+ // Skip over chain name and field header
+ if i < 2 {
+ continue
+ }
+
+ // Fields:
+ // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options
+ line = strings.TrimSpace(line)
+ fields := strings.Fields(line)
+
+ // The ip6tables verbose output cannot be naively split due to the default "opt"
+ // field containing 2 single spaces.
+ if ipv6 {
+ // Check if field 6 is "opt" or "source" address
+ dest := fields[6]
+ ip, _, _ := net.ParseCIDR(dest)
+ if ip == nil {
+ ip = net.ParseIP(dest)
+ }
+
+ // If we detected a CIDR or IP, the "opt" field is empty; insert it.
+ if ip != nil {
+ f := []string{}
+ f = append(f, fields[:4]...)
+ f = append(f, " ") // Empty "opt" field for ip6tables
+ f = append(f, fields[4:]...)
+ fields = f
+ }
+ }
+
+ // Adjust "source" and "destination" to include netmask, to match regular
+ // List output
+ fields[7] = appendSubnet(fields[7])
+ fields[8] = appendSubnet(fields[8])
+
+ // Combine "options" fields 9... into a single space-delimited field.
+ options := fields[9:]
+ fields = fields[:9]
+ fields = append(fields, strings.Join(options, " "))
+ rows = append(rows, fields)
+ }
+ return rows, nil
+}
+
+// ParseStat parses a single statistic row into a Stat struct. The input should
+// be a string slice that is returned from calling the Stat method.
+func (ipt *IPTables) ParseStat(stat []string) (parsed Stat, err error) {
+ // For forward-compatibility, expect at least 10 fields in the stat
+ if len(stat) < 10 {
+ return parsed, fmt.Errorf("stat contained fewer fields than expected")
+ }
+
+ // Convert the fields that are not plain strings
+ parsed.Packets, err = strconv.ParseUint(stat[0], 0, 64)
+ if err != nil {
+ return parsed, fmt.Errorf("could not parse packets: %v", err)
+ }
+ parsed.Bytes, err = strconv.ParseUint(stat[1], 0, 64)
+ if err != nil {
+ return parsed, fmt.Errorf("could not parse bytes: %v", err)
+ }
+ _, parsed.Source, err = net.ParseCIDR(stat[7])
+ if err != nil {
+ return parsed, fmt.Errorf("could not parse source: %v", err)
+ }
+ _, parsed.Destination, err = net.ParseCIDR(stat[8])
+ if err != nil {
+ return parsed, fmt.Errorf("could not parse destination: %v", err)
+ }
+
+ // Put the fields that are strings
+ parsed.Target = stat[2]
+ parsed.Protocol = stat[3]
+ parsed.Opt = stat[4]
+ parsed.Input = stat[5]
+ parsed.Output = stat[6]
+ parsed.Options = stat[9]
+
+ return parsed, nil
+}
+
+// StructuredStats returns statistics as structured data which may be further
+// parsed and marshaled.
+func (ipt *IPTables) StructuredStats(table, chain string) ([]Stat, error) {
+ rawStats, err := ipt.Stats(table, chain)
+ if err != nil {
+ return nil, err
+ }
+
+ structStats := []Stat{}
+ for _, rawStat := range rawStats {
+ stat, err := ipt.ParseStat(rawStat)
+ if err != nil {
+ return nil, err
+ }
+ structStats = append(structStats, stat)
+ }
+
+ return structStats, nil
+}
+
+func (ipt *IPTables) executeList(args []string) ([]string, error) {
+ var stdout bytes.Buffer
+ if err := ipt.runWithOutput(args, &stdout); err != nil {
+ return nil, err
+ }
+
+ rules := strings.Split(stdout.String(), "\n")
+
+ // strip trailing newline
+ if len(rules) > 0 && rules[len(rules)-1] == "" {
+ rules = rules[:len(rules)-1]
+ }
+
+ for i, rule := range rules {
+ rules[i] = filterRuleOutput(rule)
+ }
+
+ return rules, nil
+}
+
+// NewChain creates a new chain in the specified table.
+// If the chain already exists, it will result in an error.
+func (ipt *IPTables) NewChain(table, chain string) error {
+ return ipt.run("-t", table, "-N", chain)
+}
+
+const existsErr = 1
+
+// ClearChain flushes (deletes all rules in) the specified table/chain.
+// If the chain does not exist, a new one will be created
+func (ipt *IPTables) ClearChain(table, chain string) error {
+ err := ipt.NewChain(table, chain)
+
+ eerr, eok := err.(*Error)
+ switch {
+ case err == nil:
+ return nil
+ case eok && eerr.ExitStatus() == existsErr:
+ // chain already exists. Flush (clear) it.
+ return ipt.run("-t", table, "-F", chain)
+ default:
+ return err
+ }
+}
+
+// RenameChain renames the old chain to the new one.
+func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error {
+ return ipt.run("-t", table, "-E", oldChain, newChain)
+}
+
+// DeleteChain deletes the chain in the specified table.
+// The chain must be empty
+func (ipt *IPTables) DeleteChain(table, chain string) error {
+ return ipt.run("-t", table, "-X", chain)
+}
+
+func (ipt *IPTables) ClearAndDeleteChain(table, chain string) error {
+ exists, err := ipt.ChainExists(table, chain)
+ if err != nil || !exists {
+ return err
+ }
+ err = ipt.run("-t", table, "-F", chain)
+ if err == nil {
+ err = ipt.run("-t", table, "-X", chain)
+ }
+ return err
+}
+
+func (ipt *IPTables) ClearAll() error {
+ return ipt.run("-F")
+}
+
+func (ipt *IPTables) DeleteAll() error {
+ return ipt.run("-X")
+}
+
+// ChangePolicy changes policy on chain to target
+func (ipt *IPTables) ChangePolicy(table, chain, target string) error {
+ return ipt.run("-t", table, "-P", chain, target)
+}
+
+// Check if the underlying iptables command supports the --random-fully flag
+func (ipt *IPTables) HasRandomFully() bool {
+ return ipt.hasRandomFully
+}
+
+// Return version components of the underlying iptables command
+func (ipt *IPTables) GetIptablesVersion() (int, int, int) {
+ return ipt.v1, ipt.v2, ipt.v3
+}
+
+// run runs an iptables command with the given arguments, ignoring
+// any stdout output
+func (ipt *IPTables) run(args ...string) error {
+ return ipt.runWithOutput(args, nil)
+}
+
+// runWithOutput runs an iptables command with the given arguments,
+// writing any stdout output to the given writer
+func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error {
+ args = append([]string{ipt.path}, args...)
+ if ipt.hasWait {
+ args = append(args, "--wait")
+ if ipt.timeout != 0 && ipt.waitSupportSecond {
+ args = append(args, strconv.Itoa(ipt.timeout))
+ }
+ } else {
+ fmu, err := newXtablesFileLock()
+ if err != nil {
+ return err
+ }
+ ul, err := fmu.tryLock()
+ if err != nil {
+ syscall.Close(fmu.fd)
+ return err
+ }
+ defer ul.Unlock()
+ }
+
+ var stderr bytes.Buffer
+ cmd := exec.Cmd{
+ Path: ipt.path,
+ Args: args,
+ Stdout: stdout,
+ Stderr: &stderr,
+ }
+
+ if err := cmd.Run(); err != nil {
+ switch e := err.(type) {
+ case *exec.ExitError:
+ return &Error{*e, cmd, stderr.String(), nil}
+ default:
+ return err
+ }
+ }
+
+ return nil
+}
+
+// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables".
+func getIptablesCommand(proto Protocol) string {
+ if proto == ProtocolIPv6 {
+ return "ip6tables"
+ } else {
+ return "iptables"
+ }
+}
+
+// Checks which optional iptables features are supported: the "-C" flag, the "--wait" flag, "--wait" with a seconds argument, and "--random-fully"
+func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool, bool) {
+ return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesWaitSupportSecond(v1, v2, v3), iptablesHasRandomFully(v1, v2, v3)
+}
+
+// extractIptablesVersion returns the first three components of the iptables version
+// and the operating mode (e.g. nf_tables or legacy)
+// e.g. "iptables v1.3.66" would return (1, 3, 66, legacy, nil)
+func extractIptablesVersion(str string) (int, int, int, string, error) {
+ versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`)
+ result := versionMatcher.FindStringSubmatch(str)
+ if result == nil {
+ return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str)
+ }
+
+ v1, err := strconv.Atoi(result[1])
+ if err != nil {
+ return 0, 0, 0, "", err
+ }
+
+ v2, err := strconv.Atoi(result[2])
+ if err != nil {
+ return 0, 0, 0, "", err
+ }
+
+ v3, err := strconv.Atoi(result[3])
+ if err != nil {
+ return 0, 0, 0, "", err
+ }
+
+ mode := "legacy"
+ if result[4] != "" {
+ mode = result[4]
+ }
+ return v1, v2, v3, mode, nil
+}
+
+// Runs "iptables --version" to get the version string
+func getIptablesVersionString(path string) (string, error) {
+ cmd := exec.Command(path, "--version")
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := cmd.Run()
+ if err != nil {
+ return "", err
+ }
+ return out.String(), nil
+}
+
+// Checks if an iptables version is after 1.4.11, when --check was added
+func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool {
+ if v1 > 1 {
+ return true
+ }
+ if v1 == 1 && v2 > 4 {
+ return true
+ }
+ if v1 == 1 && v2 == 4 && v3 >= 11 {
+ return true
+ }
+ return false
+}
+
+// Checks if an iptables version is after 1.4.20, when --wait was added
+func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool {
+ if v1 > 1 {
+ return true
+ }
+ if v1 == 1 && v2 > 4 {
+ return true
+ }
+ if v1 == 1 && v2 == 4 && v3 >= 20 {
+ return true
+ }
+ return false
+}
+
+// Checks if an iptables version is after 1.6.0, when --wait started accepting a seconds argument
+func iptablesWaitSupportSecond(v1 int, v2 int, v3 int) bool {
+ if v1 > 1 {
+ return true
+ }
+ if v1 == 1 && v2 >= 6 {
+ return true
+ }
+ return false
+}
+
+// Checks if an iptables version is after 1.6.2, when --random-fully was added
+func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool {
+ if v1 > 1 {
+ return true
+ }
+ if v1 == 1 && v2 > 6 {
+ return true
+ }
+ if v1 == 1 && v2 == 6 && v3 >= 2 {
+ return true
+ }
+ return false
+}
+
+// Checks if a rule specification exists for a table
+func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) {
+ rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ")
+ args := []string{"-t", table, "-S"}
+ var stdout bytes.Buffer
+ err := ipt.runWithOutput(args, &stdout)
+ if err != nil {
+ return false, err
+ }
+ return strings.Contains(stdout.String(), rs), nil
+}
+
+// counterRegex is the regex used to detect nftables counter format
+var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `)
+
+// filterRuleOutput works around some inconsistencies in output.
+// For example, when iptables is in legacy vs. nftables mode, it produces
+// different results.
+func filterRuleOutput(rule string) string {
+ out := rule
+
+ // work around an output difference in nftables mode where counters
+ // are output in iptables-save format, rather than iptables -S format
+ // The string begins with "[0:0]"
+ //
+ // Fixes #49
+ if groups := counterRegex.FindStringSubmatch(out); groups != nil {
+ // drop the brackets
+ out = out[len(groups[0]):]
+ out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2])
+ }
+
+ return out
+}
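The chain helpers above form a small management surface: create or flush a chain, change its policy, and tear it down. The following is a minimal, hedged sketch of driving them from application code; it assumes the package's exported New constructor, which lives earlier in this file outside this excerpt, and the hypothetical chain name is only for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-iptables/iptables"
)

func main() {
	// New locates the iptables binary and probes --wait/--check/--random-fully support.
	ipt, err := iptables.New()
	if err != nil {
		log.Fatal(err)
	}

	v1, v2, v3 := ipt.GetIptablesVersion()
	fmt.Printf("iptables %d.%d.%d, random-fully=%v\n", v1, v2, v3, ipt.HasRandomFully())

	// ClearChain creates the chain if it is missing, or flushes it if it already exists.
	if err := ipt.ClearChain("nat", "EXAMPLE-CHAIN"); err != nil {
		log.Fatal(err)
	}
	// ClearAndDeleteChain flushes and then removes the chain in one call.
	if err := ipt.ClearAndDeleteChain("nat", "EXAMPLE-CHAIN"); err != nil {
		log.Fatal(err)
	}
}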
diff --git a/vendor/github.com/coreos/go-iptables/iptables/lock.go b/vendor/github.com/coreos/go-iptables/iptables/lock.go
new file mode 100644
index 000000000..a88e92b4e
--- /dev/null
+++ b/vendor/github.com/coreos/go-iptables/iptables/lock.go
@@ -0,0 +1,84 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package iptables
+
+import (
+ "os"
+ "sync"
+ "syscall"
+)
+
+const (
+ // In earlier versions of iptables, the xtables lock was implemented
+ // via a Unix socket, but now flock is used via this lockfile:
+ // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707
+ // Note the LSB-conforming "/run" directory does not exist on old
+ // distributions, so assume "/var" is symlinked
+ xtablesLockFilePath = "/var/run/xtables.lock"
+
+ defaultFilePerm = 0600
+)
+
+type Unlocker interface {
+ Unlock() error
+}
+
+type nopUnlocker struct{}
+
+func (_ nopUnlocker) Unlock() error { return nil }
+
+type fileLock struct {
+ // mu is used to protect against concurrent invocations from within this process
+ mu sync.Mutex
+ fd int
+}
+
+// tryLock takes an exclusive lock on the xtables lock file without blocking.
+// This is best-effort only: if the exclusive lock would block (i.e. because
+// another process already holds it), no error is returned. Otherwise, any
+// error encountered during the locking operation is returned.
+// The returned Unlocker should be used to release the lock when the caller is
+// done invoking iptables commands.
+func (l *fileLock) tryLock() (Unlocker, error) {
+ l.mu.Lock()
+ err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
+ switch err {
+ case syscall.EWOULDBLOCK:
+ l.mu.Unlock()
+ return nopUnlocker{}, nil
+ case nil:
+ return l, nil
+ default:
+ l.mu.Unlock()
+ return nil, err
+ }
+}
+
+// Unlock closes the underlying file, which implicitly unlocks it as well. It
+// also unlocks the associated mutex.
+func (l *fileLock) Unlock() error {
+ defer l.mu.Unlock()
+ return syscall.Close(l.fd)
+}
+
+// newXtablesFileLock opens a new lock on the xtables lockfile without
+// acquiring the lock
+func newXtablesFileLock() (*fileLock, error) {
+ fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm)
+ if err != nil {
+ return nil, err
+ }
+ return &fileLock{fd: fd}, nil
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
new file mode 100644
index 000000000..1cade6cef
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Brian Goff
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
new file mode 100644
index 000000000..42bf32aab
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
@@ -0,0 +1,16 @@
+package md2man
+
+import (
+ "github.com/russross/blackfriday/v2"
+)
+
+// Render converts a markdown document into a roff formatted document.
+func Render(doc []byte) []byte {
+ renderer := NewRoffRenderer()
+
+ return blackfriday.Run(doc,
+ []blackfriday.Option{
+ blackfriday.WithRenderer(renderer),
+ blackfriday.WithExtensions(renderer.GetExtensions()),
+ }...)
+}
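For orientation, a hedged sketch of calling the Render helper above from a small program; the markdown input is made up purely for illustration.

package main

import (
	"os"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Render returns roff text that can be written straight to a man page file.
	doc := []byte("# EXAMPLE 1 \"January 2024\" \"example 1.0\"\n\n# NAME\nexample \\- demonstrate md2man output\n")
	os.Stdout.Write(md2man.Render(doc))
}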
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
new file mode 100644
index 000000000..8a290f197
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -0,0 +1,382 @@
+package md2man
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/russross/blackfriday/v2"
+)
+
+// roffRenderer implements the blackfriday.Renderer interface for creating
+// roff format (manpages) from markdown text
+type roffRenderer struct {
+ extensions blackfriday.Extensions
+ listCounters []int
+ firstHeader bool
+ firstDD bool
+ listDepth int
+}
+
+const (
+ titleHeader = ".TH "
+ topLevelHeader = "\n\n.SH "
+ secondLevelHdr = "\n.SH "
+ otherHeader = "\n.SS "
+ crTag = "\n"
+ emphTag = "\\fI"
+ emphCloseTag = "\\fP"
+ strongTag = "\\fB"
+ strongCloseTag = "\\fP"
+ breakTag = "\n.br\n"
+ paraTag = "\n.PP\n"
+ hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
+ linkTag = "\n\\[la]"
+ linkCloseTag = "\\[ra]"
+ codespanTag = "\\fB"
+ codespanCloseTag = "\\fR"
+ codeTag = "\n.EX\n"
+ codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as blackfriday gives us one).
+ quoteTag = "\n.PP\n.RS\n"
+ quoteCloseTag = "\n.RE\n"
+ listTag = "\n.RS\n"
+ listCloseTag = "\n.RE\n"
+ dtTag = "\n.TP\n"
+ dd2Tag = "\n"
+ tableStart = "\n.TS\nallbox;\n"
+ tableEnd = ".TE\n"
+ tableCellStart = "T{\n"
+ tableCellEnd = "\nT}\n"
+ tablePreprocessor = `'\" t`
+)
+
+// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
+// from markdown
+func NewRoffRenderer() *roffRenderer { // nolint: golint
+ var extensions blackfriday.Extensions
+
+ extensions |= blackfriday.NoIntraEmphasis
+ extensions |= blackfriday.Tables
+ extensions |= blackfriday.FencedCode
+ extensions |= blackfriday.SpaceHeadings
+ extensions |= blackfriday.Footnotes
+ extensions |= blackfriday.Titleblock
+ extensions |= blackfriday.DefinitionLists
+ return &roffRenderer{
+ extensions: extensions,
+ }
+}
+
+// GetExtensions returns the list of extensions used by this renderer implementation
+func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
+ return r.extensions
+}
+
+// RenderHeader handles outputting the header at document start
+func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
+ // We need to walk the tree to check if there are any tables.
+ // If there are, we need to enable the roff table preprocessor.
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ if node.Type == blackfriday.Table {
+ out(w, tablePreprocessor+"\n")
+ return blackfriday.Terminate
+ }
+ return blackfriday.GoToNext
+ })
+
+ // disable hyphenation
+ out(w, ".nh\n")
+}
+
+// RenderFooter handles outputting the footer at the document end; the roff
+// renderer has no footer information
+func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
+}
+
+// RenderNode is called for each node in a markdown document; based on the node
+// type the equivalent roff output is sent to the writer
+func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ walkAction := blackfriday.GoToNext
+
+ switch node.Type {
+ case blackfriday.Text:
+ escapeSpecialChars(w, node.Literal)
+ case blackfriday.Softbreak:
+ out(w, crTag)
+ case blackfriday.Hardbreak:
+ out(w, breakTag)
+ case blackfriday.Emph:
+ if entering {
+ out(w, emphTag)
+ } else {
+ out(w, emphCloseTag)
+ }
+ case blackfriday.Strong:
+ if entering {
+ out(w, strongTag)
+ } else {
+ out(w, strongCloseTag)
+ }
+ case blackfriday.Link:
+ // Don't render the link text for automatic links, because this
+ // will only duplicate the URL in the roff output.
+ // See https://daringfireball.net/projects/markdown/syntax#autolink
+ if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
+ out(w, string(node.FirstChild.Literal))
+ }
+ // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
+ escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
+ out(w, linkTag+escapedLink+linkCloseTag)
+ walkAction = blackfriday.SkipChildren
+ case blackfriday.Image:
+ // ignore images
+ walkAction = blackfriday.SkipChildren
+ case blackfriday.Code:
+ out(w, codespanTag)
+ escapeSpecialChars(w, node.Literal)
+ out(w, codespanCloseTag)
+ case blackfriday.Document:
+ break
+ case blackfriday.Paragraph:
+ // roff .PP markers break lists
+ if r.listDepth > 0 {
+ return blackfriday.GoToNext
+ }
+ if entering {
+ out(w, paraTag)
+ } else {
+ out(w, crTag)
+ }
+ case blackfriday.BlockQuote:
+ if entering {
+ out(w, quoteTag)
+ } else {
+ out(w, quoteCloseTag)
+ }
+ case blackfriday.Heading:
+ r.handleHeading(w, node, entering)
+ case blackfriday.HorizontalRule:
+ out(w, hruleTag)
+ case blackfriday.List:
+ r.handleList(w, node, entering)
+ case blackfriday.Item:
+ r.handleItem(w, node, entering)
+ case blackfriday.CodeBlock:
+ out(w, codeTag)
+ escapeSpecialChars(w, node.Literal)
+ out(w, codeCloseTag)
+ case blackfriday.Table:
+ r.handleTable(w, node, entering)
+ case blackfriday.TableHead:
+ case blackfriday.TableBody:
+ case blackfriday.TableRow:
+ // no action as cell entries do all the nroff formatting
+ return blackfriday.GoToNext
+ case blackfriday.TableCell:
+ r.handleTableCell(w, node, entering)
+ case blackfriday.HTMLSpan:
+ // ignore other HTML tags
+ case blackfriday.HTMLBlock:
+ if bytes.HasPrefix(node.Literal, []byte(" Response
+ s.IsResponse = true
+
+ // Validate SIP Version
+ s.Version, err = GetSIPVersion(splits[0])
+ if err != nil {
+ return err
+ }
+
+ // Compute code
+ s.ResponseCode, err = strconv.Atoi(splits[1])
+ if err != nil {
+ return err
+ }
+
+ // Compute status line
+ s.ResponseStatus = splits[2]
+
+ } else {
+
+ // --> Request
+
+ // Validate method
+ s.Method, err = GetSIPMethod(splits[0])
+ if err != nil {
+ return err
+ }
+
+ s.RequestURI = splits[1]
+
+ // Validate SIP Version
+ s.Version, err = GetSIPVersion(splits[2])
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ParseHeader will parse a SIP Header
+// SIP headers are quite simple: they are colon-separated name and value pairs
+// Headers can be spread over multiple lines
+//
+// Examples of headers:
+//
+// CSeq: 1 REGISTER
+// Via: SIP/2.0/UDP there.com:5060
+// Authorization:Digest username="UserB",
+// realm="MCI WorldCom SIP",
+// nonce="1cec4341ae6cbe5a359ea9c8e88df84f", opaque="",
+// uri="sip:ss2.wcom.com", response="71ba27c64bd01de719686aa4590d5824"
+func (s *SIP) ParseHeader(header []byte) (err error) {
+
+ // Ignore empty headers
+ if len(header) == 0 {
+ return
+ }
+
+ // Check if this is a continuation of the previous header
+ // RFC 3261 - 7.3.1 - Header Field Format specifies that continuation lines of
+ // multiline headers must begin with SP or TAB
+ if header[0] == '\t' || header[0] == ' ' {
+
+ header = bytes.TrimSpace(header)
+ s.Headers[s.lastHeaderParsed][len(s.Headers[s.lastHeaderParsed])-1] += fmt.Sprintf(" %s", string(header))
+ return
+ }
+
+ // Find the ':' to separate header name and value
+ index := bytes.Index(header, []byte(":"))
+ if index >= 0 {
+
+ headerName := strings.ToLower(string(bytes.Trim(header[:index], " ")))
+ headerValue := string(bytes.Trim(header[index+1:], " "))
+
+ // Add header to object
+ s.Headers[headerName] = append(s.Headers[headerName], headerValue)
+ s.lastHeaderParsed = headerName
+
+ // Compute specific headers
+ err = s.ParseSpecificHeaders(headerName, headerValue)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ParseSpecificHeaders will parse some specific key values from
+// specific headers like CSeq or Content-Length integer values
+func (s *SIP) ParseSpecificHeaders(headerName string, headerValue string) (err error) {
+
+ switch headerName {
+ case "cseq":
+
+ // CSeq header value is formatted like this:
+ // CSeq: 123 INVITE
+ // We split the value to parse the CSeq integer value and the method
+ splits := strings.Split(headerValue, " ")
+ if len(splits) > 1 {
+
+ // Parse Cseq
+ s.cseq, err = strconv.ParseInt(splits[0], 10, 64)
+ if err != nil {
+ return err
+ }
+
+ // Validate method
+ if s.IsResponse {
+ s.Method, err = GetSIPMethod(splits[1])
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ case "content-length":
+
+ // Parse Content-Length
+ s.contentLength, err = strconv.ParseInt(headerValue, 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetAllHeaders will return the full headers of the
+// current SIP packet in a map[string][]string
+func (s *SIP) GetAllHeaders() map[string][]string {
+ return s.Headers
+}
+
+// GetHeader will return all the headers with
+// the specified name.
+func (s *SIP) GetHeader(headerName string) []string {
+ headerName = strings.ToLower(headerName)
+ h := make([]string, 0)
+ if _, ok := s.Headers[headerName]; ok {
+ return s.Headers[headerName]
+ }
+ compactHeader := compactSipHeadersCorrespondance[headerName]
+ if _, ok := s.Headers[compactHeader]; ok {
+ return s.Headers[compactHeader]
+ }
+ return h
+}
+
+// GetFirstHeader will return the first header with
+// the specified name. If the current SIP packet has multiple
+// headers with the same name, it returns the first.
+func (s *SIP) GetFirstHeader(headerName string) string {
+ headers := s.GetHeader(headerName)
+ if len(headers) > 0 {
+ return headers[0]
+ }
+ return ""
+}
+
+//
+// Some handy getters for most used SIP headers
+//
+
+// GetAuthorization will return the Authorization
+// header of the current SIP packet
+func (s *SIP) GetAuthorization() string {
+ return s.GetFirstHeader("Authorization")
+}
+
+// GetFrom will return the From
+// header of the current SIP packet
+func (s *SIP) GetFrom() string {
+ return s.GetFirstHeader("From")
+}
+
+// GetTo will return the To
+// header of the current SIP packet
+func (s *SIP) GetTo() string {
+ return s.GetFirstHeader("To")
+}
+
+// GetContact will return the Contact
+// header of the current SIP packet
+func (s *SIP) GetContact() string {
+ return s.GetFirstHeader("Contact")
+}
+
+// GetCallID will return the Call-ID
+// header of the current SIP packet
+func (s *SIP) GetCallID() string {
+ return s.GetFirstHeader("Call-ID")
+}
+
+// GetUserAgent will return the User-Agent
+// header of the current SIP packet
+func (s *SIP) GetUserAgent() string {
+ return s.GetFirstHeader("User-Agent")
+}
+
+// GetContentLength will return the parsed integer
+// Content-Length header of the current SIP packet
+func (s *SIP) GetContentLength() int64 {
+ return s.contentLength
+}
+
+// GetCSeq will return the parsed integer CSeq header
+// of the current SIP packet
+func (s *SIP) GetCSeq() int64 {
+ return s.cseq
+}
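As a usage sketch (not part of the vendored file), the getters above are typically reached by decoding a raw SIP payload through gopacket; the REGISTER request below is made up for illustration.

package main

import (
	"fmt"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	raw := []byte("REGISTER sip:example.com SIP/2.0\r\nCSeq: 1 REGISTER\r\nCall-ID: abc@host\r\nContent-Length: 0\r\n\r\n")
	p := gopacket.NewPacket(raw, layers.LayerTypeSIP, gopacket.Default)
	if l := p.Layer(layers.LayerTypeSIP); l != nil {
		sip := l.(*layers.SIP)
		// GetHeader and friends are case-insensitive and also resolve compact header names.
		fmt.Println("Call-ID:", sip.GetCallID())
		fmt.Println("CSeq:   ", sip.GetCSeq())
		fmt.Println("Method: ", sip.Method)
	}
}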
diff --git a/vendor/github.com/gopacket/gopacket/layers/stp.go b/vendor/github.com/gopacket/gopacket/layers/stp.go
new file mode 100644
index 000000000..5ced41281
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/stp.go
@@ -0,0 +1,150 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/gopacket/gopacket"
+)
+
+type STPSwitchID struct {
+ Priority uint16 // Bridge priority
+ SysID uint16 // VLAN ID
+ HwAddr net.HardwareAddr
+}
+
+// STP decodes Spanning Tree Protocol packets, which transport BPDU (bridge protocol data unit) messages.
+type STP struct {
+ BaseLayer
+ ProtocolID uint16
+ Version uint8
+ Type uint8
+ TC, TCA bool // TC: Topology change; TCA: Topology change ack
+ RouteID, BridgeID STPSwitchID
+ Cost uint32
+ PortID uint16
+ MessageAge uint16
+ MaxAge uint16
+ HelloTime uint16
+ FDelay uint16
+}
+
+// LayerType returns gopacket.LayerTypeSTP.
+func (s *STP) LayerType() gopacket.LayerType { return LayerTypeSTP }
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (s *STP) CanDecode() gopacket.LayerClass {
+ return LayerTypeSTP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (stp *STP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ stpLength := 35
+ if len(data) < stpLength {
+ df.SetTruncated()
+ return fmt.Errorf("STP length %d too short", len(data))
+ }
+
+ stp.ProtocolID = binary.BigEndian.Uint16(data[:2])
+ stp.Version = uint8(data[2])
+ stp.Type = uint8(data[3])
+ stp.TC = data[4]&0x01 != 0
+ stp.TCA = data[4]&0x80 != 0
+ stp.RouteID.Priority = binary.BigEndian.Uint16(data[5:7]) & 0xf000
+ stp.RouteID.SysID = binary.BigEndian.Uint16(data[5:7]) & 0x0fff
+ stp.RouteID.HwAddr = net.HardwareAddr(data[7:13])
+ stp.Cost = binary.BigEndian.Uint32(data[13:17])
+ stp.BridgeID.Priority = binary.BigEndian.Uint16(data[17:19]) & 0xf000
+ stp.BridgeID.SysID = binary.BigEndian.Uint16(data[17:19]) & 0x0fff
+ stp.BridgeID.HwAddr = net.HardwareAddr(data[19:25])
+ stp.PortID = binary.BigEndian.Uint16(data[25:27])
+ stp.MessageAge = binary.BigEndian.Uint16(data[27:29])
+ stp.MaxAge = binary.BigEndian.Uint16(data[29:31])
+ stp.HelloTime = binary.BigEndian.Uint16(data[31:33])
+ stp.FDelay = binary.BigEndian.Uint16(data[33:35])
+ stp.Contents = data[:stpLength]
+ stp.Payload = data[stpLength:]
+
+ return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (stp *STP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// Check if the priority value is correct.
+func checkPriority(prio uint16) (uint16, error) {
+ if prio == 0 {
+ return prio, errors.New("Invalid Priority value must be in the range <4096-61440> with an increment of 4096")
+ }
+ if prio%4096 == 0 {
+ return prio, nil
+ } else {
+ return prio, errors.New("Invalid Priority value must be in the range <4096-61440> with an increment of 4096")
+ }
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (s *STP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var flags uint8 = 0x00
+ bytes, err := b.PrependBytes(35)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, s.ProtocolID)
+ bytes[2] = s.Version
+ bytes[3] = s.Type
+ if s.TC {
+ flags |= 0x01
+ }
+ if s.TCA {
+ flags |= 0x80
+ }
+ bytes[4] = flags
+
+ prioRoot, err := checkPriority(s.RouteID.Priority)
+ if err != nil {
+ panic(err)
+ }
+ if s.RouteID.SysID >= 4096 {
+ panic("Invalid VlanID value ..!")
+ }
+ binary.BigEndian.PutUint16(bytes[5:7], prioRoot|s.RouteID.SysID)
+ copy(bytes[7:13], s.RouteID.HwAddr)
+
+ binary.BigEndian.PutUint32(bytes[13:17], s.Cost)
+
+ prioBridge, err := checkPriority(s.BridgeID.Priority)
+ if err != nil {
+ panic(err)
+ }
+ if s.BridgeID.SysID >= 4096 {
+ panic("Invalid VlanID value ..!")
+ }
+ binary.BigEndian.PutUint16(bytes[17:19], prioBridge|s.BridgeID.SysID)
+ copy(bytes[19:25], s.BridgeID.HwAddr)
+
+ binary.BigEndian.PutUint16(bytes[25:27], s.PortID)
+ binary.BigEndian.PutUint16(bytes[27:29], s.MessageAge)
+ binary.BigEndian.PutUint16(bytes[29:31], s.MaxAge)
+ binary.BigEndian.PutUint16(bytes[31:33], s.HelloTime)
+ binary.BigEndian.PutUint16(bytes[33:35], s.FDelay)
+
+ return nil
+}
+
+func decodeSTP(data []byte, p gopacket.PacketBuilder) error {
+ stp := &STP{}
+ return decodingLayerDecoder(stp, data, p)
+}
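A hedged sketch of building and serializing a BPDU with the layer above; note that SerializeTo goes through checkPriority, so bridge priorities must be non-zero multiples of 4096 and SysID must stay below 4096, otherwise it panics. The addresses and values here are invented for illustration.

package main

import (
	"log"
	"net"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	mac, _ := net.ParseMAC("00:11:22:33:44:55")
	stp := &layers.STP{
		ProtocolID: 0,
		Version:    0,
		Type:       0,
		// 32768 is a valid priority: non-zero and a multiple of 4096.
		RouteID:  layers.STPSwitchID{Priority: 32768, SysID: 1, HwAddr: mac},
		BridgeID: layers.STPSwitchID{Priority: 32768, SysID: 1, HwAddr: mac},
	}
	buf := gopacket.NewSerializeBuffer()
	if err := stp.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
		log.Fatal(err)
	}
	log.Printf("BPDU bytes: % x", buf.Bytes())
}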
diff --git a/vendor/github.com/gopacket/gopacket/layers/tcp.go b/vendor/github.com/gopacket/gopacket/layers/tcp.go
new file mode 100644
index 000000000..b38858950
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/tcp.go
@@ -0,0 +1,361 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "github.com/gopacket/gopacket"
+)
+
+// TCP is the layer for TCP headers.
+type TCP struct {
+ BaseLayer
+ SrcPort, DstPort TCPPort
+ Seq uint32
+ Ack uint32
+ DataOffset uint8
+ FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS bool
+ Window uint16
+ Checksum uint16
+ Urgent uint16
+ sPort, dPort []byte
+ Options []TCPOption
+ Padding []byte
+ opts [4]TCPOption
+ tcpipchecksum
+}
+
+// TCPOptionKind represents a TCP option code.
+type TCPOptionKind uint8
+
+const (
+ TCPOptionKindEndList = 0
+ TCPOptionKindNop = 1
+ TCPOptionKindMSS = 2 // len = 4
+ TCPOptionKindWindowScale = 3 // len = 3
+ TCPOptionKindSACKPermitted = 4 // len = 2
+ TCPOptionKindSACK = 5 // len = n
+ TCPOptionKindEcho = 6 // len = 6, obsolete
+ TCPOptionKindEchoReply = 7 // len = 6, obsolete
+ TCPOptionKindTimestamps = 8 // len = 10
+ TCPOptionKindPartialOrderConnectionPermitted = 9 // len = 2, obsolete
+ TCPOptionKindPartialOrderServiceProfile = 10 // len = 3, obsolete
+ TCPOptionKindCC = 11 // obsolete
+ TCPOptionKindCCNew = 12 // obsolete
+ TCPOptionKindCCEcho = 13 // obsolete
+ TCPOptionKindAltChecksum = 14 // len = 3, obsolete
+ TCPOptionKindAltChecksumData = 15 // len = n, obsolete
+)
+
+func (k TCPOptionKind) String() string {
+ switch k {
+ case TCPOptionKindEndList:
+ return "EndList"
+ case TCPOptionKindNop:
+ return "NOP"
+ case TCPOptionKindMSS:
+ return "MSS"
+ case TCPOptionKindWindowScale:
+ return "WindowScale"
+ case TCPOptionKindSACKPermitted:
+ return "SACKPermitted"
+ case TCPOptionKindSACK:
+ return "SACK"
+ case TCPOptionKindEcho:
+ return "Echo"
+ case TCPOptionKindEchoReply:
+ return "EchoReply"
+ case TCPOptionKindTimestamps:
+ return "Timestamps"
+ case TCPOptionKindPartialOrderConnectionPermitted:
+ return "PartialOrderConnectionPermitted"
+ case TCPOptionKindPartialOrderServiceProfile:
+ return "PartialOrderServiceProfile"
+ case TCPOptionKindCC:
+ return "CC"
+ case TCPOptionKindCCNew:
+ return "CCNew"
+ case TCPOptionKindCCEcho:
+ return "CCEcho"
+ case TCPOptionKindAltChecksum:
+ return "AltChecksum"
+ case TCPOptionKindAltChecksumData:
+ return "AltChecksumData"
+ default:
+ return fmt.Sprintf("Unknown(%d)", k)
+ }
+}
+
+type TCPOption struct {
+ OptionType TCPOptionKind
+ OptionLength uint8
+ OptionData []byte
+}
+
+func (t TCPOption) String() string {
+ hd := hex.EncodeToString(t.OptionData)
+ if len(hd) > 0 {
+ hd = " 0x" + hd
+ }
+ switch t.OptionType {
+ case TCPOptionKindMSS:
+ if len(t.OptionData) >= 2 {
+ return fmt.Sprintf("TCPOption(%s:%v%s)",
+ t.OptionType,
+ binary.BigEndian.Uint16(t.OptionData),
+ hd)
+ }
+
+ case TCPOptionKindTimestamps:
+ if len(t.OptionData) == 8 {
+ return fmt.Sprintf("TCPOption(%s:%v/%v%s)",
+ t.OptionType,
+ binary.BigEndian.Uint32(t.OptionData[:4]),
+ binary.BigEndian.Uint32(t.OptionData[4:8]),
+ hd)
+ }
+ }
+ return fmt.Sprintf("TCPOption(%s:%s)", t.OptionType, hd)
+}
+
+// LayerType returns gopacket.LayerTypeTCP
+func (t *TCP) LayerType() gopacket.LayerType { return LayerTypeTCP }
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (t *TCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var optionLength int
+ for _, o := range t.Options {
+ switch o.OptionType {
+ case 0, 1:
+ optionLength += 1
+ default:
+ optionLength += 2 + len(o.OptionData)
+ }
+ }
+ if opts.FixLengths {
+ if rem := optionLength % 4; rem != 0 {
+ t.Padding = lotsOfZeros[:4-rem]
+ }
+ t.DataOffset = uint8((len(t.Padding) + optionLength + 20) / 4)
+ }
+ bytes, err := b.PrependBytes(20 + optionLength + len(t.Padding))
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(t.SrcPort))
+ binary.BigEndian.PutUint16(bytes[2:], uint16(t.DstPort))
+ binary.BigEndian.PutUint32(bytes[4:], t.Seq)
+ binary.BigEndian.PutUint32(bytes[8:], t.Ack)
+ binary.BigEndian.PutUint16(bytes[12:], t.flagsAndOffset())
+ binary.BigEndian.PutUint16(bytes[14:], t.Window)
+ binary.BigEndian.PutUint16(bytes[18:], t.Urgent)
+ start := 20
+ for _, o := range t.Options {
+ bytes[start] = byte(o.OptionType)
+ switch o.OptionType {
+ case 0, 1:
+ start++
+ default:
+ if opts.FixLengths {
+ o.OptionLength = uint8(len(o.OptionData) + 2)
+ }
+ bytes[start+1] = o.OptionLength
+ copy(bytes[start+2:start+len(o.OptionData)+2], o.OptionData)
+ start += len(o.OptionData) + 2
+ }
+ }
+ copy(bytes[start:], t.Padding)
+ if opts.ComputeChecksums {
+ // zero out checksum bytes in current serialization.
+ bytes[16] = 0
+ bytes[17] = 0
+ csum, err := t.computeChecksum(b.Bytes(), IPProtocolTCP)
+ if err != nil {
+ return err
+ }
+ t.Checksum = gopacket.FoldChecksum(csum)
+ }
+ binary.BigEndian.PutUint16(bytes[16:], t.Checksum)
+ return nil
+}
+
+func (t *TCP) ComputeChecksum() (uint16, error) {
+ csum, err := t.computeChecksum(append(t.Contents, t.Payload...), IPProtocolTCP)
+ if err != nil {
+ return 0, err
+ }
+ return gopacket.FoldChecksum(csum), nil
+}
+
+func (t *TCP) flagsAndOffset() uint16 {
+ f := uint16(t.DataOffset) << 12
+ if t.FIN {
+ f |= 0x0001
+ }
+ if t.SYN {
+ f |= 0x0002
+ }
+ if t.RST {
+ f |= 0x0004
+ }
+ if t.PSH {
+ f |= 0x0008
+ }
+ if t.ACK {
+ f |= 0x0010
+ }
+ if t.URG {
+ f |= 0x0020
+ }
+ if t.ECE {
+ f |= 0x0040
+ }
+ if t.CWR {
+ f |= 0x0080
+ }
+ if t.NS {
+ f |= 0x0100
+ }
+ return f
+}
+
+func (tcp *TCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP header. Length %d less than 20", len(data))
+ }
+ tcp.SrcPort = TCPPort(binary.BigEndian.Uint16(data[0:2]))
+ tcp.sPort = data[0:2]
+ tcp.DstPort = TCPPort(binary.BigEndian.Uint16(data[2:4]))
+ tcp.dPort = data[2:4]
+ tcp.Seq = binary.BigEndian.Uint32(data[4:8])
+ tcp.Ack = binary.BigEndian.Uint32(data[8:12])
+ tcp.DataOffset = data[12] >> 4
+ tcp.FIN = data[13]&0x01 != 0
+ tcp.SYN = data[13]&0x02 != 0
+ tcp.RST = data[13]&0x04 != 0
+ tcp.PSH = data[13]&0x08 != 0
+ tcp.ACK = data[13]&0x10 != 0
+ tcp.URG = data[13]&0x20 != 0
+ tcp.ECE = data[13]&0x40 != 0
+ tcp.CWR = data[13]&0x80 != 0
+ tcp.NS = data[12]&0x01 != 0
+ tcp.Window = binary.BigEndian.Uint16(data[14:16])
+ tcp.Checksum = binary.BigEndian.Uint16(data[16:18])
+ tcp.Urgent = binary.BigEndian.Uint16(data[18:20])
+ if tcp.Options == nil {
+ // Pre-allocate to avoid allocating a slice.
+ tcp.Options = tcp.opts[:0]
+ } else {
+ tcp.Options = tcp.Options[:0]
+ }
+ tcp.Padding = tcp.Padding[:0]
+ if tcp.DataOffset < 5 {
+ return fmt.Errorf("Invalid TCP data offset %d < 5", tcp.DataOffset)
+ }
+ dataStart := int(tcp.DataOffset) * 4
+ if dataStart > len(data) {
+ df.SetTruncated()
+ tcp.Payload = nil
+ tcp.Contents = data
+ return errors.New("TCP data offset greater than packet length")
+ }
+ tcp.Contents = data[:dataStart]
+ tcp.Payload = data[dataStart:]
+ // From here on, data points just to the header options.
+ data = data[20:dataStart]
+OPTIONS:
+ for len(data) > 0 {
+ tcp.Options = append(tcp.Options, TCPOption{OptionType: TCPOptionKind(data[0])})
+ opt := &tcp.Options[len(tcp.Options)-1]
+ switch opt.OptionType {
+ case TCPOptionKindEndList: // End of options
+ opt.OptionLength = 1
+ tcp.Padding = data[1:]
+ break OPTIONS
+ case TCPOptionKindNop: // 1 byte padding
+ opt.OptionLength = 1
+ default:
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP option length. Length %d less than 2", len(data))
+ }
+ opt.OptionLength = data[1]
+ if opt.OptionLength < 2 {
+ return fmt.Errorf("Invalid TCP option length %d < 2", opt.OptionLength)
+ } else if int(opt.OptionLength) > len(data) {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP option length %d exceeds remaining %d bytes", opt.OptionLength, len(data))
+ }
+ opt.OptionData = data[2:opt.OptionLength]
+ }
+ data = data[opt.OptionLength:]
+ }
+ return nil
+}
+
+func (t *TCP) CanDecode() gopacket.LayerClass {
+ return LayerTypeTCP
+}
+
+func (t *TCP) NextLayerType() gopacket.LayerType {
+ lt := t.DstPort.LayerType()
+ if lt == gopacket.LayerTypePayload {
+ lt = t.SrcPort.LayerType()
+ }
+ return lt
+}
+
+func decodeTCP(data []byte, p gopacket.PacketBuilder) error {
+ tcp := &TCP{}
+ err := tcp.DecodeFromBytes(data, p)
+ p.AddLayer(tcp)
+ p.SetTransportLayer(tcp)
+ if err != nil {
+ return err
+ }
+ if p.DecodeOptions().DecodeStreamsAsDatagrams {
+ return p.NextDecoder(tcp.NextLayerType())
+ } else {
+ return p.NextDecoder(gopacket.LayerTypePayload)
+ }
+}
+
+func (t *TCP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointTCPPort, t.sPort, t.dPort)
+}
+
+// For testing only
+func (t *TCP) SetInternalPortsForTesting() {
+ t.sPort = make([]byte, 2)
+ t.dPort = make([]byte, 2)
+ binary.BigEndian.PutUint16(t.sPort, uint16(t.SrcPort))
+ binary.BigEndian.PutUint16(t.dPort, uint16(t.DstPort))
+}
+
+func (t *TCP) VerifyChecksum() (error, gopacket.ChecksumVerificationResult) {
+ bytes := append(t.Contents, t.Payload...)
+
+ existing := t.Checksum
+ verification, err := t.computeChecksum(bytes, IPProtocolTCP)
+ if err != nil {
+ return err, gopacket.ChecksumVerificationResult{}
+ }
+ correct := gopacket.FoldChecksum(verification - uint32(existing))
+ return nil, gopacket.ChecksumVerificationResult{
+ Valid: correct == existing,
+ Correct: uint32(correct),
+ Actual: uint32(existing),
+ }
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/tcpip.go b/vendor/github.com/gopacket/gopacket/layers/tcpip.go
new file mode 100644
index 000000000..90584b32a
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/tcpip.go
@@ -0,0 +1,85 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/gopacket/gopacket"
+)
+
+// Checksum computation for TCP/UDP.
+type tcpipchecksum struct {
+ pseudoheader tcpipPseudoHeader
+}
+
+type tcpipPseudoHeader interface {
+ pseudoheaderChecksum() (uint32, error)
+}
+
+func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) {
+ if err := ip.AddressTo4(); err != nil {
+ return 0, err
+ }
+ csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8
+ csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3])
+ csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8
+ csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3])
+ return csum, nil
+}
+
+func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) {
+ if err := ip.AddressTo16(); err != nil {
+ return 0, err
+ }
+ for i := 0; i < 16; i += 2 {
+ csum += uint32(ip.SrcIP[i]) << 8
+ csum += uint32(ip.SrcIP[i+1])
+ csum += uint32(ip.DstIP[i]) << 8
+ csum += uint32(ip.DstIP[i+1])
+ }
+ return csum, nil
+}
+
+// computeChecksum computes a TCP or UDP checksum. headerAndPayload is the
+// serialized TCP or UDP header plus its payload, with the checksum zero'd
+// out. headerProtocol is the IP protocol number of the upper-layer header.
+// The returned 32bit checksum may need to be folded.
+func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint32, error) {
+ if c.pseudoheader == nil {
+ return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use")
+ }
+ length := uint32(len(headerAndPayload))
+ csum, err := c.pseudoheader.pseudoheaderChecksum()
+ if err != nil {
+ return 0, err
+ }
+ csum += uint32(headerProtocol)
+ csum += length & 0xffff
+ csum += length >> 16
+
+ csum = gopacket.ComputeChecksum(headerAndPayload, csum)
+ return csum, nil
+}
+
+// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it.
+// This is needed for computing the checksum when serializing, since TCP/IP transport
+// layer checksums depend on fields in the IPv4 or IPv6 layer that contains it.
+// The passed in layer must be an *IPv4 or *IPv6.
+func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error {
+ switch v := l.(type) {
+ case *IPv4:
+ i.pseudoheader = v
+ case *IPv6:
+ i.pseudoheader = v
+ default:
+ return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType())
+ }
+ return nil
+}
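To make the dependency concrete, here is a hedged sketch of serializing a TCP segment with a computed checksum; without the SetNetworkLayerForChecksum call, computeChecksum returns the "cannot be computed without network layer" error above. Addresses and ports are placeholders.

package main

import (
	"log"
	"net"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	ip := &layers.IPv4{
		Version:  4,
		TTL:      64,
		Protocol: layers.IPProtocolTCP,
		SrcIP:    net.IP{10, 0, 0, 1},
		DstIP:    net.IP{10, 0, 0, 2},
	}
	tcp := &layers.TCP{SrcPort: 12345, DstPort: 80, SYN: true, Window: 65535}
	// Wire up the pseudo-header source: the TCP checksum covers the IP addresses.
	if err := tcp.SetNetworkLayerForChecksum(ip); err != nil {
		log.Fatal(err)
	}
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, ip, tcp, gopacket.Payload("hello")); err != nil {
		log.Fatal(err)
	}
	log.Printf("% x", buf.Bytes())
}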
diff --git a/vendor/github.com/gopacket/gopacket/layers/test_creator.py b/vendor/github.com/gopacket/gopacket/layers/test_creator.py
new file mode 100644
index 000000000..c92d2765a
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/test_creator.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# Copyright 2012 Google, Inc. All rights reserved.
+
+"""TestCreator creates test templates from pcap files."""
+
+import argparse
+import base64
+import glob
+import re
+import string
+import subprocess
+import sys
+
+
+class Packet(object):
+ """Helper class encapsulating packet from a pcap file."""
+
+ def __init__(self, packet_lines):
+ self.packet_lines = packet_lines
+ self.data = self._DecodeText(packet_lines)
+
+ @classmethod
+ def _DecodeText(cls, packet_lines):
+ packet_bytes = []
+ # First line is timestamp and stuff, skip it.
+ # Format: 0x0010: 0000 0020 3aff 3ffe 0000 0000 0000 0000 ....:.?.........
+
+ for line in packet_lines[1:]:
+ m = re.match(r'\s+0x[a-f\d]+:\s+((?:[\da-f]{2,4}\s)*)', line, re.IGNORECASE)
+ if m is None: continue
+ for hexpart in m.group(1).split():
+ packet_bytes.append(base64.b16decode(hexpart.upper()))
+ return ''.join(packet_bytes)
+
+ def Test(self, name, link_type):
+ """Yields a test using this packet, as a set of lines."""
+ yield '// testPacket%s is the packet:' % name
+ for line in self.packet_lines:
+ yield '// ' + line
+ yield 'var testPacket%s = []byte{' % name
+ data = list(self.data)
+ while data:
+ linebytes, data = data[:16], data[16:]
+ yield ''.join(['\t'] + ['0x%02x, ' % ord(c) for c in linebytes])
+ yield '}'
+ yield 'func TestPacket%s(t *testing.T) {' % name
+ yield '\tp := gopacket.NewPacket(testPacket%s, LinkType%s, gopacket.Default)' % (name, link_type)
+ yield '\tif p.ErrorLayer() != nil {'
+ yield '\t\tt.Error("Failed to decode packet:", p.ErrorLayer().Error())'
+ yield '\t}'
+ yield '\tcheckLayers(p, []gopacket.LayerType{LayerType%s, FILL_ME_IN_WITH_ACTUAL_LAYERS}, t)' % link_type
+ yield '}'
+ yield 'func BenchmarkDecodePacket%s(b *testing.B) {' % name
+ yield '\tfor i := 0; i < b.N; i++ {'
+ yield '\t\tgopacket.NewPacket(testPacket%s, LinkType%s, gopacket.NoCopy)' % (name, link_type)
+ yield '\t}'
+ yield '}'
+
+
+
+def GetTcpdumpOutput(filename):
+ """Runs tcpdump on the given file, returning output as string."""
+ return subprocess.check_output(
+ ['tcpdump', '-XX', '-s', '0', '-n', '-r', filename])
+
+
+def TcpdumpOutputToPackets(output):
+ """Reads a pcap file with TCPDump, yielding Packet objects."""
+ pdata = []
+ for line in output.splitlines():
+ if line[0] not in string.whitespace and pdata:
+ yield Packet(pdata)
+ pdata = []
+ pdata.append(line)
+ if pdata:
+ yield Packet(pdata)
+
+
+def main():
+ class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
+ def _format_usage(self, usage, actions, groups, prefix=None):
+ header =('TestCreator creates gopacket tests using a pcap file.\n\n'
+ 'Tests are written to standard out... they can then be \n'
+ 'copied into the file of your choice and modified as \n'
+ 'you see.\n\n')
+ return header + argparse.ArgumentDefaultsHelpFormatter._format_usage(
+ self, usage, actions, groups, prefix)
+
+ parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter)
+ parser.add_argument('--link_type', default='Ethernet', help='the link type (default: %(default)s)')
+ parser.add_argument('--name', default='Packet%d', help='the layer type, must have "%d" inside it')
+ parser.add_argument('files', metavar='file.pcap', type=str, nargs='+', help='the files to process')
+
+ args = parser.parse_args()
+
+ for arg in args.files:
+ for path in glob.glob(arg):
+ for i, packet in enumerate(TcpdumpOutputToPackets(GetTcpdumpOutput(path))):
+ print '\n'.join(packet.Test(
+ args.name % i, args.link_type))
+
+if __name__ == '__main__':
+ main()
diff --git a/vendor/github.com/gopacket/gopacket/layers/tls.go b/vendor/github.com/gopacket/gopacket/layers/tls.go
new file mode 100644
index 000000000..b5c041f6a
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/tls.go
@@ -0,0 +1,283 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/gopacket/gopacket"
+)
+
+// TLSType defines the type of data after the TLS Record
+type TLSType uint8
+
+// TLSType known values.
+const (
+ TLSChangeCipherSpec TLSType = 20
+ TLSAlert TLSType = 21
+ TLSHandshake TLSType = 22
+ TLSApplicationData TLSType = 23
+ TLSUnknown TLSType = 255
+)
+
+// String shows the record type nicely formatted
+func (tt TLSType) String() string {
+ switch tt {
+ default:
+ return "Unknown"
+ case TLSChangeCipherSpec:
+ return "Change Cipher Spec"
+ case TLSAlert:
+ return "Alert"
+ case TLSHandshake:
+ return "Handshake"
+ case TLSApplicationData:
+ return "Application Data"
+ }
+}
+
+// TLSVersion represents the TLS version in numeric format
+type TLSVersion uint16
+
+// String shows the TLS version nicely formatted
+func (tv TLSVersion) String() string {
+ switch tv {
+ default:
+ return "Unknown"
+ case 0x0200:
+ return "SSL 2.0"
+ case 0x0300:
+ return "SSL 3.0"
+ case 0x0301:
+ return "TLS 1.0"
+ case 0x0302:
+ return "TLS 1.1"
+ case 0x0303:
+ return "TLS 1.2"
+ case 0x0304:
+ return "TLS 1.3"
+ }
+}
+
+// TLS is specified in RFC 5246
+//
+// TLS Record Protocol
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Content Type |
+// +--+--+--+--+--+--+--+--+
+// | Version (major) |
+// +--+--+--+--+--+--+--+--+
+// | Version (minor) |
+// +--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+
+
+// TLS is actually a slice of TLSRecord structures
+type TLS struct {
+ BaseLayer
+
+ // TLS Records
+ ChangeCipherSpec []TLSChangeCipherSpecRecord
+ Handshake []TLSHandshakeRecord
+ AppData []TLSAppDataRecord
+ Alert []TLSAlertRecord
+}
+
+// TLSRecordHeader contains all the information that each TLS Record type should have
+type TLSRecordHeader struct {
+ ContentType TLSType
+ Version TLSVersion
+ Length uint16
+}
+
+// LayerType returns gopacket.LayerTypeTLS.
+func (t *TLS) LayerType() gopacket.LayerType { return LayerTypeTLS }
+
+// decodeTLS decodes the byte slice into a TLS type. It also
+// sets up the application layer in PacketBuilder.
+func decodeTLS(data []byte, p gopacket.PacketBuilder) error {
+ t := &TLS{}
+ err := t.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(t)
+ p.SetApplicationLayer(t)
+ return nil
+}
+
+// DecodeFromBytes decodes the slice into the TLS struct.
+func (t *TLS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ t.BaseLayer.Contents = data
+ t.BaseLayer.Payload = nil
+
+ t.ChangeCipherSpec = t.ChangeCipherSpec[:0]
+ t.Handshake = t.Handshake[:0]
+ t.AppData = t.AppData[:0]
+ t.Alert = t.Alert[:0]
+
+ return t.decodeTLSRecords(data, df)
+}
+
+func (t *TLS) decodeTLSRecords(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 5 {
+ df.SetTruncated()
+ return errors.New("TLS record too short")
+ }
+
+ // since there are no further layers, the baselayer's content is
+ // pointing to this layer
+ // TODO: Consider removing this
+ t.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+ var h TLSRecordHeader
+ h.ContentType = TLSType(data[0])
+ h.Version = TLSVersion(binary.BigEndian.Uint16(data[1:3]))
+ h.Length = binary.BigEndian.Uint16(data[3:5])
+
+ if h.ContentType.String() == "Unknown" {
+ return errors.New("Unknown TLS record type")
+ }
+
+ hl := 5 // header length
+ tl := hl + int(h.Length)
+ if len(data) < tl {
+ df.SetTruncated()
+ return errors.New("TLS packet length mismatch")
+ }
+
+ switch h.ContentType {
+ default:
+ return errors.New("Unknown TLS record type")
+ case TLSChangeCipherSpec:
+ var r TLSChangeCipherSpecRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.ChangeCipherSpec = append(t.ChangeCipherSpec, r)
+ case TLSAlert:
+ var r TLSAlertRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.Alert = append(t.Alert, r)
+ case TLSHandshake:
+ var r TLSHandshakeRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.Handshake = append(t.Handshake, r)
+ case TLSApplicationData:
+ var r TLSAppDataRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.AppData = append(t.AppData, r)
+ }
+
+ if len(data) == tl {
+ return nil
+ }
+ return t.decodeTLSRecords(data[tl:len(data)], df)
+}
+
+// CanDecode implements gopacket.DecodingLayer.
+func (t *TLS) CanDecode() gopacket.LayerClass {
+ return LayerTypeTLS
+}
+
+// NextLayerType implements gopacket.DecodingLayer.
+func (t *TLS) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// Payload returns nil, since TLS encrypted payload is inside TLSAppDataRecord
+func (t *TLS) Payload() []byte {
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (t *TLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ totalLength := 0
+ for _, record := range t.ChangeCipherSpec {
+ if opts.FixLengths {
+ record.Length = 1
+ }
+ totalLength += 5 + 1 // length of header + record
+ }
+ for range t.Handshake {
+ totalLength += 5
+ // TODO
+ }
+ for _, record := range t.AppData {
+ if opts.FixLengths {
+ record.Length = uint16(len(record.Payload))
+ }
+ totalLength += 5 + len(record.Payload)
+ }
+ for _, record := range t.Alert {
+ if len(record.EncryptedMsg) == 0 {
+ if opts.FixLengths {
+ record.Length = 2
+ }
+ totalLength += 5 + 2
+ } else {
+ if opts.FixLengths {
+ record.Length = uint16(len(record.EncryptedMsg))
+ }
+ totalLength += 5 + len(record.EncryptedMsg)
+ }
+ }
+ data, err := b.PrependBytes(totalLength)
+ if err != nil {
+ return err
+ }
+ off := 0
+ for _, record := range t.ChangeCipherSpec {
+ off = encodeHeader(record.TLSRecordHeader, data, off)
+ data[off] = byte(record.Message)
+ off++
+ }
+ for _, record := range t.Handshake {
+ off = encodeHeader(record.TLSRecordHeader, data, off)
+ // TODO
+ }
+ for _, record := range t.AppData {
+ off = encodeHeader(record.TLSRecordHeader, data, off)
+ copy(data[off:], record.Payload)
+ off += len(record.Payload)
+ }
+ for _, record := range t.Alert {
+ off = encodeHeader(record.TLSRecordHeader, data, off)
+ if len(record.EncryptedMsg) == 0 {
+ data[off] = byte(record.Level)
+ data[off+1] = byte(record.Description)
+ off += 2
+ } else {
+ copy(data[off:], record.EncryptedMsg)
+ off += len(record.EncryptedMsg)
+ }
+ }
+ return nil
+}
+
+func encodeHeader(header TLSRecordHeader, data []byte, offset int) int {
+ data[offset] = byte(header.ContentType)
+ binary.BigEndian.PutUint16(data[offset+1:], uint16(header.Version))
+ binary.BigEndian.PutUint16(data[offset+3:], header.Length)
+
+ return offset + 5
+}
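Closing the loop on the record structures above, a hedged sketch of decoding a raw TLS record directly; the bytes form a single ChangeCipherSpec record (type 20, TLS 1.2, length 1) and are hand-built for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	raw := []byte{0x14, 0x03, 0x03, 0x00, 0x01, 0x01}
	var tls layers.TLS
	if err := tls.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		log.Fatal(err)
	}
	for _, rec := range tls.ChangeCipherSpec {
		// Version, ContentType and Message all have String() helpers defined in this package.
		fmt.Println(rec.Version, rec.ContentType, rec.Message)
	}
}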
diff --git a/vendor/github.com/gopacket/gopacket/layers/tls_alert.go b/vendor/github.com/gopacket/gopacket/layers/tls_alert.go
new file mode 100644
index 000000000..15cc2e16d
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/tls_alert.go
@@ -0,0 +1,165 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/gopacket/gopacket"
+)
+
+// TLSAlertLevel defines the alert level data type
+type TLSAlertLevel uint8
+
+// TLSAlertDescr defines the alert description data type
+type TLSAlertDescr uint8
+
+const (
+ TLSAlertWarning TLSAlertLevel = 1
+ TLSAlertFatal TLSAlertLevel = 2
+ TLSAlertUnknownLevel TLSAlertLevel = 255
+
+ TLSAlertCloseNotify TLSAlertDescr = 0
+ TLSAlertUnexpectedMessage TLSAlertDescr = 10
+ TLSAlertBadRecordMac TLSAlertDescr = 20
+ TLSAlertDecryptionFailedRESERVED TLSAlertDescr = 21
+ TLSAlertRecordOverflow TLSAlertDescr = 22
+ TLSAlertDecompressionFailure TLSAlertDescr = 30
+ TLSAlertHandshakeFailure TLSAlertDescr = 40
+ TLSAlertNoCertificateRESERVED TLSAlertDescr = 41
+ TLSAlertBadCertificate TLSAlertDescr = 42
+ TLSAlertUnsupportedCertificate TLSAlertDescr = 43
+ TLSAlertCertificateRevoked TLSAlertDescr = 44
+ TLSAlertCertificateExpired TLSAlertDescr = 45
+ TLSAlertCertificateUnknown TLSAlertDescr = 46
+ TLSAlertIllegalParameter TLSAlertDescr = 47
+ TLSAlertUnknownCa TLSAlertDescr = 48
+ TLSAlertAccessDenied TLSAlertDescr = 49
+ TLSAlertDecodeError TLSAlertDescr = 50
+ TLSAlertDecryptError TLSAlertDescr = 51
+ TLSAlertExportRestrictionRESERVED TLSAlertDescr = 60
+ TLSAlertProtocolVersion TLSAlertDescr = 70
+ TLSAlertInsufficientSecurity TLSAlertDescr = 71
+ TLSAlertInternalError TLSAlertDescr = 80
+ TLSAlertUserCanceled TLSAlertDescr = 90
+ TLSAlertNoRenegotiation TLSAlertDescr = 100
+ TLSAlertUnsupportedExtension TLSAlertDescr = 110
+ TLSAlertUnknownDescription TLSAlertDescr = 255
+)
+
+// TLS Alert
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Level |
+// +--+--+--+--+--+--+--+--+
+// | Description |
+// +--+--+--+--+--+--+--+--+
+
+// TLSAlertRecord contains all the information that each Alert Record type should have
+type TLSAlertRecord struct {
+ TLSRecordHeader
+
+ Level TLSAlertLevel
+ Description TLSAlertDescr
+
+ EncryptedMsg []byte
+}
+
+// decodeFromBytes decodes the slice into the TLSAlertRecord struct.
+func (t *TLSAlertRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) < 2 {
+ df.SetTruncated()
+ return errors.New("TLS Alert packet too short")
+ }
+
+ if t.Length == 2 {
+ t.Level = TLSAlertLevel(data[0])
+ t.Description = TLSAlertDescr(data[1])
+ } else {
+ t.Level = TLSAlertUnknownLevel
+ t.Description = TLSAlertUnknownDescription
+ t.EncryptedMsg = data
+ }
+
+ return nil
+}
+
+// String shows the TLS alert level nicely formatted
+func (al TLSAlertLevel) String() string {
+ switch al {
+ default:
+ return fmt.Sprintf("Unknown(%d)", al)
+ case TLSAlertWarning:
+ return "Warning"
+ case TLSAlertFatal:
+ return "Fatal"
+ }
+}
+
+// String shows the TLS alert description nicely formatted
+func (ad TLSAlertDescr) String() string {
+ switch ad {
+ default:
+ return "Unknown"
+ case TLSAlertCloseNotify:
+ return "close_notify"
+ case TLSAlertUnexpectedMessage:
+ return "unexpected_message"
+ case TLSAlertBadRecordMac:
+ return "bad_record_mac"
+ case TLSAlertDecryptionFailedRESERVED:
+ return "decryption_failed_RESERVED"
+ case TLSAlertRecordOverflow:
+ return "record_overflow"
+ case TLSAlertDecompressionFailure:
+ return "decompression_failure"
+ case TLSAlertHandshakeFailure:
+ return "handshake_failure"
+ case TLSAlertNoCertificateRESERVED:
+ return "no_certificate_RESERVED"
+ case TLSAlertBadCertificate:
+ return "bad_certificate"
+ case TLSAlertUnsupportedCertificate:
+ return "unsupported_certificate"
+ case TLSAlertCertificateRevoked:
+ return "certificate_revoked"
+ case TLSAlertCertificateExpired:
+ return "certificate_expired"
+ case TLSAlertCertificateUnknown:
+ return "certificate_unknown"
+ case TLSAlertIllegalParameter:
+ return "illegal_parameter"
+ case TLSAlertUnknownCa:
+ return "unknown_ca"
+ case TLSAlertAccessDenied:
+ return "access_denied"
+ case TLSAlertDecodeError:
+ return "decode_error"
+ case TLSAlertDecryptError:
+ return "decrypt_error"
+ case TLSAlertExportRestrictionRESERVED:
+ return "export_restriction_RESERVED"
+ case TLSAlertProtocolVersion:
+ return "protocol_version"
+ case TLSAlertInsufficientSecurity:
+ return "insufficient_security"
+ case TLSAlertInternalError:
+ return "internal_error"
+ case TLSAlertUserCanceled:
+ return "user_canceled"
+ case TLSAlertNoRenegotiation:
+ return "no_renegotiation"
+ case TLSAlertUnsupportedExtension:
+ return "unsupported_extension"
+ }
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/tls_appdata.go b/vendor/github.com/gopacket/gopacket/layers/tls_appdata.go
new file mode 100644
index 000000000..cc6b095b5
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/tls_appdata.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+
+ "github.com/gopacket/gopacket"
+)
+
+// TLSAppDataRecord contains all the information that each AppData Record type should have
+type TLSAppDataRecord struct {
+ TLSRecordHeader
+ Payload []byte
+}
+
+// decodeFromBytes decodes the slice into the TLSAppDataRecord struct.
+func (t *TLSAppDataRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) != int(t.Length) {
+ return errors.New("TLS Application Data length mismatch")
+ }
+
+ t.Payload = data
+ return nil
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/tls_cipherspec.go b/vendor/github.com/gopacket/gopacket/layers/tls_cipherspec.go
new file mode 100644
index 000000000..5f9312ab4
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/tls_cipherspec.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+
+ "github.com/gopacket/gopacket"
+)
+
+// TLSchangeCipherSpec defines the message value inside ChangeCipherSpec Record
+type TLSchangeCipherSpec uint8
+
+const (
+ TLSChangecipherspecMessage TLSchangeCipherSpec = 1
+ TLSChangecipherspecUnknown TLSchangeCipherSpec = 255
+)
+
+// TLS Change Cipher Spec
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Message |
+// +--+--+--+--+--+--+--+--+
+
+// TLSChangeCipherSpecRecord defines the type of data inside ChangeCipherSpec Record
+type TLSChangeCipherSpecRecord struct {
+ TLSRecordHeader
+
+ Message TLSchangeCipherSpec
+}
+
+// decodeFromBytes decodes the slice into the TLSChangeCipherSpecRecord struct.
+func (t *TLSChangeCipherSpecRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) != 1 {
+ df.SetTruncated()
+ return errors.New("TLS Change Cipher Spec record incorrect length")
+ }
+
+ t.Message = TLSchangeCipherSpec(data[0])
+ if t.Message != TLSChangecipherspecMessage {
+ t.Message = TLSChangecipherspecUnknown
+ }
+
+ return nil
+}
+
+// String shows the message value nicely formatted
+func (ccs TLSchangeCipherSpec) String() string {
+ switch ccs {
+ default:
+ return "Unknown"
+ case TLSChangecipherspecMessage:
+ return "Change Cipher Spec Message"
+ }
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/tls_handshake.go b/vendor/github.com/gopacket/gopacket/layers/tls_handshake.go
new file mode 100644
index 000000000..dba0c4c75
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/tls_handshake.go
@@ -0,0 +1,148 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/gopacket/gopacket"
+)
+
+/*refer to https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.4*/
+const (
+ TLSHandshakeHelloRequest = 0
+ TLSHandshakeClientHello = 1
+ TLSHandshakeServerHello = 2
+ TLSHandsharkHelloVerirfyRequest = 3
+ TLSHandshakeCertificate = 11
+ TLSHandshakeServerKeyExchange = 12
+ TLSHandshakeCertificateRequest = 13
+ TLSHandshakeServerHelloDone = 14
+ TLSHandshakeCertificateVerify = 15
+ TLSHandshakeClientKeyExchange = 16
+ TLSHandshakeFinished = 20
+)
+
+var handShakeTypeMap = map[uint8]string{
+ TLSHandshakeHelloRequest: "Hello Request",
+ TLSHandshakeClientHello: "Client Hello",
+ TLSHandshakeServerHello: "Server Hello",
+ TLSHandsharkHelloVerirfyRequest: "Hello Verify Request",
+ TLSHandshakeCertificate: "Certificate",
+ TLSHandshakeServerKeyExchange: "Server Key Exchange",
+ TLSHandshakeCertificateRequest: "Certificate Request",
+ TLSHandshakeServerHelloDone: "Server Hello Done",
+ TLSHandshakeCertificateVerify: "Certificate Verify",
+ TLSHandshakeClientKeyExchange: "Client Key Exchange",
+ TLSHandshakeFinished: "Finished",
+}
+
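+// TLSHandshakeRecordClientHello holds the decoded fields of a TLS ClientHello handshake message.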
+type TLSHandshakeRecordClientHello struct {
+ HandshakeType uint8
+ Length uint32
+ ProtocolVersion TLSVersion
+ Random []uint8
+ SessionIDLength uint8
+ SessionID []uint8
+ CipherSuitsLength uint16
+ CipherSuits []uint8
+ CompressionMethodsLength uint8
+ CompressionMethods []uint8
+ ExtensionsLength uint16
+ Extensions []uint8
+}
+
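+// TLSHandshakeRecordClientKeyChange is a placeholder for the ClientKeyExchange handshake message; decoding is not implemented yet.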
+type TLSHandshakeRecordClientKeyChange struct {
+}
+
+// TLSHandshakeRecord defines the structure of a Handshake Record
+type TLSHandshakeRecord struct {
+ TLSRecordHeader
+ ClientHello TLSHandshakeRecordClientHello
+ ClientKeyChange TLSHandshakeRecordClientKeyChange
+}
+
+func (t *TLSHandshakeRecordClientHello) decodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ t.HandshakeType = data[0]
+ d := make([]byte, 4)
+ for k, v := range data[1:4] {
+ d[k+1] = v
+ }
+ t.Length = binary.BigEndian.Uint32(d)
+ t.ProtocolVersion = TLSVersion(binary.BigEndian.Uint16(data[4:6]))
+ t.Random = data[6:38]
+ t.SessionIDLength = data[38]
+ t.SessionID = data[39 : 39+t.SessionIDLength]
+ t.CipherSuitsLength = binary.BigEndian.Uint16(data[39+t.SessionIDLength : 39+t.SessionIDLength+2])
+ t.CipherSuits = data[39+t.SessionIDLength+2 : (39 + uint16(t.SessionIDLength) + 2 + t.CipherSuitsLength)]
+ t.CompressionMethodsLength = data[(39 + uint16(t.SessionIDLength) + 2 + t.CipherSuitsLength)]
+ t.CompressionMethods = data[(39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1 : (39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1+uint16(t.CompressionMethodsLength)]
+ t.ExtensionsLength = binary.BigEndian.Uint16(data[(39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1+uint16(t.CompressionMethodsLength) : (39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1+uint16(t.CompressionMethodsLength)+2])
+ t.Extensions = data[((39 + uint16(t.SessionIDLength) + 2 + t.CipherSuitsLength) + 1 + uint16(t.CompressionMethodsLength) + 2) : ((39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1+uint16(t.CompressionMethodsLength)+2)+t.ExtensionsLength]
+ return nil
+}
+func (t *TLSHandshakeRecordClientKeyChange) decodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ /*TBD*/
+ return nil
+}
+
+/**
+ * Checks whether a handshake message seems encrypted and cannot be dissected.
+ */
+func (t TLSHandshakeRecord) isEncryptedHandshakeMessage(h TLSRecordHeader, data []byte) bool {
+ if h.Length < 16 {
+ /*
+ * Encrypted data has additional overhead. For TLS 1.0/1.1 with stream
+ * and block ciphers, there is at least a MAC which is at minimum 16
+ * bytes for MD5. In TLS 1.2, AEAD adds an explicit nonce and auth tag.
+ * For AES-GCM/CCM the auth tag is 16 bytes. AES_CCM_8 (RFC 6655) uses 8
+ * byte auth tags, but the explicit nonce is also 8 (sums up to 16).
+ *
+ * So anything smaller than 16 bytes is assumed to be plaintext.
+ */
+ return false
+ }
+ maybeType := data[0]
+ d := make([]byte, 4)
+ for k, v := range data[1:4] {
+ d[k+1] = v
+ }
+ if uint32(h.Length)-binary.BigEndian.Uint32(d) != 4 {
+ return true
+ }
+ if _, ok := handShakeTypeMap[maybeType]; !ok {
+ return true
+ }
+ return false
+}
+
+// decodeFromBytes decodes the slice into the TLS Handshake struct.
+func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if t.isEncryptedHandshakeMessage(h, data) {
+ fmt.Printf("encrypted message\n")
+ return nil
+ }
+ handshakeType := data[0]
+ switch handshakeType {
+ case TLSHandshakeClientHello:
+ t.ClientHello.decodeFromBytes(data, df)
+ case TLSHandshakeClientKeyExchange:
+ t.ClientKeyChange.decodeFromBytes(data, df)
+ default:
+ return errors.New("Unknown TLS handshake type")
+ // TODO
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/udp.go b/vendor/github.com/gopacket/gopacket/layers/udp.go
new file mode 100644
index 000000000..ed489130e
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/udp.go
@@ -0,0 +1,158 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/gopacket/gopacket"
+)
+
+// UDP is the layer for UDP headers.
+type UDP struct {
+ BaseLayer
+ SrcPort, DstPort UDPPort
+ Length uint16
+ Checksum uint16
+ sPort, dPort []byte
+ tcpipchecksum
+}
+
+// LayerType returns gopacket.LayerTypeUDP
+func (u *UDP) LayerType() gopacket.LayerType { return LayerTypeUDP }
+
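+// DecodeFromBytes decodes the given bytes into this UDP layer.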
+func (udp *UDP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid UDP header. Length %d less than 8", len(data))
+ }
+ udp.SrcPort = UDPPort(binary.BigEndian.Uint16(data[0:2]))
+ udp.sPort = data[0:2]
+ udp.DstPort = UDPPort(binary.BigEndian.Uint16(data[2:4]))
+ udp.dPort = data[2:4]
+ udp.Length = binary.BigEndian.Uint16(data[4:6])
+ udp.Checksum = binary.BigEndian.Uint16(data[6:8])
+ udp.BaseLayer = BaseLayer{Contents: data[:8]}
+ switch {
+ case udp.Length >= 8:
+ hlen := int(udp.Length)
+ if hlen > len(data) {
+ df.SetTruncated()
+ hlen = len(data)
+ }
+ udp.Payload = data[8:hlen]
+ case udp.Length == 0: // Jumbogram, use entire rest of data
+ udp.Payload = data[8:]
+ default:
+ return fmt.Errorf("UDP packet too small: %d bytes", udp.Length)
+ }
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (u *UDP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var jumbo bool
+
+ payload := b.Bytes()
+ if _, ok := u.pseudoheader.(*IPv6); ok {
+ if len(payload)+8 > 65535 {
+ jumbo = true
+ }
+ }
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(u.SrcPort))
+ binary.BigEndian.PutUint16(bytes[2:], uint16(u.DstPort))
+ if opts.FixLengths {
+ if jumbo {
+ u.Length = 0
+ } else {
+ u.Length = uint16(len(payload)) + 8
+ }
+ }
+ binary.BigEndian.PutUint16(bytes[4:], u.Length)
+ if opts.ComputeChecksums {
+ // zero out checksum bytes
+ bytes[6] = 0
+ bytes[7] = 0
+
+ csum, err := u.computeChecksum(b.Bytes(), IPProtocolUDP)
+ if err != nil {
+ return err
+ }
+ csumFolded := gopacket.FoldChecksum(csum)
+
+ // RFC768: If the computed checksum is zero, it is transmitted as all ones (the
+ // equivalent in one's complement arithmetic). An all zero transmitted
+ // checksum value means that the transmitter generated no checksum.
+ if csumFolded == 0 {
+ csumFolded = 0xffff
+ }
+ u.Checksum = csumFolded
+ }
+ binary.BigEndian.PutUint16(bytes[6:], u.Checksum)
+ return nil
+}
+
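+// CanDecode returns the layer type this DecodingLayer can decode.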
+func (u *UDP) CanDecode() gopacket.LayerClass {
+ return LayerTypeUDP
+}
+
+// NextLayerType uses the destination port to select the next decoder.
+// It first tries to decode via the destination port, then falls back to
+// the source port.
+func (u *UDP) NextLayerType() gopacket.LayerType {
+ if lt := u.DstPort.LayerType(); lt != gopacket.LayerTypePayload {
+ return lt
+ }
+ return u.SrcPort.LayerType()
+}
+
+func decodeUDP(data []byte, p gopacket.PacketBuilder) error {
+ udp := &UDP{}
+ err := udp.DecodeFromBytes(data, p)
+ p.AddLayer(udp)
+ p.SetTransportLayer(udp)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(udp.NextLayerType())
+}
+
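+// TransportFlow returns a flow of type EndpointUDPPort between the source and destination ports.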
+func (u *UDP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointUDPPort, u.sPort, u.dPort)
+}
+
+// SetInternalPortsForTesting sets the internal sPort and dPort slices from the exported port fields. For testing only.
+func (u *UDP) SetInternalPortsForTesting() {
+ u.sPort = make([]byte, 2)
+ u.dPort = make([]byte, 2)
+ binary.BigEndian.PutUint16(u.sPort, uint16(u.SrcPort))
+ binary.BigEndian.PutUint16(u.dPort, uint16(u.DstPort))
+}
+
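+// VerifyChecksum recomputes the UDP checksum over the header and payload and reports whether it matches the checksum on the wire.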
+func (u *UDP) VerifyChecksum() (error, gopacket.ChecksumVerificationResult) {
+ bytes := append(u.Contents, u.Payload...)
+
+ existing := u.Checksum
+ verification, err := u.computeChecksum(bytes, IPProtocolUDP)
+ if err != nil {
+ return err, gopacket.ChecksumVerificationResult{}
+ }
+ correct := gopacket.FoldChecksum(verification - uint32(existing))
+ return nil, gopacket.ChecksumVerificationResult{
+ Valid: existing == 0 || correct == existing,
+ Correct: uint32(correct),
+ Actual: uint32(existing),
+ }
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/udplite.go b/vendor/github.com/gopacket/gopacket/layers/udplite.go
new file mode 100644
index 000000000..6566e17b8
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/udplite.go
@@ -0,0 +1,45 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+
+ "github.com/gopacket/gopacket"
+)
+
+// UDPLite is the layer for UDP-Lite headers (rfc 3828).
+type UDPLite struct {
+ BaseLayer
+ SrcPort, DstPort UDPLitePort
+ ChecksumCoverage uint16
+ Checksum uint16
+ sPort, dPort []byte
+}
+
+// LayerType returns gopacket.LayerTypeUDPLite
+func (u *UDPLite) LayerType() gopacket.LayerType { return LayerTypeUDPLite }
+
+func decodeUDPLite(data []byte, p gopacket.PacketBuilder) error {
+ udp := &UDPLite{
+ SrcPort: UDPLitePort(binary.BigEndian.Uint16(data[0:2])),
+ sPort: data[0:2],
+ DstPort: UDPLitePort(binary.BigEndian.Uint16(data[2:4])),
+ dPort: data[2:4],
+ ChecksumCoverage: binary.BigEndian.Uint16(data[4:6]),
+ Checksum: binary.BigEndian.Uint16(data[6:8]),
+ BaseLayer: BaseLayer{data[:8], data[8:]},
+ }
+ p.AddLayer(udp)
+ p.SetTransportLayer(udp)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
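+// TransportFlow returns a flow of type EndpointUDPLitePort between the source and destination ports.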
+func (u *UDPLite) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointUDPLitePort, u.sPort, u.dPort)
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/usb.go b/vendor/github.com/gopacket/gopacket/layers/usb.go
new file mode 100644
index 000000000..b04791d5b
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/usb.go
@@ -0,0 +1,293 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/gopacket/gopacket"
+)
+
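+// USBEventType is the type of a captured USB event: submit, complete, or error.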
+type USBEventType uint8
+
+const (
+ USBEventTypeSubmit USBEventType = 'S'
+ USBEventTypeComplete USBEventType = 'C'
+ USBEventTypeError USBEventType = 'E'
+)
+
+func (a USBEventType) String() string {
+ switch a {
+ case USBEventTypeSubmit:
+ return "SUBMIT"
+ case USBEventTypeComplete:
+ return "COMPLETE"
+ case USBEventTypeError:
+ return "ERROR"
+ default:
+ return "Unknown event type"
+ }
+}
+
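+// USBRequestBlockSetupRequest is the bRequest field of a USB setup packet.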
+type USBRequestBlockSetupRequest uint8
+
+const (
+ USBRequestBlockSetupRequestGetStatus USBRequestBlockSetupRequest = 0x00
+ USBRequestBlockSetupRequestClearFeature USBRequestBlockSetupRequest = 0x01
+ USBRequestBlockSetupRequestSetFeature USBRequestBlockSetupRequest = 0x03
+ USBRequestBlockSetupRequestSetAddress USBRequestBlockSetupRequest = 0x05
+ USBRequestBlockSetupRequestGetDescriptor USBRequestBlockSetupRequest = 0x06
+ USBRequestBlockSetupRequestSetDescriptor USBRequestBlockSetupRequest = 0x07
+ USBRequestBlockSetupRequestGetConfiguration USBRequestBlockSetupRequest = 0x08
+ USBRequestBlockSetupRequestSetConfiguration USBRequestBlockSetupRequest = 0x09
+ USBRequestBlockSetupRequestSetIdle USBRequestBlockSetupRequest = 0x0a
+)
+
+func (a USBRequestBlockSetupRequest) String() string {
+ switch a {
+ case USBRequestBlockSetupRequestGetStatus:
+ return "GET_STATUS"
+ case USBRequestBlockSetupRequestClearFeature:
+ return "CLEAR_FEATURE"
+ case USBRequestBlockSetupRequestSetFeature:
+ return "SET_FEATURE"
+ case USBRequestBlockSetupRequestSetAddress:
+ return "SET_ADDRESS"
+ case USBRequestBlockSetupRequestGetDescriptor:
+ return "GET_DESCRIPTOR"
+ case USBRequestBlockSetupRequestSetDescriptor:
+ return "SET_DESCRIPTOR"
+ case USBRequestBlockSetupRequestGetConfiguration:
+ return "GET_CONFIGURATION"
+ case USBRequestBlockSetupRequestSetConfiguration:
+ return "SET_CONFIGURATION"
+ case USBRequestBlockSetupRequestSetIdle:
+ return "SET_IDLE"
+ default:
+ return "UNKNOWN"
+ }
+}
+
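+// USBTransportType describes the USB transfer type (isochronous, interrupt, control, or bulk); the high bit flags an inbound transfer.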
+type USBTransportType uint8
+
+const (
+ USBTransportTypeTransferIn USBTransportType = 0x80 // Indicates send or receive
+ USBTransportTypeIsochronous USBTransportType = 0x00 // Isochronous transfers occur continuously and periodically. They typically contain time sensitive information, such as an audio or video stream.
+ USBTransportTypeInterrupt USBTransportType = 0x01 // Interrupt transfers are typically non-periodic, small device "initiated" communication requiring bounded latency, such as pointing devices or keyboards.
+ USBTransportTypeControl USBTransportType = 0x02 // Control transfers are typically used for command and status operations.
+ USBTransportTypeBulk USBTransportType = 0x03 // Bulk transfers can be used for large bursty data, using all remaining available bandwidth, no guarantees on bandwidth or latency, such as file transfers.
+)
+
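+// USBDirectionType indicates whether a transfer is inbound (device to host) or outbound (host to device).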
+type USBDirectionType uint8
+
+const (
+ USBDirectionTypeUnknown USBDirectionType = iota
+ USBDirectionTypeIn
+ USBDirectionTypeOut
+)
+
+func (a USBDirectionType) String() string {
+ switch a {
+ case USBDirectionTypeIn:
+ return "In"
+ case USBDirectionTypeOut:
+ return "Out"
+ default:
+ return "Unknown direction type"
+ }
+}
+
+// USB is the layer for USB packet headers. The reference at http://www.beyondlogic.org/usbnutshell/usb1.shtml contains more information about the protocol.
+type USB struct {
+ BaseLayer
+ ID uint64
+ EventType USBEventType
+ TransferType USBTransportType
+ Direction USBDirectionType
+ EndpointNumber uint8
+ DeviceAddress uint8
+ BusID uint16
+ TimestampSec int64
+ TimestampUsec int32
+ Setup bool
+ Data bool
+ Status int32
+ UrbLength uint32
+ UrbDataLength uint32
+
+ UrbInterval uint32
+ UrbStartFrame uint32
+ UrbCopyOfTransferFlags uint32
+ IsoNumDesc uint32
+}
+
+func (u *USB) LayerType() gopacket.LayerType { return LayerTypeUSB }
+
+func (m *USB) NextLayerType() gopacket.LayerType {
+ if m.Setup {
+ return LayerTypeUSBRequestBlockSetup
+ } else if m.Data {
+ }
+
+ return m.TransferType.LayerType()
+}
+
+func decodeUSB(data []byte, p gopacket.PacketBuilder) error {
+ d := &USB{}
+
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *USB) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 40 {
+ df.SetTruncated()
+ return errors.New("USB < 40 bytes")
+ }
+ m.ID = binary.LittleEndian.Uint64(data[0:8])
+ m.EventType = USBEventType(data[8])
+ m.TransferType = USBTransportType(data[9])
+
+ m.EndpointNumber = data[10] & 0x7f
+ if data[10]&uint8(USBTransportTypeTransferIn) > 0 {
+ m.Direction = USBDirectionTypeIn
+ } else {
+ m.Direction = USBDirectionTypeOut
+ }
+
+ m.DeviceAddress = data[11]
+ m.BusID = binary.LittleEndian.Uint16(data[12:14])
+
+ if uint(data[14]) == 0 {
+ m.Setup = true
+ }
+
+ if uint(data[15]) == 0 {
+ m.Data = true
+ }
+
+ m.TimestampSec = int64(binary.LittleEndian.Uint64(data[16:24]))
+ m.TimestampUsec = int32(binary.LittleEndian.Uint32(data[24:28]))
+ m.Status = int32(binary.LittleEndian.Uint32(data[28:32]))
+ m.UrbLength = binary.LittleEndian.Uint32(data[32:36])
+ m.UrbDataLength = binary.LittleEndian.Uint32(data[36:40])
+
+ m.Contents = data[:40]
+ m.Payload = data[40:]
+
+ if m.Setup {
+ m.Payload = data[40:]
+ } else if m.Data {
+ m.Payload = data[uint32(len(data))-m.UrbDataLength:]
+ }
+
+ // if 64 bit, dissect_linux_usb_pseudo_header_ext
+ if false {
+ m.UrbInterval = binary.LittleEndian.Uint32(data[40:44])
+ m.UrbStartFrame = binary.LittleEndian.Uint32(data[44:48])
+ m.UrbDataLength = binary.LittleEndian.Uint32(data[48:52])
+ m.IsoNumDesc = binary.LittleEndian.Uint32(data[52:56])
+ m.Contents = data[:56]
+ m.Payload = data[56:]
+ }
+
+ // crc5 or crc16
+ // eop (end of packet)
+
+ return nil
+}
+
+type USBRequestBlockSetup struct {
+ BaseLayer
+ RequestType uint8
+ Request USBRequestBlockSetupRequest
+ Value uint16
+ Index uint16
+ Length uint16
+}
+
+func (u *USBRequestBlockSetup) LayerType() gopacket.LayerType { return LayerTypeUSBRequestBlockSetup }
+
+func (m *USBRequestBlockSetup) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBRequestBlockSetup) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.RequestType = data[0]
+ m.Request = USBRequestBlockSetupRequest(data[1])
+ m.Value = binary.LittleEndian.Uint16(data[2:4])
+ m.Index = binary.LittleEndian.Uint16(data[4:6])
+ m.Length = binary.LittleEndian.Uint16(data[6:8])
+ m.Contents = data[:8]
+ m.Payload = data[8:]
+ return nil
+}
+
+func decodeUSBRequestBlockSetup(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBRequestBlockSetup{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBControl struct {
+ BaseLayer
+}
+
+func (u *USBControl) LayerType() gopacket.LayerType { return LayerTypeUSBControl }
+
+func (m *USBControl) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBControl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBControl(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBControl{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBInterrupt struct {
+ BaseLayer
+}
+
+func (u *USBInterrupt) LayerType() gopacket.LayerType { return LayerTypeUSBInterrupt }
+
+func (m *USBInterrupt) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBInterrupt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBInterrupt(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBInterrupt{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBBulk struct {
+ BaseLayer
+}
+
+func (u *USBBulk) LayerType() gopacket.LayerType { return LayerTypeUSBBulk }
+
+func (m *USBBulk) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBBulk) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBBulk(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBBulk{}
+ return decodingLayerDecoder(d, data, p)
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/vrrp.go b/vendor/github.com/gopacket/gopacket/layers/vrrp.go
new file mode 100644
index 000000000..b512e6f82
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/vrrp.go
@@ -0,0 +1,156 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+
+ "github.com/gopacket/gopacket"
+)
+
+/*
+ This layer provides decoding for Virtual Router Redundancy Protocol (VRRP) v2.
+ https://tools.ietf.org/html/rfc3768#section-5
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Version| Type | Virtual Rtr ID| Priority | Count IP Addrs|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Auth Type | Adver Int | Checksum |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IP Address (1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | . |
+ | . |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IP Address (n) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Authentication Data (1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Authentication Data (2) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+type VRRPv2Type uint8
+type VRRPv2AuthType uint8
+
+const (
+ VRRPv2Advertisement VRRPv2Type = 0x01 // router advertisement
+)
+
+// String conversions for VRRP message types
+func (v VRRPv2Type) String() string {
+ switch v {
+ case VRRPv2Advertisement:
+ return "VRRPv2 Advertisement"
+ default:
+ return ""
+ }
+}
+
+const (
+ VRRPv2AuthNoAuth VRRPv2AuthType = 0x00 // No Authentication
+ VRRPv2AuthReserved1 VRRPv2AuthType = 0x01 // Reserved field 1
+ VRRPv2AuthReserved2 VRRPv2AuthType = 0x02 // Reserved field 2
+)
+
+func (v VRRPv2AuthType) String() string {
+ switch v {
+ case VRRPv2AuthNoAuth:
+ return "No Authentication"
+ case VRRPv2AuthReserved1:
+ return "Reserved"
+ case VRRPv2AuthReserved2:
+ return "Reserved"
+ default:
+ return ""
+ }
+}
+
+// VRRPv2 represents a VRRP v2 message.
+type VRRPv2 struct {
+ BaseLayer
+ Version uint8 // The version field specifies the VRRP protocol version of this packet (v2)
+ Type VRRPv2Type // The type field specifies the type of this VRRP packet. The only type defined in v2 is ADVERTISEMENT
+ VirtualRtrID uint8 // identifies the virtual router this packet is reporting status for
+ Priority uint8 // specifies the sending VRRP router's priority for the virtual router (100 = default)
+ CountIPAddr uint8 // The number of IP addresses contained in this VRRP advertisement.
+ AuthType VRRPv2AuthType // identifies the authentication method being utilized
+ AdverInt uint8 // The Advertisement interval indicates the time interval (in seconds) between ADVERTISEMENTS. The default is 1 second
+ Checksum uint16 // used to detect data corruption in the VRRP message.
+ IPAddress []net.IP // one or more IP addresses associated with the virtual router. Specified in the CountIPAddr field.
+}
+
+// LayerType returns LayerTypeVRRP for VRRP v2 message.
+func (v *VRRPv2) LayerType() gopacket.LayerType { return LayerTypeVRRP }
+
+func (v *VRRPv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ v.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+ v.Version = data[0] >> 4 // high nibble == VRRP version. We're expecting v2
+
+ v.Type = VRRPv2Type(data[0] & 0x0F) // low nibble == VRRP type. Expecting 1 (advertisement)
+ if v.Type != 1 {
+ // rfc3768: A packet with unknown type MUST be discarded.
+ return errors.New("Unrecognized VRRPv2 type field.")
+ }
+
+ v.VirtualRtrID = data[1]
+ v.Priority = data[2]
+
+ v.CountIPAddr = data[3]
+ if v.CountIPAddr < 1 {
+ return errors.New("VRRPv2 number of IP addresses is not valid.")
+ }
+
+ v.AuthType = VRRPv2AuthType(data[4])
+ v.AdverInt = uint8(data[5])
+ v.Checksum = binary.BigEndian.Uint16(data[6:8])
+
+ // populate the IPAddress field. The number of addresses is specified in the v.CountIPAddr field
+ // offset references the starting byte containing the list of ip addresses
+ offset := 8
+ for i := uint8(0); i < v.CountIPAddr; i++ {
+ v.IPAddress = append(v.IPAddress, data[offset:offset+4])
+ offset += 4
+ }
+
+ // any trailing packets here may be authentication data and *should* be ignored in v2 as per RFC
+ //
+ // 5.3.10. Authentication Data
+ //
+ // The authentication string is currently only used to maintain
+ // backwards compatibility with RFC 2338. It SHOULD be set to zero on
+ // transmission and ignored on reception.
+ return nil
+}
+
+// CanDecode specifies the layer type that this layer is able to decode.
+func (v *VRRPv2) CanDecode() gopacket.LayerClass {
+ return LayerTypeVRRP
+}
+
+// NextLayerType specifies the next layer that should be decoded. VRRP does not carry any further payload, so this returns LayerTypeZero.
+func (v *VRRPv2) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// Payload returns nil; the VRRP packet does not include payload data.
+func (v *VRRPv2) Payload() []byte {
+ return nil
+}
+
+// decodeVRRP will parse VRRP v2
+func decodeVRRP(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 8 {
+ return errors.New("Not a valid VRRP packet. Packet length is too small.")
+ }
+ v := &VRRPv2{}
+ return decodingLayerDecoder(v, data, p)
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/vxlan.go b/vendor/github.com/gopacket/gopacket/layers/vxlan.go
new file mode 100644
index 000000000..f7cdc040a
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/vxlan.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/gopacket/gopacket"
+)
+
+// VXLAN is specified in RFC 7348 https://tools.ietf.org/html/rfc7348
+// G, D, A, Group Policy ID from https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// 0 8 16 24 32
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | 24 bit VXLAN Network Identifier | Reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// VXLAN is a VXLAN packet header
+type VXLAN struct {
+ BaseLayer
+ ValidIDFlag bool // 'I' bit per RFC 7348
+ VNI uint32 // 'VXLAN Network Identifier' 24 bits per RFC 7348
+ GBPExtension bool // 'G' bit per Group Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+ GBPDontLearn bool // 'D' bit per Group Policy
+ GBPApplied bool // 'A' bit per Group Policy
+ GBPGroupPolicyID uint16 // 'Group Policy ID' 16 bits per Group Policy
+}
+
+// LayerType returns LayerTypeVXLAN
+func (vx *VXLAN) LayerType() gopacket.LayerType { return LayerTypeVXLAN }
+
+// CanDecode returns the layer type this DecodingLayer can decode
+func (vx *VXLAN) CanDecode() gopacket.LayerClass {
+ return LayerTypeVXLAN
+}
+
+// NextLayerType returns the next layer we should see after vxlan
+func (vx *VXLAN) NextLayerType() gopacket.LayerType {
+ return LayerTypeEthernet
+}
+
+// DecodeFromBytes takes a byte buffer and decodes it into the VXLAN layer.
+func (vx *VXLAN) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ return errors.New("vxlan packet too small")
+ }
+ // VNI is a 24bit number, Uint32 requires 32 bits
+ var buf [4]byte
+ copy(buf[1:], data[4:7])
+
+ // RFC 7348 https://tools.ietf.org/html/rfc7348
+ vx.ValidIDFlag = data[0]&0x08 > 0 // 'I' bit per RFC7348
+ vx.VNI = binary.BigEndian.Uint32(buf[:]) // VXLAN Network Identifier per RFC7348
+
+ // Group Based Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+ vx.GBPExtension = data[0]&0x80 > 0 // 'G' bit per the group policy draft
+ vx.GBPDontLearn = data[1]&0x40 > 0 // 'D' bit - the egress VTEP MUST NOT learn the source address of the encapsulated frame.
+ vx.GBPApplied = data[1]&0x80 > 0 // 'A' bit - indicates that the group policy has already been applied to this packet.
+ vx.GBPGroupPolicyID = binary.BigEndian.Uint16(data[2:4]) // Policy ID as per the group policy draft
+
+ // Layer information
+ const vxlanLength = 8
+ vx.Contents = data[:vxlanLength]
+ vx.Payload = data[vxlanLength:]
+
+ return nil
+
+}
+
+func decodeVXLAN(data []byte, p gopacket.PacketBuilder) error {
+ vx := &VXLAN{}
+ err := vx.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+
+ p.AddLayer(vx)
+ return p.NextDecoder(LinkTypeEthernet)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (vx *VXLAN) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+
+ // PrependBytes does not guarantee that bytes are zeroed. Setting flags via OR requires that they start off at zero
+ bytes[0] = 0
+ bytes[1] = 0
+
+ if vx.ValidIDFlag {
+ bytes[0] |= 0x08
+ }
+ if vx.GBPExtension {
+ bytes[0] |= 0x80
+ }
+ if vx.GBPDontLearn {
+ bytes[1] |= 0x40
+ }
+ if vx.GBPApplied {
+ bytes[1] |= 0x80
+ }
+
+ binary.BigEndian.PutUint16(bytes[2:4], vx.GBPGroupPolicyID)
+ if vx.VNI >= 1<<24 {
+ return fmt.Errorf("Virtual Network Identifier = %x exceeds max for 24-bit uint", vx.VNI)
+ }
+ binary.BigEndian.PutUint32(bytes[4:8], vx.VNI<<8)
+ return nil
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers_decoder.go b/vendor/github.com/gopacket/gopacket/layers_decoder.go
new file mode 100644
index 000000000..8c1f108cf
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers_decoder.go
@@ -0,0 +1,101 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+
+package gopacket
+
+// Created by gen.go, don't edit manually
+// Generated at 2019-06-18 11:37:31.308731293 +0600 +06 m=+0.000842599
+
+// LayersDecoder returns DecodingLayerFunc for specified
+// DecodingLayerContainer, LayerType value to start decoding with and
+// some DecodeFeedback.
+func LayersDecoder(dl DecodingLayerContainer, first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ firstDec, ok := dl.Decoder(first)
+ if !ok {
+ return func([]byte, *[]LayerType) (LayerType, error) {
+ return first, nil
+ }
+ }
+ if dlc, ok := dl.(DecodingLayerSparse); ok {
+ return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+ *decoded = (*decoded)[:0] // Truncated decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+ }
+ }
+ if dlc, ok := dl.(DecodingLayerArray); ok {
+ return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+ *decoded = (*decoded)[:0] // Truncated decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+ }
+ }
+ if dlc, ok := dl.(DecodingLayerMap); ok {
+ return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+ *decoded = (*decoded)[:0] // Truncated decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+ }
+ }
+ dlc := dl
+ return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+ *decoded = (*decoded)[:0] // Truncated decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+ }
+}
diff --git a/vendor/github.com/gopacket/gopacket/layertype.go b/vendor/github.com/gopacket/gopacket/layertype.go
new file mode 100644
index 000000000..3abfee1e9
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layertype.go
@@ -0,0 +1,111 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// LayerType is a unique identifier for each type of layer. This enumeration
+// does not match with any externally available numbering scheme... it's solely
+// usable/useful within this library as a means for requesting layer types
+// (see Packet.Layer) and determining which types of layers have been decoded.
+//
+// New LayerTypes may be created by calling gopacket.RegisterLayerType.
+type LayerType int64
+
+// LayerTypeMetadata contains metadata associated with each LayerType.
+type LayerTypeMetadata struct {
+ // Name is the string returned by each layer type's String method.
+ Name string
+ // Decoder is the decoder to use when the layer type is passed in as a
+ // Decoder.
+ Decoder Decoder
+}
+
+type layerTypeMetadata struct {
+ inUse bool
+ LayerTypeMetadata
+}
+
+// DecodersByLayerName maps layer names to decoders for those layers.
+// This allows users to specify decoders by name to a program and have that
+// program pick the correct decoder accordingly.
+var DecodersByLayerName = map[string]Decoder{}
+
+const maxLayerType = 2000
+
+var ltMeta [maxLayerType]layerTypeMetadata
+var ltMetaMap = map[LayerType]layerTypeMetadata{}
+
+// RegisterLayerType creates a new layer type and registers it globally.
+// The number passed in must be unique, or a runtime panic will occur. Numbers
+// 0-999 are reserved for the gopacket library. Numbers 1000-1999 should be
+// used for common application-specific types, and are very fast. Any other
+// number (negative or >= 2000) may be used for uncommon application-specific
+// types, and are somewhat slower (they require a map lookup over an array
+// index).
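+//
+// A hedged example (LayerTypeMyProto and decodeMyProto are hypothetical names):
+//
+//	var LayerTypeMyProto = gopacket.RegisterLayerType(2001,
+//		gopacket.LayerTypeMetadata{Name: "MyProto", Decoder: gopacket.DecodeFunc(decodeMyProto)})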
+func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType {
+ if 0 <= num && num < maxLayerType {
+ if ltMeta[num].inUse {
+ panic("Layer type already exists")
+ }
+ } else {
+ if ltMetaMap[LayerType(num)].inUse {
+ panic("Layer type already exists")
+ }
+ }
+ return OverrideLayerType(num, meta)
+}
+
+// OverrideLayerType acts like RegisterLayerType, except that if the layer type
+// has already been registered, it overrides the metadata with the passed-in
+// metadata instead of panicking.
+func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType {
+ if 0 <= num && num < maxLayerType {
+ ltMeta[num] = layerTypeMetadata{
+ inUse: true,
+ LayerTypeMetadata: meta,
+ }
+ } else {
+ ltMetaMap[LayerType(num)] = layerTypeMetadata{
+ inUse: true,
+ LayerTypeMetadata: meta,
+ }
+ }
+ DecodersByLayerName[meta.Name] = meta.Decoder
+ return LayerType(num)
+}
+
+// Decode decodes the given data using the decoder registered with the layer
+// type.
+func (t LayerType) Decode(data []byte, c PacketBuilder) error {
+ var d Decoder
+ if 0 <= int(t) && int(t) < maxLayerType {
+ d = ltMeta[int(t)].Decoder
+ } else {
+ d = ltMetaMap[t].Decoder
+ }
+ if d != nil {
+ return d.Decode(data, c)
+ }
+ return fmt.Errorf("Layer type %v has no associated decoder", t)
+}
+
+// String returns the string associated with this layer type.
+func (t LayerType) String() (s string) {
+ if 0 <= int(t) && int(t) < maxLayerType {
+ s = ltMeta[int(t)].Name
+ } else {
+ s = ltMetaMap[t].Name
+ }
+ if s == "" {
+ s = strconv.Itoa(int(t))
+ }
+ return
+}
diff --git a/vendor/github.com/gopacket/gopacket/packet.go b/vendor/github.com/gopacket/gopacket/packet.go
new file mode 100644
index 000000000..93563437f
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/packet.go
@@ -0,0 +1,1029 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+)
+
+const maximumMTU = 1500
+
+var (
+ ErrNoLayersAdded = errors.New("NextDecoder called, but no layers added yet")
+ poolPackedPool = &sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, maximumMTU)
+ return &b
+ },
+ }
+)
+
+// CaptureInfo provides standardized information about a packet captured off
+// the wire or read from a file.
+type CaptureInfo struct {
+ // Timestamp is the time the packet was captured, if that is known.
+ Timestamp time.Time
+ // CaptureLength is the total number of bytes read off of the wire.
+ CaptureLength int
+ // Length is the size of the original packet. Should always be >=
+ // CaptureLength.
+ Length int
+	// InterfaceIndex is the index of the interface on which the packet was captured, if known.
+ InterfaceIndex int
+ // The packet source can place ancillary data of various types here.
+ // For example, the afpacket source can report the VLAN of captured
+ // packets this way.
+ AncillaryData []interface{}
+}
+
+// PacketMetadata contains metadata for a packet.
+type PacketMetadata struct {
+ CaptureInfo
+ // Truncated is true if packet decoding logic detects that there are fewer
+ // bytes in the packet than are detailed in various headers (for example, if
+ // the number of bytes in the IPv4 contents/payload is less than IPv4.Length).
+ // This is also set automatically for packets captured off the wire if
+ // CaptureInfo.CaptureLength < CaptureInfo.Length.
+ Truncated bool
+}
+
+// Packet is the primary object used by gopacket. Packets are created by a
+// Decoder's Decode call. A packet is made up of a set of Data, which
+// is broken into a number of Layers as it is decoded.
+type Packet interface {
+ //// Functions for outputting the packet as a human-readable string:
+ //// ------------------------------------------------------------------
+ // String returns a human-readable string representation of the packet.
+ // It uses LayerString on each layer to output the layer.
+ String() string
+ // Dump returns a verbose human-readable string representation of the packet,
+ // including a hex dump of all layers. It uses LayerDump on each layer to
+ // output the layer.
+ Dump() string
+
+ //// Functions for accessing arbitrary packet layers:
+ //// ------------------------------------------------------------------
+ // Layers returns all layers in this packet, computing them as necessary
+ Layers() []Layer
+ // Layer returns the first layer in this packet of the given type, or nil
+ Layer(LayerType) Layer
+ // LayerClass returns the first layer in this packet of the given class,
+ // or nil.
+ LayerClass(LayerClass) Layer
+
+ //// Functions for accessing specific types of packet layers. These functions
+ //// return the first layer of each type found within the packet.
+ //// ------------------------------------------------------------------
+ // LinkLayer returns the first link layer in the packet
+ LinkLayer() LinkLayer
+ // NetworkLayer returns the first network layer in the packet
+ NetworkLayer() NetworkLayer
+ // TransportLayer returns the first transport layer in the packet
+ TransportLayer() TransportLayer
+ // ApplicationLayer returns the first application layer in the packet
+ ApplicationLayer() ApplicationLayer
+ // ErrorLayer is particularly useful, since it returns nil if the packet
+ // was fully decoded successfully, and non-nil if an error was encountered
+ // in decoding and the packet was only partially decoded. Thus, its output
+ // can be used to determine if the entire packet was able to be decoded.
+ ErrorLayer() ErrorLayer
+
+ //// Functions for accessing data specific to the packet:
+ //// ------------------------------------------------------------------
+ // Data returns the set of bytes that make up this entire packet.
+ Data() []byte
+ // Metadata returns packet metadata associated with this packet.
+ Metadata() *PacketMetadata
+
+ //// Functions for verifying specific aspects of the packet:
+ //// ------------------------------------------------------------------
+	// VerifyChecksums verifies the checksums of all layers in this packet
+	// that have one, and returns all checksum mismatches found.
+ VerifyChecksums() (error, []ChecksumMismatch)
+}
+
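+// PooledPacket is a Packet whose backing buffer was taken from a pool; call Dispose to
+// return the buffer to the pool once the packet is no longer needed.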
+type PooledPacket interface {
+ Packet
+ Dispose()
+}
+
+// packet contains all the information we need to fulfill the Packet interface,
+// and its two "subclasses" (yes, no such thing in Go, bear with me),
+// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the
+// various functions needed to access this information.
+type packet struct {
+ // data contains the entire packet data for a packet
+ data []byte
+ // initialLayers is space for an initial set of layers already created inside
+ // the packet.
+ initialLayers [6]Layer
+ // layers contains each layer we've already decoded
+ layers []Layer
+ // last is the last layer added to the packet
+ last Layer
+ // metadata is the PacketMetadata for this packet
+ metadata PacketMetadata
+
+ decodeOptions DecodeOptions
+
+ // Pointers to the various important layers
+ link LinkLayer
+ network NetworkLayer
+ transport TransportLayer
+ application ApplicationLayer
+ failure ErrorLayer
+}
+
+func (p *packet) SetTruncated() {
+ p.metadata.Truncated = true
+}
+
+func (p *packet) SetLinkLayer(l LinkLayer) {
+ if p.link == nil {
+ p.link = l
+ }
+}
+
+func (p *packet) SetNetworkLayer(l NetworkLayer) {
+ if p.network == nil {
+ p.network = l
+ }
+}
+
+func (p *packet) SetTransportLayer(l TransportLayer) {
+ if p.transport == nil {
+ p.transport = l
+ }
+}
+
+func (p *packet) SetApplicationLayer(l ApplicationLayer) {
+ if p.application == nil {
+ p.application = l
+ }
+}
+
+func (p *packet) SetErrorLayer(l ErrorLayer) {
+ if p.failure == nil {
+ p.failure = l
+ }
+}
+
+func (p *packet) AddLayer(l Layer) {
+ p.layers = append(p.layers, l)
+ p.last = l
+}
+
+func (p *packet) DumpPacketData() {
+ fmt.Fprint(os.Stderr, p.packetDump())
+ os.Stderr.Sync()
+}
+
+func (p *packet) Metadata() *PacketMetadata {
+ return &p.metadata
+}
+
+func (p *packet) Data() []byte {
+ return p.data
+}
+
+func (p *packet) DecodeOptions() *DecodeOptions {
+ return &p.decodeOptions
+}
+
+func (p *packet) VerifyChecksums() (error, []ChecksumMismatch) {
+ mismatches := make([]ChecksumMismatch, 0)
+ for i, l := range p.layers {
+ if lwc, ok := l.(LayerWithChecksum); ok {
+ // Verify checksum for that layer
+ err, res := lwc.VerifyChecksum()
+ if err != nil {
+ return fmt.Errorf("couldn't verify checksum for layer %d (%s): %w",
+ i+1, l.LayerType(), err), nil
+ }
+
+ if !res.Valid {
+ mismatches = append(mismatches, ChecksumMismatch{
+ ChecksumVerificationResult: res,
+ Layer: l,
+ LayerIndex: i,
+ })
+ }
+ }
+ }
+
+ return nil, mismatches
+}
+
+func (p *packet) addFinalDecodeError(err error, stack []byte) {
+ fail := &DecodeFailure{err: err, stack: stack}
+ if p.last == nil {
+ fail.data = p.data
+ } else {
+ fail.data = p.last.LayerPayload()
+ }
+ p.AddLayer(fail)
+ p.SetErrorLayer(fail)
+}
+
+func (p *packet) recoverDecodeError() {
+ if !p.decodeOptions.SkipDecodeRecovery {
+ if r := recover(); r != nil {
+ p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack())
+ }
+ }
+}
+
+type pooledPacket struct {
+ Packet
+ origData *[]byte
+}
+
+func (p pooledPacket) Dispose() {
+ poolPackedPool.Put(p.origData)
+}
+
+// LayerString outputs an individual layer as a string. The layer is output
+// in a single line, with no trailing newline. This function is specifically
+// designed to do the right thing for most layers... it follows the following
+// rules:
+// - If the Layer has a String function, just output that.
+// - Otherwise, output all exported fields in the layer, recursing into
+// exported slices and structs.
+//
+// NOTE: This is NOT THE SAME AS fmt's "%#v". %#v will output both exported
+// and unexported fields... many times packet layers contain unexported stuff
+// that would just mess up the output of the layer, see for example the
+// Payload layer and its internal 'data' field, which contains a large byte
+// array that would really mess up formatting.
+func LayerString(l Layer) string {
+ return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false))
+}
+
+// Dumper dumps verbose information on a value. If a layer type implements
+// Dumper, then its LayerDump() string will include the results in its output.
+type Dumper interface {
+ Dump() string
+}
+
+// LayerDump outputs a very verbose string representation of a layer. Its
+// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()).
+// It contains newlines and ends with a newline.
+func LayerDump(l Layer) string {
+ var b bytes.Buffer
+ b.WriteString(LayerString(l))
+ b.WriteByte('\n')
+ if d, ok := l.(Dumper); ok {
+ dump := d.Dump()
+ if dump != "" {
+ b.WriteString(dump)
+ if dump[len(dump)-1] != '\n' {
+ b.WriteByte('\n')
+ }
+ }
+ }
+ b.WriteString(hex.Dump(l.LayerContents()))
+ return b.String()
+}
+
+// layerString outputs, recursively, a layer in a "smart" way. See docs for
+// LayerString for more details.
+//
+// Params:
+//
+// i - value to write out
+// anonymous: if we're currently recursing an anonymous member of a struct
+// writeSpace: if we've already written a value in a struct, and need to
+// write a space before writing more. This happens when we write various
+// anonymous values, and need to keep writing more.
+func layerString(v reflect.Value, anonymous bool, writeSpace bool) string {
+ // Let String() functions take precedence.
+ if v.CanInterface() {
+ if s, ok := v.Interface().(fmt.Stringer); ok {
+ return s.String()
+ }
+ }
+ // Reflect, and spit out all the exported fields as key=value.
+ switch v.Type().Kind() {
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ return "nil"
+ }
+ r := v.Elem()
+ return layerString(r, anonymous, writeSpace)
+ case reflect.Struct:
+ var b bytes.Buffer
+ typ := v.Type()
+ if !anonymous {
+ b.WriteByte('{')
+ }
+ for i := 0; i < v.NumField(); i++ {
+ // Check if this is upper-case.
+ ftype := typ.Field(i)
+ f := v.Field(i)
+ if ftype.Anonymous {
+ anonStr := layerString(f, true, writeSpace)
+ writeSpace = writeSpace || anonStr != ""
+ b.WriteString(anonStr)
+ } else if ftype.PkgPath == "" { // exported
+ if writeSpace {
+ b.WriteByte(' ')
+ }
+ writeSpace = true
+ fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace))
+ }
+ }
+ if !anonymous {
+ b.WriteByte('}')
+ }
+ return b.String()
+ case reflect.Slice:
+ var b bytes.Buffer
+ b.WriteByte('[')
+ if v.Len() > 4 {
+ fmt.Fprintf(&b, "..%d..", v.Len())
+ } else {
+ for j := 0; j < v.Len(); j++ {
+ if j != 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(layerString(v.Index(j), false, false))
+ }
+ }
+ b.WriteByte(']')
+ return b.String()
+ }
+ return fmt.Sprintf("%v", v.Interface())
+}
+
+const (
+ longBytesLength = 128
+)
+
+// LongBytesGoString returns a string representation of the byte slice shortened
+// using the format '{<leading bytes> ... (<total> bytes)}' if it
+// exceeds a predetermined length. Can be used to avoid filling the display with
+// very long byte strings.
+func LongBytesGoString(buf []byte) string {
+ if len(buf) < longBytesLength {
+ return fmt.Sprintf("%#v", buf)
+ }
+ s := fmt.Sprintf("%#v", buf[:longBytesLength-1])
+ s = strings.TrimSuffix(s, "}")
+ return fmt.Sprintf("%s ... (%d bytes)}", s, len(buf))
+}
+
+func baseLayerString(value reflect.Value) string {
+ t := value.Type()
+ content := value.Field(0)
+ c := make([]byte, content.Len())
+ for i := range c {
+ c[i] = byte(content.Index(i).Uint())
+ }
+ payload := value.Field(1)
+ p := make([]byte, payload.Len())
+ for i := range p {
+ p[i] = byte(payload.Index(i).Uint())
+ }
+ return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(),
+ LongBytesGoString(c),
+ LongBytesGoString(p))
+}
+
+func layerGoString(i interface{}, b *bytes.Buffer) {
+ if s, ok := i.(fmt.GoStringer); ok {
+ b.WriteString(s.GoString())
+ return
+ }
+
+ var v reflect.Value
+ var ok bool
+ if v, ok = i.(reflect.Value); !ok {
+ v = reflect.ValueOf(i)
+ }
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if v.Kind() == reflect.Ptr {
+ b.WriteByte('&')
+ }
+ layerGoString(v.Elem().Interface(), b)
+ case reflect.Struct:
+ t := v.Type()
+ b.WriteString(t.String())
+ b.WriteByte('{')
+ for i := 0; i < v.NumField(); i++ {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ if t.Field(i).Name == "BaseLayer" {
+ fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i)))
+ } else if v.Field(i).Kind() == reflect.Struct {
+ fmt.Fprintf(b, "%s:", t.Field(i).Name)
+ layerGoString(v.Field(i), b)
+ } else if v.Field(i).Kind() == reflect.Ptr {
+ b.WriteByte('&')
+ layerGoString(v.Field(i), b)
+ } else {
+ fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i))
+ }
+ }
+ b.WriteByte('}')
+ default:
+ fmt.Fprintf(b, "%#v", i)
+ }
+}
+
+// LayerGoString returns a representation of the layer in Go syntax,
+// taking care to shorten "very long" BaseLayer byte slices
+func LayerGoString(l Layer) string {
+ b := new(bytes.Buffer)
+ layerGoString(l, b)
+ return b.String()
+}
+
+func (p *packet) packetString() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "PACKET: %d bytes", len(p.Data()))
+ if p.metadata.Truncated {
+ b.WriteString(", truncated")
+ }
+ if p.metadata.Length > 0 {
+ fmt.Fprintf(&b, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength)
+ }
+ if !p.metadata.Timestamp.IsZero() {
+ fmt.Fprintf(&b, " @ %v", p.metadata.Timestamp)
+ }
+ b.WriteByte('\n')
+ for i, l := range p.layers {
+ fmt.Fprintf(&b, "- Layer %d (%02d bytes) = %s\n", i+1, len(l.LayerContents()), LayerString(l))
+ }
+ return b.String()
+}
+
+func (p *packet) packetDump() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data))
+ for i, l := range p.layers {
+ fmt.Fprintf(&b, "--- Layer %d ---\n%s", i+1, LayerDump(l))
+ }
+ return b.String()
+}
+
+// eagerPacket is a packet implementation that does eager decoding. Upon
+// initial construction, it decodes all the layers it can from packet data.
+// eagerPacket implements Packet and PacketBuilder.
+type eagerPacket struct {
+ packet
+}
+
+var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type")
+
+func (p *eagerPacket) NextDecoder(next Decoder) error {
+ if next == nil {
+ return errNilDecoder
+ }
+ if p.last == nil {
+ return ErrNoLayersAdded
+ }
+ d := p.last.LayerPayload()
+ if len(d) == 0 {
+ return nil
+ }
+ // Since we're eager, immediately call the next decoder.
+ return next.Decode(d, p)
+}
+func (p *eagerPacket) initialDecode(dec Decoder) {
+ defer p.recoverDecodeError()
+ err := dec.Decode(p.data, p)
+ if err != nil {
+ p.addFinalDecodeError(err, nil)
+ }
+}
+func (p *eagerPacket) LinkLayer() LinkLayer {
+ return p.link
+}
+func (p *eagerPacket) NetworkLayer() NetworkLayer {
+ return p.network
+}
+func (p *eagerPacket) TransportLayer() TransportLayer {
+ return p.transport
+}
+func (p *eagerPacket) ApplicationLayer() ApplicationLayer {
+ return p.application
+}
+func (p *eagerPacket) ErrorLayer() ErrorLayer {
+ return p.failure
+}
+func (p *eagerPacket) Layers() []Layer {
+ return p.layers
+}
+func (p *eagerPacket) Layer(t LayerType) Layer {
+ for _, l := range p.layers {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ return nil
+}
+func (p *eagerPacket) LayerClass(lc LayerClass) Layer {
+ for _, l := range p.layers {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ return nil
+}
+func (p *eagerPacket) String() string { return p.packetString() }
+func (p *eagerPacket) Dump() string { return p.packetDump() }
+
+// lazyPacket does lazy decoding on its packet data. On construction it does
+// no initial decoding. For each function call, it decodes only as many layers
+// as are necessary to compute the return value for that function.
+// lazyPacket implements Packet and PacketBuilder.
+type lazyPacket struct {
+ packet
+ next Decoder
+}
+
+func (p *lazyPacket) NextDecoder(next Decoder) error {
+ if next == nil {
+ return errNilDecoder
+ }
+ p.next = next
+ return nil
+}
+func (p *lazyPacket) decodeNextLayer() {
+ if p.next == nil {
+ return
+ }
+ d := p.data
+ if p.last != nil {
+ d = p.last.LayerPayload()
+ }
+ next := p.next
+ p.next = nil
+ // We've just set p.next to nil, so if we see we have no data, this should be
+ // the final call we get to decodeNextLayer if we return here.
+ if len(d) == 0 {
+ return
+ }
+ defer p.recoverDecodeError()
+ err := next.Decode(d, p)
+ if err != nil {
+ p.addFinalDecodeError(err, nil)
+ }
+}
+func (p *lazyPacket) LinkLayer() LinkLayer {
+ for p.link == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.link
+}
+func (p *lazyPacket) NetworkLayer() NetworkLayer {
+ for p.network == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.network
+}
+func (p *lazyPacket) TransportLayer() TransportLayer {
+ for p.transport == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.transport
+}
+func (p *lazyPacket) ApplicationLayer() ApplicationLayer {
+ for p.application == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.application
+}
+func (p *lazyPacket) ErrorLayer() ErrorLayer {
+ for p.failure == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.failure
+}
+func (p *lazyPacket) Layers() []Layer {
+ for p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.layers
+}
+func (p *lazyPacket) Layer(t LayerType) Layer {
+ for _, l := range p.layers {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ numLayers := len(p.layers)
+ for p.next != nil {
+ p.decodeNextLayer()
+ for _, l := range p.layers[numLayers:] {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ numLayers = len(p.layers)
+ }
+ return nil
+}
+func (p *lazyPacket) LayerClass(lc LayerClass) Layer {
+ for _, l := range p.layers {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ numLayers := len(p.layers)
+ for p.next != nil {
+ p.decodeNextLayer()
+ for _, l := range p.layers[numLayers:] {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ numLayers = len(p.layers)
+ }
+ return nil
+}
+func (p *lazyPacket) String() string { p.Layers(); return p.packetString() }
+func (p *lazyPacket) Dump() string { p.Layers(); return p.packetDump() }
+
+// DecodeOptions tells gopacket how to decode a packet.
+type DecodeOptions struct {
+ // Lazy decoding decodes the minimum number of layers needed to return data
+ // for a packet at each function call. Be careful using this with concurrent
+ // packet processors, as each call to packet.* could mutate the packet, and
+ // two concurrent function calls could interact poorly.
+ Lazy bool
+ // NoCopy decoding doesn't copy its input buffer into storage that's owned by
+ // the packet. If you can guarantee that the bytes underlying the slice
+ // passed into NewPacket aren't going to be modified, this can be faster. If
+ // there's any chance that those bytes WILL be changed, this will invalidate
+ // your packets.
+ NoCopy bool
+ // Pool decoding only applies if NoCopy is false.
+ // Instead of always allocating new memory it takes the memory from a pool.
+ // NewPacket then will return a PooledPacket instead of a Packet.
+ // As soon as you're done with the PooledPacket you should call PooledPacket.Dispose() to return it to the pool.
+ Pool bool
+ // SkipDecodeRecovery skips over panic recovery during packet decoding.
+ // Normally, when packets decode, if a panic occurs, that panic is captured
+ // by a recover(), and a DecodeFailure layer is added to the packet detailing
+ // the issue. If this flag is set, panics are instead allowed to continue up
+ // the stack.
+ SkipDecodeRecovery bool
+ // DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP
+ // decoder. If true, we should try to decode layers after TCP in single packets.
+ // This is disabled by default because the reassembly package drives the decoding
+ // of TCP payload data after reassembly.
+ DecodeStreamsAsDatagrams bool
+}
+
+// Default decoding provides the safest (but slowest) method for decoding
+// packets. It eagerly processes all layers (so it's concurrency-safe) and it
+// copies its input buffer upon creation of the packet (so the packet remains
+// valid if the underlying slice is modified). Both of these take time,
+// though, so beware. If you can guarantee that the packet will only be used
+// by one goroutine at a time, set Lazy decoding. If you can guarantee that
+// the underlying slice won't change, set NoCopy decoding.
+var Default = DecodeOptions{}
+
+// Lazy is a DecodeOptions with just Lazy set.
+var Lazy = DecodeOptions{Lazy: true}
+
+// NoCopy is a DecodeOptions with just NoCopy set.
+var NoCopy = DecodeOptions{NoCopy: true}
+
+// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set.
+var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true}
+
+// NewPacket creates a new Packet object from a set of bytes. The
+// firstLayerDecoder tells it how to interpret the first layer from the bytes,
+// future layers will be generated from that first layer automatically.
+func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) (p Packet) {
+ if !options.NoCopy {
+ var (
+ poolMemory *[]byte
+ dataCopy []byte
+ )
+ if options.Pool && len(data) <= maximumMTU {
+ poolMemory = poolPackedPool.Get().(*[]byte)
+ dataCopy = (*poolMemory)[:len(data)]
+ copy(dataCopy, data)
+ data = dataCopy
+ defer func() {
+ p = &pooledPacket{Packet: p, origData: poolMemory}
+ }()
+ } else {
+ dataCopy = make([]byte, len(data))
+ copy(dataCopy, data)
+ data = dataCopy
+ }
+ }
+ if options.Lazy {
+ lp := &lazyPacket{
+ packet: packet{data: data, decodeOptions: options},
+ next: firstLayerDecoder,
+ }
+ lp.layers = lp.initialLayers[:0]
+ // Crazy craziness:
+	// If the following return statement is REMOVED, and Lazy is FALSE, then
+ // eager packet processing becomes 17% FASTER. No, there is no logical
+ // explanation for this. However, it's such a hacky micro-optimization that
+ // we really can't rely on it. It appears to have to do with the size the
+ // compiler guesses for this function's stack space, since one symptom is
+ // that with the return statement in place, we more than double calls to
+ // runtime.morestack/runtime.lessstack. We'll hope the compiler gets better
+ // over time and we get this optimization for free. Until then, we'll have
+ // to live with slower packet processing.
+ return lp
+ }
+ ep := &eagerPacket{
+ packet: packet{data: data, decodeOptions: options},
+ }
+ ep.layers = ep.initialLayers[:0]
+ ep.initialDecode(firstLayerDecoder)
+ return ep
+}
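+
+// A minimal usage sketch, assuming rawBytes holds a captured Ethernet frame
+// and the LayerTypeEthernet decoder comes from the layers subpackage:
+//
+//	// Eager, copying decode (the safest default):
+//	pkt := gopacket.NewPacket(rawBytes, layers.LayerTypeEthernet, gopacket.Default)
+//	fmt.Println(pkt.Layers())
+//	// Lazy decode: layers are only decoded as they are requested.
+//	lazyPkt := gopacket.NewPacket(rawBytes, layers.LayerTypeEthernet, gopacket.Lazy)
+//	if tcp := lazyPkt.Layer(layers.LayerTypeTCP); tcp != nil {
+//		fmt.Println("packet contains TCP")
+//	}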
+
+// PacketDataSource is an interface for some source of packet data. Users may
+// create their own implementations, or use the existing implementations in
+// gopacket/pcap (libpcap, allows reading from live interfaces or from
+// pcap files) or gopacket/pfring (PF_RING, allows reading from live
+// interfaces).
+type PacketDataSource interface {
+ // ReadPacketData returns the next packet available from this data source.
+ // It returns:
+ // data: The bytes of an individual packet.
+ // ci: Metadata about the capture
+ // err: An error encountered while reading packet data. If err != nil,
+ // then data/ci will be ignored.
+ ReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set
+// of internal PacketDataSources, each of which will stop with io.EOF after
+// reading a finite number of packets. The returned PacketDataSource will
+// return all packets from the first finite source, followed by all packets from
+// the second, etc. Once all finite sources have returned io.EOF, the returned
+// source will as well.
+func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource {
+ c := concat(pds)
+ return &c
+}
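+
+// A minimal usage sketch, assuming src1 and src2 are finite packet data
+// sources such as pcap file readers:
+//
+//	combined := gopacket.ConcatFinitePacketDataSources(src1, src2)
+//	for {
+//		data, ci, err := combined.ReadPacketData()
+//		if err == io.EOF {
+//			break // every source has been exhausted
+//		}
+//		... // use data and ci
+//	}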
+
+type concat []PacketDataSource
+
+func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) {
+ for len(*c) > 0 {
+ data, ci, err = (*c)[0].ReadPacketData()
+ if errors.Is(err, io.EOF) {
+ *c = (*c)[1:]
+ continue
+ }
+ return
+ }
+ return nil, CaptureInfo{}, io.EOF
+}
+
+// ZeroCopyPacketDataSource is an interface to pull packet data from sources
+// that allow data to be returned without copying to a user-controlled buffer.
+// It's very similar to PacketDataSource, except that the caller must be more
+// careful in how the returned buffer is handled.
+type ZeroCopyPacketDataSource interface {
+ // ZeroCopyReadPacketData returns the next packet available from this data source.
+ // It returns:
+ // data: The bytes of an individual packet. Unlike with
+ // PacketDataSource's ReadPacketData, the slice returned here points
+ // to a buffer owned by the data source. In particular, the bytes in
+ // this buffer may be changed by future calls to
+ // ZeroCopyReadPacketData. Do not use the returned buffer after
+ // subsequent ZeroCopyReadPacketData calls.
+ // ci: Metadata about the capture
+ // err: An error encountered while reading packet data. If err != nil,
+ // then data/ci will be ignored.
+ ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// PacketSourceOption modifies the configuration of a PacketSource when passed
+// to NewPacketSource or NewZeroCopyPacketSource.
+type PacketSourceOption interface {
+ apply(ps *PacketSource)
+}
+
+type packetSourceOptionFunc func(ps *PacketSource)
+
+func (f packetSourceOptionFunc) apply(ps *PacketSource) {
+ f(ps)
+}
+
+// WithLazy sets the Lazy decode option of the PacketSource.
+func WithLazy(lazy bool) packetSourceOptionFunc {
+ return func(ps *PacketSource) {
+ ps.Lazy = lazy
+ }
+}
+
+// WithNoCopy sets the NoCopy decode option of the PacketSource.
+func WithNoCopy(noCopy bool) packetSourceOptionFunc {
+ return func(ps *PacketSource) {
+ ps.NoCopy = noCopy
+ }
+}
+
+// WithPool sets the Pool decode option of the PacketSource.
+func WithPool(pool bool) packetSourceOptionFunc {
+ return func(ps *PacketSource) {
+ ps.Pool = pool
+ }
+}
+
+// WithSkipDecodeRecovery sets the SkipDecodeRecovery decode option of the PacketSource.
+func WithSkipDecodeRecovery(skipDecodeRecovery bool) packetSourceOptionFunc {
+ return func(ps *PacketSource) {
+ ps.SkipDecodeRecovery = skipDecodeRecovery
+ }
+}
+
+// WithDecodeStreamsAsDatagrams sets the DecodeStreamsAsDatagrams decode option of the PacketSource.
+func WithDecodeStreamsAsDatagrams(decodeStreamsAsDatagrams bool) packetSourceOptionFunc {
+ return func(ps *PacketSource) {
+ ps.DecodeStreamsAsDatagrams = decodeStreamsAsDatagrams
+ }
+}
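+
+// A minimal usage sketch, assuming handle implements PacketDataSource (for
+// example, a pcap handle) and layers.LayerTypeEthernet is the first-layer
+// decoder:
+//
+//	src := gopacket.NewPacketSource(handle, layers.LayerTypeEthernet,
+//		gopacket.WithLazy(true),
+//		gopacket.WithDecodeStreamsAsDatagrams(true),
+//	)
+//	for packet := range src.Packets() {
+//		handlePacket(packet)
+//	}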
+
+// PacketSource reads in packets from a PacketDataSource, decodes them, and
+// returns them.
+//
+// There are currently two different methods for reading packets in through
+// a PacketSource:
+//
+// # Reading With Packets Function
+//
+// This method is the most convenient and easiest to code, but lacks
+// flexibility. Packets returns a 'chan Packet', then asynchronously writes
+// packets into that channel. Packets uses a blocking channel, and closes
+// it if an io.EOF is returned by the underlying PacketDataSource. All other
+// PacketDataSource errors are ignored and discarded.
+//
+// for packet := range packetSource.Packets() {
+// ...
+// }
+//
+// # Reading With NextPacket Function
+//
+// This method is the most flexible, and exposes errors that may be
+// encountered by the underlying PacketDataSource. It's also the fastest
+// in a tight loop, since it doesn't have the overhead of a channel
+// read/write. However, it requires the user to handle errors, most
+// importantly the io.EOF error in cases where packets are being read from
+// a file.
+//
+// for {
+// packet, err := packetSource.NextPacket()
+// if err == io.EOF {
+// break
+// } else if err != nil {
+// log.Println("Error:", err)
+// continue
+// }
+// handlePacket(packet) // Do something with each packet.
+// }
+type PacketSource struct {
+ zeroCopy bool
+ source func() (data []byte, ci CaptureInfo, err error)
+ decoder Decoder
+ // DecodeOptions is the set of options to use for decoding each piece
+ // of packet data. This can/should be changed by the user to reflect the
+ // way packets should be decoded.
+ DecodeOptions
+ c chan Packet
+}
+
+// NewZeroCopyPacketSource creates a zero copy packet data source.
+func NewZeroCopyPacketSource(source ZeroCopyPacketDataSource, decoder Decoder, opts ...PacketSourceOption) *PacketSource {
+ ps := &PacketSource{
+ source: source.ZeroCopyReadPacketData,
+ decoder: decoder,
+ }
+
+ for idx := range opts {
+ opts[idx].apply(ps)
+ }
+
+ return ps
+}
+
+// NewPacketSource creates a packet data source.
+func NewPacketSource(source PacketDataSource, decoder Decoder, opts ...PacketSourceOption) *PacketSource {
+ ps := &PacketSource{
+ source: source.ReadPacketData,
+ decoder: decoder,
+ }
+
+ for idx := range opts {
+ opts[idx].apply(ps)
+ }
+
+ return ps
+}
+
+// NextPacket returns the next decoded packet from the PacketSource. On error,
+// it returns a nil packet and a non-nil error.
+func (p *PacketSource) NextPacket() (Packet, error) {
+ data, ci, err := p.source()
+ if err != nil {
+ return nil, err
+ }
+ packet := NewPacket(data, p.decoder, p.DecodeOptions)
+ m := packet.Metadata()
+ m.CaptureInfo = ci
+ m.Truncated = m.Truncated || ci.CaptureLength < ci.Length
+ return packet, nil
+}
+
+// packetsToChannel reads in all packets from the packet source and sends them
+// to the given channel. This routine terminates when the context is done or
+// a known unrecoverable error is returned by NextPacket().
+func (p *PacketSource) packetsToChannel(ctx context.Context) {
+ defer close(p.c)
+ for ctx.Err() == nil {
+ packet, err := p.NextPacket()
+ if err == nil {
+ select {
+ case p.c <- packet:
+ continue
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ // if timeout error sleep briefly and retry
+ var netErr net.Error
+ if ok := errors.As(err, &netErr); ok && netErr.Timeout() {
+ time.Sleep(time.Millisecond * time.Duration(5))
+ continue
+ }
+
+ // Immediately break for known unrecoverable errors
+ if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
+ errors.Is(err, io.ErrNoProgress) || errors.Is(err, io.ErrClosedPipe) || errors.Is(err, io.ErrShortBuffer) ||
+ errors.Is(err, syscall.EBADF) ||
+ strings.Contains(err.Error(), "use of closed file") {
+ break
+ }
+
+ // Sleep briefly and try again
+ time.Sleep(time.Millisecond * time.Duration(5))
+ }
+}
+
+// Packets returns a channel of packets, allowing easy iterating over
+// packets. Packets will be asynchronously read in from the underlying
+// PacketDataSource and written to the returned channel. If the underlying
+// PacketDataSource returns an io.EOF error, the channel will be closed.
+// If any other error is encountered, it is ignored.
+//
+// for packet := range packetSource.Packets() {
+// handlePacket(packet) // Do something with each packet
+// }
+//
+// If called more than once, returns the same channel.
+func (p *PacketSource) Packets() chan Packet {
+ return p.PacketsCtx(context.Background())
+}
+
+// PacketsCtx returns a channel of packets, allowing easy iterating over
+// packets. Packets will be asynchronously read in from the underlying
+// PacketDataSource and written to the returned channel. If the underlying
+// PacketDataSource returns an io.EOF error, the channel will be closed.
+// If any other error is encountered, it is ignored.
+// The background Go routine will be canceled as soon as the given context
+// returns an error either because it got canceled or it has reached its deadline.
+//
+// for packet := range packetSource.PacketsCtx(context.Background()) {
+// handlePacket(packet) // Do something with each packet.
+// }
+//
+// If called more than once, returns the same channel.
+func (p *PacketSource) PacketsCtx(ctx context.Context) chan Packet {
+ if p.DecodeOptions.NoCopy && p.zeroCopy {
+ panic("PacketSource uses a zero copy datasource and NoCopy decoder option activated - Packets() uses a buffered channel hence packets are most likely overwritten")
+ }
+
+ const defaultPacketChannelSize = 1000
+ if p.c == nil {
+ p.c = make(chan Packet, defaultPacketChannelSize)
+ go p.packetsToChannel(ctx)
+ }
+ return p.c
+}
diff --git a/vendor/github.com/gopacket/gopacket/parser.go b/vendor/github.com/gopacket/gopacket/parser.go
new file mode 100644
index 000000000..8bc6a68d4
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/parser.go
@@ -0,0 +1,351 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+)
+
+// decodingLayerElem is a container for a single LayerType->DecodingLayer mapping.
+type decodingLayerElem struct {
+ typ LayerType
+ dec DecodingLayer
+}
+
+// DecodingLayer is an interface for packet layers that can decode themselves.
+//
+// The important part of DecodingLayer is that they decode themselves in-place.
+// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to
+// the new state defined by the data passed in. A returned error leaves the
+// DecodingLayer in an unknown intermediate state, thus its fields should not be
+// trusted.
+//
+// Because the DecodingLayer is resetting its own fields, a call to
+// DecodeFromBytes should normally not require any memory allocation.
+type DecodingLayer interface {
+ // DecodeFromBytes resets the internal state of this layer to the state
+ // defined by the passed-in bytes. Slices in the DecodingLayer may
+ // reference the passed-in data, so care should be taken to copy it
+ // first should later modification of data be required before the
+ // DecodingLayer is discarded.
+ DecodeFromBytes(data []byte, df DecodeFeedback) error
+ // CanDecode returns the set of LayerTypes this DecodingLayer can
+ // decode. For Layers that are also DecodingLayers, this will most
+ // often be that Layer's LayerType().
+ CanDecode() LayerClass
+ // NextLayerType returns the LayerType which should be used to decode
+ // the LayerPayload.
+ NextLayerType() LayerType
+ // LayerPayload is the set of bytes remaining to decode after a call to
+ // DecodeFromBytes.
+ LayerPayload() []byte
+}
+
+// DecodingLayerFunc decodes a given packet and stores the decoded LayerType
+// values into the specified slice. It returns either the first encountered
+// unsupported LayerType value or a decoding error. In case of success, it
+// returns (LayerTypeZero, nil).
+type DecodingLayerFunc func([]byte, *[]LayerType) (LayerType, error)
+
+// DecodingLayerContainer stores all DecodingLayer-s and serves as a
+// searching tool for DecodingLayerParser.
+type DecodingLayerContainer interface {
+	// Put adds a new DecodingLayer to the container. The new instance of
+ // the same DecodingLayerContainer is returned so it may be
+ // implemented as a value receiver.
+ Put(DecodingLayer) DecodingLayerContainer
+	// Decoder returns the DecodingLayer to decode the given LayerType and
+	// true if it was found. If no decoder is found, it returns false.
+ Decoder(LayerType) (DecodingLayer, bool)
+ // LayersDecoder returns DecodingLayerFunc which decodes given
+ // packet, starting with specified LayerType and DecodeFeedback.
+ LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc
+}
+
+// DecodingLayerSparse is a sparse array-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is addressed in an
+// allocated slice by LayerType value itself. Though this is the
+// fastest container, it may be memory-consuming if used with big
+// LayerType values.
+type DecodingLayerSparse []DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) Put(d DecodingLayer) DecodingLayerContainer {
+ maxLayerType := LayerType(len(dl) - 1)
+ for _, typ := range d.CanDecode().LayerTypes() {
+ if typ > maxLayerType {
+ maxLayerType = typ
+ }
+ }
+
+ if extra := maxLayerType - LayerType(len(dl)) + 1; extra > 0 {
+ dl = append(dl, make([]DecodingLayer, extra)...)
+ }
+
+ for _, typ := range d.CanDecode().LayerTypes() {
+ dl[typ] = d
+ }
+ return dl
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ return LayersDecoder(dl, first, df)
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) Decoder(typ LayerType) (DecodingLayer, bool) {
+ if int64(typ) < int64(len(dl)) {
+ decoder := dl[typ]
+ return decoder, decoder != nil
+ }
+ return nil, false
+}
+
+// DecodingLayerArray is an array-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is searched linearly in
+// an allocated slice in one-by-one fashion.
+type DecodingLayerArray []decodingLayerElem
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) Put(d DecodingLayer) DecodingLayerContainer {
+TYPES:
+ for _, typ := range d.CanDecode().LayerTypes() {
+ for i := range dl {
+ if dl[i].typ == typ {
+ dl[i].dec = d
+ continue TYPES
+ }
+ }
+ dl = append(dl, decodingLayerElem{typ, d})
+ }
+ return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) Decoder(typ LayerType) (DecodingLayer, bool) {
+ for i := range dl {
+ if dl[i].typ == typ {
+ return dl[i].dec, true
+ }
+ }
+ return nil, false
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ return LayersDecoder(dl, first, df)
+}
+
+// DecodingLayerMap is a map-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is searched in a map
+// hashed by LayerType value.
+type DecodingLayerMap map[LayerType]DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Put(d DecodingLayer) DecodingLayerContainer {
+ for _, typ := range d.CanDecode().LayerTypes() {
+ if dl == nil {
+ dl = make(map[LayerType]DecodingLayer)
+ }
+ dl[typ] = d
+ }
+ return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Decoder(typ LayerType) (DecodingLayer, bool) {
+ d, ok := dl[typ]
+ return d, ok
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ return LayersDecoder(dl, first, df)
+}
+
+// Static code check.
+var (
+ _ = []DecodingLayerContainer{
+ DecodingLayerSparse(nil),
+ DecodingLayerMap(nil),
+ DecodingLayerArray(nil),
+ }
+)
+
+// DecodingLayerParser parses a given set of layer types. See DecodeLayers for
+// more information on how DecodingLayerParser should be used.
+type DecodingLayerParser struct {
+ // DecodingLayerParserOptions is the set of options available to the
+ // user to define the parser's behavior.
+ DecodingLayerParserOptions
+ dlc DecodingLayerContainer
+ first LayerType
+ df DecodeFeedback
+
+ decodeFunc DecodingLayerFunc
+
+ // Truncated is set when a decode layer detects that the packet has been
+ // truncated.
+ Truncated bool
+}
+
+// AddDecodingLayer adds a decoding layer to the parser. This adds support for
+// the decoding layer's CanDecode layers to the parser... should they be
+// encountered, they'll be parsed.
+func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) {
+ l.SetDecodingLayerContainer(l.dlc.Put(d))
+}
+
+// SetTruncated is used by DecodingLayers to set the Truncated boolean in the
+// DecodingLayerParser. Users should simply read Truncated after calling
+// DecodeLayers.
+func (l *DecodingLayerParser) SetTruncated() {
+ l.Truncated = true
+}
+
+// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all
+// of the given DecodingLayers with AddDecodingLayer.
+//
+// Each call to DecodeLayers will attempt to decode the given bytes first by
+// treating them as a 'first'-type layer, then by using NextLayerType on
+// subsequently decoded layers to find the next relevant decoder. Should a
+// decoder not be available for the layer type returned by NextLayerType,
+// decoding will stop.
+//
+// NewDecodingLayerParser uses DecodingLayerMap container by
+// default.
+func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser {
+ dlp := &DecodingLayerParser{first: first}
+ dlp.df = dlp // Cast this once to the interface
+ // default container
+ dlc := DecodingLayerContainer(DecodingLayerMap(make(map[LayerType]DecodingLayer)))
+ for _, d := range decoders {
+ dlc = dlc.Put(d)
+ }
+
+ dlp.SetDecodingLayerContainer(dlc)
+ return dlp
+}
+
+// SetDecodingLayerContainer specifies the container with decoders. This
+// call replaces all decoders already registered in the given instance of
+// DecodingLayerParser.
+func (l *DecodingLayerParser) SetDecodingLayerContainer(dlc DecodingLayerContainer) {
+ l.dlc = dlc
+ l.decodeFunc = l.dlc.LayersDecoder(l.first, l.df)
+}
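+
+// A minimal usage sketch of replacing the default DecodingLayerMap container
+// with the faster DecodingLayerSparse one, assuming eth, ip4 and tcp are
+// DecodingLayer implementations from the layers subpackage and packetData
+// holds a raw frame:
+//
+//	parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet)
+//	dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
+//	dlc = dlc.Put(&eth).Put(&ip4).Put(&tcp)
+//	parser.SetDecodingLayerContainer(dlc)
+//	decoded := []gopacket.LayerType{}
+//	if err := parser.DecodeLayers(packetData, &decoded); err != nil {
+//		// decoding stopped at the first unsupported or undecodable layer
+//	}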
+
+// DecodeLayers decodes as many layers as possible from the given data. It
+// initially treats the data as layer type 'typ', then uses NextLayerType on
+// each subsequent decoded layer until it gets to a layer type it doesn't know
+// how to parse.
+//
+// For each layer successfully decoded, DecodeLayers appends the layer type to
+// the decoded slice. DecodeLayers truncates the 'decoded' slice initially, so
+// there's no need to empty it yourself.
+//
+// This decoding method is about an order of magnitude faster than packet
+// decoding, because it only decodes known layers that have already been
+// allocated. This means it doesn't need to allocate each layer it returns...
+// instead it overwrites the layers that already exist.
+//
+// Example usage:
+//
+// func main() {
+// var eth layers.Ethernet
+// var ip4 layers.IPv4
+// var ip6 layers.IPv6
+// var tcp layers.TCP
+// var udp layers.UDP
+// var payload gopacket.Payload
+// parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip4, &ip6, &tcp, &udp, &payload)
+// var source gopacket.PacketDataSource = getMyDataSource()
+// decodedLayers := make([]gopacket.LayerType, 0, 10)
+// for {
+// data, _, err := source.ReadPacketData()
+// if err != nil {
+// fmt.Println("Error reading packet data: ", err)
+// continue
+// }
+// fmt.Println("Decoding packet")
+// err = parser.DecodeLayers(data, &decodedLayers)
+// for _, typ := range decodedLayers {
+// fmt.Println(" Successfully decoded layer type", typ)
+// switch typ {
+// case layers.LayerTypeEthernet:
+// fmt.Println(" Eth ", eth.SrcMAC, eth.DstMAC)
+// case layers.LayerTypeIPv4:
+// fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP)
+// case layers.LayerTypeIPv6:
+// fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP)
+// case layers.LayerTypeTCP:
+// fmt.Println(" TCP ", tcp.SrcPort, tcp.DstPort)
+// case layers.LayerTypeUDP:
+// fmt.Println(" UDP ", udp.SrcPort, udp.DstPort)
+// }
+// }
+//	    if parser.Truncated {
+// fmt.Println(" Packet has been truncated")
+// }
+// if err != nil {
+// fmt.Println(" Error encountered:", err)
+// }
+// }
+// }
+//
+// If DecodeLayers is unable to decode the next layer type, it will return the
+// error UnsupportedLayerType.
+func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) {
+ l.Truncated = false
+ if !l.IgnorePanic {
+ defer panicToError(&err)
+ }
+ typ, err := l.decodeFunc(data, decoded)
+ if typ != LayerTypeZero {
+ // no decoder
+ if l.IgnoreUnsupported {
+ return nil
+ }
+ return UnsupportedLayerType(typ)
+ }
+ return err
+}
+
+// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers
+// encounters a layer type that the DecodingLayerParser has no decoder for.
+type UnsupportedLayerType LayerType
+
+// Error implements the error interface, returning a string to say that the
+// given layer type is unsupported.
+func (e UnsupportedLayerType) Error() string {
+ return fmt.Sprintf("No decoder for layer type %v", LayerType(e))
+}
+
+func panicToError(e *error) {
+ if r := recover(); r != nil {
+ *e = fmt.Errorf("panic: %v", r)
+ }
+}
+
+// DecodingLayerParserOptions provides options to affect the behavior of a given
+// DecodingLayerParser.
+type DecodingLayerParserOptions struct {
+ // IgnorePanic determines whether a DecodingLayerParser should stop
+ // panics on its own (by returning them as an error from DecodeLayers)
+ // or should allow them to raise up the stack. Handling errors does add
+ // latency to the process of decoding layers, but is much safer for
+ // callers. IgnorePanic defaults to false, thus if the caller does
+ // nothing decode panics will be returned as errors.
+ IgnorePanic bool
+ // IgnoreUnsupported will stop parsing and return a nil error when it
+ // encounters a layer it doesn't have a parser for, instead of returning an
+ // UnsupportedLayerType error. If this is true, it's up to the caller to make
+ // sure that all expected layers have been parsed (by checking the decoded
+ // slice).
+ IgnoreUnsupported bool
+}
diff --git a/vendor/github.com/gopacket/gopacket/time.go b/vendor/github.com/gopacket/gopacket/time.go
new file mode 100644
index 000000000..6d116cdfb
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/time.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+ "math"
+ "time"
+)
+
+// TimestampResolution represents the resolution of timestamps in Base^Exponent.
+type TimestampResolution struct {
+ Base, Exponent int
+}
+
+func (t TimestampResolution) String() string {
+ return fmt.Sprintf("%d^%d", t.Base, t.Exponent)
+}
+
+// ToDuration returns the smallest representable time difference as a time.Duration
+func (t TimestampResolution) ToDuration() time.Duration {
+ if t.Base == 0 {
+ return 0
+ }
+ if t.Exponent == 0 {
+ return time.Second
+ }
+ switch t.Base {
+ case 10:
+ return time.Duration(math.Pow10(t.Exponent + 9))
+ case 2:
+ if t.Exponent < 0 {
+ return time.Second >> uint(-t.Exponent)
+ }
+ return time.Second << uint(t.Exponent)
+ default:
+		// this might lose precision
+ return time.Duration(float64(time.Second) * math.Pow(float64(t.Base), float64(t.Exponent)))
+ }
+}
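+
+// For example, a base-10 resolution with exponent -6 is one microsecond, and a
+// base-2 resolution with exponent -10 is one second shifted right by 10 bits:
+//
+//	TimestampResolution{10, -6}.ToDuration() // time.Microsecond
+//	TimestampResolution{2, -10}.ToDuration() // 976562ns, i.e. time.Second >> 10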
+
+// TimestampResolutionInvalid represents an invalid timestamp resolution
+var TimestampResolutionInvalid = TimestampResolution{}
+
+// TimestampResolutionMillisecond is a resolution of 10^-3s
+var TimestampResolutionMillisecond = TimestampResolution{10, -3}
+
+// TimestampResolutionMicrosecond is a resolution of 10^-6s
+var TimestampResolutionMicrosecond = TimestampResolution{10, -6}
+
+// TimestampResolutionNanosecond is a resolution of 10^-9s
+var TimestampResolutionNanosecond = TimestampResolution{10, -9}
+
+// TimestampResolutionNTP is the resolution of NTP timestamps which is 2^-32 ≈ 233 picoseconds
+var TimestampResolutionNTP = TimestampResolution{2, -32}
+
+// TimestampResolutionCaptureInfo is the resolution used in CaptureInfo, which is currently nanosecond
+var TimestampResolutionCaptureInfo = TimestampResolutionNanosecond
+
+// PacketSourceResolution is an interface for packet data sources that
+// support reporting the timestamp resolution of the acquired timestamps.
+// Returned timestamps will always have NanosecondTimestampResolution due
+// to the use of time.Time, but scaling might have occurred if acquired
+// timestamps have a different resolution.
+type PacketSourceResolution interface {
+ // Resolution returns the timestamp resolution of acquired timestamps before scaling to NanosecondTimestampResolution.
+ Resolution() TimestampResolution
+}
diff --git a/vendor/github.com/gopacket/gopacket/writer.go b/vendor/github.com/gopacket/gopacket/writer.go
new file mode 100644
index 000000000..9a463f047
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/writer.go
@@ -0,0 +1,233 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+)
+
+// SerializableLayer allows its implementations to be written out as a set of bytes,
+// so those bytes may be sent on the wire or otherwise used by the caller.
+// SerializableLayer is implemented by certain Layer types, and can be encoded to
+// bytes using a SerializeBuffer together with SerializeLayers.
+type SerializableLayer interface {
+ // SerializeTo writes this layer to a slice, growing that slice if necessary
+ // to make it fit the layer's data.
+ // Args:
+ // b: SerializeBuffer to write this layer on to. When called, b.Bytes()
+ // is the payload this layer should wrap, if any. Note that this
+ // layer can either prepend itself (common), append itself
+ // (uncommon), or both (sometimes padding or footers are required at
+ // the end of packet data). It's also possible (though probably very
+ // rarely needed) to overwrite any bytes in the current payload.
+ // After this call, b.Bytes() should return the byte encoding of
+ // this layer wrapping the original b.Bytes() payload.
+ // opts: options to use while writing out data.
+ // Returns:
+ // error if a problem was encountered during encoding. If an error is
+ // returned, the bytes in data should be considered invalidated, and
+ // not used.
+ //
+ // SerializeTo calls SHOULD entirely ignore LayerContents and
+ // LayerPayload. It just serializes based on struct fields, neither
+ // modifying nor using contents/payload.
+ SerializeTo(b SerializeBuffer, opts SerializeOptions) error
+ // LayerType returns the type of the layer that is being serialized to the buffer
+ LayerType() LayerType
+}
+
+// SerializeOptions provides options for behaviors that SerializableLayers may want to
+// implement.
+type SerializeOptions struct {
+ // FixLengths determines whether, during serialization, layers should fix
+ // the values for any length field that depends on the payload.
+ FixLengths bool
+ // ComputeChecksums determines whether, during serialization, layers
+ // should recompute checksums based on their payloads.
+ ComputeChecksums bool
+}
+
+// SerializeBuffer is a helper used by gopacket for writing out packet layers.
+// SerializeBuffer starts off as an empty []byte. Subsequent calls to PrependBytes
+// return byte slices before the current Bytes(), AppendBytes returns byte
+// slices after.
+//
+// Byte slices returned by PrependBytes/AppendBytes are NOT zero'd out, so if
+// you want to make sure they're all zeros, set them as such.
+//
+// SerializeBuffer is specifically designed to handle packet writing, where unlike
+// with normal writes it's easier to start writing at the inner-most layer and
+// work out, meaning that we often need to prepend bytes. This runs counter to
+// typical writes to byte slices using append(), where we only write at the end
+// of the buffer.
+//
+// It can be reused via Clear. Note, however, that a Clear call will invalidate the
+// byte slices returned by any previous Bytes() call (the same buffer is
+// reused).
+//
+// 1. Reusing a write buffer is generally much faster than creating a new one,
+// and with the default implementation it avoids additional memory allocations.
+// 2. If a byte slice from a previous Bytes() call will continue to be used,
+// it's better to create a new SerializeBuffer.
+//
+// The Clear method is specifically designed to minimize memory allocations for
+// similar later workloads on the SerializeBuffer. IE: if you make a set of
+// Prepend/Append calls, then clear, then make the same calls with the same
+// sizes, the second round (and all future similar rounds) shouldn't allocate
+// any new memory.
+type SerializeBuffer interface {
+ // Bytes returns the contiguous set of bytes collected so far by Prepend/Append
+ // calls. The slice returned by Bytes will be modified by future Clear calls,
+ // so if you're planning on clearing this SerializeBuffer, you may want to copy
+ // Bytes somewhere safe first.
+ Bytes() []byte
+ // PrependBytes returns a set of bytes which prepends the current bytes in this
+ // buffer. These bytes start in an indeterminate state, so they should be
+ // overwritten by the caller. The caller must only call PrependBytes if they
+ // know they're going to immediately overwrite all bytes returned.
+ PrependBytes(num int) ([]byte, error)
+ // AppendBytes returns a set of bytes which appends the current bytes in this
+ // buffer. These bytes start in an indeterminate state, so they should be
+ // overwritten by the caller. The caller must only call AppendBytes if they
+ // know they're going to immediately overwrite all bytes returned.
+ AppendBytes(num int) ([]byte, error)
+ // Clear resets the SerializeBuffer to a new, empty buffer. After a call to clear,
+ // the byte slice returned by any previous call to Bytes() for this buffer
+ // should be considered invalidated.
+ Clear() error
+ // Layers returns all the Layers that have been successfully serialized into this buffer
+ // already.
+ Layers() []LayerType
+ // PushLayer adds the current Layer to the list of Layers that have been serialized
+ // into this buffer.
+ PushLayer(LayerType)
+}
+
+type serializeBuffer struct {
+ data []byte
+ start int
+ prepended, appended int
+ layers []LayerType
+}
+
+// NewSerializeBuffer creates a new instance of the default implementation of
+// the SerializeBuffer interface.
+func NewSerializeBuffer() SerializeBuffer {
+ return &serializeBuffer{}
+}
+
+// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an
+// expected number of bytes prepended/appended. This tends to decrease the
+// number of memory allocations made by the buffer during writes.
+func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer {
+ return &serializeBuffer{
+ data: make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength),
+ start: expectedPrependLength,
+ prepended: expectedPrependLength,
+ appended: expectedAppendLength,
+ }
+}
+
+func (w *serializeBuffer) Bytes() []byte {
+ return w.data[w.start:]
+}
+
+func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) {
+ if num < 0 {
+ panic("num < 0")
+ }
+ if w.start < num {
+ toPrepend := w.prepended
+ if toPrepend < num {
+ toPrepend = num
+ }
+ w.prepended += toPrepend
+ length := cap(w.data) + toPrepend
+ newData := make([]byte, length)
+ newStart := w.start + toPrepend
+ copy(newData[newStart:], w.data[w.start:])
+ w.start = newStart
+ w.data = newData[:toPrepend+len(w.data)]
+ }
+ w.start -= num
+ return w.data[w.start : w.start+num], nil
+}
+
+func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) {
+ if num < 0 {
+ panic("num < 0")
+ }
+ initialLength := len(w.data)
+ if cap(w.data)-initialLength < num {
+ toAppend := w.appended
+ if toAppend < num {
+ toAppend = num
+ }
+ w.appended += toAppend
+ newData := make([]byte, cap(w.data)+toAppend)
+ copy(newData[w.start:], w.data[w.start:])
+ w.data = newData[:initialLength]
+ }
+ // Grow the buffer. We know it'll be under capacity given above.
+ w.data = w.data[:initialLength+num]
+ return w.data[initialLength:], nil
+}
+
+func (w *serializeBuffer) Clear() error {
+ w.start = w.prepended
+ w.data = w.data[:w.start]
+ w.layers = w.layers[:0]
+ return nil
+}
+
+func (w *serializeBuffer) Layers() []LayerType {
+ return w.layers
+}
+
+func (w *serializeBuffer) PushLayer(l LayerType) {
+ w.layers = append(w.layers, l)
+}
+
+// SerializeLayers clears the given write buffer, then writes all layers into it so
+// they correctly wrap each other. Note that by clearing the buffer, it
+// invalidates all slices previously returned by w.Bytes()
+//
+// Example:
+//
+// buf := gopacket.NewSerializeBuffer()
+// opts := gopacket.SerializeOptions{}
+// gopacket.SerializeLayers(buf, opts, a, b, c)
+// firstPayload := buf.Bytes() // contains byte representation of a(b(c))
+// gopacket.SerializeLayers(buf, opts, d, e, f)
+// secondPayload := buf.Bytes() // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call Clears buf.
+func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error {
+ w.Clear()
+ for i := len(layers) - 1; i >= 0; i-- {
+ layer := layers[i]
+ err := layer.SerializeTo(w, opts)
+ if err != nil {
+ return err
+ }
+ w.PushLayer(layer.LayerType())
+ }
+ return nil
+}
+
+// SerializePacket is a convenience function that calls SerializeLayers
+// on packet's Layers().
+// It returns an error if one of the packet layers is not a SerializableLayer.
+func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error {
+ sls := []SerializableLayer{}
+ for _, layer := range packet.Layers() {
+ sl, ok := layer.(SerializableLayer)
+ if !ok {
+ return fmt.Errorf("layer %s is not serializable", layer.LayerType().String())
+ }
+ sls = append(sls, sl)
+ }
+ return SerializeLayers(buf, opts, sls...)
+}
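+
+// A minimal usage sketch, assuming pkt is a previously decoded Packet whose
+// layers all implement SerializableLayer:
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true}
+//	if err := gopacket.SerializePacket(buf, opts, pkt); err != nil {
+//		// handle a non-serializable layer or a serialization error
+//	}
+//	wireBytes := buf.Bytes()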
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
new file mode 100644
index 000000000..364516251
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name of Gengo, Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
new file mode 100644
index 000000000..b8fbb2b77
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
@@ -0,0 +1,35 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "httprule",
+ srcs = [
+ "compile.go",
+ "parse.go",
+ "types.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
+ deps = ["//utilities"],
+)
+
+go_test(
+ name = "httprule_test",
+ size = "small",
+ srcs = [
+ "compile_test.go",
+ "parse_test.go",
+ "types_test.go",
+ ],
+ embed = [":httprule"],
+ deps = [
+ "//utilities",
+ "@org_golang_google_grpc//grpclog",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":httprule",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
new file mode 100644
index 000000000..3cd937295
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
@@ -0,0 +1,121 @@
+package httprule
+
+import (
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+)
+
+const (
+ opcodeVersion = 1
+)
+
+// Template is a compiled representation of path templates.
+type Template struct {
+ // Version is the version number of the format.
+ Version int
+ // OpCodes is a sequence of operations.
+ OpCodes []int
+ // Pool is a constant pool
+ Pool []string
+ // Verb is a VERB part in the template.
+ Verb string
+ // Fields is a list of field paths bound in this template.
+ Fields []string
+ // Original template (example: /v1/a_bit_of_everything)
+ Template string
+}
+
+// Compiler compiles the parsed representation of a path template into marshallable operations.
+// They can be unmarshalled by runtime.NewPattern.
+type Compiler interface {
+ Compile() Template
+}
+
+type op struct {
+ // code is the opcode of the operation
+ code utilities.OpCode
+
+ // str is a string operand of the code.
+ // num is ignored if str is not empty.
+ str string
+
+ // num is a numeric operand of the code.
+ num int
+}
+
+func (w wildcard) compile() []op {
+ return []op{
+ {code: utilities.OpPush},
+ }
+}
+
+func (w deepWildcard) compile() []op {
+ return []op{
+ {code: utilities.OpPushM},
+ }
+}
+
+func (l literal) compile() []op {
+ return []op{
+ {
+ code: utilities.OpLitPush,
+ str: string(l),
+ },
+ }
+}
+
+func (v variable) compile() []op {
+ var ops []op
+ for _, s := range v.segments {
+ ops = append(ops, s.compile()...)
+ }
+ ops = append(ops, op{
+ code: utilities.OpConcatN,
+ num: len(v.segments),
+ }, op{
+ code: utilities.OpCapture,
+ str: v.path,
+ })
+
+ return ops
+}
+
+func (t template) Compile() Template {
+ var rawOps []op
+ for _, s := range t.segments {
+ rawOps = append(rawOps, s.compile()...)
+ }
+
+ var (
+ ops []int
+ pool []string
+ fields []string
+ )
+ consts := make(map[string]int)
+ for _, op := range rawOps {
+ ops = append(ops, int(op.code))
+ if op.str == "" {
+ ops = append(ops, op.num)
+ } else {
+ // eof segment literal represents the "/" path pattern
+ if op.str == eof {
+ op.str = ""
+ }
+ if _, ok := consts[op.str]; !ok {
+ consts[op.str] = len(pool)
+ pool = append(pool, op.str)
+ }
+ ops = append(ops, consts[op.str])
+ }
+ if op.code == utilities.OpCapture {
+ fields = append(fields, op.str)
+ }
+ }
+ return Template{
+ Version: opcodeVersion,
+ OpCodes: ops,
+ Pool: pool,
+ Verb: t.verb,
+ Fields: fields,
+ Template: t.template,
+ }
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
new file mode 100644
index 000000000..c056bd305
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
@@ -0,0 +1,11 @@
+//go:build gofuzz
+// +build gofuzz
+
+package httprule
+
+func Fuzz(data []byte) int {
+ if _, err := Parse(string(data)); err != nil {
+ return 0
+ }
+ return 0
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
new file mode 100644
index 000000000..65ffcf5cf
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
@@ -0,0 +1,368 @@
+package httprule
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// InvalidTemplateError indicates that the path template is not valid.
+type InvalidTemplateError struct {
+ tmpl string
+ msg string
+}
+
+func (e InvalidTemplateError) Error() string {
+ return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
+}
+
+// Parse parses the string representation of a path template.
+func Parse(tmpl string) (Compiler, error) {
+ if !strings.HasPrefix(tmpl, "/") {
+ return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
+ }
+ tokens, verb := tokenize(tmpl[1:])
+
+ p := parser{tokens: tokens}
+ segs, err := p.topLevelSegments()
+ if err != nil {
+ return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
+ }
+
+ return template{
+ segments: segs,
+ verb: verb,
+ template: tmpl,
+ }, nil
+}
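+
+// A minimal usage sketch, assuming the template follows the grammar of
+// google/api/http.proto:
+//
+//	compiler, err := Parse("/v1/{name=messages/*}")
+//	if err != nil {
+//		// the template is malformed
+//	}
+//	tmpl := compiler.Compile()
+//	// tmpl.OpCodes and tmpl.Pool hold the marshallable form consumed by runtime.NewPattern.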
+
+func tokenize(path string) (tokens []string, verb string) {
+ if path == "" {
+ return []string{eof}, ""
+ }
+
+ const (
+ init = iota
+ field
+ nested
+ )
+ st := init
+ for path != "" {
+ var idx int
+ switch st {
+ case init:
+ idx = strings.IndexAny(path, "/{")
+ case field:
+ idx = strings.IndexAny(path, ".=}")
+ case nested:
+ idx = strings.IndexAny(path, "/}")
+ }
+ if idx < 0 {
+ tokens = append(tokens, path)
+ break
+ }
+ switch r := path[idx]; r {
+ case '/', '.':
+ case '{':
+ st = field
+ case '=':
+ st = nested
+ case '}':
+ st = init
+ }
+ if idx == 0 {
+ tokens = append(tokens, path[idx:idx+1])
+ } else {
+ tokens = append(tokens, path[:idx], path[idx:idx+1])
+ }
+ path = path[idx+1:]
+ }
+
+ l := len(tokens)
+ // See
+ // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
+ // although normal and backwards-compat logic here is to use the last index
+ // of a colon, if the final segment is a variable followed by a colon, the
+ // part following the colon must be a verb. Hence if the previous token is
+ // an end var marker, we switch the index we're looking for to Index instead
+ // of LastIndex, so that we correctly grab the remaining part of the path as
+ // the verb.
+ var penultimateTokenIsEndVar bool
+ switch l {
+ case 0, 1:
+ // Not enough to be variable so skip this logic and don't result in an
+ // invalid index
+ default:
+ penultimateTokenIsEndVar = tokens[l-2] == "}"
+ }
+ t := tokens[l-1]
+ var idx int
+ if penultimateTokenIsEndVar {
+ idx = strings.Index(t, ":")
+ } else {
+ idx = strings.LastIndex(t, ":")
+ }
+ if idx == 0 {
+ tokens, verb = tokens[:l-1], t[1:]
+ } else if idx > 0 {
+ tokens[l-1], verb = t[:idx], t[idx+1:]
+ }
+ tokens = append(tokens, eof)
+ return tokens, verb
+}
+
+// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
+type parser struct {
+ tokens []string
+ accepted []string
+}
+
+// topLevelSegments is the target of this parser.
+func (p *parser) topLevelSegments() ([]segment, error) {
+ if _, err := p.accept(typeEOF); err == nil {
+ p.tokens = p.tokens[:0]
+ return []segment{literal(eof)}, nil
+ }
+ segs, err := p.segments()
+ if err != nil {
+ return nil, err
+ }
+ if _, err := p.accept(typeEOF); err != nil {
+ return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
+ }
+ return segs, nil
+}
+
+func (p *parser) segments() ([]segment, error) {
+ s, err := p.segment()
+ if err != nil {
+ return nil, err
+ }
+
+ segs := []segment{s}
+ for {
+ if _, err := p.accept("/"); err != nil {
+ return segs, nil
+ }
+ s, err := p.segment()
+ if err != nil {
+ return segs, err
+ }
+ segs = append(segs, s)
+ }
+}
+
+func (p *parser) segment() (segment, error) {
+ if _, err := p.accept("*"); err == nil {
+ return wildcard{}, nil
+ }
+ if _, err := p.accept("**"); err == nil {
+ return deepWildcard{}, nil
+ }
+ if l, err := p.literal(); err == nil {
+ return l, nil
+ }
+
+ v, err := p.variable()
+ if err != nil {
+ return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
+ }
+ return v, nil
+}
+
+func (p *parser) literal() (segment, error) {
+ lit, err := p.accept(typeLiteral)
+ if err != nil {
+ return nil, err
+ }
+ return literal(lit), nil
+}
+
+func (p *parser) variable() (segment, error) {
+ if _, err := p.accept("{"); err != nil {
+ return nil, err
+ }
+
+ path, err := p.fieldPath()
+ if err != nil {
+ return nil, err
+ }
+
+ var segs []segment
+ if _, err := p.accept("="); err == nil {
+ segs, err = p.segments()
+ if err != nil {
+ return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
+ }
+ } else {
+ segs = []segment{wildcard{}}
+ }
+
+ if _, err := p.accept("}"); err != nil {
+ return nil, fmt.Errorf("unterminated variable segment: %s", path)
+ }
+ return variable{
+ path: path,
+ segments: segs,
+ }, nil
+}
+
+func (p *parser) fieldPath() (string, error) {
+ c, err := p.accept(typeIdent)
+ if err != nil {
+ return "", err
+ }
+ components := []string{c}
+ for {
+ if _, err := p.accept("."); err != nil {
+ return strings.Join(components, "."), nil
+ }
+ c, err := p.accept(typeIdent)
+ if err != nil {
+ return "", fmt.Errorf("invalid field path component: %w", err)
+ }
+ components = append(components, c)
+ }
+}
+
+// A termType is a type of terminal symbols.
+type termType string
+
+// These constants define some of valid values of termType.
+// They improve readability of parse functions.
+//
+// You can also use "/", "*", "**", "." or "=" as valid values.
+const (
+ typeIdent = termType("ident")
+ typeLiteral = termType("literal")
+ typeEOF = termType("$")
+)
+
+// eof is the terminal symbol which always appears at the end of token sequence.
+const eof = "\u0000"
+
+// accept tries to accept a token in "p".
+// This function consumes a token and returns it if it matches the specified "term".
+// If it doesn't match, the function does not consume any tokens and returns an error.
+func (p *parser) accept(term termType) (string, error) {
+ t := p.tokens[0]
+ switch term {
+ case "/", "*", "**", ".", "=", "{", "}":
+ if t != string(term) && t != "/" {
+ return "", fmt.Errorf("expected %q but got %q", term, t)
+ }
+ case typeEOF:
+ if t != eof {
+ return "", fmt.Errorf("expected EOF but got %q", t)
+ }
+ case typeIdent:
+ if err := expectIdent(t); err != nil {
+ return "", err
+ }
+ case typeLiteral:
+ if err := expectPChars(t); err != nil {
+ return "", err
+ }
+ default:
+ return "", fmt.Errorf("unknown termType %q", term)
+ }
+ p.tokens = p.tokens[1:]
+ p.accepted = append(p.accepted, t)
+ return t, nil
+}
+
+// expectPChars determines if "t" consists of only pchars defined in RFC3986.
+//
+// https://www.ietf.org/rfc/rfc3986.txt, P.49
+//
+// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
+// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+// / "*" / "+" / "," / ";" / "="
+// pct-encoded = "%" HEXDIG HEXDIG
+func expectPChars(t string) error {
+ const (
+ init = iota
+ pct1
+ pct2
+ )
+ st := init
+ for _, r := range t {
+ if st != init {
+ if !isHexDigit(r) {
+ return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
+ }
+ switch st {
+ case pct1:
+ st = pct2
+ case pct2:
+ st = init
+ }
+ continue
+ }
+
+ // unreserved
+ switch {
+ case 'A' <= r && r <= 'Z':
+ continue
+ case 'a' <= r && r <= 'z':
+ continue
+ case '0' <= r && r <= '9':
+ continue
+ }
+ switch r {
+ case '-', '.', '_', '~':
+ // unreserved
+ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
+ // sub-delims
+ case ':', '@':
+ // rest of pchar
+ case '%':
+ // pct-encoded
+ st = pct1
+ default:
+ return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
+ }
+ }
+ if st != init {
+ return fmt.Errorf("invalid percent-encoding in %q", t)
+ }
+ return nil
+}
+
+// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
+func expectIdent(ident string) error {
+ if ident == "" {
+ return errors.New("empty identifier")
+ }
+ for pos, r := range ident {
+ switch {
+ case '0' <= r && r <= '9':
+ if pos == 0 {
+ return fmt.Errorf("identifier starting with digit: %s", ident)
+ }
+ continue
+ case 'A' <= r && r <= 'Z':
+ continue
+ case 'a' <= r && r <= 'z':
+ continue
+ case r == '_':
+ continue
+ default:
+ return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
+ }
+ }
+ return nil
+}
+
+func isHexDigit(r rune) bool {
+ switch {
+ case '0' <= r && r <= '9':
+ return true
+ case 'A' <= r && r <= 'F':
+ return true
+ case 'a' <= r && r <= 'f':
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
new file mode 100644
index 000000000..5a814a000
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
@@ -0,0 +1,60 @@
+package httprule
+
+import (
+ "fmt"
+ "strings"
+)
+
+type template struct {
+ segments []segment
+ verb string
+ template string
+}
+
+type segment interface {
+ fmt.Stringer
+ compile() (ops []op)
+}
+
+type wildcard struct{}
+
+type deepWildcard struct{}
+
+type literal string
+
+type variable struct {
+ path string
+ segments []segment
+}
+
+func (wildcard) String() string {
+ return "*"
+}
+
+func (deepWildcard) String() string {
+ return "**"
+}
+
+func (l literal) String() string {
+ return string(l)
+}
+
+func (v variable) String() string {
+ var segs []string
+ for _, s := range v.segments {
+ segs = append(segs, s.String())
+ }
+ return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
+}
+
+func (t template) String() string {
+ var segs []string
+ for _, s := range t.segments {
+ segs = append(segs, s.String())
+ }
+ str := strings.Join(segs, "/")
+ if t.verb != "" {
+ str = fmt.Sprintf("%s:%s", str, t.verb)
+ }
+ return "/" + str
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
new file mode 100644
index 000000000..a65d88eb8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -0,0 +1,97 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "runtime",
+ srcs = [
+ "context.go",
+ "convert.go",
+ "doc.go",
+ "errors.go",
+ "fieldmask.go",
+ "handler.go",
+ "marshal_httpbodyproto.go",
+ "marshal_json.go",
+ "marshal_jsonpb.go",
+ "marshal_proto.go",
+ "marshaler.go",
+ "marshaler_registry.go",
+ "mux.go",
+ "pattern.go",
+ "proto2_convert.go",
+ "query.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
+ deps = [
+ "//internal/httprule",
+ "//utilities",
+ "@org_golang_google_genproto_googleapis_api//httpbody",
+ "@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//grpclog",
+ "@org_golang_google_grpc//health/grpc_health_v1",
+ "@org_golang_google_grpc//metadata",
+ "@org_golang_google_grpc//status",
+ "@org_golang_google_protobuf//encoding/protojson",
+ "@org_golang_google_protobuf//proto",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//reflect/protoregistry",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/fieldmaskpb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ "@org_golang_google_protobuf//types/known/wrapperspb",
+ ],
+)
+
+go_test(
+ name = "runtime_test",
+ size = "small",
+ srcs = [
+ "context_test.go",
+ "convert_test.go",
+ "errors_test.go",
+ "fieldmask_test.go",
+ "handler_test.go",
+ "marshal_httpbodyproto_test.go",
+ "marshal_json_test.go",
+ "marshal_jsonpb_test.go",
+ "marshal_proto_test.go",
+ "marshaler_registry_test.go",
+ "mux_internal_test.go",
+ "mux_test.go",
+ "pattern_test.go",
+ "query_fuzz_test.go",
+ "query_test.go",
+ ],
+ embed = [":runtime"],
+ deps = [
+ "//runtime/internal/examplepb",
+ "//utilities",
+ "@com_github_google_go_cmp//cmp",
+ "@com_github_google_go_cmp//cmp/cmpopts",
+ "@org_golang_google_genproto_googleapis_api//httpbody",
+ "@org_golang_google_genproto_googleapis_rpc//errdetails",
+ "@org_golang_google_genproto_googleapis_rpc//status",
+ "@org_golang_google_grpc//:grpc",
+ "@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//health/grpc_health_v1",
+ "@org_golang_google_grpc//metadata",
+ "@org_golang_google_grpc//status",
+ "@org_golang_google_protobuf//encoding/protojson",
+ "@org_golang_google_protobuf//proto",
+ "@org_golang_google_protobuf//testing/protocmp",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/fieldmaskpb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ "@org_golang_google_protobuf//types/known/wrapperspb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":runtime",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
new file mode 100644
index 000000000..2f2b34243
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -0,0 +1,417 @@
+package runtime
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the http prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway
+const MetadataTrailerPrefix = "Grpc-Trailer-"
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+// header isn't present. If the value is 0 the sent `context` will not have a timeout.
+var DefaultContextTimeout = 0 * time.Second
+
+// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
+// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
+var malformedHTTPHeaders = map[string]struct{}{
+ "connection": {},
+}
+
+type (
+ rpcMethodKey struct{}
+ httpPathPatternKey struct{}
+ httpPatternKey struct{}
+
+ AnnotateContextOption func(ctx context.Context) context.Context
+)
+
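+// WithHTTPPathPattern returns an AnnotateContextOption that records the given
+// HTTP path pattern in the context, so that it can later be retrieved with
+// HTTPPathPattern.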
+func WithHTTPPathPattern(pattern string) AnnotateContextOption {
+ return func(ctx context.Context) context.Context {
+ return withHTTPPathPattern(ctx, pattern)
+ }
+}
+
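+// decodeBinHeader base64-decodes the value of a "-bin" suffixed metadata
+// header, accepting both padded and unpadded (raw) standard encodings.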
+func decodeBinHeader(v string) ([]byte, error) {
+ if len(v)%4 == 0 {
+ // Input was padded, or padding was not necessary.
+ return base64.StdEncoding.DecodeString(v)
+ }
+ return base64.RawStdEncoding.DecodeString(v)
+}
+
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+ ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+ if err != nil {
+ return nil, err
+ }
+ if md == nil {
+ return ctx, nil
+ }
+
+ return metadata.NewOutgoingContext(ctx, md), nil
+}
+
+// AnnotateIncomingContext adds context information such as metadata from the request.
+// Attach metadata as incoming context.
+func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+ ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+ if err != nil {
+ return nil, err
+ }
+ if md == nil {
+ return ctx, nil
+ }
+
+ return metadata.NewIncomingContext(ctx, md), nil
+}
+
+func isValidGRPCMetadataKey(key string) bool {
+ // Must be a valid gRPC "Header-Name" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means 0-9 a-z _ - .
+ // Only lowercase letters are valid in the wire protocol, but the client library will normalize
+ // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable.
+ bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ validLowercaseLetter := ch >= 'a' && ch <= 'z'
+ validUppercaseLetter := ch >= 'A' && ch <= 'Z'
+ validDigit := ch >= '0' && ch <= '9'
+ validOther := ch == '.' || ch == '-' || ch == '_'
+ if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther {
+ return false
+ }
+ }
+ return true
+}
+
+func isValidGRPCMetadataTextValue(textValue string) bool {
+ // Must be a valid gRPC "ASCII-Value" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive.
+ bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ if ch < 0x20 || ch > 0x7E {
+ return false
+ }
+ }
+ return true
+}
+
+func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
+ ctx = withRPCMethod(ctx, rpcMethodName)
+ for _, o := range options {
+ ctx = o(ctx)
+ }
+ timeout := DefaultContextTimeout
+ if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+ var err error
+ timeout, err = timeoutDecode(tm)
+ if err != nil {
+ return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+ }
+ }
+ var pairs []string
+ for key, vals := range req.Header {
+ key = textproto.CanonicalMIMEHeaderKey(key)
+ switch key {
+ case xForwardedFor, xForwardedHost:
+ // Handled separately below
+ continue
+ }
+
+ for _, val := range vals {
+ // For backwards-compatibility, pass through 'authorization' header with no prefix.
+ if key == "Authorization" {
+ pairs = append(pairs, "authorization", val)
+ }
+ if h, ok := mux.incomingHeaderMatcher(key); ok {
+ if !isValidGRPCMetadataKey(h) {
+ grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h)
+ continue
+ }
+ // Handles "-bin" metadata in grpc, since grpc will do another base64
+ // encode before sending to server, we need to decode it first.
+ if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+ b, err := decodeBinHeader(val)
+ if err != nil {
+ return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+ }
+
+ val = string(b)
+ } else if !isValidGRPCMetadataTextValue(val) {
+ grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h)
+ continue
+ }
+ pairs = append(pairs, h, val)
+ }
+ }
+ }
+ if host := req.Header.Get(xForwardedHost); host != "" {
+ pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+ } else if req.Host != "" {
+ pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+ }
+
+ xff := req.Header.Values(xForwardedFor)
+ if addr := req.RemoteAddr; addr != "" {
+ if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+ xff = append(xff, remoteIP)
+ }
+ }
+ if len(xff) > 0 {
+ pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", "))
+ }
+
+ if timeout != 0 {
+ ctx, _ = context.WithTimeout(ctx, timeout)
+ }
+ if len(pairs) == 0 {
+ return ctx, nil, nil
+ }
+ md := metadata.Pairs(pairs...)
+ for _, mda := range mux.metadataAnnotators {
+ md = metadata.Join(md, mda(ctx, req))
+ }
+ return ctx, md, nil
+}
+
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+ HeaderMD metadata.MD
+ TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+ if ctx == nil {
+ return md, false
+ }
+ md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+ return
+}
+
+// ServerTransportStream implements grpc.ServerTransportStream.
+// It should only be used by the generated files to support grpc.SendHeader
+// outside of gRPC server use.
+type ServerTransportStream struct {
+ mu sync.Mutex
+ header metadata.MD
+ trailer metadata.MD
+}
+
+// Method returns the method for the stream.
+func (s *ServerTransportStream) Method() string {
+ return ""
+}
+
+// Header returns the header metadata of the stream.
+func (s *ServerTransportStream) Header() metadata.MD {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.header.Copy()
+}
+
+// SetHeader sets the header metadata.
+func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+
+ s.mu.Lock()
+ s.header = metadata.Join(s.header, md)
+ s.mu.Unlock()
+ return nil
+}
+
+// SendHeader sets the header metadata.
+func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
+ return s.SetHeader(md)
+}
+
+// Trailer returns the cached trailer metadata.
+func (s *ServerTransportStream) Trailer() metadata.MD {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.trailer.Copy()
+}
+
+// SetTrailer sets the trailer metadata.
+func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+
+ s.mu.Lock()
+ s.trailer = metadata.Join(s.trailer, md)
+ s.mu.Unlock()
+ return nil
+}
+
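+// timeoutDecode parses a Grpc-Timeout header value into a time.Duration.
+// The value is an integer followed by a unit letter, for example "100m"
+// (100 milliseconds) or "5S" (5 seconds); see timeoutUnitToDuration for the
+// accepted units.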
+func timeoutDecode(s string) (time.Duration, error) {
+ size := len(s)
+ if size < 2 {
+ return 0, fmt.Errorf("timeout string is too short: %q", s)
+ }
+ d, ok := timeoutUnitToDuration(s[size-1])
+ if !ok {
+ return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+ }
+ t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return d * time.Duration(t), nil
+}
+
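+// timeoutUnitToDuration maps a gRPC timeout unit letter (H, M, S, m, u, n) to
+// the corresponding time.Duration unit.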
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+ switch u {
+ case 'H':
+ return time.Hour, true
+ case 'M':
+ return time.Minute, true
+ case 'S':
+ return time.Second, true
+ case 'm':
+ return time.Millisecond, true
+ case 'u':
+ return time.Microsecond, true
+ case 'n':
+ return time.Nanosecond, true
+ default:
+ return
+ }
+}
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+ switch hdr {
+ case
+ "Accept",
+ "Accept-Charset",
+ "Accept-Language",
+ "Accept-Ranges",
+ "Authorization",
+ "Cache-Control",
+ "Content-Type",
+ "Cookie",
+ "Date",
+ "Expect",
+ "From",
+ "Host",
+ "If-Match",
+ "If-Modified-Since",
+ "If-None-Match",
+ "If-Schedule-Tag-Match",
+ "If-Unmodified-Since",
+ "Max-Forwards",
+ "Origin",
+ "Pragma",
+ "Referer",
+ "User-Agent",
+ "Via",
+ "Warning":
+ return true
+ }
+ return false
+}
+
+// isMalformedHTTPHeader checks whether header belongs to the list of
+// "malformed headers" and would be rejected by the gRPC server.
+func isMalformedHTTPHeader(header string) bool {
+ _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
+ return isMalformed
+}
+
+// RPCMethod returns the method string for the server context. The returned
+// string is in the format of "/package.service/method".
+func RPCMethod(ctx context.Context) (string, bool) {
+ m := ctx.Value(rpcMethodKey{})
+ if m == nil {
+ return "", false
+ }
+ ms, ok := m.(string)
+ if !ok {
+ return "", false
+ }
+ return ms, true
+}
+
+func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
+ return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
+}
+
+// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
+// The format of the returned string is defined by the google.api.http path template type.
+func HTTPPathPattern(ctx context.Context) (string, bool) {
+ m := ctx.Value(httpPathPatternKey{})
+ if m == nil {
+ return "", false
+ }
+ ms, ok := m.(string)
+ if !ok {
+ return "", false
+ }
+ return ms, true
+}
+
+func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
+ return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
+}
+
+// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists.
+func HTTPPattern(ctx context.Context) (Pattern, bool) {
+ v, ok := ctx.Value(httpPatternKey{}).(Pattern)
+ return v, ok
+}
+
+func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context {
+ return context.WithValue(ctx, httpPatternKey{}, httpPattern)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
new file mode 100644
index 000000000..d7b15fcfb
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -0,0 +1,318 @@
+package runtime
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/types/known/durationpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String just returns the given string.
+// It is just for compatibility to other types.
+func String(val string) (string, error) {
+ return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+ return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+ s := strings.Split(val, sep)
+ values := make([]bool, len(s))
+ for i, v := range s {
+ value, err := Bool(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+ s := strings.Split(val, sep)
+ values := make([]float64, len(s))
+ for i, v := range s {
+ value, err := Float64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+ s := strings.Split(val, sep)
+ values := make([]float32, len(s))
+ for i, v := range s {
+ value, err := Float32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+ s := strings.Split(val, sep)
+ values := make([]int64, len(s))
+ for i, v := range s {
+ value, err := Int64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+ i, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+ s := strings.Split(val, sep)
+ values := make([]int32, len(s))
+ for i, v := range s {
+ value, err := Int32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+ return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+ s := strings.Split(val, sep)
+ values := make([]uint64, len(s))
+ for i, v := range s {
+ value, err := Uint64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+ i, err := strconv.ParseUint(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+ s := strings.Split(val, sep)
+ values := make([]uint32, len(s))
+ for i, v := range s {
+ value, err := Uint32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// The byte sequence is expected in standard or URL-safe base64 encoding.
+func Bytes(val string) ([]byte, error) {
+ b, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ b, err = base64.URLEncoding.DecodeString(val)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return b, nil
+}
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in standard
+// or URL-safe base64, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+ s := strings.Split(val, sep)
+ values := make([][]byte, len(s))
+ for i, v := range s {
+ value, err := Bytes(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamppb.Timestamp.
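+// For example, both 2000-01-01T00:00:00Z and "2000-01-01T00:00:00Z" are
+// accepted, because surrounding double quotes are stripped before the value is
+// re-quoted and passed to protojson.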
+func Timestamp(val string) (*timestamppb.Timestamp, error) {
+ var r timestamppb.Timestamp
+ val = strconv.Quote(strings.Trim(val, `"`))
+ unmarshaler := &protojson.UnmarshalOptions{}
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// Duration converts the given string into a durationpb.Duration.
+func Duration(val string) (*durationpb.Duration, error) {
+ var r durationpb.Duration
+ val = strconv.Quote(strings.Trim(val, `"`))
+ unmarshaler := &protojson.UnmarshalOptions{}
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// Enum converts the given string into an int32 that should be type cast to the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+ e, ok := enumValMap[val]
+ if ok {
+ return e, nil
+ }
+
+ i, err := Int32(val)
+ if err != nil {
+ return 0, fmt.Errorf("%s is not valid", val)
+ }
+ for _, v := range enumValMap {
+ if v == i {
+ return i, nil
+ }
+ }
+ return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type cast to the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+ s := strings.Split(val, sep)
+ values := make([]int32, len(s))
+ for i, v := range s {
+ value, err := Enum(v, enumValMap)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Support for google.protobuf.wrappers on top of primitive types
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrapperspb.StringValue, error) {
+ return wrapperspb.String(val), nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrapperspb.FloatValue, error) {
+ parsedVal, err := Float32(val)
+ return wrapperspb.Float(parsedVal), err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
+ parsedVal, err := Float64(val)
+ return wrapperspb.Double(parsedVal), err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrapperspb.BoolValue, error) {
+ parsedVal, err := Bool(val)
+ return wrapperspb.Bool(parsedVal), err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrapperspb.Int32Value, error) {
+ parsedVal, err := Int32(val)
+ return wrapperspb.Int32(parsedVal), err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
+ parsedVal, err := Uint32(val)
+ return wrapperspb.UInt32(parsedVal), err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrapperspb.Int64Value, error) {
+ parsedVal, err := Int64(val)
+ return wrapperspb.Int64(parsedVal), err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
+ parsedVal, err := Uint64(val)
+ return wrapperspb.UInt64(parsedVal), err
+}
+
+// BytesValue well-known type support as wrapper around bytes[] type
+func BytesValue(val string) (*wrapperspb.BytesValue, error) {
+ parsedVal, err := Bytes(val)
+ return wrapperspb.Bytes(parsedVal), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
new file mode 100644
index 000000000..b6e5ddf7a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
new file mode 100644
index 000000000..01f573419
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -0,0 +1,191 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net/http"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// ErrorHandlerFunc is the signature used to configure error handling.
+type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+// StreamErrorHandlerFunc is the signature used to configure stream error handling.
+type StreamErrorHandlerFunc func(context.Context, error) *status.Status
+
+// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
+type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
+
+// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
+// passed to the DefaultRoutingErrorHandler.
+type HTTPStatusError struct {
+ HTTPStatus int
+ Err error
+}
+
+func (e *HTTPStatusError) Error() string {
+ return e.Err.Error()
+}
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+ switch code {
+ case codes.OK:
+ return http.StatusOK
+ case codes.Canceled:
+ return 499
+ case codes.Unknown:
+ return http.StatusInternalServerError
+ case codes.InvalidArgument:
+ return http.StatusBadRequest
+ case codes.DeadlineExceeded:
+ return http.StatusGatewayTimeout
+ case codes.NotFound:
+ return http.StatusNotFound
+ case codes.AlreadyExists:
+ return http.StatusConflict
+ case codes.PermissionDenied:
+ return http.StatusForbidden
+ case codes.Unauthenticated:
+ return http.StatusUnauthorized
+ case codes.ResourceExhausted:
+ return http.StatusTooManyRequests
+ case codes.FailedPrecondition:
+ // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
+ return http.StatusBadRequest
+ case codes.Aborted:
+ return http.StatusConflict
+ case codes.OutOfRange:
+ return http.StatusBadRequest
+ case codes.Unimplemented:
+ return http.StatusNotImplemented
+ case codes.Internal:
+ return http.StatusInternalServerError
+ case codes.Unavailable:
+ return http.StatusServiceUnavailable
+ case codes.DataLoss:
+ return http.StatusInternalServerError
+ default:
+ grpclog.Warningf("Unknown gRPC error code: %v", code)
+ return http.StatusInternalServerError
+ }
+}
+
+// HTTPError uses the mux-configured error handler.
+func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+ mux.errorHandler(ctx, mux, marshaler, w, r, err)
+}
+
+// DefaultHTTPErrorHandler is the default error handler.
+// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
+// If "err" is an HTTPStatusError, the function replies with the status code provided by that struct. This is
+// intended to allow passing through of specific statuses via the function set with WithRoutingErrorHandler
+// on the ServeMux constructor, to handle edge cases for which the standard mappings in HTTPStatusFromCode
+// are insufficient.
+// Otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body written by this function is a Status message marshaled by the Marshaler.
+func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+ // return Internal when Marshal failed
+ const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+ const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}`
+
+ var customStatus *HTTPStatusError
+ if errors.As(err, &customStatus) {
+ err = customStatus.Err
+ }
+
+ s := status.Convert(err)
+
+ w.Header().Del("Trailer")
+ w.Header().Del("Transfer-Encoding")
+
+ respRw, err := mux.forwardResponseRewriter(ctx, s.Proto())
+ if err != nil {
+ grpclog.Errorf("Failed to rewrite error message %q: %v", s, err)
+ w.WriteHeader(http.StatusInternalServerError)
+ if _, err := io.WriteString(w, fallbackRewriter); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+ return
+ }
+
+ contentType := marshaler.ContentType(respRw)
+ w.Header().Set("Content-Type", contentType)
+
+ if s.Code() == codes.Unauthenticated {
+ w.Header().Set("WWW-Authenticate", s.Message())
+ }
+
+ buf, merr := marshaler.Marshal(respRw)
+ if merr != nil {
+ grpclog.Errorf("Failed to marshal error message %q: %v", s, merr)
+ w.WriteHeader(http.StatusInternalServerError)
+ if _, err := io.WriteString(w, fallback); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+ return
+ }
+
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Error("Failed to extract ServerMetadata from context")
+ }
+
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+ // Unless the request includes a TE header field indicating "trailers"
+ // is acceptable, as described in Section 4.3, a server SHOULD NOT
+ // generate trailer fields that it believes are necessary for the user
+ // agent to receive.
+ doForwardTrailers := requestAcceptsTrailers(r)
+
+ if doForwardTrailers {
+ handleForwardResponseTrailerHeader(w, mux, md)
+ w.Header().Set("Transfer-Encoding", "chunked")
+ }
+
+ st := HTTPStatusFromCode(s.Code())
+ if customStatus != nil {
+ st = customStatus.HTTPStatus
+ }
+
+ w.WriteHeader(st)
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+
+ if doForwardTrailers {
+ handleForwardResponseTrailer(w, mux, md)
+ }
+}
+
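+// DefaultStreamErrorHandler converts an error into a *status.Status using
+// status.Convert.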
+func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
+ return status.Convert(err)
+}
+
+// DefaultRoutingErrorHandler is our default handler for routing errors.
+// By default, HTTP status codes are mapped to gRPC error codes as follows:
+//
+// NotFound -> grpc.NotFound
+// StatusBadRequest -> grpc.InvalidArgument
+// MethodNotAllowed -> grpc.Unimplemented
+// Other -> grpc.Internal, method is not expecting to be called for anything else
+func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
+ sterr := status.Error(codes.Internal, "Unexpected routing error")
+ switch httpStatus {
+ case http.StatusBadRequest:
+ sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
+ case http.StatusMethodNotAllowed:
+ sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
+ case http.StatusNotFound:
+ sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
+ }
+ mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
new file mode 100644
index 000000000..9005d6a0b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -0,0 +1,168 @@
+package runtime
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
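+// getFieldByName resolves a field descriptor by its proto name, falling back
+// to the JSON name when no proto field matches.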
+func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
+ fd := fields.ByName(protoreflect.Name(name))
+ if fd != nil {
+ return fd
+ }
+
+ return fields.ByJSONName(name)
+}
+
+// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
+func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
+ fm := &field_mask.FieldMask{}
+ var root interface{}
+
+ if err := json.NewDecoder(r).Decode(&root); err != nil {
+ if errors.Is(err, io.EOF) {
+ return fm, nil
+ }
+ return nil, err
+ }
+
+ queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
+ for len(queue) > 0 {
+ // dequeue an item
+ item := queue[0]
+ queue = queue[1:]
+
+ m, ok := item.node.(map[string]interface{})
+ switch {
+ case ok && len(m) > 0:
+ // if the item is an object, then enqueue all of its children
+ for k, v := range m {
+ if item.msg == nil {
+ return nil, errors.New("JSON structure did not match request type")
+ }
+
+ fd := getFieldByName(item.msg.Descriptor().Fields(), k)
+ if fd == nil {
+ return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
+ }
+
+ if isDynamicProtoMessage(fd.Message()) {
+ for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
+ newPath := p
+ if item.path != "" {
+ newPath = item.path + "." + newPath
+ }
+ queue = append(queue, fieldMaskPathItem{path: newPath})
+ }
+ continue
+ }
+
+ if isProtobufAnyMessage(fd.Message()) && !fd.IsList() {
+ _, hasTypeField := v.(map[string]interface{})["@type"]
+ if hasTypeField {
+ queue = append(queue, fieldMaskPathItem{path: k})
+ continue
+ } else {
+ return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
+ }
+
+ }
+
+ child := fieldMaskPathItem{
+ node: v,
+ }
+ if item.path == "" {
+ child.path = string(fd.FullName().Name())
+ } else {
+ child.path = item.path + "." + string(fd.FullName().Name())
+ }
+
+ switch {
+ case fd.IsList(), fd.IsMap():
+ // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
+ // Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
+ fm.Paths = append(fm.Paths, child.path)
+ case fd.Message() != nil:
+ child.msg = item.msg.Get(fd).Message()
+ fallthrough
+ default:
+ queue = append(queue, child)
+ }
+ }
+ case ok && len(m) == 0:
+ fallthrough
+ case len(item.path) > 0:
+ // otherwise, it's a leaf node so print its path
+ fm.Paths = append(fm.Paths, item.path)
+ }
+ }
+
+ // Sort for deterministic output in the presence
+ // of repeated fields.
+ sort.Strings(fm.Paths)
+
+ return fm, nil
+}
+
+func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
+ return md != nil && (md.FullName() == "google.protobuf.Any")
+}
+
+func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
+ return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
+}
+
+// buildPathsBlindly does not attempt to match proto field names to the
+// json value keys. Instead it relies completely on the structure of
+// the unmarshalled JSON contained in the "in" argument.
+// Returns a slice containing all subpaths with the root at the
+// passed in name and json value.
+func buildPathsBlindly(name string, in interface{}) []string {
+ m, ok := in.(map[string]interface{})
+ if !ok {
+ return []string{name}
+ }
+
+ var paths []string
+ queue := []fieldMaskPathItem{{path: name, node: m}}
+ for len(queue) > 0 {
+ cur := queue[0]
+ queue = queue[1:]
+
+ m, ok := cur.node.(map[string]interface{})
+ if !ok {
+ // This should never happen since we should always check that we only add
+ // nodes of type map[string]interface{} to the queue.
+ continue
+ }
+ for k, v := range m {
+ if mi, ok := v.(map[string]interface{}); ok {
+ queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
+ } else {
+ // This is not a struct, so there are no more levels to descend.
+ curPath := cur.path + "." + k
+ paths = append(paths, curPath)
+ }
+ }
+ }
+ return paths
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask.
+type fieldMaskPathItem struct {
+ // the list of prior fields leading up to node connected by dots
+ path string
+
+ // a generic decoded json object the current item to inspect for further path extraction
+ node interface{}
+
+ // parent message
+ msg protoreflect.Message
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
new file mode 100644
index 000000000..9f50a569e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -0,0 +1,247 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "strings"
+
+ "google.golang.org/genproto/googleapis/api/httpbody"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// ForwardResponseStream forwards the stream from gRPC server to REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+ rc := http.NewResponseController(w)
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Error("Failed to extract ServerMetadata from context")
+ http.Error(w, "unexpected error", http.StatusInternalServerError)
+ return
+ }
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ w.Header().Set("Transfer-Encoding", "chunked")
+ if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+
+ var delimiter []byte
+ if d, ok := marshaler.(Delimited); ok {
+ delimiter = d.Delimiter()
+ } else {
+ delimiter = []byte("\n")
+ }
+
+ var wroteHeader bool
+ for {
+ resp, err := recv()
+ if errors.Is(err, io.EOF) {
+ return
+ }
+ if err != nil {
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+ if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+
+ respRw, err := mux.forwardResponseRewriter(ctx, resp)
+ if err != nil {
+ grpclog.Errorf("Rewrite error: %v", err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+
+ if !wroteHeader {
+ w.Header().Set("Content-Type", marshaler.ContentType(respRw))
+ }
+
+ var buf []byte
+ httpBody, isHTTPBody := respRw.(*httpbody.HttpBody)
+ switch {
+ case respRw == nil:
+ buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
+ case isHTTPBody:
+ buf = httpBody.GetData()
+ default:
+ result := map[string]interface{}{"result": respRw}
+ if rb, ok := respRw.(responseBody); ok {
+ result["result"] = rb.XXX_ResponseBody()
+ }
+
+ buf, err = marshaler.Marshal(result)
+ }
+
+ if err != nil {
+ grpclog.Errorf("Failed to marshal response chunk: %v", err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to send response chunk: %v", err)
+ return
+ }
+ wroteHeader = true
+ if _, err := w.Write(delimiter); err != nil {
+ grpclog.Errorf("Failed to send delimiter chunk: %v", err)
+ return
+ }
+ err = rc.Flush()
+ if err != nil {
+ if errors.Is(err, http.ErrNotSupported) {
+ grpclog.Errorf("Flush not supported in %T", w)
+ http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+ return
+ }
+ grpclog.Errorf("Failed to flush response to client: %v", err)
+ return
+ }
+ }
+}
+
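+// handleForwardResponseServerMetadata copies gRPC header metadata onto the
+// HTTP response headers, using the mux's outgoing header matcher to decide
+// which keys are forwarded and under which names.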
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k, vs := range md.HeaderMD {
+ if h, ok := mux.outgoingHeaderMatcher(k); ok {
+ for _, v := range vs {
+ w.Header().Add(h, v)
+ }
+ }
+ }
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k := range md.TrailerMD {
+ if h, ok := mux.outgoingTrailerMatcher(k); ok {
+ w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h))
+ }
+ }
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k, vs := range md.TrailerMD {
+ if h, ok := mux.outgoingTrailerMatcher(k); ok {
+ for _, v := range vs {
+ w.Header().Add(h, v)
+ }
+ }
+ }
+}
+
+// responseBody is the interface containing the method for getting the field to marshal to the response body.
+// This method is generated for the response struct from the value of `response_body` in the `google.api.HttpRule`.
+type responseBody interface {
+ XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Error("Failed to extract ServerMetadata from context")
+ }
+
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+ // Unless the request includes a TE header field indicating "trailers"
+ // is acceptable, as described in Section 4.3, a server SHOULD NOT
+ // generate trailer fields that it believes are necessary for the user
+ // agent to receive.
+ doForwardTrailers := requestAcceptsTrailers(req)
+
+ if doForwardTrailers {
+ handleForwardResponseTrailerHeader(w, mux, md)
+ w.Header().Set("Transfer-Encoding", "chunked")
+ }
+
+ contentType := marshaler.ContentType(resp)
+ w.Header().Set("Content-Type", contentType)
+
+ if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+ respRw, err := mux.forwardResponseRewriter(ctx, resp)
+ if err != nil {
+ grpclog.Errorf("Rewrite error: %v", err)
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+ var buf []byte
+ if rb, ok := respRw.(responseBody); ok {
+ buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+ } else {
+ buf, err = marshaler.Marshal(respRw)
+ }
+ if err != nil {
+ grpclog.Errorf("Marshal error: %v", err)
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+
+ if !doForwardTrailers {
+ w.Header().Set("Content-Length", strconv.Itoa(len(buf)))
+ }
+
+ if _, err = w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+
+ if doForwardTrailers {
+ handleForwardResponseTrailer(w, mux, md)
+ }
+}
+
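+// requestAcceptsTrailers reports whether the client advertised support for
+// trailers through a "TE: trailers" request header.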
+func requestAcceptsTrailers(req *http.Request) bool {
+ te := req.Header.Get("TE")
+ return strings.Contains(strings.ToLower(te), "trailers")
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+ if len(opts) == 0 {
+ return nil
+ }
+ for _, opt := range opts {
+ if err := opt(ctx, w, resp); err != nil {
+ return fmt.Errorf("error handling ForwardResponseOptions: %w", err)
+ }
+ }
+ return nil
+}
+
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
+ st := mux.streamErrorHandler(ctx, err)
+ msg := errorChunk(st)
+ if !wroteHeader {
+ w.Header().Set("Content-Type", marshaler.ContentType(msg))
+ w.WriteHeader(HTTPStatusFromCode(st.Code()))
+ }
+ buf, err := marshaler.Marshal(msg)
+ if err != nil {
+ grpclog.Errorf("Failed to marshal an error: %v", err)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to notify error to client: %v", err)
+ return
+ }
+ if _, err := w.Write(delimiter); err != nil {
+ grpclog.Errorf("Failed to send delimiter chunk: %v", err)
+ return
+ }
+}
+
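+// errorChunk wraps a gRPC status in the {"error": ...} envelope used for
+// error messages emitted on a response stream.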
+func errorChunk(st *status.Status) map[string]proto.Message {
+ return map[string]proto.Message{"error": st.Proto()}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
new file mode 100644
index 000000000..6de2e220c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,32 @@
+package runtime
+
+import (
+ "google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, then this will
+// simply fall back to the Marshaler specified as its default Marshaler.
+type HTTPBodyMarshaler struct {
+ Marshaler
+}
+
+// ContentType returns its specified content type in case v is a
+// google.api.HttpBody message, otherwise it will fall back to the default Marshaler's
+// content type.
+func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
+ if httpBody, ok := v.(*httpbody.HttpBody); ok {
+ return httpBody.GetContentType()
+ }
+ return h.Marshaler.ContentType(v)
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+ if httpBody, ok := v.(*httpbody.HttpBody); ok {
+ return httpBody.GetData(), nil
+ }
+ return h.Marshaler.Marshal(v)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
new file mode 100644
index 000000000..fe52081ab
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
@@ -0,0 +1,50 @@
+package runtime
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Golang.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. map, oneof, ....
+//
+// The NewEncoder and NewDecoder types return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType(_ interface{}) string {
+ return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output
+func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ return json.MarshalIndent(v, prefix, indent)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+ return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+ return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+ return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
new file mode 100644
index 000000000..8376d1e0e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -0,0 +1,349 @@
+package runtime
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
+// It supports the full functionality of protobuf unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb struct {
+ protojson.MarshalOptions
+ protojson.UnmarshalOptions
+}
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType(_ interface{}) string {
+ return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := j.marshalTo(&buf, v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+ p, ok := v.(proto.Message)
+ if !ok {
+ buf, err := j.marshalNonProtoField(v)
+ if err != nil {
+ return err
+ }
+ if j.Indent != "" {
+ b := &bytes.Buffer{}
+ if err := json.Indent(b, buf, "", j.Indent); err != nil {
+ return err
+ }
+ buf = b.Bytes()
+ }
+ _, err = w.Write(buf)
+ return err
+ }
+
+ b, err := j.MarshalOptions.Marshal(p)
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(b)
+ return err
+}
+
+var (
+ // protoMessageType is stored to prevent constant lookup of the same type at runtime.
+ protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON,
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+ if v == nil {
+ return []byte("null"), nil
+ }
+ rv := reflect.ValueOf(v)
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return []byte("null"), nil
+ }
+ rv = rv.Elem()
+ }
+
+ if rv.Kind() == reflect.Slice {
+ if rv.IsNil() {
+ if j.EmitUnpopulated {
+ return []byte("[]"), nil
+ }
+ return []byte("null"), nil
+ }
+
+ if rv.Type().Elem().Implements(protoMessageType) {
+ var buf bytes.Buffer
+ if err := buf.WriteByte('['); err != nil {
+ return nil, err
+ }
+ for i := 0; i < rv.Len(); i++ {
+ if i != 0 {
+ if err := buf.WriteByte(','); err != nil {
+ return nil, err
+ }
+ }
+ if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+ return nil, err
+ }
+ }
+ if err := buf.WriteByte(']'); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+ }
+
+ if rv.Type().Elem().Implements(typeProtoEnum) {
+ var buf bytes.Buffer
+ if err := buf.WriteByte('['); err != nil {
+ return nil, err
+ }
+ for i := 0; i < rv.Len(); i++ {
+ if i != 0 {
+ if err := buf.WriteByte(','); err != nil {
+ return nil, err
+ }
+ }
+ var err error
+ if j.UseEnumNumbers {
+ _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
+ } else {
+ _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err := buf.WriteByte(']'); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+ }
+ }
+
+ if rv.Kind() == reflect.Map {
+ m := make(map[string]*json.RawMessage)
+ for _, k := range rv.MapKeys() {
+ buf, err := j.Marshal(rv.MapIndex(k).Interface())
+ if err != nil {
+ return nil, err
+ }
+ m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+ }
+ return json.Marshal(m)
+ }
+ if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
+ return json.Marshal(enum.String())
+ }
+ return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v"
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+ return unmarshalJSONPb(data, j.UnmarshalOptions, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+ d := json.NewDecoder(r)
+ return DecoderWrapper{
+ Decoder: d,
+ UnmarshalOptions: j.UnmarshalOptions,
+ }
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+ *json.Decoder
+ protojson.UnmarshalOptions
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+ return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+ return EncoderFunc(func(v interface{}) error {
+ if err := j.marshalTo(w, v); err != nil {
+ return err
+ }
+ // mimic json.Encoder by adding a newline (makes output
+ // easier to read when it contains multiple encoded items)
+ _, err := w.Write(j.Delimiter())
+ return err
+ })
+}
+
+func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ d := json.NewDecoder(bytes.NewReader(data))
+ return decodeJSONPb(d, unmarshaler, v)
+}
+
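+// decodeJSONPb decodes the next JSON value from d into v, using protojson for
+// proto messages and decodeNonProtoField for everything else.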
+func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ p, ok := v.(proto.Message)
+ if !ok {
+ return decodeNonProtoField(d, unmarshaler, v)
+ }
+
+ // Decode into bytes for marshalling
+ var b json.RawMessage
+ if err := d.Decode(&b); err != nil {
+ return err
+ }
+
+ return unmarshaler.Unmarshal([]byte(b), p)
+}
+
+func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", v)
+ }
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ if rv.Type().ConvertibleTo(typeProtoMessage) {
+ // Decode into bytes for marshalling
+ var b json.RawMessage
+ if err := d.Decode(&b); err != nil {
+ return err
+ }
+
+ return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
+ }
+ rv = rv.Elem()
+ }
+ if rv.Kind() == reflect.Map {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ conv, ok := convFromType[rv.Type().Key().Kind()]
+ if !ok {
+ return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+ }
+
+ m := make(map[string]*json.RawMessage)
+ if err := d.Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+ if err := result[1].Interface(); err != nil {
+ return err.(error)
+ }
+ bk := result[0]
+ bv := reflect.New(rv.Type().Elem())
+ if v == nil {
+ null := json.RawMessage("null")
+ v = &null
+ }
+ if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
+ return err
+ }
+ rv.SetMapIndex(bk, bv.Elem())
+ }
+ return nil
+ }
+ if rv.Kind() == reflect.Slice {
+ if rv.Type().Elem().Kind() == reflect.Uint8 {
+ var sl []byte
+ if err := d.Decode(&sl); err != nil {
+ return err
+ }
+ if sl != nil {
+ rv.SetBytes(sl)
+ }
+ return nil
+ }
+
+ var sl []json.RawMessage
+ if err := d.Decode(&sl); err != nil {
+ return err
+ }
+ if sl != nil {
+ rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
+ }
+ for _, item := range sl {
+ bv := reflect.New(rv.Type().Elem())
+ if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
+ return err
+ }
+ rv.Set(reflect.Append(rv, bv.Elem()))
+ }
+ return nil
+ }
+ if _, ok := rv.Interface().(protoEnum); ok {
+ var repr interface{}
+ if err := d.Decode(&repr); err != nil {
+ return err
+ }
+ switch v := repr.(type) {
+ case string:
+ // TODO(yugui) Should use proto.StructProperties?
+ return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+ case float64:
+ rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
+ return nil
+ default:
+ return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+ }
+ }
+ return d.Decode(v)
+}
+
+type protoEnum interface {
+ fmt.Stringer
+ EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+ return []byte("\n")
+}
+
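+// convFromType maps the Go kind of a map key to the corresponding string
+// conversion function defined in convert.go; it is used in decodeNonProtoField
+// to convert JSON object keys into typed map keys.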
+var (
+ convFromType = map[reflect.Kind]reflect.Value{
+ reflect.String: reflect.ValueOf(String),
+ reflect.Bool: reflect.ValueOf(Bool),
+ reflect.Float64: reflect.ValueOf(Float64),
+ reflect.Float32: reflect.ValueOf(Float32),
+ reflect.Int64: reflect.ValueOf(Int64),
+ reflect.Int32: reflect.ValueOf(Int32),
+ reflect.Uint64: reflect.ValueOf(Uint64),
+ reflect.Uint32: reflect.ValueOf(Uint32),
+ reflect.Slice: reflect.ValueOf(Bytes),
+ }
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
new file mode 100644
index 000000000..398c780dc
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -0,0 +1,60 @@
+package runtime
+
+import (
+ "errors"
+ "io"
+
+ "google.golang.org/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType(_ interface{}) string {
+ return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return nil, errors.New("unable to marshal non proto field")
+ }
+ return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return errors.New("unable to unmarshal non proto field")
+ }
+ return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+ return DecoderFunc(func(value interface{}) error {
+ buffer, err := io.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+ return marshaller.Unmarshal(buffer, value)
+ })
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+ return EncoderFunc(func(value interface{}) error {
+ buffer, err := marshaller.Marshal(value)
+ if err != nil {
+ return err
+ }
+ if _, err := writer.Write(buffer); err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
new file mode 100644
index 000000000..2c0d25ff4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
@@ -0,0 +1,50 @@
+package runtime
+
+import (
+ "io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+ // Marshal marshals "v" into byte sequence.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal unmarshals "data" into "v".
+ // "v" must be a pointer value.
+ Unmarshal(data []byte, v interface{}) error
+ // NewDecoder returns a Decoder which reads byte sequence from "r".
+ NewDecoder(r io.Reader) Decoder
+ // NewEncoder returns an Encoder which writes bytes sequence into "w".
+ NewEncoder(w io.Writer) Encoder
+ // ContentType returns the Content-Type which this marshaler is responsible for.
+ // The parameter describes the type which is being marshalled, which can sometimes
+ // affect the content type returned.
+ ContentType(v interface{}) string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+ Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+ Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into a Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into an Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+ // Delimiter returns the record separator for the stream.
+ Delimiter() []byte
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
new file mode 100644
index 000000000..0b051e6e8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -0,0 +1,109 @@
+package runtime
+
+import (
+ "errors"
+ "mime"
+ "net/http"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+ acceptHeader = http.CanonicalHeaderKey("Accept")
+ contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+ defaultMarshaler = &HTTPBodyMarshaler{
+ Marshaler: &JSONPb{
+ MarshalOptions: protojson.MarshalOptions{
+ EmitUnpopulated: true,
+ },
+ UnmarshalOptions: protojson.UnmarshalOptions{
+ DiscardUnknown: true,
+ },
+ },
+ }
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), checks for "*".
+// If there are multiple Content-Type headers set, choose the first one that it can
+// exactly match in the registry.
+// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+ for _, acceptVal := range r.Header[acceptHeader] {
+ if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+ outbound = m
+ break
+ }
+ }
+
+ for _, contentTypeVal := range r.Header[contentTypeHeader] {
+ contentType, _, err := mime.ParseMediaType(contentTypeVal)
+ if err != nil {
+ grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+ continue
+ }
+ if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+ inbound = m
+ break
+ }
+ }
+
+ if inbound == nil {
+ inbound = mux.marshalers.mimeMap[MIMEWildcard]
+ }
+ if outbound == nil {
+ outbound = inbound
+ }
+
+ return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+ mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+ if len(mime) == 0 {
+ return errors.New("empty MIME type")
+ }
+
+ m.mimeMap[mime] = marshaler
+
+ return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with a "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+ return marshalerRegistry{
+ mimeMap: map[string]Marshaler{
+ MIMEWildcard: defaultMarshaler,
+ },
+ }
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+ return func(mux *ServeMux) {
+ if err := mux.marshalers.add(mime, marshaler); err != nil {
+ panic(err)
+ }
+ }
+}
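
For example, the wildcard default above can coexist with a binary proto marshaler registered for `application/octet-stream`; a minimal sketch follows (the listen address is illustrative):

```go
package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Requests and responses using "application/octet-stream" are handled as raw proto;
	// every other MIME type falls back to the wildcard JSON marshaler.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
	)

	_ = http.ListenAndServe(":8081", mux)
}
```
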
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
new file mode 100644
index 000000000..60c2065dd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -0,0 +1,537 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/textproto"
+ "regexp"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
+type UnescapingMode int
+
+const (
+ // UnescapingModeLegacy is the default V2 behavior, which escapes the entire
+ // path string before doing any routing.
+ UnescapingModeLegacy UnescapingMode = iota
+
+ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
+ // reserved characters.
+ UnescapingModeAllExceptReserved
+
+ // UnescapingModeAllExceptSlash unescapes URL path parameters except path
+ // separators, which will be left as "%2F".
+ UnescapingModeAllExceptSlash
+
+ // UnescapingModeAllCharacters unescapes all URL path parameters.
+ UnescapingModeAllCharacters
+
+ // UnescapingModeDefault is the default escaping type.
+ // TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
+ // reference implementation
+ UnescapingModeDefault = UnescapingModeLegacy
+)
+
+var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation
+// registration methods. It is generally recommended to use gRPC client or server interceptors instead
+// where possible.
+type Middleware func(HandlerFunc) HandlerFunc
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+ // handlers maps HTTP method to a list of handlers.
+ handlers map[string][]handler
+ middlewares []Middleware
+ forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
+ forwardResponseRewriter ForwardResponseRewriter
+ marshalers marshalerRegistry
+ incomingHeaderMatcher HeaderMatcherFunc
+ outgoingHeaderMatcher HeaderMatcherFunc
+ outgoingTrailerMatcher HeaderMatcherFunc
+ metadataAnnotators []func(context.Context, *http.Request) metadata.MD
+ errorHandler ErrorHandlerFunc
+ streamErrorHandler StreamErrorHandlerFunc
+ routingErrorHandler RoutingErrorHandlerFunc
+ disablePathLengthFallback bool
+ unescapingMode UnescapingMode
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages
+// before they are forwarded in a unary, stream, or error response.
+type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error)
+
+// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic
+// that can rewrite the final response before it is forwarded.
+//
+// The response rewriter function is called during unary message forwarding, stream message
+// forwarding and when errors are being forwarded.
+//
+// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect,
+// since this option involves making runtime changes to the response shape or type.
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption {
+ return func(sm *ServeMux) {
+ sm.forwardResponseRewriter = fwdResponseRewriter
+ }
+}
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+ }
+}
+
+// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
+// for more information.
+func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.unescapingMode = mode
+ }
+}
+
+// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC
+// interceptors when using the direct-to-implementation registration methods, which cannot rely
+// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible.
+func WithMiddlewares(middlewares ...Middleware) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.middlewares = append(serveMux.middlewares, middlewares...)
+ }
+}
+
+// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
+// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
+// done with careful consideration.
+func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ currentQueryParser = queryParameterParser
+ }
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
+// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function.
+// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+// Other headers are not added to the gRPC metadata.
+func DefaultHeaderMatcher(key string) (string, bool) {
+ switch key = textproto.CanonicalMIMEHeaderKey(key); {
+ case isPermanentHTTPHeader(key):
+ return MetadataPrefix + key, true
+ case strings.HasPrefix(key, MetadataHeaderPrefix):
+ return key[len(MetadataHeaderPrefix):], true
+ }
+ return "", false
+}
+
+func defaultOutgoingHeaderMatcher(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+}
+
+func defaultOutgoingTrailerMatcher(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
+//
+// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
+// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ for _, header := range fn.matchedMalformedHeaders() {
+ grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
+ }
+
+ return func(mux *ServeMux) {
+ mux.incomingHeaderMatcher = fn
+ }
+}
+
+// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+ if fn == nil {
+ return nil
+ }
+ headers := make([]string, 0)
+ for header := range malformedHTTPHeaders {
+ out, accept := fn(header)
+ if accept && isMalformedHTTPHeader(out) {
+ headers = append(headers, out)
+ }
+ }
+ return headers
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingHeaderMatcher = fn
+ }
+}
+
+// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingTrailerMatcher = fn
+ }
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading token from cookie and adding it in gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+ }
+}
+
+// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to configure a custom error response.
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.errorHandler = fn
+ }
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.streamErrorHandler = fn
+ }
+}
+
+// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
+//
+// The handler is called for errors which can happen before a gRPC route is selected or executed.
+// It receives one of the following error codes: StatusMethodNotAllowed, StatusNotFound, or StatusBadRequest.
+func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.routingErrorHandler = fn
+ }
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.disablePathLengthFallback = true
+ }
+}
+
+// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
+// When called the handler will forward the request to the upstream grpc service health check (defined in the
+// gRPC Health Checking Protocol).
+//
+// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
+// to set up the protocol in the gRPC server.
+//
+// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest.
+func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
+ return func(s *ServeMux) {
+ // error can be ignored since pattern is definitely valid
+ _ = s.HandlePath(
+ http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
+ ) {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+
+ resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
+ Service: r.URL.Query().Get("service"),
+ })
+ if err != nil {
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+
+ if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
+ switch resp.GetStatus() {
+ case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
+ err = status.Error(codes.Unavailable, resp.String())
+ case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
+ err = status.Error(codes.NotFound, resp.String())
+ }
+
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ _ = outboundMarshaler.NewEncoder(w).Encode(resp)
+ })
+ }
+}
+
+// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
+//
+// See WithHealthEndpointAt for the general implementation.
+func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
+ return WithHealthEndpointAt(healthCheckClient, "/healthz")
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+ serveMux := &ServeMux{
+ handlers: make(map[string][]handler),
+ forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+ forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil },
+ marshalers: makeMarshalerMIMERegistry(),
+ errorHandler: DefaultHTTPErrorHandler,
+ streamErrorHandler: DefaultStreamErrorHandler,
+ routingErrorHandler: DefaultRoutingErrorHandler,
+ unescapingMode: UnescapingModeDefault,
+ }
+
+ for _, opt := range opts {
+ opt(serveMux)
+ }
+
+ if serveMux.incomingHeaderMatcher == nil {
+ serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+ }
+ if serveMux.outgoingHeaderMatcher == nil {
+ serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher
+ }
+ if serveMux.outgoingTrailerMatcher == nil {
+ serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher
+ }
+
+ return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+ if len(s.middlewares) > 0 {
+ h = chainMiddlewares(s.middlewares)(h)
+ }
+ s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
+}
+
+// HandlePath allows users to configure custom path handlers.
+// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
+func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
+ compiler, err := httprule.Parse(pathPattern)
+ if err != nil {
+ return fmt.Errorf("parsing path pattern: %w", err)
+ }
+ tp := compiler.Compile()
+ pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
+ if err != nil {
+ return fmt.Errorf("creating new pattern: %w", err)
+ }
+ s.Handle(meth, pattern, h)
+ return nil
+}
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ path := r.URL.Path
+ if !strings.HasPrefix(path, "/") {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
+ return
+ }
+
+ // TODO(v3): remove UnescapingModeLegacy
+ if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
+ path = r.URL.RawPath
+ }
+
+ if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+ if err := r.ParseForm(); err != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ return
+ }
+ r.Method = strings.ToUpper(override)
+ }
+
+ var pathComponents []string
+ // In UnescapingModeLegacy the URL has already been fully unescaped, so also splitting on "%2F" in that
+ // mode would amount to double unescaping. In UnescapingModeAllCharacters we still split on it, because the
+ // path is the RawPath (that is, not yet unescaped). Note that the default behavior of this function will
+ // therefore change when UnescapingModeDefault is switched from UnescapingModeLegacy to UnescapingModeAllExceptReserved.
+ if s.unescapingMode == UnescapingModeAllCharacters {
+ pathComponents = encodedPathSplitter.Split(path[1:], -1)
+ } else {
+ pathComponents = strings.Split(path[1:], "/")
+ }
+
+ lastPathComponent := pathComponents[len(pathComponents)-1]
+
+ for _, h := range s.handlers[r.Method] {
+ // If the pattern has a verb, explicitly look for a suffix in the last
+ // component that matches a colon plus the verb. This allows us to
+ // handle some cases that otherwise can't be correctly handled by the
+ // former LastIndex case, such as when the verb literal itself contains
+ // a colon. This should work for all cases that have run through the
+ // parser because we know what verb we're looking for, however, there
+ // are still some cases that the parser itself cannot disambiguate. See
+ // the comment there if interested.
+
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+ if idx == 0 {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+ return
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+ if err != nil {
+ var mse MalformedSequenceError
+ if ok := errors.As(err, &mse); ok {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: mse,
+ })
+ }
+ continue
+ }
+ s.handleHandler(h, w, r, pathParams)
+ return
+ }
+
+ // if no handler has been found for the request, look up other methods
+ // to handle POST -> GET fallback if the request is subject to path
+ // length fallback.
+ // Note we are not eagerly checking the request here as we want to return the
+ // right HTTP status code, and we need to process the fallback candidates in
+ // order to do that.
+ for m, handlers := range s.handlers {
+ if m == r.Method {
+ continue
+ }
+ for _, h := range handlers {
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+ if err != nil {
+ var mse MalformedSequenceError
+ if ok := errors.As(err, &mse); ok {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: mse,
+ })
+ }
+ continue
+ }
+
+ // X-HTTP-Method-Override is optional. Always allow fallback to POST.
+ // Also, only consider POST -> GET fallbacks, and avoid falling back to
+ // potentially dangerous operations like DELETE.
+ if s.isPathLengthFallback(r) && m == http.MethodGet {
+ if err := r.ParseForm(); err != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ return
+ }
+ s.handleHandler(h, w, r, pathParams)
+ return
+ }
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
+ return
+ }
+ }
+
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+ return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+ return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+ pat Pattern
+ h HandlerFunc
+}
+
+func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+ h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams)
+}
+
+func chainMiddlewares(mws []Middleware) Middleware {
+ return func(next HandlerFunc) HandlerFunc {
+ for i := len(mws); i > 0; i-- {
+ next = mws[i-1](next)
+ }
+ return next
+ }
+}
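
Tying several of these options together, a gateway exposing a health endpoint, an extra forwarded header, and a hand-registered path might look roughly like the sketch below; the upstream address, port, and the use of `grpc.NewClient` (available in recent grpc-go releases) are assumptions for illustration.

```go
package main

import (
	"log"
	"net/http"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Assumed upstream gRPC server address.
	conn, err := grpc.NewClient("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}

	mux := runtime.NewServeMux(
		// Forward "X-Request-Id" in addition to the default header set.
		runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
			if strings.EqualFold(key, "X-Request-Id") {
				return key, true
			}
			return runtime.DefaultHeaderMatcher(key)
		}),
		// Expose the upstream gRPC health check at /healthz.
		runtime.WithHealthzEndpoint(grpc_health_v1.NewHealthClient(conn)),
	)

	// A custom, non-generated route registered directly on the mux.
	if err := mux.HandlePath(http.MethodGet, "/version", func(w http.ResponseWriter, r *http.Request, _ map[string]string) {
		_, _ = w.Write([]byte("v0.0.0"))
	}); err != nil {
		log.Fatal(err)
	}

	log.Fatal(http.ListenAndServe(":8081", mux))
}
```
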
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
new file mode 100644
index 000000000..e54507145
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -0,0 +1,381 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc/grpclog"
+)
+
+var (
+ // ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
+ ErrNotMatch = errors.New("not match to the path pattern")
+ // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+ ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type MalformedSequenceError string
+
+func (e MalformedSequenceError) Error() string {
+ return "malformed path escape " + strconv.Quote(string(e))
+}
+
+type op struct {
+ code utilities.OpCode
+ operand int
+}
+
+// Pattern is a template pattern of http request paths defined in
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
+type Pattern struct {
+ // ops is a list of operations
+ ops []op
+ // pool is a constant pool indexed by the operands or vars.
+ pool []string
+ // vars is a list of variables names to be bound by this pattern
+ vars []string
+ // stacksize is the max depth of the stack
+ stacksize int
+ // tailLen is the length of the fixed-size segments after a deep wildcard
+ tailLen int
+ // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
+ verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
+ if version != 1 {
+ grpclog.Errorf("unsupported version: %d", version)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ l := len(ops)
+ if l%2 != 0 {
+ grpclog.Errorf("odd number of ops codes: %d", l)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ var (
+ typedOps []op
+ stack, maxstack int
+ tailLen int
+ pushMSeen bool
+ vars []string
+ )
+ for i := 0; i < l; i += 2 {
+ op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ if pushMSeen {
+ tailLen++
+ }
+ stack++
+ case utilities.OpPushM:
+ if pushMSeen {
+ grpclog.Error("pushM appears twice")
+ return Pattern{}, ErrInvalidPattern
+ }
+ pushMSeen = true
+ stack++
+ case utilities.OpLitPush:
+ if op.operand < 0 || len(pool) <= op.operand {
+ grpclog.Errorf("negative literal index: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ if pushMSeen {
+ tailLen++
+ }
+ stack++
+ case utilities.OpConcatN:
+ if op.operand <= 0 {
+ grpclog.Errorf("negative concat size: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ stack -= op.operand
+ if stack < 0 {
+ grpclog.Error("stack underflow")
+ return Pattern{}, ErrInvalidPattern
+ }
+ stack++
+ case utilities.OpCapture:
+ if op.operand < 0 || len(pool) <= op.operand {
+ grpclog.Errorf("variable name index out of bound: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ v := pool[op.operand]
+ op.operand = len(vars)
+ vars = append(vars, v)
+ stack--
+ if stack < 0 {
+ grpclog.Error("stack underflow")
+ return Pattern{}, ErrInvalidPattern
+ }
+ default:
+ grpclog.Errorf("invalid opcode: %d", op.code)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ if maxstack < stack {
+ maxstack = stack
+ }
+ typedOps = append(typedOps, op)
+ }
+ return Pattern{
+ ops: typedOps,
+ pool: pool,
+ vars: vars,
+ stacksize: maxstack,
+ tailLen: tailLen,
+ verb: verb,
+ }, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+ if err != nil {
+ grpclog.Fatalf("Pattern initialization failed: %v", err)
+ }
+ return p
+}
+
+// MatchAndEscape examines components to determine if they match to a Pattern.
+// MatchAndEscape will return an error if no Patterns matched or if a pattern
+// matched but contained malformed escape sequences. If successful, the function
+// returns a mapping from field paths to their captured values.
+func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
+ if p.verb != verb {
+ if p.verb != "" {
+ return nil, ErrNotMatch
+ }
+ if len(components) == 0 {
+ components = []string{":" + verb}
+ } else {
+ components = append([]string{}, components...)
+ components[len(components)-1] += ":" + verb
+ }
+ }
+
+ var pos int
+ stack := make([]string, 0, p.stacksize)
+ captured := make([]string, len(p.vars))
+ l := len(components)
+ for _, op := range p.ops {
+ var err error
+
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush, utilities.OpLitPush:
+ if pos >= l {
+ return nil, ErrNotMatch
+ }
+ c := components[pos]
+ if op.code == utilities.OpLitPush {
+ if lit := p.pool[op.operand]; c != lit {
+ return nil, ErrNotMatch
+ }
+ } else if op.code == utilities.OpPush {
+ if c, err = unescape(c, unescapingMode, false); err != nil {
+ return nil, err
+ }
+ }
+ stack = append(stack, c)
+ pos++
+ case utilities.OpPushM:
+ end := len(components)
+ if end < pos+p.tailLen {
+ return nil, ErrNotMatch
+ }
+ end -= p.tailLen
+ c := strings.Join(components[pos:end], "/")
+ if c, err = unescape(c, unescapingMode, true); err != nil {
+ return nil, err
+ }
+ stack = append(stack, c)
+ pos = end
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ captured[op.operand] = stack[n]
+ stack = stack[:n]
+ }
+ }
+ if pos < l {
+ return nil, ErrNotMatch
+ }
+ bindings := make(map[string]string)
+ for i, val := range captured {
+ bindings[p.vars[i]] = val
+ }
+ return bindings, nil
+}
+
+// Match examines components to determine if they match to a Pattern.
+// It will never perform per-component unescaping (see: UnescapingModeLegacy).
+// Match will return an error if no Patterns matched. If successful,
+// the function returns a mapping from field paths to their captured values.
+//
+// Deprecated: Use MatchAndEscape.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+ return p.MatchAndEscape(components, verb, UnescapingModeDefault)
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+ var stack []string
+ for _, op := range p.ops {
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ stack = append(stack, "*")
+ case utilities.OpLitPush:
+ stack = append(stack, p.pool[op.operand])
+ case utilities.OpPushM:
+ stack = append(stack, "**")
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+ }
+ }
+ segs := strings.Join(stack, "/")
+ if p.verb != "" {
+ return fmt.Sprintf("/%s:%s", segs, p.verb)
+ }
+ return "/" + segs
+}
+
+/*
+ * The following code is adapted and modified from Go's standard library
+ * and carries the attached license.
+ *
+ * Copyright 2009 The Go Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file.
+ */
+
+// ishex returns whether or not the given byte is a valid hex character
+func ishex(c byte) bool {
+ switch {
+ case '0' <= c && c <= '9':
+ return true
+ case 'a' <= c && c <= 'f':
+ return true
+ case 'A' <= c && c <= 'F':
+ return true
+ }
+ return false
+}
+
+func isRFC6570Reserved(c byte) bool {
+ switch c {
+ case '!', '#', '$', '&', '\'', '(', ')', '*',
+ '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+// unhex converts a hex digit to its numeric value
+func unhex(c byte) byte {
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0'
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10
+ }
+ return 0
+}
+
+// shouldUnescapeWithMode returns true if the character is escapable with the
+// given mode
+func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
+ switch mode {
+ case UnescapingModeAllExceptReserved:
+ if isRFC6570Reserved(c) {
+ return false
+ }
+ case UnescapingModeAllExceptSlash:
+ if c == '/' {
+ return false
+ }
+ case UnescapingModeAllCharacters:
+ return true
+ }
+ return true
+}
+
+// unescape unescapes a path string using the provided mode
+func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
+ // TODO(v3): remove UnescapingModeLegacy
+ if mode == UnescapingModeLegacy {
+ return s, nil
+ }
+
+ if !multisegment {
+ mode = UnescapingModeAllCharacters
+ }
+
+ // Count %, check that they're well-formed.
+ n := 0
+ for i := 0; i < len(s); {
+ if s[i] == '%' {
+ n++
+ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+ s = s[i:]
+ if len(s) > 3 {
+ s = s[:3]
+ }
+
+ return "", MalformedSequenceError(s)
+ }
+ i += 3
+ } else {
+ i++
+ }
+ }
+
+ if n == 0 {
+ return s, nil
+ }
+
+ var t strings.Builder
+ t.Grow(len(s))
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '%':
+ c := unhex(s[i+1])<<4 | unhex(s[i+2])
+ if shouldUnescapeWithMode(c, mode) {
+ t.WriteByte(c)
+ i += 2
+ continue
+ }
+ fallthrough
+ default:
+ t.WriteByte(s[i])
+ }
+ }
+
+ return t.String(), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
new file mode 100644
index 000000000..d549407f2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+ "google.golang.org/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is same as the given string value.
+func StringP(val string) (*string, error) {
+ return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is same as the parsed value.
+func BoolP(val string) (*bool, error) {
+ b, err := Bool(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is same as the parsed number.
+func Float64P(val string) (*float64, error) {
+ f, err := Float64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is same as the parsed number.
+func Float32P(val string) (*float32, error) {
+ f, err := Float32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to a int64 whose value is same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+ i, err := Int64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to a int32 whose value is same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+ i, err := Int32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int32(i), err
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+ i, err := Uint64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint64(i), err
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+ i, err := Uint32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint32(i), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
new file mode 100644
index 000000000..fe634174b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -0,0 +1,372 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/known/durationpb"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+ "google.golang.org/protobuf/types/known/structpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
+
+// QueryParameterParser defines interface for all query parameter parsers
+type QueryParameterParser interface {
+ Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
+}
+
+// PopulateQueryParameters parses query parameters
+// into "msg" using current query parser
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+ return currentQueryParser.Parse(msg, values, filter)
+}
+
+// DefaultQueryParser is a QueryParameterParser which implements the default
+// query parameters parsing behavior.
+//
+// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
+type DefaultQueryParser struct{}
+
+// Parse populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+ for key, values := range values {
+ if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
+ key = match[1]
+ values = append([]string{match[2]}, values...)
+ }
+
+ msgValue := msg.ProtoReflect()
+ fieldPath := normalizeFieldPath(msgValue, strings.Split(key, "."))
+ if filter.HasCommonPrefix(fieldPath) {
+ continue
+ }
+ if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+ fieldPath := strings.Split(fieldPathString, ".")
+ return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
+}
+
+func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string {
+ newFieldPath := make([]string, 0, len(fieldPath))
+ for i, fieldName := range fieldPath {
+ fields := msgValue.Descriptor().Fields()
+ fieldDesc := fields.ByTextName(fieldName)
+ if fieldDesc == nil {
+ fieldDesc = fields.ByJSONName(fieldName)
+ }
+ if fieldDesc == nil {
+ // return initial field path values if no matching message field was found
+ return fieldPath
+ }
+
+ newFieldPath = append(newFieldPath, string(fieldDesc.Name()))
+
+ // If this is the last element, we're done
+ if i == len(fieldPath)-1 {
+ break
+ }
+
+ // Only singular message fields are allowed
+ if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated {
+ return fieldPath
+ }
+
+ // Get the nested message
+ msgValue = msgValue.Get(fieldDesc).Message()
+ }
+
+ return newFieldPath
+}
+
+func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
+ if len(fieldPath) < 1 {
+ return errors.New("no field path")
+ }
+ if len(values) < 1 {
+ return errors.New("no value provided")
+ }
+
+ var fieldDescriptor protoreflect.FieldDescriptor
+ for i, fieldName := range fieldPath {
+ fields := msgValue.Descriptor().Fields()
+
+ // Get field by name
+ fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
+ if fieldDescriptor == nil {
+ fieldDescriptor = fields.ByJSONName(fieldName)
+ if fieldDescriptor == nil {
+ // We're not returning an error here because this could just be
+ // an extra query parameter that isn't part of the request.
+ grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
+ return nil
+ }
+ }
+
+ // If this is the last element, we're done
+ if i == len(fieldPath)-1 {
+ break
+ }
+
+ // Only singular message fields are allowed
+ if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
+ return fmt.Errorf("invalid path: %q is not a message", fieldName)
+ }
+
+ // Get the nested message
+ msgValue = msgValue.Mutable(fieldDescriptor).Message()
+ }
+
+ // Check if oneof already set
+ if of := fieldDescriptor.ContainingOneof(); of != nil {
+ if f := msgValue.WhichOneof(of); f != nil {
+ return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
+ }
+ }
+
+ switch {
+ case fieldDescriptor.IsList():
+ return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
+ case fieldDescriptor.IsMap():
+ return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
+ }
+
+ if len(values) > 1 {
+ return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
+ }
+
+ return populateField(fieldDescriptor, msgValue, values[0])
+}
+
+func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
+ v, err := parseField(fieldDescriptor, value)
+ if err != nil {
+ return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ msgValue.Set(fieldDescriptor, v)
+ return nil
+}
+
+func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
+ for _, value := range values {
+ v, err := parseField(fieldDescriptor, value)
+ if err != nil {
+ return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+ list.Append(v)
+ }
+
+ return nil
+}
+
+func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
+ if len(values) != 2 {
+ return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
+ }
+
+ key, err := parseField(fieldDescriptor.MapKey(), values[0])
+ if err != nil {
+ return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ value, err := parseField(fieldDescriptor.MapValue(), values[1])
+ if err != nil {
+ return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ mp.Set(key.MapKey(), value)
+
+ return nil
+}
+
+func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
+ switch fieldDescriptor.Kind() {
+ case protoreflect.BoolKind:
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfBool(v), nil
+ case protoreflect.EnumKind:
+ enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
+ if err != nil {
+ if errors.Is(err, protoregistry.NotFound) {
+ return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
+ }
+ return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
+ }
+ // Look for enum by name
+ v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
+ if v == nil {
+ i, err := strconv.Atoi(value)
+ if err != nil {
+ return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+ }
+ // Look for enum by number
+ if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil {
+ return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+ }
+ }
+ return protoreflect.ValueOfEnum(v.Number()), nil
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfInt32(int32(v)), nil
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfInt64(v), nil
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ v, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), nil
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfUint64(v), nil
+ case protoreflect.FloatKind:
+ v, err := strconv.ParseFloat(value, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfFloat32(float32(v)), nil
+ case protoreflect.DoubleKind:
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfFloat64(v), nil
+ case protoreflect.StringKind:
+ return protoreflect.ValueOfString(value), nil
+ case protoreflect.BytesKind:
+ v, err := Bytes(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfBytes(v), nil
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ return parseMessage(fieldDescriptor.Message(), value)
+ default:
+ panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
+ }
+}
+
+func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
+ var msg proto.Message
+ switch msgDescriptor.FullName() {
+ case "google.protobuf.Timestamp":
+ t, err := time.Parse(time.RFC3339Nano, value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = timestamppb.New(t)
+ case "google.protobuf.Duration":
+ d, err := time.ParseDuration(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = durationpb.New(d)
+ case "google.protobuf.DoubleValue":
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Double(v)
+ case "google.protobuf.FloatValue":
+ v, err := strconv.ParseFloat(value, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Float(float32(v))
+ case "google.protobuf.Int64Value":
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Int64(v)
+ case "google.protobuf.Int32Value":
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Int32(int32(v))
+ case "google.protobuf.UInt64Value":
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.UInt64(v)
+ case "google.protobuf.UInt32Value":
+ v, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.UInt32(uint32(v))
+ case "google.protobuf.BoolValue":
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Bool(v)
+ case "google.protobuf.StringValue":
+ msg = wrapperspb.String(value)
+ case "google.protobuf.BytesValue":
+ v, err := Bytes(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Bytes(v)
+ case "google.protobuf.FieldMask":
+ fm := &field_mask.FieldMask{}
+ fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
+ msg = fm
+ case "google.protobuf.Value":
+ var v structpb.Value
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
+ case "google.protobuf.Struct":
+ var v structpb.Struct
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
+ default:
+ return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
+ }
+
+ return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
+}
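
As a concrete illustration of this default parsing behavior, the sketch below fills a well-known `durationpb.Duration` from query values; a real gateway would use its generated request messages instead.

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	msg := &durationpb.Duration{}
	values := url.Values{"seconds": {"90"}, "nanos": {"500"}}

	// An empty DoubleArray filters nothing, so both fields are populated.
	if err := runtime.PopulateQueryParameters(msg, values, utilities.NewDoubleArray(nil)); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetSeconds(), msg.GetNanos()) // 90 500
}
```
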
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
new file mode 100644
index 000000000..b89409465
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "utilities",
+ srcs = [
+ "doc.go",
+ "pattern.go",
+ "readerfactory.go",
+ "string_array_flag.go",
+ "trie.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
+)
+
+go_test(
+ name = "utilities_test",
+ size = "small",
+ srcs = [
+ "string_array_flag_test.go",
+ "trie_test.go",
+ ],
+ deps = [":utilities"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":utilities",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
new file mode 100644
index 000000000..cf79a4d58
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
@@ -0,0 +1,2 @@
+// Package utilities provides members for internal use in grpc-gateway.
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
new file mode 100644
index 000000000..dfe7de486
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+ // OpNop does nothing
+ OpNop = OpCode(iota)
+ // OpPush pushes a component to stack
+ OpPush
+ // OpLitPush pushes a component to stack if it matches to the literal
+ OpLitPush
+ // OpPushM concatenates the remaining components and pushes it to stack
+ OpPushM
+ // OpConcatN pops N items from stack, concatenates them and pushes it back to stack
+ OpConcatN
+ // OpCapture pops an item and binds it to the variable
+ OpCapture
+ // OpEnd is the least positive invalid opcode.
+ OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
new file mode 100644
index 000000000..01d26edae
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -0,0 +1,19 @@
+package utilities
+
+import (
+ "bytes"
+ "io"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return func() io.Reader {
+ return bytes.NewReader(b)
+ }, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
new file mode 100644
index 000000000..d224ab776
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+ "flag"
+ "strings"
+)
+
+// flagInterface is a cut-down interface to `flag`.
+type flagInterface interface {
+ Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+ value := &StringArrayFlags{}
+ f.Var(value, name, usage)
+ return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` that provides an interface for `flag.Var`.
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`
+func (i *StringArrayFlags) String() string {
+ return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`
+func (i *StringArrayFlags) Set(value string) error {
+ *i = append(*i, value)
+ return nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
new file mode 100644
index 000000000..dd99b0ed2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -0,0 +1,174 @@
+package utilities
+
+import (
+ "sort"
+)
+
+// DoubleArray is a Double Array implementation of a trie on sequences of strings.
+type DoubleArray struct {
+ // Encoding keeps an encoding from string to int
+ Encoding map[string]int
+ // Base is the base array of Double Array
+ Base []int
+ // Check is the check array of Double Array
+ Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+ da := &DoubleArray{Encoding: make(map[string]int)}
+ if len(seqs) == 0 {
+ return da
+ }
+
+ encoded := registerTokens(da, seqs)
+ sort.Sort(byLex(encoded))
+
+ root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+ addSeqs(da, encoded, 0, root)
+
+ for i := len(da.Base); i > 0; i-- {
+ if da.Check[i-1] != 0 {
+ da.Base = da.Base[:i]
+ da.Check = da.Check[:i]
+ break
+ }
+ }
+ return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+ var result [][]int
+ for _, seq := range seqs {
+ encoded := make([]int, 0, len(seq))
+ for _, token := range seq {
+ if _, ok := da.Encoding[token]; !ok {
+ da.Encoding[token] = len(da.Encoding)
+ }
+ encoded = append(encoded, da.Encoding[token])
+ }
+ result = append(result, encoded)
+ }
+ for i := range result {
+ result[i] = append(result[i], len(da.Encoding))
+ }
+ return result
+}
+
+type node struct {
+ row, col int
+ left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+ return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+ var result []*node
+ lastVal := int(-1)
+ last := new(node)
+ for i := n.left; i < n.right; i++ {
+ if lastVal == seqs[i][n.col+1] {
+ continue
+ }
+ last.right = i
+ last = &node{
+ row: i,
+ col: n.col + 1,
+ left: i,
+ }
+ result = append(result, last)
+ }
+ last.right = n.right
+ return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+ ensureSize(da, pos)
+
+ children := n.children(seqs)
+ var i int
+ for i = 1; ; i++ {
+ ok := func() bool {
+ for _, child := range children {
+ code := child.value(seqs)
+ j := i + code
+ ensureSize(da, j)
+ if da.Check[j] != 0 {
+ return false
+ }
+ }
+ return true
+ }()
+ if ok {
+ break
+ }
+ }
+ da.Base[pos] = i
+ for _, child := range children {
+ code := child.value(seqs)
+ j := i + code
+ da.Check[j] = pos + 1
+ }
+ terminator := len(da.Encoding)
+ for _, child := range children {
+ code := child.value(seqs)
+ if code == terminator {
+ continue
+ }
+ j := i + code
+ addSeqs(da, seqs, j, *child)
+ }
+}
+
+func ensureSize(da *DoubleArray, i int) {
+ for i >= len(da.Base) {
+ da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+ da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+ }
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+ si := l[i]
+ sj := l[j]
+ var k int
+ for k = 0; k < len(si) && k < len(sj); k++ {
+ if si[k] < sj[k] {
+ return true
+ }
+ if si[k] > sj[k] {
+ return false
+ }
+ }
+ return k < len(sj)
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+ if len(da.Base) == 0 {
+ return false
+ }
+
+ var i int
+ for _, t := range seq {
+ code, ok := da.Encoding[t]
+ if !ok {
+ break
+ }
+ j := da.Base[i] + code
+ if len(da.Check) <= j || da.Check[j] != i+1 {
+ break
+ }
+ i = j
+ }
+ j := da.Base[i] + len(da.Encoding)
+ if len(da.Check) <= j || da.Check[j] != i+1 {
+ return false
+ }
+ return true
+}
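An illustrative sketch of the exported trie API above (not part of the vendored file); the path segments are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// Register two token sequences in the double-array trie.
	da := utilities.NewDoubleArray([][]string{
		{"v1", "users"},
		{"v1", "orders", "list"},
	})

	// true: the registered sequence {"v1", "users"} is a prefix of the query.
	fmt.Println(da.HasCommonPrefix([]string{"v1", "users", "42"}))

	// false: no registered sequence is a prefix of {"v2", "users"}.
	fmt.Println(da.HasCommonPrefix([]string{"v2", "users"}))
}
```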
diff --git a/vendor/github.com/heptiolabs/healthcheck/.gitignore b/vendor/github.com/heptiolabs/healthcheck/.gitignore
new file mode 100644
index 000000000..61ead8666
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/.gitignore
@@ -0,0 +1 @@
+/vendor
diff --git a/vendor/github.com/heptiolabs/healthcheck/.travis.yml b/vendor/github.com/heptiolabs/healthcheck/.travis.yml
new file mode 100644
index 000000000..df442613f
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+go_import_path: github.com/heptiolabs/healthcheck
+go:
+ - 1.9.x
+
+sudo: false
+
+install:
+ - go get -u github.com/golang/dep/cmd/dep
+ && dep ensure -vendor-only -v
+
+script:
+ - go test -v -cover .
diff --git a/vendor/github.com/heptiolabs/healthcheck/CODE_OF_CONDUCT.md b/vendor/github.com/heptiolabs/healthcheck/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..560c4004f
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/CODE_OF_CONDUCT.md
@@ -0,0 +1,37 @@
+# Community Code of Conduct
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering
+an open and welcoming community, we pledge to respect all people who contribute
+through reporting issues, posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are not
+aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
+commit themselves to fairly and consistently applying these principles to every aspect
+of managing this project. Project maintainers who do not follow or enforce the Code of
+Conduct may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainer(s).
+
+This Code of Conduct is adapted from the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) and [Contributor Covenant](http://contributor-covenant.org/version/1/2/0/), version 1.2.0.
diff --git a/vendor/github.com/heptiolabs/healthcheck/CONTRIBUTING.md b/vendor/github.com/heptiolabs/healthcheck/CONTRIBUTING.md
new file mode 100644
index 000000000..2c0e8806b
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/CONTRIBUTING.md
@@ -0,0 +1,57 @@
+## DCO Sign off
+
+All authors to the project retain copyright to their work. However, to ensure
+that they are only submitting work that they have rights to, we are requiring
+everyone to acknowledge this by signing their work.
+
+Any copyright notices in this repo should specify the authors as "the contributors".
+
+To sign your work, just add a line like this at the end of your commit message:
+
+```
+Signed-off-by: Joe Beda
+```
+
+This can easily be done with the `--signoff` option to `git commit`.
+
+By doing this you state that you can certify the following (from https://developercertificate.org/):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
\ No newline at end of file
diff --git a/vendor/github.com/heptiolabs/healthcheck/Gopkg.lock b/vendor/github.com/heptiolabs/healthcheck/Gopkg.lock
new file mode 100644
index 000000000..28121b796
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/Gopkg.lock
@@ -0,0 +1,87 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ name = "github.com/beorn7/perks"
+ packages = ["quantile"]
+ revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
+
+[[projects]]
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/golang/protobuf"
+ packages = ["proto"]
+ revision = "925541529c1fa6821df4e44ce2723319eb2be768"
+ version = "v1.0.0"
+
+[[projects]]
+ name = "github.com/matttproud/golang_protobuf_extensions"
+ packages = ["pbutil"]
+ revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
+ version = "v1.0.0"
+
+[[projects]]
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/prometheus/client_golang"
+ packages = [
+ "prometheus",
+ "prometheus/promhttp"
+ ]
+ revision = "c3324c1198cf3374996e9d3098edd46a6b55afc9"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/prometheus/client_model"
+ packages = ["go"]
+ revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/prometheus/common"
+ packages = [
+ "expfmt",
+ "internal/bitbucket.org/ww/goautoneg",
+ "model"
+ ]
+ revision = "6fb6fce6f8b75884b92e1889c150403fc0872c5e"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/prometheus/procfs"
+ packages = [
+ ".",
+ "internal/util",
+ "nfs",
+ "xfs"
+ ]
+ revision = "7186fbf4eb22a031bcd4657c8adc3fc39acf6d8a"
+
+[[projects]]
+ name = "github.com/stretchr/testify"
+ packages = ["assert"]
+ revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
+ version = "v1.2.1"
+
+[[projects]]
+ name = "gopkg.in/DATA-DOG/go-sqlmock.v1"
+ packages = ["."]
+ revision = "d76b18b42f285b792bf985118980ce9eacea9d10"
+ version = "v1.3.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "739553432f1d3b8bc633e5e10f18d31e57b1f8af15c9e916398429bd4d7bd826"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/heptiolabs/healthcheck/Gopkg.toml b/vendor/github.com/heptiolabs/healthcheck/Gopkg.toml
new file mode 100644
index 000000000..25c33552e
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/Gopkg.toml
@@ -0,0 +1,10 @@
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+
+[[constraint]]
+ name = "github.com/prometheus/client_golang"
+ branch = "master"
+
+[[constraint]]
+ name = "github.com/stretchr/testify"
+ version = "1.1.4"
diff --git a/vendor/github.com/heptiolabs/healthcheck/LICENSE b/vendor/github.com/heptiolabs/healthcheck/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/heptiolabs/healthcheck/README.md b/vendor/github.com/heptiolabs/healthcheck/README.md
new file mode 100644
index 000000000..1e916cf56
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/README.md
@@ -0,0 +1,92 @@
+# healthcheck
+[![Build Status](https://travis-ci.org/heptiolabs/healthcheck.svg?branch=master)](https://travis-ci.org/heptiolabs/healthcheck)
+[![Go Report Card](https://goreportcard.com/badge/github.com/heptiolabs/healthcheck)](https://goreportcard.com/report/github.com/heptiolabs/healthcheck)
+[![GoDoc](https://godoc.org/github.com/heptiolabs/healthcheck?status.svg)](https://godoc.org/github.com/heptiolabs/healthcheck)
+
+Healthcheck is a library for implementing Kubernetes [liveness and readiness](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) probe handlers in your Go application.
+
+## Features
+
+ - Integrates easily with Kubernetes. This library explicitly separates liveness vs. readiness checks instead of lumping everything into a single category of check.
+
+ - Optionally exposes each check as a [Prometheus gauge](https://prometheus.io/docs/concepts/metric_types/#gauge) metric. This allows for cluster-wide monitoring and alerting on individual checks.
+
+ - Supports asynchronous checks, which run in a background goroutine at a fixed interval. These are useful for expensive checks whose latency you don't want added to the liveness and readiness endpoints.
+
+ - Includes a small library of generically useful checks for validating upstream DNS, TCP, HTTP, and database dependencies as well as checking basic health of the Go runtime.
+
+## Usage
+
+See the [GoDoc examples](https://godoc.org/github.com/heptiolabs/healthcheck) for more detail.
+
+ - Install with `go get` or your favorite Go dependency manager: `go get -u github.com/heptiolabs/healthcheck`
+
+ - Import the package: `import "github.com/heptiolabs/healthcheck"`
+
+ - Create a `healthcheck.Handler`:
+ ```go
+ health := healthcheck.NewHandler()
+ ```
+
+ - Configure some application-specific liveness checks (whether the app itself is unhealthy):
+ ```go
+ // Our app is not happy if we've got more than 100 goroutines running.
+ health.AddLivenessCheck("goroutine-threshold", healthcheck.GoroutineCountCheck(100))
+ ```
+
+ - Configure some application-specific readiness checks (whether the app is ready to serve requests):
+ ```go
+ // Our app is not ready if we can't resolve our upstream dependency in DNS.
+ health.AddReadinessCheck(
+ "upstream-dep-dns",
+ healthcheck.DNSResolveCheck("upstream.example.com", 50*time.Millisecond))
+
+ // Our app is not ready if we can't connect to our database (`var db *sql.DB`) in <1s.
+ health.AddReadinessCheck("database", healthcheck.DatabasePingCheck(db, 1*time.Second))
+ ```
+
+ - Expose the `/live` and `/ready` endpoints over HTTP (on port 8086):
+ ```go
+ go http.ListenAndServe("0.0.0.0:8086", health)
+ ```
+
+ - Configure your Kubernetes container with HTTP liveness and readiness probes (see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) for more detail):
+ ```yaml
+ # this is a bare bones example
+ # copy and paste livenessProbe and readinessProbe as appropriate for your app
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: heptio-healthcheck-example
+ spec:
+ containers:
+ - name: liveness
+ image: your-registry/your-container
+
+ # define a liveness probe that checks every 5 seconds, starting after 5 seconds
+ livenessProbe:
+ httpGet:
+ path: /live
+ port: 8086
+ initialDelaySeconds: 5
+ periodSeconds: 5
+
+ # define a readiness probe that checks every 5 seconds
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8086
+ periodSeconds: 5
+ ```
+
+ - If one of your readiness checks fails, Kubernetes will stop routing traffic to that pod within a few seconds (depending on `periodSeconds` and other factors).
+
+ - If one of your liveness checks fails or your app becomes totally unresponsive, Kubernetes will restart your container.
+
+ ## HTTP Endpoints
+ When you run `go http.ListenAndServe("0.0.0.0:8086", health)`, two HTTP endpoints are exposed:
+
+ - **`/live`**: liveness endpoint (HTTP 200 if healthy, HTTP 503 if unhealthy)
+ - **`/ready`**: readiness endpoint (HTTP 200 if healthy, HTTP 503 if unhealthy)
+
+Pass the `?full=1` query parameter to see the full check results as JSON. These are omitted by default for performance.
\ No newline at end of file
diff --git a/vendor/github.com/heptiolabs/healthcheck/async.go b/vendor/github.com/heptiolabs/healthcheck/async.go
new file mode 100644
index 000000000..2c6bb9f20
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/async.go
@@ -0,0 +1,85 @@
+// Copyright 2017 by the contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "context"
+ "errors"
+ "time"
+)
+
+// ErrNoData is returned if the first call of an Async() wrapped Check has not
+// yet returned.
+var ErrNoData = errors.New("no data yet")
+
+// Async converts a Check into an asynchronous check that runs in a background
+// goroutine at a fixed interval. The check is called at a fixed rate, not with
+// a fixed delay between invocations. If your check takes longer than the
+// interval to execute, the next execution will happen immediately.
+//
+// Note: if you need to clean up the background goroutine, use AsyncWithContext().
+func Async(check Check, interval time.Duration) Check {
+ return AsyncWithContext(context.Background(), check, interval)
+}
+
+// AsyncWithContext converts a Check into an asynchronous check that runs in a
+// background goroutine at a fixed interval. The check is called at a fixed
+// rate, not with a fixed delay between invocations. If your check takes longer
+// than the interval to execute, the next execution will happen immediately.
+//
+// Note: if you don't need to cancel execution (because this runs forever), use Async()
+func AsyncWithContext(ctx context.Context, check Check, interval time.Duration) Check {
+ // create a chan that will buffer the most recent check result
+ result := make(chan error, 1)
+
+ // fill it with ErrNoData so we'll start in an initially failing state
+ // (we don't want to be ready/live until we've actually executed the check
+ // once, but that might be slow).
+ result <- ErrNoData
+
+ // make a wrapper that runs the check, and swaps out the current head of
+ // the channel with the latest result
+ update := func() {
+ err := check()
+ <-result
+ result <- err
+ }
+
+ // spawn a background goroutine to run the check
+ go func() {
+ // call once right away (time.Tick() doesn't always tick immediately
+ // but we want an initial result as soon as possible)
+ update()
+
+ // loop forever or until the context is canceled
+ ticker := time.Tick(interval)
+ for {
+ select {
+ case <-ticker:
+ update()
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
+ // return a Check function that closes over the buffered result channel
+ return func() error {
+ // peek at the head of the channel, then put it back
+ err := <-result
+ result <- err
+ return err
+ }
+}
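A minimal sketch (not part of the vendored file) of wrapping an expensive check with `Async` so it runs in the background rather than on every probe request; the URL and durations are placeholders.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/heptiolabs/healthcheck"
)

func main() {
	health := healthcheck.NewHandler()

	// The HTTP check itself can be slow, so run it every 30s in a background
	// goroutine; the probe endpoint only reports the most recent result.
	slow := healthcheck.HTTPGetCheck("http://upstream.example.com/healthz", 5*time.Second)
	health.AddReadinessCheck("upstream-http", healthcheck.Async(slow, 30*time.Second))

	log.Fatal(http.ListenAndServe("0.0.0.0:8086", health))
}
```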
diff --git a/vendor/github.com/heptiolabs/healthcheck/checks.go b/vendor/github.com/heptiolabs/healthcheck/checks.go
new file mode 100644
index 000000000..4f18bb54f
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/checks.go
@@ -0,0 +1,120 @@
+// Copyright 2017 by the contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+// TCPDialCheck returns a Check that checks TCP connectivity to the provided
+// endpoint.
+func TCPDialCheck(addr string, timeout time.Duration) Check {
+ return func() error {
+ conn, err := net.DialTimeout("tcp", addr, timeout)
+ if err != nil {
+ return err
+ }
+ return conn.Close()
+ }
+}
+
+// HTTPGetCheck returns a Check that performs an HTTP GET request against the
+// specified URL. The check fails if the response times out or returns a non-200
+// status code.
+func HTTPGetCheck(url string, timeout time.Duration) Check {
+ client := http.Client{
+ Timeout: timeout,
+ // never follow redirects
+ CheckRedirect: func(*http.Request, []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ return func() error {
+ resp, err := client.Get(url)
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("returned status %d", resp.StatusCode)
+ }
+ return nil
+ }
+}
+
+// DatabasePingCheck returns a Check that validates connectivity to a
+// database/sql.DB using Ping().
+func DatabasePingCheck(database *sql.DB, timeout time.Duration) Check {
+ return func() error {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ if database == nil {
+ return fmt.Errorf("database is nil")
+ }
+ return database.PingContext(ctx)
+ }
+}
+
+// DNSResolveCheck returns a Check that makes sure the provided host can resolve
+// to at least one IP address within the specified timeout.
+func DNSResolveCheck(host string, timeout time.Duration) Check {
+ resolver := net.Resolver{}
+ return func() error {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ addrs, err := resolver.LookupHost(ctx, host)
+ if err != nil {
+ return err
+ }
+ if len(addrs) < 1 {
+ return fmt.Errorf("could not resolve host")
+ }
+ return nil
+ }
+}
+
+// GoroutineCountCheck returns a Check that fails if too many goroutines are
+// running (which could indicate a resource leak).
+func GoroutineCountCheck(threshold int) Check {
+ return func() error {
+ count := runtime.NumGoroutine()
+ if count > threshold {
+ return fmt.Errorf("too many goroutines (%d > %d)", count, threshold)
+ }
+ return nil
+ }
+}
+
+// GCMaxPauseCheck returns a Check that fails if any recent Go garbage
+// collection pause exceeds the provided threshold.
+func GCMaxPauseCheck(threshold time.Duration) Check {
+ thresholdNanoseconds := uint64(threshold.Nanoseconds())
+ return func() error {
+ var stats runtime.MemStats
+ runtime.ReadMemStats(&stats)
+ for _, pause := range stats.PauseNs {
+ if pause > thresholdNanoseconds {
+ return fmt.Errorf("recent GC cycle took %s > %s", time.Duration(pause), threshold)
+ }
+ }
+ return nil
+ }
+}
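An illustrative sketch (not part of the vendored file) exercising several of the stock checks defined above; the thresholds and the database address are assumptions.

```go
package main

import (
	"fmt"
	"time"

	"github.com/heptiolabs/healthcheck"
)

func main() {
	// Each helper above returns a plain Check (func() error), so it can be
	// invoked directly as well as registered on a Handler.
	checks := map[string]healthcheck.Check{
		"goroutines": healthcheck.GoroutineCountCheck(200),
		"gc-pause":   healthcheck.GCMaxPauseCheck(100 * time.Millisecond),
		"db-tcp":     healthcheck.TCPDialCheck("db.internal:5432", 2*time.Second),
	}
	for name, check := range checks {
		// A nil result means the check passed; otherwise the error explains why it failed.
		fmt.Println(name, check())
	}
}
```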
diff --git a/vendor/github.com/heptiolabs/healthcheck/doc.go b/vendor/github.com/heptiolabs/healthcheck/doc.go
new file mode 100644
index 000000000..1920098bb
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/doc.go
@@ -0,0 +1,24 @@
+// Copyright 2017 by the contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package healthcheck helps you implement Kubernetes liveness and readiness checks
+for your application. It supports synchronous and asynchronous (background)
+checks. It can optionally report each check's status as a set of Prometheus
+gauge metrics for cluster-wide monitoring and alerting.
+
+It also includes a small library of generic checks for DNS, TCP, and HTTP
+reachability as well as Goroutine usage.
+*/
+package healthcheck
diff --git a/vendor/github.com/heptiolabs/healthcheck/handler.go b/vendor/github.com/heptiolabs/healthcheck/handler.go
new file mode 100644
index 000000000..6ea97407d
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/handler.go
@@ -0,0 +1,103 @@
+// Copyright 2017 by the contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "encoding/json"
+ "net/http"
+ "sync"
+)
+
+// basicHandler is a basic Handler implementation.
+type basicHandler struct {
+ http.ServeMux
+ checksMutex sync.RWMutex
+ livenessChecks map[string]Check
+ readinessChecks map[string]Check
+}
+
+// NewHandler creates a new basic Handler
+func NewHandler() Handler {
+ h := &basicHandler{
+ livenessChecks: make(map[string]Check),
+ readinessChecks: make(map[string]Check),
+ }
+ h.Handle("/live", http.HandlerFunc(h.LiveEndpoint))
+ h.Handle("/ready", http.HandlerFunc(h.ReadyEndpoint))
+ return h
+}
+
+func (s *basicHandler) LiveEndpoint(w http.ResponseWriter, r *http.Request) {
+ s.handle(w, r, s.livenessChecks)
+}
+
+func (s *basicHandler) ReadyEndpoint(w http.ResponseWriter, r *http.Request) {
+ s.handle(w, r, s.readinessChecks, s.livenessChecks)
+}
+
+func (s *basicHandler) AddLivenessCheck(name string, check Check) {
+ s.checksMutex.Lock()
+ defer s.checksMutex.Unlock()
+ s.livenessChecks[name] = check
+}
+
+func (s *basicHandler) AddReadinessCheck(name string, check Check) {
+ s.checksMutex.Lock()
+ defer s.checksMutex.Unlock()
+ s.readinessChecks[name] = check
+}
+
+func (s *basicHandler) collectChecks(checks map[string]Check, resultsOut map[string]string, statusOut *int) {
+ s.checksMutex.RLock()
+ defer s.checksMutex.RUnlock()
+ for name, check := range checks {
+ if err := check(); err != nil {
+ *statusOut = http.StatusServiceUnavailable
+ resultsOut[name] = err.Error()
+ } else {
+ resultsOut[name] = "OK"
+ }
+ }
+}
+
+func (s *basicHandler) handle(w http.ResponseWriter, r *http.Request, checks ...map[string]Check) {
+ if r.Method != http.MethodGet {
+ http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ checkResults := make(map[string]string)
+ status := http.StatusOK
+ for _, checks := range checks {
+ s.collectChecks(checks, checkResults, &status)
+ }
+
+ // write out the response code and content type header
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ w.WriteHeader(status)
+
+ // unless ?full=1, return an empty body. Kubernetes only cares about the
+ // HTTP status code, so we won't waste bytes on the full body.
+ if r.URL.Query().Get("full") != "1" {
+ w.Write([]byte("{}\n"))
+ return
+ }
+
+ // otherwise, write the JSON body ignoring any encoding errors (which
+ // shouldn't really be possible since we're encoding a map[string]string).
+ encoder := json.NewEncoder(w)
+ encoder.SetIndent("", " ")
+ encoder.Encode(checkResults)
+}
diff --git a/vendor/github.com/heptiolabs/healthcheck/metrics_handler.go b/vendor/github.com/heptiolabs/healthcheck/metrics_handler.go
new file mode 100644
index 000000000..e95be0f04
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/metrics_handler.go
@@ -0,0 +1,76 @@
+// Copyright 2017 by the contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type metricsHandler struct {
+ handler Handler
+ registry prometheus.Registerer
+ namespace string
+}
+
+// NewMetricsHandler returns a healthcheck Handler that also exposes metrics
+// into the provided Prometheus registry.
+func NewMetricsHandler(registry prometheus.Registerer, namespace string) Handler {
+ return &metricsHandler{
+ handler: NewHandler(),
+ registry: registry,
+ namespace: namespace,
+ }
+}
+
+func (h *metricsHandler) AddLivenessCheck(name string, check Check) {
+ h.handler.AddLivenessCheck(name, h.wrap(name, check))
+}
+
+func (h *metricsHandler) AddReadinessCheck(name string, check Check) {
+ h.handler.AddReadinessCheck(name, h.wrap(name, check))
+}
+
+func (h *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ h.handler.ServeHTTP(w, r)
+}
+
+func (h *metricsHandler) LiveEndpoint(w http.ResponseWriter, r *http.Request) {
+ h.handler.LiveEndpoint(w, r)
+}
+
+func (h *metricsHandler) ReadyEndpoint(w http.ResponseWriter, r *http.Request) {
+ h.handler.ReadyEndpoint(w, r)
+}
+
+func (h *metricsHandler) wrap(name string, check Check) Check {
+ h.registry.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: h.namespace,
+ Subsystem: "healthcheck",
+ Name: "status",
+ Help: "Current check status (0 indicates success, 1 indicates failure)",
+ ConstLabels: prometheus.Labels{"check": name},
+ },
+ func() float64 {
+ if check() == nil {
+ return 0
+ }
+ return 1
+ },
+ ))
+ return check
+}
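A sketch (not part of the vendored file) of the Prometheus-aware constructor above; the namespace, threshold, and routes are placeholders.

```go
package main

import (
	"log"
	"net/http"

	"github.com/heptiolabs/healthcheck"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	registry := prometheus.NewRegistry()

	// Every check registered on this handler also publishes a
	// myapp_healthcheck_status gauge labelled with the check name.
	health := healthcheck.NewMetricsHandler(registry, "myapp")
	health.AddLivenessCheck("goroutines", healthcheck.GoroutineCountCheck(100))

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	mux.Handle("/", health)
	log.Fatal(http.ListenAndServe("0.0.0.0:8086", mux))
}
```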
diff --git a/vendor/github.com/heptiolabs/healthcheck/timeout.go b/vendor/github.com/heptiolabs/healthcheck/timeout.go
new file mode 100644
index 000000000..86bf21d9a
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/timeout.go
@@ -0,0 +1,52 @@
+// Copyright 2017 by the contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "fmt"
+ "time"
+)
+
+// timeoutError is the error returned when a Timeout-wrapped Check takes too long.
+type timeoutError time.Duration
+
+func (e timeoutError) Error() string {
+ return fmt.Sprintf("timed out after %s", time.Duration(e).String())
+}
+
+// Timeout returns whether this error is a timeout (always true for timeoutError)
+func (e timeoutError) Timeout() bool {
+ return true
+}
+
+// Temporary returns whether this error is temporary (always true for timeoutError)
+func (e timeoutError) Temporary() bool {
+ return true
+}
+
+// Timeout adds a timeout to a Check. If the underlying check takes longer than
+// the timeout, it returns an error.
+func Timeout(check Check, timeout time.Duration) Check {
+ return func() error {
+ c := make(chan error, 1)
+ go func() { c <- check() }()
+ select {
+ case err := <-c:
+ return err
+ case <-time.After(timeout):
+ return timeoutError(timeout)
+ }
+ }
+}
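An illustrative sketch (not part of the vendored file) of capping a check with `Timeout`; the host and durations are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"github.com/heptiolabs/healthcheck"
)

func main() {
	// The DNS check allows two seconds internally, but Timeout caps the whole
	// call at one second; past that, the wrapped check returns the timeout error above.
	dnsCheck := healthcheck.DNSResolveCheck("upstream.example.com", 2*time.Second)
	check := healthcheck.Timeout(dnsCheck, 1*time.Second)
	fmt.Println(check())
}
```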
diff --git a/vendor/github.com/heptiolabs/healthcheck/types.go b/vendor/github.com/heptiolabs/healthcheck/types.go
new file mode 100644
index 000000000..714294b28
--- /dev/null
+++ b/vendor/github.com/heptiolabs/healthcheck/types.go
@@ -0,0 +1,52 @@
+// Copyright 2017 by the contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "net/http"
+)
+
+// Check is a health/readiness check.
+type Check func() error
+
+// Handler is an http.Handler with additional methods that register health and
+// readiness checks. It handles the "/live" and "/ready" HTTP
+// endpoints.
+type Handler interface {
+ // The Handler is an http.Handler, so it can be exposed directly and handle
+ // /live and /ready endpoints.
+ http.Handler
+
+ // AddLivenessCheck adds a check that indicates that this instance of the
+ // application should be destroyed or restarted. A failed liveness check
+ // indicates that this instance is unhealthy, not some upstream dependency.
+ // Every liveness check is also included as a readiness check.
+ AddLivenessCheck(name string, check Check)
+
+ // AddReadinessCheck adds a check that indicates that this instance of the
+ // application is currently unable to serve requests because of an upstream
+ // or some transient failure. If a readiness check fails, this instance
+ // should no longer receive requests, but should not be restarted or
+ // destroyed.
+ AddReadinessCheck(name string, check Check)
+
+ // LiveEndpoint is the HTTP handler for just the /live endpoint, which is
+ // useful if you need to attach it into your own HTTP handler tree.
+ LiveEndpoint(http.ResponseWriter, *http.Request)
+
+ // ReadyEndpoint is the HTTP handler for just the /ready endpoint, which is
+ // useful if you need to attach it into your own HTTP handler tree.
+ ReadyEndpoint(http.ResponseWriter, *http.Request)
+}
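A sketch (not part of the vendored file) of the "attach into your own HTTP handler tree" use mentioned in the comments above; the paths are arbitrary.

```go
package main

import (
	"log"
	"net/http"

	"github.com/heptiolabs/healthcheck"
)

func main() {
	health := healthcheck.NewHandler()

	// Mount only the probe endpoints on an existing mux rather than serving
	// the whole Handler at the root.
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz/live", health.LiveEndpoint)
	mux.HandleFunc("/healthz/ready", health.ReadyEndpoint)
	log.Fatal(http.ListenAndServe("0.0.0.0:8086", mux))
}
```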
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml b/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml
new file mode 100644
index 000000000..77285093d
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml
@@ -0,0 +1,32 @@
+# .readthedocs.yaml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the OS, Python version and other tools you might need
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.11"
+ # You can also specify other tool versions:
+ # nodejs: "19"
+ # rust: "1.64"
+ # golang: "1.19"
+
+# Build documentation in the "docs/" directory with Sphinx
+sphinx:
+ configuration: docs/source/conf.py
+
+# Optionally build your docs in additional formats such as PDF and ePub
+# formats:
+# - pdf
+# - epub
+
+# Optional but recommended, declare the Python requirements required
+# to build your documentation
+# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+python:
+ install:
+ - requirements: docs/requirements.txt
\ No newline at end of file
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT b/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT
new file mode 100644
index 000000000..c96a21a2a
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 - 2024 IP2Location.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/README.md b/vendor/github.com/ip2location/ip2location-go/v9/README.md
new file mode 100644
index 000000000..93876e20d
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/README.md
@@ -0,0 +1,28 @@
+[![Go Report Card](https://goreportcard.com/badge/github.com/ip2location/ip2location-go/v9)](https://goreportcard.com/report/github.com/ip2location/ip2location-go/v9)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/ip2location/ip2location-go/v9)](https://pkg.go.dev/github.com/ip2location/ip2location-go/v9)
+
+# IP2Location Go Package
+
+This Go package provides a fast lookup of country, region, city, latitude, longitude, ZIP code, time zone, ISP, domain name, connection type, IDD code, area code, weather station code, station name, MCC, MNC, mobile brand, elevation, usage type, address type, IAB category, district, autonomous system number (ASN) and autonomous system (AS) from an IP address by using an IP2Location database. The package uses a file-based database available at IP2Location.com, which contains IP blocks as keys and the fields listed above as values. Both IPv4 and IPv6 addresses are supported.
+
+This package can be used in many types of projects such as:
+
+ - select the geographically closest mirror
+ - analyze your web server logs to determine the countries of your visitors
+ - credit card fraud detection
+ - software export controls
+ - display native language and currency
+ - prevent password sharing and abuse of service
+ - geotargeting in advertisement
+
+The database is updated on a monthly basis for greater accuracy. Free LITE databases are available at https://lite.ip2location.com/ upon registration.
+
+The paid databases are available at https://www.ip2location.com under the Premium subscription package.
+
+As an alternative, this package can also call the IP2Location Web Service. This requires an API key. If you don't have an existing API key, you can subscribe for one at:
+https://www.ip2location.com/web-service/ip2location
+
+Developer Documentation
+=====================
+
+To learn more about installation, usage, and code examples, please visit the developer documentation at [https://ip2location-go.readthedocs.io/en/latest/](https://ip2location-go.readthedocs.io/en/latest/).
\ No newline at end of file
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/country.go b/vendor/github.com/ip2location/ip2location-go/v9/country.go
new file mode 100644
index 000000000..202ce316c
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/country.go
@@ -0,0 +1,129 @@
+package ip2location
+
+import (
+ "encoding/csv"
+ "errors"
+ "os"
+)
+
+// The CountryInfoRecord struct stores all of the available
+// country info found in the country information CSV file.
+type CountryInfoRecord struct {
+ Country_code string
+ Country_name string
+ Country_alpha3_code string
+ Country_numeric_code string
+ Capital string
+ Country_demonym string
+ Total_area string
+ Population string
+ Idd_code string
+ Currency_code string
+ Currency_name string
+ Currency_symbol string
+ Lang_code string
+ Lang_name string
+ Cctld string
+}
+
+// The CI struct is the main object used to read the country information CSV.
+type CI struct {
+ resultsArr []CountryInfoRecord
+ resultsMap map[string]CountryInfoRecord
+}
+
+// OpenCountryInfo initializes a CI object with the path to the country information CSV file.
+func OpenCountryInfo(csvFile string) (*CI, error) {
+ var ci = &CI{}
+
+ _, err := os.Stat(csvFile)
+ if os.IsNotExist(err) {
+ return nil, errors.New("The CSV file '" + csvFile + "' is not found.")
+ }
+
+ f, err := os.Open(csvFile)
+ if err != nil {
+ return nil, errors.New("Unable to read '" + csvFile + "'.")
+ }
+
+ defer f.Close()
+
+ csvReader := csv.NewReader(f)
+ data, err := csvReader.ReadAll()
+ if err != nil {
+ return nil, errors.New("Unable to read '" + csvFile + "'.")
+ }
+
+ ci.resultsMap = make(map[string]CountryInfoRecord)
+ var headerArr []string
+
+ for i, line := range data {
+ if i == 0 { // headers
+ for _, field := range line {
+ headerArr = append(headerArr, field)
+ }
+ } else {
+ var rec CountryInfoRecord
+ for j, field := range line {
+ switch headerArr[j] {
+ case "country_code":
+ rec.Country_code = field
+ case "country_name":
+ rec.Country_name = field
+ case "country_alpha3_code":
+ rec.Country_alpha3_code = field
+ case "country_numeric_code":
+ rec.Country_numeric_code = field
+ case "capital":
+ rec.Capital = field
+ case "country_demonym":
+ rec.Country_demonym = field
+ case "total_area":
+ rec.Total_area = field
+ case "population":
+ rec.Population = field
+ case "idd_code":
+ rec.Idd_code = field
+ case "currency_code":
+ rec.Currency_code = field
+ case "currency_name":
+ rec.Currency_name = field
+ case "currency_symbol":
+ rec.Currency_symbol = field
+ case "lang_code":
+ rec.Lang_code = field
+ case "lang_name":
+ rec.Lang_name = field
+ case "cctld":
+ rec.Cctld = field
+ }
+ }
+ if rec.Country_code == "" {
+ return nil, errors.New("Invalid country information CSV file.")
+ }
+ ci.resultsArr = append(ci.resultsArr, rec)
+ ci.resultsMap[rec.Country_code] = rec
+ }
+ }
+ return ci, nil
+}
+
+// GetCountryInfo returns the country information for the specified country code, or for all countries if no code is specified.
+func (c *CI) GetCountryInfo(countryCode ...string) ([]CountryInfoRecord, error) {
+ if len(c.resultsArr) == 0 {
+ return nil, errors.New("No record available.")
+ }
+
+ if len(countryCode) == 1 {
+ cc := countryCode[0]
+ if rec, ok := c.resultsMap[cc]; ok {
+ var x []CountryInfoRecord
+ x = append(x, rec)
+ return x, nil // return record
+ } else {
+ return nil, errors.New("No record found.")
+ }
+ } else {
+ return c.resultsArr, nil // return all countries
+ }
+}
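An illustrative sketch (not part of the vendored file) of the country-information API above; the CSV path and country code are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ip2location/ip2location-go/v9"
)

func main() {
	// The path to the country information CSV file is a placeholder.
	ci, err := ip2location.OpenCountryInfo("./IP2LOCATION-COUNTRY-INFORMATION.CSV")
	if err != nil {
		log.Fatal(err)
	}

	// Look up a single country by its two-letter code.
	recs, err := ci.GetCountryInfo("US")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(recs[0].Country_name, recs[0].Capital, recs[0].Currency_code)
}
```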
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go b/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go
new file mode 100644
index 000000000..417536a73
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go
@@ -0,0 +1,1194 @@
+// This ip2location package provides a fast lookup of country, region, city, latitude, longitude, ZIP code, time zone,
+// ISP, domain name, connection type, IDD code, area code, weather station code, station name, MCC, MNC,
+// mobile brand, elevation, usage type, address type, IAB category, district, autonomous system number (ASN) and
+// autonomous system (AS) from IP address by using IP2Location database.
+package ip2location
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "lukechampine.com/uint128"
+ "math"
+ "math/big"
+ "net"
+ "os"
+ "strconv"
+ "unsafe"
+)
+
+type DBReader interface {
+ io.ReadCloser
+ io.ReaderAt
+}
+
+type ip2locationmeta struct {
+ databasetype uint8
+ databasecolumn uint8
+ databaseday uint8
+ databasemonth uint8
+ databaseyear uint8
+ ipv4databasecount uint32
+ ipv4databaseaddr uint32
+ ipv6databasecount uint32
+ ipv6databaseaddr uint32
+ ipv4indexed bool
+ ipv6indexed bool
+ ipv4indexbaseaddr uint32
+ ipv6indexbaseaddr uint32
+ ipv4columnsize uint32
+ ipv6columnsize uint32
+ productcode uint8
+ producttype uint8
+ filesize uint32
+}
+
+// The IP2Locationrecord struct stores all of the available
+// geolocation info found in the IP2Location database.
+type IP2Locationrecord struct {
+ Country_short string
+ Country_long string
+ Region string
+ City string
+ Isp string
+ Latitude float32
+ Longitude float32
+ Domain string
+ Zipcode string
+ Timezone string
+ Netspeed string
+ Iddcode string
+ Areacode string
+ Weatherstationcode string
+ Weatherstationname string
+ Mcc string
+ Mnc string
+ Mobilebrand string
+ Elevation float32
+ Usagetype string
+ Addresstype string
+ Category string
+ District string
+ Asn string
+ As string
+}
+
+type DB struct {
+ f DBReader
+ meta ip2locationmeta
+
+ country_position_offset uint32
+ region_position_offset uint32
+ city_position_offset uint32
+ isp_position_offset uint32
+ domain_position_offset uint32
+ zipcode_position_offset uint32
+ latitude_position_offset uint32
+ longitude_position_offset uint32
+ timezone_position_offset uint32
+ netspeed_position_offset uint32
+ iddcode_position_offset uint32
+ areacode_position_offset uint32
+ weatherstationcode_position_offset uint32
+ weatherstationname_position_offset uint32
+ mcc_position_offset uint32
+ mnc_position_offset uint32
+ mobilebrand_position_offset uint32
+ elevation_position_offset uint32
+ usagetype_position_offset uint32
+ addresstype_position_offset uint32
+ category_position_offset uint32
+ district_position_offset uint32
+ asn_position_offset uint32
+ as_position_offset uint32
+
+ country_enabled bool
+ region_enabled bool
+ city_enabled bool
+ isp_enabled bool
+ domain_enabled bool
+ zipcode_enabled bool
+ latitude_enabled bool
+ longitude_enabled bool
+ timezone_enabled bool
+ netspeed_enabled bool
+ iddcode_enabled bool
+ areacode_enabled bool
+ weatherstationcode_enabled bool
+ weatherstationname_enabled bool
+ mcc_enabled bool
+ mnc_enabled bool
+ mobilebrand_enabled bool
+ elevation_enabled bool
+ usagetype_enabled bool
+ addresstype_enabled bool
+ category_enabled bool
+ district_enabled bool
+ asn_enabled bool
+ as_enabled bool
+
+ metaok bool
+}
+
+var defaultDB = &DB{}
+
+var country_position = [27]uint8{0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
+var region_position = [27]uint8{0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}
+var city_position = [27]uint8{0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4}
+var isp_position = [27]uint8{0, 0, 3, 0, 5, 0, 7, 5, 7, 0, 8, 0, 9, 0, 9, 0, 9, 0, 9, 7, 9, 0, 9, 7, 9, 9, 9}
+var latitude_position = [27]uint8{0, 0, 0, 0, 0, 5, 5, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}
+var longitude_position = [27]uint8{0, 0, 0, 0, 0, 6, 6, 0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}
+var domain_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 6, 8, 0, 9, 0, 10, 0, 10, 0, 10, 0, 10, 8, 10, 0, 10, 8, 10, 10, 10}
+var zipcode_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 7, 7, 7, 0, 7, 0, 7, 7, 7, 0, 7, 7, 7}
+var timezone_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 7, 8, 8, 8, 7, 8, 0, 8, 8, 8, 0, 8, 8, 8}
+var netspeed_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 11, 0, 11, 8, 11, 0, 11, 0, 11, 0, 11, 11, 11}
+var iddcode_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 12, 0, 12, 0, 12, 9, 12, 0, 12, 12, 12}
+var areacode_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 13, 0, 13, 0, 13, 10, 13, 0, 13, 13, 13}
+var weatherstationcode_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 14, 0, 14, 0, 14, 0, 14, 14, 14}
+var weatherstationname_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 15, 0, 15, 0, 15, 0, 15, 15, 15}
+var mcc_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 16, 0, 16, 9, 16, 16, 16}
+var mnc_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 17, 0, 17, 10, 17, 17, 17}
+var mobilebrand_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 18, 0, 18, 11, 18, 18, 18}
+var elevation_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 19, 0, 19, 19, 19}
+var usagetype_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 20, 20, 20}
+var addresstype_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 21}
+var category_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 22}
+var district_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23}
+var asn_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24}
+var as_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25}
+
+const api_version string = "9.7.0"
+
+var max_ipv4_range = uint128.From64(4294967295)
+var max_ipv6_range = uint128.From64(0)
+var from_v4mapped = uint128.From64(281470681743360)
+var to_v4mapped = uint128.From64(281474976710655)
+var from_6to4 = uint128.From64(0)
+var to_6to4 = uint128.From64(0)
+var from_teredo = uint128.From64(0)
+var to_teredo = uint128.From64(0)
+var last_32bits = uint128.From64(4294967295)
+
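+// Bit flags identifying which fields to extract; they are combined into the mode mask passed to query().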
+const countryshort uint32 = 0x0000001
+const countrylong uint32 = 0x0000002
+const region uint32 = 0x0000004
+const city uint32 = 0x0000008
+const isp uint32 = 0x0000010
+const latitude uint32 = 0x0000020
+const longitude uint32 = 0x0000040
+const domain uint32 = 0x0000080
+const zipcode uint32 = 0x0000100
+const timezone uint32 = 0x0000200
+const netspeed uint32 = 0x0000400
+const iddcode uint32 = 0x0000800
+const areacode uint32 = 0x0001000
+const weatherstationcode uint32 = 0x0002000
+const weatherstationname uint32 = 0x0004000
+const mcc uint32 = 0x0008000
+const mnc uint32 = 0x0010000
+const mobilebrand uint32 = 0x0020000
+const elevation uint32 = 0x0040000
+const usagetype uint32 = 0x0080000
+const addresstype uint32 = 0x0100000
+const category uint32 = 0x0200000
+const district uint32 = 0x0400000
+const asn uint32 = 0x0800000
+const as uint32 = 0x1000000
+
+const all uint32 = countryshort | countrylong | region | city | isp | latitude | longitude | domain | zipcode | timezone | netspeed | iddcode | areacode | weatherstationcode | weatherstationname | mcc | mnc | mobilebrand | elevation | usagetype | addresstype | category | district | asn | as
+
+const invalid_address string = "Invalid IP address."
+const missing_file string = "Invalid database file."
+const not_supported string = "This parameter is unavailable for selected data file. Please upgrade the data file."
+const invalid_bin string = "Incorrect IP2Location BIN file format. Please make sure that you are using the latest IP2Location BIN file."
+
+func reverseBytes(s []byte) {
+ for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+ s[i], s[j] = s[j], s[i]
+ }
+}
+
+// get the IP type and calculate the IP number; also calculates the index if an index table exists
+func (d *DB) checkip(ip string) (iptype uint32, ipnum uint128.Uint128, ipindex uint32) {
+ iptype = 0
+ ipnum = uint128.From64(0)
+ ipnumtmp := uint128.From64(0)
+ ipindex = 0
+ ipaddress := net.ParseIP(ip)
+
+ if ipaddress != nil {
+ v4 := ipaddress.To4()
+
+ if v4 != nil {
+ iptype = 4
+ ipnum = uint128.From64(uint64(binary.BigEndian.Uint32(v4)))
+ } else {
+ v6 := ipaddress.To16()
+
+ if v6 != nil {
+ iptype = 6
+ reverseBytes(v6)
+ ipnum = uint128.FromBytes(v6)
+
+ if ipnum.Cmp(from_v4mapped) >= 0 && ipnum.Cmp(to_v4mapped) <= 0 {
+					// IPv4-mapped IPv6 addresses should be treated as IPv4 and read from the IPv4 data section
+ iptype = 4
+ ipnum = ipnum.Sub(from_v4mapped)
+ } else if ipnum.Cmp(from_6to4) >= 0 && ipnum.Cmp(to_6to4) <= 0 {
+					// 6to4 address, so remap it to IPv4
+ iptype = 4
+ ipnum = ipnum.Rsh(80)
+ ipnum = ipnum.And(last_32bits)
+ } else if ipnum.Cmp(from_teredo) >= 0 && ipnum.Cmp(to_teredo) <= 0 {
+					// Teredo address, so remap it to IPv4
+ iptype = 4
+ ipnum = uint128.Uint128{^ipnum.Lo, ^ipnum.Hi}
+ ipnum = ipnum.And(last_32bits)
+ }
+ }
+ }
+ }
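+	// when the BIN file has an index table, compute the index entry position for this IP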
+ if iptype == 4 {
+ if d.meta.ipv4indexed {
+ ipnumtmp = ipnum.Rsh(16)
+ ipnumtmp = ipnumtmp.Lsh(3)
+ ipindex = uint32(ipnumtmp.Add(uint128.From64(uint64(d.meta.ipv4indexbaseaddr))).Lo)
+ }
+ } else if iptype == 6 {
+ if d.meta.ipv6indexed {
+ ipnumtmp = ipnum.Rsh(112)
+ ipnumtmp = ipnumtmp.Lsh(3)
+ ipindex = uint32(ipnumtmp.Add(uint128.From64(uint64(d.meta.ipv6indexbaseaddr))).Lo)
+ }
+ }
+ return
+}
+
+// read byte
+func (d *DB) readuint8(pos int64) (uint8, error) {
+ var retval uint8
+ data := make([]byte, 1)
+ _, err := d.f.ReadAt(data, pos-1)
+ if err != nil {
+ return 0, err
+ }
+ retval = data[0]
+ return retval, nil
+}
+
+// read row
+func (d *DB) read_row(pos uint32, size uint32) ([]byte, error) {
+ pos2 := int64(pos)
+ data := make([]byte, size)
+ _, err := d.f.ReadAt(data, pos2-1)
+ if err != nil {
+ return nil, err
+ }
+ return data, nil
+}
+
+// read unsigned 32-bit integer from slices
+func (d *DB) readuint32_row(row []byte, pos uint32) uint32 {
+ var retval uint32
+ data := row[pos : pos+4]
+ retval = binary.LittleEndian.Uint32(data)
+ return retval
+}
+
+// read unsigned 32-bit integer
+func (d *DB) readuint32(pos uint32) (uint32, error) {
+ pos2 := int64(pos)
+ var retval uint32
+ data := make([]byte, 4)
+ _, err := d.f.ReadAt(data, pos2-1)
+ if err != nil {
+ return 0, err
+ }
+ buf := bytes.NewReader(data)
+ err = binary.Read(buf, binary.LittleEndian, &retval)
+ if err != nil {
+ fmt.Printf("binary read failed: %v", err)
+ }
+ return retval, nil
+}
+
+// read unsigned 128-bit integer from slices
+func (d *DB) readuint128_row(row []byte, pos uint32) uint128.Uint128 {
+ retval := uint128.From64(0)
+ data := row[pos : pos+16]
+
+ // little endian to big endian
+ // for i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 {
+ // data[i], data[j] = data[j], data[i]
+ // }
+ retval = uint128.FromBytes(data)
+ return retval
+}
+
+// read unsigned 128-bit integer
+func (d *DB) readuint128(pos uint32) (uint128.Uint128, error) {
+ pos2 := int64(pos)
+ retval := uint128.From64(0)
+ data := make([]byte, 16)
+ _, err := d.f.ReadAt(data, pos2-1)
+ if err != nil {
+ return uint128.From64(0), err
+ }
+
+ // little endian to big endian
+ // for i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 {
+ // data[i], data[j] = data[j], data[i]
+ // }
+ retval = uint128.FromBytes(data)
+ return retval, nil
+}
+
+// read string
+func (d *DB) readstr(pos uint32) (string, error) {
+ pos2 := int64(pos)
+ readlen := 256 // max size of string field + 1 byte for the length
+ var retval string
+ data := make([]byte, readlen)
+ _, err := d.f.ReadAt(data, pos2)
+	if err != nil && err.Error() != "EOF" { // ignore EOF because we read a fixed 256 bytes, which may extend past the end of the file
+ return "", err
+ }
+ strlen := data[0]
+ retval = convertBytesToString(data[1:(strlen + 1)])
+ return retval, nil
+}
+
+// read float from slices
+func (d *DB) readfloat_row(row []byte, pos uint32) float32 {
+ var retval float32
+ data := row[pos : pos+4]
+ bits := binary.LittleEndian.Uint32(data)
+ retval = math.Float32frombits(bits)
+ return retval
+}
+
+func fatal(db *DB, err error) (*DB, error) {
+ _ = db.f.Close()
+ return nil, err
+}
+
+// OpenDB takes the path to the IP2Location BIN database file. It will read all the metadata required
+// to extract the embedded geolocation data and return the underlying DB object.
+func OpenDB(dbpath string) (*DB, error) {
+ f, err := os.Open(dbpath)
+ if err != nil {
+ return nil, err
+ }
+
+ return OpenDBWithReader(f)
+}
+
+// OpenDBWithReader takes a DBReader to the IP2Location BIN database file. It will read all the metadata
+// required to extract the embedded geolocation data and return the underlying DB object.
+func OpenDBWithReader(reader DBReader) (*DB, error) {
+ var db = &DB{}
+
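+	// initialize the IPv6 special ranges (max IPv6, 6to4 and Teredo) used by checkip to remap addresses to IPv4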
+ _max_ipv6_range := big.NewInt(0)
+ _max_ipv6_range.SetString("340282366920938463463374607431768211455", 10)
+ max_ipv6_range = uint128.FromBig(_max_ipv6_range)
+
+ _from_6to4 := big.NewInt(0)
+ _from_6to4.SetString("42545680458834377588178886921629466624", 10)
+ from_6to4 = uint128.FromBig(_from_6to4)
+
+ _to_6to4 := big.NewInt(0)
+ _to_6to4.SetString("42550872755692912415807417417958686719", 10)
+ to_6to4 = uint128.FromBig(_to_6to4)
+
+ _from_teredo := big.NewInt(0)
+ _from_teredo.SetString("42540488161975842760550356425300246528", 10)
+ from_teredo = uint128.FromBig(_from_teredo)
+
+ _to_teredo := big.NewInt(0)
+ _to_teredo.SetString("42540488241204005274814694018844196863", 10)
+ to_teredo = uint128.FromBig(_to_teredo)
+
+ db.f = reader
+
+ var row []byte
+ var err error
+ readlen := uint32(64) // 64-byte header
+
+ row, err = db.read_row(1, readlen)
+ if err != nil {
+ return fatal(db, err)
+ }
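+	// parse the BIN header fields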
+ db.meta.databasetype = row[0]
+ db.meta.databasecolumn = row[1]
+ db.meta.databaseyear = row[2]
+ db.meta.databasemonth = row[3]
+ db.meta.databaseday = row[4]
+ db.meta.ipv4databasecount = db.readuint32_row(row, 5)
+ db.meta.ipv4databaseaddr = db.readuint32_row(row, 9)
+ db.meta.ipv6databasecount = db.readuint32_row(row, 13)
+ db.meta.ipv6databaseaddr = db.readuint32_row(row, 17)
+ db.meta.ipv4indexbaseaddr = db.readuint32_row(row, 21)
+ db.meta.ipv6indexbaseaddr = db.readuint32_row(row, 25)
+ db.meta.productcode = row[29]
+ db.meta.producttype = row[30]
+ db.meta.filesize = db.readuint32_row(row, 31)
+
+	// check whether this is a valid BIN file (product code should be 1 for IP2Location BIN files); also detect zipped files ("PK" as the first two bytes)
+ if (db.meta.productcode != 1 && db.meta.databaseyear >= 21) || (db.meta.databasetype == 80 && db.meta.databasecolumn == 75) { // only BINs from Jan 2021 onwards have this byte set
+ return fatal(db, errors.New(invalid_bin))
+ }
+
+ if db.meta.ipv4indexbaseaddr > 0 {
+ db.meta.ipv4indexed = true
+ }
+
+ if db.meta.ipv6databasecount > 0 && db.meta.ipv6indexbaseaddr > 0 {
+ db.meta.ipv6indexed = true
+ }
+
+ db.meta.ipv4columnsize = uint32(db.meta.databasecolumn << 2) // 4 bytes each column
+ db.meta.ipv6columnsize = uint32(16 + ((db.meta.databasecolumn - 1) << 2)) // 4 bytes each column, except IPFrom column which is 16 bytes
+
+ dbt := db.meta.databasetype
+
+ if country_position[dbt] != 0 {
+ db.country_position_offset = uint32(country_position[dbt]-2) << 2
+ db.country_enabled = true
+ }
+ if region_position[dbt] != 0 {
+ db.region_position_offset = uint32(region_position[dbt]-2) << 2
+ db.region_enabled = true
+ }
+ if city_position[dbt] != 0 {
+ db.city_position_offset = uint32(city_position[dbt]-2) << 2
+ db.city_enabled = true
+ }
+ if isp_position[dbt] != 0 {
+ db.isp_position_offset = uint32(isp_position[dbt]-2) << 2
+ db.isp_enabled = true
+ }
+ if domain_position[dbt] != 0 {
+ db.domain_position_offset = uint32(domain_position[dbt]-2) << 2
+ db.domain_enabled = true
+ }
+ if zipcode_position[dbt] != 0 {
+ db.zipcode_position_offset = uint32(zipcode_position[dbt]-2) << 2
+ db.zipcode_enabled = true
+ }
+ if latitude_position[dbt] != 0 {
+ db.latitude_position_offset = uint32(latitude_position[dbt]-2) << 2
+ db.latitude_enabled = true
+ }
+ if longitude_position[dbt] != 0 {
+ db.longitude_position_offset = uint32(longitude_position[dbt]-2) << 2
+ db.longitude_enabled = true
+ }
+ if timezone_position[dbt] != 0 {
+ db.timezone_position_offset = uint32(timezone_position[dbt]-2) << 2
+ db.timezone_enabled = true
+ }
+ if netspeed_position[dbt] != 0 {
+ db.netspeed_position_offset = uint32(netspeed_position[dbt]-2) << 2
+ db.netspeed_enabled = true
+ }
+ if iddcode_position[dbt] != 0 {
+ db.iddcode_position_offset = uint32(iddcode_position[dbt]-2) << 2
+ db.iddcode_enabled = true
+ }
+ if areacode_position[dbt] != 0 {
+ db.areacode_position_offset = uint32(areacode_position[dbt]-2) << 2
+ db.areacode_enabled = true
+ }
+ if weatherstationcode_position[dbt] != 0 {
+ db.weatherstationcode_position_offset = uint32(weatherstationcode_position[dbt]-2) << 2
+ db.weatherstationcode_enabled = true
+ }
+ if weatherstationname_position[dbt] != 0 {
+ db.weatherstationname_position_offset = uint32(weatherstationname_position[dbt]-2) << 2
+ db.weatherstationname_enabled = true
+ }
+ if mcc_position[dbt] != 0 {
+ db.mcc_position_offset = uint32(mcc_position[dbt]-2) << 2
+ db.mcc_enabled = true
+ }
+ if mnc_position[dbt] != 0 {
+ db.mnc_position_offset = uint32(mnc_position[dbt]-2) << 2
+ db.mnc_enabled = true
+ }
+ if mobilebrand_position[dbt] != 0 {
+ db.mobilebrand_position_offset = uint32(mobilebrand_position[dbt]-2) << 2
+ db.mobilebrand_enabled = true
+ }
+ if elevation_position[dbt] != 0 {
+ db.elevation_position_offset = uint32(elevation_position[dbt]-2) << 2
+ db.elevation_enabled = true
+ }
+ if usagetype_position[dbt] != 0 {
+ db.usagetype_position_offset = uint32(usagetype_position[dbt]-2) << 2
+ db.usagetype_enabled = true
+ }
+ if addresstype_position[dbt] != 0 {
+ db.addresstype_position_offset = uint32(addresstype_position[dbt]-2) << 2
+ db.addresstype_enabled = true
+ }
+ if category_position[dbt] != 0 {
+ db.category_position_offset = uint32(category_position[dbt]-2) << 2
+ db.category_enabled = true
+ }
+ if district_position[dbt] != 0 {
+ db.district_position_offset = uint32(district_position[dbt]-2) << 2
+ db.district_enabled = true
+ }
+ if asn_position[dbt] != 0 {
+ db.asn_position_offset = uint32(asn_position[dbt]-2) << 2
+ db.asn_enabled = true
+ }
+ if as_position[dbt] != 0 {
+ db.as_position_offset = uint32(as_position[dbt]-2) << 2
+ db.as_enabled = true
+ }
+
+ db.metaok = true
+
+ return db, nil
+}
+
+// Open takes the path to the IP2Location BIN database file. It will read all the metadata required
+// to extract the embedded geolocation data.
+//
+// Deprecated: No longer being updated.
+func Open(dbpath string) {
+ db, err := OpenDB(dbpath)
+ if err != nil {
+ return
+ }
+
+ defaultDB = db
+}
+
+// Close will close the file handle to the BIN file.
+//
+// Deprecated: No longer being updated.
+func Close() {
+ defaultDB.Close()
+}
+
+// Api_version returns the version of the component.
+func Api_version() string {
+ return api_version
+}
+
+// PackageVersion returns the database type.
+func (d *DB) PackageVersion() string {
+ return strconv.Itoa(int(d.meta.databasetype))
+}
+
+// DatabaseVersion returns the database version.
+func (d *DB) DatabaseVersion() string {
+ return "20" + strconv.Itoa(int(d.meta.databaseyear)) + "." + strconv.Itoa(int(d.meta.databasemonth)) + "." + strconv.Itoa(int(d.meta.databaseday))
+}
+
+// populate record with message
+func loadmessage(mesg string) IP2Locationrecord {
+ var x IP2Locationrecord
+
+ x.Country_short = mesg
+ x.Country_long = mesg
+ x.Region = mesg
+ x.City = mesg
+ x.Isp = mesg
+ x.Domain = mesg
+ x.Zipcode = mesg
+ x.Timezone = mesg
+ x.Netspeed = mesg
+ x.Iddcode = mesg
+ x.Areacode = mesg
+ x.Weatherstationcode = mesg
+ x.Weatherstationname = mesg
+ x.Mcc = mesg
+ x.Mnc = mesg
+ x.Mobilebrand = mesg
+ x.Usagetype = mesg
+ x.Addresstype = mesg
+ x.Category = mesg
+ x.District = mesg
+ x.Asn = mesg
+ x.As = mesg
+
+ return x
+}
+
+func handleError(rec IP2Locationrecord, err error) IP2Locationrecord {
+ if err != nil {
+ fmt.Print(err)
+ }
+ return rec
+}
+
+// convertBytesToString provides a no-copy []byte to string conversion.
+// This is the same approach used by the standard library's strings.Builder.
+// Reference: https://github.com/golang/go/issues/25484
+func convertBytesToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// Get_all will return all geolocation fields based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_all(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, all))
+}
+
+// Get_country_short will return the ISO-3166 country code based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_country_short(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, countryshort))
+}
+
+// Get_country_long will return the country name based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_country_long(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, countrylong))
+}
+
+// Get_region will return the region name based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_region(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, region))
+}
+
+// Get_city will return the city name based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_city(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, city))
+}
+
+// Get_isp will return the Internet Service Provider name based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_isp(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, isp))
+}
+
+// Get_latitude will return the latitude based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_latitude(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, latitude))
+}
+
+// Get_longitude will return the longitude based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_longitude(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, longitude))
+}
+
+// Get_domain will return the domain name based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_domain(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, domain))
+}
+
+// Get_zipcode will return the postal code based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_zipcode(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, zipcode))
+}
+
+// Get_timezone will return the time zone based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_timezone(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, timezone))
+}
+
+// Get_netspeed will return the Internet connection speed based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_netspeed(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, netspeed))
+}
+
+// Get_iddcode will return the International Direct Dialing code based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_iddcode(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, iddcode))
+}
+
+// Get_areacode will return the area code based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_areacode(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, areacode))
+}
+
+// Get_weatherstationcode will return the weather station code based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_weatherstationcode(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, weatherstationcode))
+}
+
+// Get_weatherstationname will return the weather station name based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_weatherstationname(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, weatherstationname))
+}
+
+// Get_mcc will return the mobile country code based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_mcc(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, mcc))
+}
+
+// Get_mnc will return the mobile network code based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_mnc(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, mnc))
+}
+
+// Get_mobilebrand will return the mobile carrier brand based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_mobilebrand(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, mobilebrand))
+}
+
+// Get_elevation will return the elevation in meters based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_elevation(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, elevation))
+}
+
+// Get_usagetype will return the usage type based on the queried IP address.
+//
+// Deprecated: No longer being updated.
+func Get_usagetype(ipaddress string) IP2Locationrecord {
+ return handleError(defaultDB.query(ipaddress, usagetype))
+}
+
+// Get_all will return all geolocation fields based on the queried IP address.
+func (d *DB) Get_all(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, all)
+}
+
+// Get_country_short will return the ISO-3166 country code based on the queried IP address.
+func (d *DB) Get_country_short(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, countryshort)
+}
+
+// Get_country_long will return the country name based on the queried IP address.
+func (d *DB) Get_country_long(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, countrylong)
+}
+
+// Get_region will return the region name based on the queried IP address.
+func (d *DB) Get_region(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, region)
+}
+
+// Get_city will return the city name based on the queried IP address.
+func (d *DB) Get_city(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, city)
+}
+
+// Get_isp will return the Internet Service Provider name based on the queried IP address.
+func (d *DB) Get_isp(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, isp)
+}
+
+// Get_latitude will return the latitude based on the queried IP address.
+func (d *DB) Get_latitude(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, latitude)
+}
+
+// Get_longitude will return the longitude based on the queried IP address.
+func (d *DB) Get_longitude(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, longitude)
+}
+
+// Get_domain will return the domain name based on the queried IP address.
+func (d *DB) Get_domain(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, domain)
+}
+
+// Get_zipcode will return the postal code based on the queried IP address.
+func (d *DB) Get_zipcode(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, zipcode)
+}
+
+// Get_timezone will return the time zone based on the queried IP address.
+func (d *DB) Get_timezone(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, timezone)
+}
+
+// Get_netspeed will return the Internet connection speed based on the queried IP address.
+func (d *DB) Get_netspeed(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, netspeed)
+}
+
+// Get_iddcode will return the International Direct Dialing code based on the queried IP address.
+func (d *DB) Get_iddcode(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, iddcode)
+}
+
+// Get_areacode will return the area code based on the queried IP address.
+func (d *DB) Get_areacode(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, areacode)
+}
+
+// Get_weatherstationcode will return the weather station code based on the queried IP address.
+func (d *DB) Get_weatherstationcode(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, weatherstationcode)
+}
+
+// Get_weatherstationname will return the weather station name based on the queried IP address.
+func (d *DB) Get_weatherstationname(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, weatherstationname)
+}
+
+// Get_mcc will return the mobile country code based on the queried IP address.
+func (d *DB) Get_mcc(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, mcc)
+}
+
+// Get_mnc will return the mobile network code based on the queried IP address.
+func (d *DB) Get_mnc(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, mnc)
+}
+
+// Get_mobilebrand will return the mobile carrier brand based on the queried IP address.
+func (d *DB) Get_mobilebrand(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, mobilebrand)
+}
+
+// Get_elevation will return the elevation in meters based on the queried IP address.
+func (d *DB) Get_elevation(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, elevation)
+}
+
+// Get_usagetype will return the usage type based on the queried IP address.
+func (d *DB) Get_usagetype(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, usagetype)
+}
+
+// Get_addresstype will return the address type based on the queried IP address.
+func (d *DB) Get_addresstype(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, addresstype)
+}
+
+// Get_category will return the category based on the queried IP address.
+func (d *DB) Get_category(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, category)
+}
+
+// Get_district will return the district name based on the queried IP address.
+func (d *DB) Get_district(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, district)
+}
+
+// Get_asn will return the autonomous system number (ASN) based on the queried IP address.
+func (d *DB) Get_asn(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, asn)
+}
+
+// Get_as will return the autonomous system (AS) based on the queried IP address.
+func (d *DB) Get_as(ipaddress string) (IP2Locationrecord, error) {
+ return d.query(ipaddress, as)
+}
+
+// main query
+func (d *DB) query(ipaddress string, mode uint32) (IP2Locationrecord, error) {
+ x := loadmessage(not_supported) // default message
+
+ // read metadata
+ if !d.metaok {
+ x = loadmessage(missing_file)
+ return x, nil
+ }
+
+ // check IP type and return IP number & index (if exists)
+ iptype, ipno, ipindex := d.checkip(ipaddress)
+
+ if iptype == 0 {
+ x = loadmessage(invalid_address)
+ return x, nil
+ }
+
+ var err error
+ var colsize uint32
+ var baseaddr uint32
+ var low uint32
+ var high uint32
+ var mid uint32
+ var rowoffset uint32
+ var firstcol uint32 = 4 // 4 bytes for ip from
+ var row []byte
+ var fullrow []byte
+ var readlen uint32
+ ipfrom := uint128.From64(0)
+ ipto := uint128.From64(0)
+ maxip := uint128.From64(0)
+
+ if iptype == 4 {
+ baseaddr = d.meta.ipv4databaseaddr
+ high = d.meta.ipv4databasecount
+ maxip = max_ipv4_range
+ colsize = d.meta.ipv4columnsize
+ } else {
+ firstcol = 16 // 16 bytes for ip from
+ baseaddr = d.meta.ipv6databaseaddr
+ high = d.meta.ipv6databasecount
+ maxip = max_ipv6_range
+ colsize = d.meta.ipv6columnsize
+ }
+
+ // reading index
+ if ipindex > 0 {
+ row, err = d.read_row(ipindex, 8) // 4 bytes each for IP From and IP To
+ if err != nil {
+ return x, err
+ }
+ low = d.readuint32_row(row, 0)
+ high = d.readuint32_row(row, 4)
+ }
+
+ if ipno.Cmp(maxip) >= 0 {
+ ipno = ipno.Sub(uint128.From64(1))
+ }
+
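+	// binary search over the IP ranges to find the row containing the queried IP number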
+ for low <= high {
+ mid = ((low + high) >> 1)
+ rowoffset = baseaddr + (mid * colsize)
+
+ // reading IP From + whole row + next IP From
+ readlen = colsize + firstcol
+ fullrow, err = d.read_row(rowoffset, readlen)
+ if err != nil {
+ return x, err
+ }
+
+ if iptype == 4 {
+ ipfrom32 := d.readuint32_row(fullrow, 0)
+ ipfrom = uint128.From64(uint64(ipfrom32))
+
+ ipto32 := d.readuint32_row(fullrow, colsize)
+ ipto = uint128.From64(uint64(ipto32))
+ } else {
+ ipfrom = d.readuint128_row(fullrow, 0)
+
+ ipto = d.readuint128_row(fullrow, colsize)
+ }
+
+ if ipno.Cmp(ipfrom) >= 0 && ipno.Cmp(ipto) < 0 {
+ rowlen := colsize - firstcol
+ row = fullrow[firstcol:(firstcol + rowlen)] // extract the actual row data
+
+			if mode&countryshort != 0 && d.country_enabled {
+ if x.Country_short, err = d.readstr(d.readuint32_row(row, d.country_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&countrylong != 0 && d.country_enabled {
+ if x.Country_long, err = d.readstr(d.readuint32_row(row, d.country_position_offset) + 3); err != nil {
+ return x, err
+ }
+ }
+
+			if mode&region != 0 && d.region_enabled {
+ if x.Region, err = d.readstr(d.readuint32_row(row, d.region_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&city != 0 && d.city_enabled {
+ if x.City, err = d.readstr(d.readuint32_row(row, d.city_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&isp != 0 && d.isp_enabled {
+ if x.Isp, err = d.readstr(d.readuint32_row(row, d.isp_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&latitude != 0 && d.latitude_enabled {
+ x.Latitude = d.readfloat_row(row, d.latitude_position_offset)
+ }
+
+ if mode&longitude != 0 && d.longitude_enabled {
+ x.Longitude = d.readfloat_row(row, d.longitude_position_offset)
+ }
+
+ if mode&domain != 0 && d.domain_enabled {
+ if x.Domain, err = d.readstr(d.readuint32_row(row, d.domain_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&zipcode != 0 && d.zipcode_enabled {
+ if x.Zipcode, err = d.readstr(d.readuint32_row(row, d.zipcode_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&timezone != 0 && d.timezone_enabled {
+ if x.Timezone, err = d.readstr(d.readuint32_row(row, d.timezone_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&netspeed != 0 && d.netspeed_enabled {
+ if x.Netspeed, err = d.readstr(d.readuint32_row(row, d.netspeed_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&iddcode != 0 && d.iddcode_enabled {
+ if x.Iddcode, err = d.readstr(d.readuint32_row(row, d.iddcode_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&areacode != 0 && d.areacode_enabled {
+ if x.Areacode, err = d.readstr(d.readuint32_row(row, d.areacode_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&weatherstationcode != 0 && d.weatherstationcode_enabled {
+ if x.Weatherstationcode, err = d.readstr(d.readuint32_row(row, d.weatherstationcode_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&weatherstationname != 0 && d.weatherstationname_enabled {
+ if x.Weatherstationname, err = d.readstr(d.readuint32_row(row, d.weatherstationname_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&mcc != 0 && d.mcc_enabled {
+ if x.Mcc, err = d.readstr(d.readuint32_row(row, d.mcc_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&mnc != 0 && d.mnc_enabled {
+ if x.Mnc, err = d.readstr(d.readuint32_row(row, d.mnc_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&mobilebrand != 0 && d.mobilebrand_enabled {
+ if x.Mobilebrand, err = d.readstr(d.readuint32_row(row, d.mobilebrand_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&elevation != 0 && d.elevation_enabled {
+ res, err := d.readstr(d.readuint32_row(row, d.elevation_position_offset))
+ if err != nil {
+ return x, err
+ }
+
+ f, _ := strconv.ParseFloat(res, 32)
+ x.Elevation = float32(f)
+ }
+
+ if mode&usagetype != 0 && d.usagetype_enabled {
+ if x.Usagetype, err = d.readstr(d.readuint32_row(row, d.usagetype_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&addresstype != 0 && d.addresstype_enabled {
+ if x.Addresstype, err = d.readstr(d.readuint32_row(row, d.addresstype_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&category != 0 && d.category_enabled {
+ if x.Category, err = d.readstr(d.readuint32_row(row, d.category_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&district != 0 && d.district_enabled {
+ if x.District, err = d.readstr(d.readuint32_row(row, d.district_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&asn != 0 && d.asn_enabled {
+ if x.Asn, err = d.readstr(d.readuint32_row(row, d.asn_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ if mode&as != 0 && d.as_enabled {
+ if x.As, err = d.readstr(d.readuint32_row(row, d.as_position_offset)); err != nil {
+ return x, err
+ }
+ }
+
+ return x, nil
+ } else {
+ if ipno.Cmp(ipfrom) < 0 {
+ high = mid - 1
+ } else {
+ low = mid + 1
+ }
+ }
+ }
+ return x, nil
+}
+
+func (d *DB) Close() {
+ _ = d.f.Close()
+}
+
+// Printrecord is used to output the geolocation data for debugging purposes.
+func Printrecord(x IP2Locationrecord) {
+ fmt.Printf("country_short: %s\n", x.Country_short)
+ fmt.Printf("country_long: %s\n", x.Country_long)
+ fmt.Printf("region: %s\n", x.Region)
+ fmt.Printf("city: %s\n", x.City)
+ fmt.Printf("isp: %s\n", x.Isp)
+ fmt.Printf("latitude: %f\n", x.Latitude)
+ fmt.Printf("longitude: %f\n", x.Longitude)
+ fmt.Printf("domain: %s\n", x.Domain)
+ fmt.Printf("zipcode: %s\n", x.Zipcode)
+ fmt.Printf("timezone: %s\n", x.Timezone)
+ fmt.Printf("netspeed: %s\n", x.Netspeed)
+ fmt.Printf("iddcode: %s\n", x.Iddcode)
+ fmt.Printf("areacode: %s\n", x.Areacode)
+ fmt.Printf("weatherstationcode: %s\n", x.Weatherstationcode)
+ fmt.Printf("weatherstationname: %s\n", x.Weatherstationname)
+ fmt.Printf("mcc: %s\n", x.Mcc)
+ fmt.Printf("mnc: %s\n", x.Mnc)
+ fmt.Printf("mobilebrand: %s\n", x.Mobilebrand)
+ fmt.Printf("elevation: %f\n", x.Elevation)
+ fmt.Printf("usagetype: %s\n", x.Usagetype)
+ fmt.Printf("addresstype: %s\n", x.Addresstype)
+ fmt.Printf("category: %s\n", x.Category)
+ fmt.Printf("district: %s\n", x.District)
+ fmt.Printf("asn: %s\n", x.Asn)
+ fmt.Printf("as: %s\n", x.As)
+}
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/ip2locationwebservice.go b/vendor/github.com/ip2location/ip2location-go/v9/ip2locationwebservice.go
new file mode 100644
index 000000000..4d8c5db00
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/ip2locationwebservice.go
@@ -0,0 +1,227 @@
+package ip2location
+
+import (
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+)
+
+// The IP2LocationResult struct stores all of the available
+// geolocation info returned by the IP2Location Web Service.
+type IP2LocationResult struct {
+ Response string `json:"response"`
+ CountryCode string `json:"country_code"`
+ CountryName string `json:"country_name"`
+ RegionName string `json:"region_name"`
+ CityName string `json:"city_name"`
+ Latitude float64 `json:"latitude"`
+ Longitude float64 `json:"longitude"`
+ ZipCode string `json:"zip_code"`
+ TimeZone string `json:"time_zone"`
+ Isp string `json:"isp"`
+ Domain string `json:"domain"`
+ NetSpeed string `json:"net_speed"`
+ IddCode string `json:"idd_code"`
+ AreaCode string `json:"area_code"`
+ WeatherStationCode string `json:"weather_station_code"`
+ WeatherStationName string `json:"weather_station_name"`
+ Mcc string `json:"mcc"`
+ Mnc string `json:"mnc"`
+ MobileBrand string `json:"mobile_brand"`
+ Elevation int `json:"elevation"`
+ UsageType string `json:"usage_type"`
+ AddressType string `json:"address_type"`
+ Category string `json:"category"`
+ CategoryName string `json:"category_name"`
+ Geotargeting struct {
+ Metro string `json:"metro"`
+ } `json:"geotargeting"`
+ Continent struct {
+ Name string `json:"name"`
+ Code string `json:"code"`
+ Hemisphere []string `json:"hemisphere"`
+ } `json:"continent"`
+ Country struct {
+ Name string `json:"name"`
+ Alpha3Code string `json:"alpha3_code"`
+ NumericCode string `json:"numeric_code"`
+ Demonym string `json:"demonym"`
+ Flag string `json:"flag"`
+ Capital string `json:"capital"`
+ TotalArea string `json:"total_area"`
+ Population string `json:"population"`
+ Currency struct {
+ Code string `json:"code"`
+ Name string `json:"name"`
+ Symbol string `json:"symbol"`
+ } `json:"currency"`
+ Language struct {
+ Code string `json:"code"`
+ Name string `json:"name"`
+ } `json:"language"`
+ IddCode string `json:"idd_code"`
+ Tld string `json:"tld"`
+ IsEu bool `json:"is_eu"`
+ } `json:"country"`
+ CountryGroupings []struct {
+ Acronym string `json:"acronym"`
+ Name string `json:"name"`
+ } `json:"country_groupings"`
+ Region struct {
+ Name string `json:"name"`
+ Code string `json:"code"`
+ } `json:"region"`
+ City struct {
+ Name string `json:"name"`
+ } `json:"city"`
+ TimeZoneInfo struct {
+ Olson string `json:"olson"`
+ CurrentTime string `json:"current_time"`
+ GmtOffset int `json:"gmt_offset"`
+ IsDst string `json:"is_dst"`
+ Sunrise string `json:"sunrise"`
+ Sunset string `json:"sunset"`
+ } `json:"time_zone_info"`
+ CreditsConsumed int `json:"credits_consumed"`
+}
+
+// The IP2LocationCreditResult struct stores the
+// credit balance for the IP2Location Web Service.
+type IP2LocationCreditResult struct {
+ Response int `json:"response"`
+}
+
+// The WS struct is the main object used to query the IP2Location Web Service.
+type WS struct {
+ apiKey string
+ apiPackage string
+ useSSL bool
+}
+
+var regexAPIKey = regexp.MustCompile(`^[\dA-Z]{10}$`)
+var regexAPIPackage = regexp.MustCompile(`^WS\d+$`)
+
+const baseURL = "api.ip2location.com/v2/"
+const msgInvalidAPIKey = "Invalid API key."
+const msgInvalidAPIPackage = "Invalid package name."
+
+// OpenWS initializes the web service with the API key, the API package, and whether to use SSL.
+func OpenWS(apikey string, apipackage string, usessl bool) (*WS, error) {
+ var ws = &WS{}
+ ws.apiKey = apikey
+ ws.apiPackage = apipackage
+ ws.useSSL = usessl
+
+ err := ws.checkParams()
+
+ if err != nil {
+ return nil, err
+ }
+
+ return ws, nil
+}
+
+func (w *WS) checkParams() error {
+ if !regexAPIKey.MatchString(w.apiKey) {
+ return errors.New(msgInvalidAPIKey)
+ }
+
+ if !regexAPIPackage.MatchString(w.apiPackage) {
+ return errors.New(msgInvalidAPIPackage)
+ }
+
+ return nil
+}
+
+// LookUp will return all geolocation fields based on the queried IP address, the add-on fields, and the language.
+func (w *WS) LookUp(ipAddress string, addOn string, lang string) (IP2LocationResult, error) {
+ var res IP2LocationResult
+ err := w.checkParams()
+
+ if err != nil {
+ return res, err
+ }
+
+ protocol := "https"
+
+ if !w.useSSL {
+ protocol = "http"
+ }
+
+	// the lang parameter is not supported yet due to the inconsistent data types returned by the API
+ myUrl := protocol + "://" + baseURL + "?key=" + w.apiKey + "&package=" + w.apiPackage + "&ip=" + url.QueryEscape(ipAddress) + "&addon=" + url.QueryEscape(addOn)
+
+ resp, err := http.Get(myUrl)
+
+ if err != nil {
+ return res, err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusOK {
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+
+ if err != nil {
+ return res, err
+ }
+
+ err = json.Unmarshal(bodyBytes, &res)
+
+ if err != nil {
+ return res, err
+ }
+
+ return res, nil
+ }
+
+ return res, errors.New("Error HTTP " + strconv.Itoa(int(resp.StatusCode)))
+}
+
+// GetCredit will return the web service credit balance.
+func (w *WS) GetCredit() (IP2LocationCreditResult, error) {
+ var res IP2LocationCreditResult
+ err := w.checkParams()
+
+ if err != nil {
+ return res, err
+ }
+
+ protocol := "https"
+
+ if !w.useSSL {
+ protocol = "http"
+ }
+
+ myUrl := protocol + "://" + baseURL + "?key=" + w.apiKey + "&check=true"
+
+ resp, err := http.Get(myUrl)
+
+ if err != nil {
+ return res, err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusOK {
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+
+ if err != nil {
+ return res, err
+ }
+
+ err = json.Unmarshal(bodyBytes, &res)
+
+ if err != nil {
+ return res, err
+ }
+
+ return res, nil
+ }
+
+ return res, errors.New("Error HTTP " + strconv.Itoa(int(resp.StatusCode)))
+}
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/iptools.go b/vendor/github.com/ip2location/ip2location-go/v9/iptools.go
new file mode 100644
index 000000000..9808698a3
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/iptools.go
@@ -0,0 +1,491 @@
+package ip2location
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "net"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// The IPTools struct is the main object for accessing the IP address tools.
+type IPTools struct {
+ max_ipv4_range *big.Int
+ max_ipv6_range *big.Int
+}
+
+// OpenTools initializes the IPTools object and its IPv4/IPv6 range limits.
+func OpenTools() *IPTools {
+ var t = &IPTools{}
+ t.max_ipv4_range = big.NewInt(4294967295)
+ t.max_ipv6_range = big.NewInt(0)
+ t.max_ipv6_range.SetString("340282366920938463463374607431768211455", 10)
+ return t
+}
+
+// IsIPv4 returns true if the IP address provided is an IPv4.
+func (t *IPTools) IsIPv4(IP string) bool {
+ ipaddr := net.ParseIP(IP)
+
+ if ipaddr == nil {
+ return false
+ }
+
+ v4 := ipaddr.To4()
+
+ if v4 == nil {
+ return false
+ }
+
+ return true
+}
+
+// IsIPv6 returns true if the IP address provided is an IPv6.
+func (t *IPTools) IsIPv6(IP string) bool {
+ if t.IsIPv4(IP) {
+ return false
+ }
+
+ ipaddr := net.ParseIP(IP)
+
+ if ipaddr == nil {
+ return false
+ }
+
+ v6 := ipaddr.To16()
+
+ if v6 == nil {
+ return false
+ }
+
+ return true
+}
+
+// IPv4ToDecimal returns the IP number for the supplied IPv4 address.
+func (t *IPTools) IPv4ToDecimal(IP string) (*big.Int, error) {
+ if !t.IsIPv4(IP) {
+ return nil, errors.New("Not a valid IPv4 address.")
+ }
+
+ ipnum := big.NewInt(0)
+ ipaddr := net.ParseIP(IP)
+
+ if ipaddr != nil {
+ v4 := ipaddr.To4()
+
+ if v4 != nil {
+ ipnum.SetBytes(v4)
+ }
+ }
+
+ return ipnum, nil
+}
+
+// IPv6ToDecimal returns the IP number for the supplied IPv6 address.
+func (t *IPTools) IPv6ToDecimal(IP string) (*big.Int, error) {
+ if !t.IsIPv6(IP) {
+ return nil, errors.New("Not a valid IPv6 address.")
+ }
+
+ ipnum := big.NewInt(0)
+ ipaddr := net.ParseIP(IP)
+
+ if ipaddr != nil {
+ v6 := ipaddr.To16()
+
+ if v6 != nil {
+ ipnum.SetBytes(v6)
+ }
+ }
+
+ return ipnum, nil
+}
+
+// DecimalToIPv4 returns the IPv4 address for the supplied IP number.
+func (t *IPTools) DecimalToIPv4(IPNum *big.Int) (string, error) {
+ if IPNum.Cmp(big.NewInt(0)) < 0 || IPNum.Cmp(t.max_ipv4_range) > 0 {
+ return "", errors.New("Invalid IP number.")
+ }
+
+ buf := make([]byte, 4)
+ bytes := IPNum.FillBytes(buf)
+
+ ip := net.IP(bytes)
+ return ip.String(), nil
+}
+
+// DecimalToIPv6 returns the IPv6 address for the supplied IP number.
+func (t *IPTools) DecimalToIPv6(IPNum *big.Int) (string, error) {
+ if IPNum.Cmp(big.NewInt(0)) < 0 || IPNum.Cmp(t.max_ipv6_range) > 0 {
+ return "", errors.New("Invalid IP number.")
+ }
+
+ buf := make([]byte, 16)
+ bytes := IPNum.FillBytes(buf)
+
+ ip := net.IP(bytes)
+ return ip.String(), nil
+}
+
+// CompressIPv6 returns the compressed form of the supplied IPv6 address.
+func (t *IPTools) CompressIPv6(IP string) (string, error) {
+ if !t.IsIPv6(IP) {
+ return "", errors.New("Not a valid IPv6 address.")
+ }
+
+ ipaddr := net.ParseIP(IP)
+
+ if ipaddr == nil {
+ return "", errors.New("Not a valid IPv6 address.")
+ }
+
+ return ipaddr.String(), nil
+}
+
+// ExpandIPv6 returns the expanded form of the supplied IPv6 address.
+func (t *IPTools) ExpandIPv6(IP string) (string, error) {
+ if !t.IsIPv6(IP) {
+ return "", errors.New("Not a valid IPv6 address.")
+ }
+
+ ipaddr := net.ParseIP(IP)
+
+ ipstr := hex.EncodeToString(ipaddr)
+ re := regexp.MustCompile(`(.{4})`)
+ ipstr = re.ReplaceAllString(ipstr, "$1:")
+ ipstr = strings.TrimSuffix(ipstr, ":")
+
+ return ipstr, nil
+}
+
+// IPv4ToCIDR returns the list of CIDR blocks covering the supplied IPv4 range.
+func (t *IPTools) IPv4ToCIDR(IPFrom string, IPTo string) ([]string, error) {
+ if !t.IsIPv4(IPFrom) || !t.IsIPv4(IPTo) {
+ return nil, errors.New("Not a valid IPv4 address.")
+ }
+
+ startipbig, _ := t.IPv4ToDecimal(IPFrom)
+ endipbig, _ := t.IPv4ToDecimal(IPTo)
+ startip := startipbig.Uint64()
+ endip := endipbig.Uint64()
+ var result []string
+ var maxsize float64
+ var maxdiff float64
+
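+	// greedily emit the largest aligned CIDR block that starts at startip and fits within the remaining range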
+ for endip >= startip {
+ maxsize = 32
+
+ for maxsize > 0 {
+ mask := math.Pow(2, 32) - math.Pow(2, 32-(maxsize-1))
+ maskbase := startip & uint64(mask)
+
+ if maskbase != startip {
+ break
+ }
+
+ maxsize = maxsize - 1
+ }
+
+ x := math.Log(float64(endip)-float64(startip)+1) / math.Log(2)
+ maxdiff = 32 - math.Floor(x)
+
+ if maxsize < maxdiff {
+ maxsize = maxdiff
+ }
+
+ bn := big.NewInt(0)
+
+ bn.SetString(fmt.Sprintf("%v", startip), 10)
+
+ ip, _ := t.DecimalToIPv4(bn)
+ result = append(result, ip+"/"+fmt.Sprintf("%v", maxsize))
+ startip = startip + uint64(math.Pow(2, 32-maxsize))
+ }
+
+ return result, nil
+}
+
+// ipToBinary converts an IPv6 address to its 128-bit binary string representation.
+func (t *IPTools) ipToBinary(ip string) (string, error) {
+ if !t.IsIPv6(ip) {
+ return "", errors.New("Not a valid IPv6 address.")
+ }
+
+ ipaddr := net.ParseIP(ip)
+
+ binstr := ""
+ for i, j := 0, len(ipaddr); i < j; i = i + 1 {
+ binstr += fmt.Sprintf("%08b", ipaddr[i])
+ }
+
+ return binstr, nil
+}
+
+// binaryToIP converts a 128-bit binary string representation to an IPv6 address.
+func (t *IPTools) binaryToIP(binstr string) (string, error) {
+ re := regexp.MustCompile(`^[01]{128}$`)
+ if !re.MatchString(binstr) {
+ return "", errors.New("Not a valid binary string.")
+ }
+
+ re2 := regexp.MustCompile(`(.{8})`)
+
+ bytes := make([]byte, 16)
+ i := 0
+ matches := re2.FindAllStringSubmatch(binstr, -1)
+ for _, v := range matches {
+ x, _ := strconv.ParseUint(v[1], 2, 8)
+ bytes[i] = byte(x)
+ i = i + 1
+ }
+
+ ipaddr := net.IP(bytes)
+
+ return ipaddr.String(), nil
+}
+
+// minMax returns the minimum and maximum values in the array.
+func (t *IPTools) minMax(array []int) (int, int) {
+ var max int = array[0]
+ var min int = array[0]
+ for _, value := range array {
+ if max < value {
+ max = value
+ }
+ if min > value {
+ min = value
+ }
+ }
+ return min, max
+}
+
+// IPv6ToCIDR returns the list of CIDR blocks covering the supplied IPv6 range.
+func (t *IPTools) IPv6ToCIDR(IPFrom string, IPTo string) ([]string, error) {
+ if !t.IsIPv6(IPFrom) || !t.IsIPv6(IPTo) {
+ return nil, errors.New("Not a valid IPv6 address.")
+ }
+
+ ipfrombin, err := t.ipToBinary(IPFrom)
+
+ if err != nil {
+ return nil, errors.New("Not a valid IPv6 address.")
+ }
+
+ iptobin, err := t.ipToBinary(IPTo)
+
+ if err != nil {
+ return nil, errors.New("Not a valid IPv6 address.")
+ }
+
+ var result []string
+
+ networksize := 0
+ shift := 0
+ unpadded := ""
+ padded := ""
+ networks := make(map[string]int)
+ n := 0
+
+ if ipfrombin == iptobin {
+ result = append(result, IPFrom+"/128")
+ return result, nil
+ }
+
+ if ipfrombin > iptobin {
+ tmp := ipfrombin
+ ipfrombin = iptobin
+ iptobin = tmp
+ }
+
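+	// walk the binary forms of the start and end addresses, emitting covering networks until the range is exhausted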
+ for {
+ if string(ipfrombin[len(ipfrombin)-1]) == "1" {
+ unpadded = ipfrombin[networksize:128]
+ padded = fmt.Sprintf("%-128s", unpadded) // pad right with spaces
+ padded = strings.ReplaceAll(padded, " ", "0") // replace spaces
+ networks[padded] = 128 - networksize
+ n = strings.LastIndex(ipfrombin, "0")
+ if n == 0 {
+ ipfrombin = ""
+ } else {
+ ipfrombin = ipfrombin[0:n]
+ }
+ ipfrombin = ipfrombin + "1"
+ ipfrombin = fmt.Sprintf("%-128s", ipfrombin) // pad right with spaces
+ ipfrombin = strings.ReplaceAll(ipfrombin, " ", "0") // replace spaces
+ }
+
+ if string(iptobin[len(iptobin)-1]) == "0" {
+ unpadded = iptobin[networksize:128]
+ padded = fmt.Sprintf("%-128s", unpadded) // pad right with spaces
+ padded = strings.ReplaceAll(padded, " ", "0") // replace spaces
+ networks[padded] = 128 - networksize
+ n = strings.LastIndex(iptobin, "1")
+ if n == 0 {
+ iptobin = ""
+ } else {
+ iptobin = iptobin[0:n]
+ }
+ iptobin = iptobin + "0"
+ iptobin = fmt.Sprintf("%-128s", iptobin) // pad right with spaces
+ iptobin = strings.ReplaceAll(iptobin, " ", "1") // replace spaces
+ }
+
+ if iptobin < ipfrombin {
+ // special logic for Go due to lack of do-while
+ if ipfrombin >= iptobin {
+ break
+ }
+ continue
+ }
+
+ values := []int{strings.LastIndex(ipfrombin, "0"), strings.LastIndex(iptobin, "1")}
+ _, max := t.minMax(values)
+ shift = 128 - max
+ unpadded = ipfrombin[0 : 128-shift]
+ ipfrombin = fmt.Sprintf("%0128s", unpadded)
+ unpadded = iptobin[0 : 128-shift]
+ iptobin = fmt.Sprintf("%0128s", unpadded)
+
+ networksize = networksize + shift
+
+ if ipfrombin == iptobin {
+ unpadded = ipfrombin[networksize:128]
+ padded = fmt.Sprintf("%-128s", unpadded) // pad right with spaces
+ padded = strings.ReplaceAll(padded, " ", "0") // replace spaces
+ networks[padded] = 128 - networksize
+ }
+
+ if ipfrombin >= iptobin {
+ break
+ }
+ }
+
+ keys := make([]string, 0, len(networks))
+ for k := range networks {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ str, _ := t.binaryToIP(k)
+ result = append(result, str+"/"+fmt.Sprintf("%d", networks[k]))
+ }
+
+ return result, nil
+}
+
+// CIDRToIPv4 returns the IPv4 range for the supplied CIDR.
+func (t *IPTools) CIDRToIPv4(CIDR string) ([]string, error) {
+ if strings.Index(CIDR, "/") == -1 {
+ return nil, errors.New("Not a valid CIDR.")
+ }
+
+ re := regexp.MustCompile(`^[0-9]{1,2}$`)
+ arr := strings.Split(CIDR, "/")
+
+ if len(arr) != 2 || !t.IsIPv4(arr[0]) || !re.MatchString(arr[1]) {
+ return nil, errors.New("Not a valid CIDR.")
+ }
+
+ ip := arr[0]
+
+ prefix, err := strconv.Atoi(arr[1])
+ if err != nil || prefix > 32 {
+ return nil, errors.New("Not a valid CIDR.")
+ }
+
+ ipstartbn, err := t.IPv4ToDecimal(ip)
+ if err != nil {
+ return nil, errors.New("Not a valid CIDR.")
+ }
+ ipstartlong := ipstartbn.Int64()
+
+ ipstartlong = ipstartlong & (-1 << (32 - prefix))
+
+ bn := big.NewInt(0)
+ bn.SetString(strconv.Itoa(int(ipstartlong)), 10)
+
+ ipstart, _ := t.DecimalToIPv4(bn)
+
+ var total int64 = 1 << (32 - prefix)
+
+ ipendlong := ipstartlong + total - 1
+
+ if ipendlong > 4294967295 {
+ ipendlong = 4294967295
+ }
+
+ bn.SetString(strconv.Itoa(int(ipendlong)), 10)
+ ipend, _ := t.DecimalToIPv4(bn)
+
+ result := []string{ipstart, ipend}
+
+ return result, nil
+}
+
+// CIDRToIPv6 returns the IPv6 range for the supplied CIDR.
+func (t *IPTools) CIDRToIPv6(CIDR string) ([]string, error) {
+ if strings.Index(CIDR, "/") == -1 {
+ return nil, errors.New("Not a valid CIDR.")
+ }
+
+ re := regexp.MustCompile(`^[0-9]{1,3}$`)
+ arr := strings.Split(CIDR, "/")
+
+ if len(arr) != 2 || !t.IsIPv6(arr[0]) || !re.MatchString(arr[1]) {
+ return nil, errors.New("Not a valid CIDR.")
+ }
+
+ ip := arr[0]
+
+ prefix, err := strconv.Atoi(arr[1])
+ if err != nil || prefix > 128 {
+ return nil, errors.New("Not a valid CIDR.")
+ }
+
+ expand, _ := t.ExpandIPv6(ip)
+ parts := strings.Split(expand, ":")
+
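+	// bitStart is the network mask used to zero the host bits; bitEnd is the host mask used to set them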
+ bitStart := strings.Repeat("1", prefix) + strings.Repeat("0", 128-prefix)
+ bitEnd := strings.Repeat("0", prefix) + strings.Repeat("1", 128-prefix)
+
+ n := 16 // split string into 16-char parts
+ floors := []string{}
+ for i := 0; i < len(bitStart); i += n {
+ end := i + n
+ if end > len(bitStart) {
+ end = len(bitStart)
+ }
+ floors = append(floors, bitStart[i:end])
+ }
+ ceilings := []string{}
+ for i := 0; i < len(bitEnd); i += n {
+ end := i + n
+ if end > len(bitEnd) {
+ end = len(bitEnd)
+ }
+ ceilings = append(ceilings, bitEnd[i:end])
+ }
+
+ start := []string{}
+ end := []string{}
+
+ for i := 0; i < 8; i += 1 {
+ p, _ := strconv.ParseUint(parts[i], 16, 64)
+ f, _ := strconv.ParseUint(floors[i], 2, 64)
+ c, _ := strconv.ParseUint(ceilings[i], 2, 64)
+ start = append(start, strconv.FormatUint(p&f, 16))
+ end = append(end, strconv.FormatUint(p|c, 16))
+ }
+
+ hexstartaddress, _ := t.ExpandIPv6(strings.Join(start, ":"))
+ hexendaddress, _ := t.ExpandIPv6(strings.Join(end, ":"))
+ result := []string{hexstartaddress, hexendaddress}
+
+ return result, nil
+}
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/region.go b/vendor/github.com/ip2location/ip2location-go/v9/region.go
new file mode 100644
index 000000000..c06e3303e
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/region.go
@@ -0,0 +1,96 @@
+package ip2location
+
+import (
+ "encoding/csv"
+ "errors"
+ "os"
+ "strings"
+)
+
+// The RegionInfoRecord struct stores all of the available
+// region info found in the region information CSV file.
+type RegionInfoRecord struct {
+ Country_code string
+ Name string
+ Code string
+}
+
+// The RI struct is the main object used to read the region information CSV.
+type RI struct {
+ resultsMap map[string][]RegionInfoRecord
+}
+
+// OpenRegionInfo initializes the RI object with the path to the region information CSV file.
+func OpenRegionInfo(csvFile string) (*RI, error) {
+ var ri = &RI{}
+
+ _, err := os.Stat(csvFile)
+ if os.IsNotExist(err) {
+ return nil, errors.New("The CSV file '" + csvFile + "' is not found.")
+ }
+
+ f, err := os.Open(csvFile)
+ if err != nil {
+ return nil, errors.New("Unable to read '" + csvFile + "'.")
+ }
+
+ defer f.Close()
+
+ csvReader := csv.NewReader(f)
+ data, err := csvReader.ReadAll()
+ if err != nil {
+ return nil, errors.New("Unable to read '" + csvFile + "'.")
+ }
+
+ ri.resultsMap = make(map[string][]RegionInfoRecord)
+ var headerArr []string
+ var resultsArr []RegionInfoRecord
+
+ for i, line := range data {
+ if i == 0 { // headers
+ for _, field := range line {
+ headerArr = append(headerArr, field)
+ }
+ } else {
+ var rec RegionInfoRecord
+ for j, field := range line {
+ switch headerArr[j] {
+ case "country_code":
+ rec.Country_code = field
+ case "subdivision_name":
+ rec.Name = field
+ case "code":
+ rec.Code = field
+ }
+ }
+ if rec.Name == "" {
+ return nil, errors.New("Invalid region information CSV file.")
+ }
+ resultsArr = append(resultsArr, rec)
+ }
+ }
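+	// group the parsed records by country code for fast lookups in GetRegionCode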
+ for _, elem := range resultsArr {
+ if _, ok := ri.resultsMap[elem.Country_code]; !ok {
+ var arr []RegionInfoRecord
+ ri.resultsMap[elem.Country_code] = arr
+ }
+ ri.resultsMap[elem.Country_code] = append(ri.resultsMap[elem.Country_code], elem)
+ }
+ return ri, nil
+}
+
+// GetRegionCode returns the region code for the specified country code and region name.
+func (r *RI) GetRegionCode(countryCode string, regionName string) (string, error) {
+ if len(r.resultsMap) == 0 {
+ return "", errors.New("No record available.")
+ }
+
+ if arr, ok := r.resultsMap[countryCode]; ok {
+ for _, elem := range arr {
+ if strings.ToUpper(elem.Name) == strings.ToUpper(regionName) {
+ return elem.Code, nil
+ }
+ }
+ }
+ return "", errors.New("No record found.")
+}
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 000000000..66d1657d2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,1017 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ NoCompression = 0
+ BestSpeed = 1
+ BestCompression = 9
+ DefaultCompression = -1
+
+ // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+ // entropy encoding. This mode is useful in compressing data that has
+ // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+ // that lacks an entropy encoder. Compression gains are achieved when
+ // certain bytes in the input stream occur more frequently than others.
+ //
+ // Note that HuffmanOnly produces a compressed output that is
+ // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+ // continue to be able to decompress this output.
+ HuffmanOnly = -2
+ ConstantCompression = HuffmanOnly // compatibility alias.
+
+ logWindowSize = 15
+ windowSize = 1 << logWindowSize
+ windowMask = windowSize - 1
+ logMaxOffsetSize = 15 // Standard DEFLATE
+ minMatchLength = 4 // The smallest match that the compressor looks for
+ maxMatchLength = 258 // The longest match for the compressor
+ minOffsetSize = 1 // The shortest offset that makes any sense
+
+	// The maximum number of tokens we will encode at a time.
+	// Smaller sizes usually create less optimal blocks.
+	// Bigger sizes can make context switching slow.
+	// We use this for levels 7-9, so we make it big.
+ maxFlateBlockTokens = 1 << 15
+ maxStoreBlockSize = 65535
+ hashBits = 17 // After 17 performance degrades
+ hashSize = 1 << hashBits
+ hashMask = (1 << hashBits) - 1
+ hashShift = (hashBits + minMatchLength - 1) / minMatchLength
+ maxHashOffset = 1 << 28
+
+ skipNever = math.MaxInt32
+
+ debugDeflate = false
+)
+
+type compressionLevel struct {
+ good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+ {}, // 0
+	// Levels 1-6 use a specialized algorithm - values not used
+ {0, 0, 0, 0, 0, 1},
+ {0, 0, 0, 0, 0, 2},
+ {0, 0, 0, 0, 0, 3},
+ {0, 0, 0, 0, 0, 4},
+ {0, 0, 0, 0, 0, 5},
+ {0, 0, 0, 0, 0, 6},
+ // Levels 7-9 use increasingly more lazy matching
+ // and increasingly stringent conditions for "good enough".
+ {8, 12, 16, 24, skipNever, 7},
+ {16, 30, 40, 64, skipNever, 8},
+ {32, 258, 258, 1024, skipNever, 9},
+}
+
+// advancedState contains state for the advanced levels, with bigger hash tables, etc.
+type advancedState struct {
+ // deflate state
+ length int
+ offset int
+ maxInsertIndex int
+ chainHead int
+ hashOffset int
+
+ ii uint16 // position of last match, intended to overflow to reset.
+
+ // input window: unprocessed data is window[index:windowEnd]
+ index int
+ hashMatch [maxMatchLength + minMatchLength]uint32
+
+ // Input hash chains
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value
+ // If hashHead[hashValue] is within the current window, then
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+ // with the same hash value.
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
+}
+
+type compressor struct {
+ compressionLevel
+
+ h *huffmanEncoder
+ w *huffmanBitWriter
+
+ // compression algorithm
+ fill func(*compressor, []byte) int // copy data to window
+ step func(*compressor) // process window
+
+ window []byte
+ windowEnd int
+ blockStart int // window index where current tokens start
+ err error
+
+ // queued output tokens
+ tokens tokens
+ fast fastEnc
+ state *advancedState
+
+ sync bool // requesting flush
+ byteAvailable bool // if true, still need to process window[index-1].
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+ s := d.state
+ if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+ // shift the window by windowSize
+ //copy(d.window[:], d.window[windowSize:2*windowSize])
+ *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
+ s.index -= windowSize
+ d.windowEnd -= windowSize
+ if d.blockStart >= windowSize {
+ d.blockStart -= windowSize
+ } else {
+ d.blockStart = math.MaxInt32
+ }
+ s.hashOffset += windowSize
+ if s.hashOffset > maxHashOffset {
+ delta := s.hashOffset - 1
+ s.hashOffset -= delta
+ s.chainHead -= delta
+ // Iterate over slices instead of arrays to avoid copying
+ // the entire table onto the stack (Issue #18625).
+ for i, v := range s.hashPrev[:] {
+ if int(v) > delta {
+ s.hashPrev[i] = uint32(int(v) - delta)
+ } else {
+ s.hashPrev[i] = 0
+ }
+ }
+ for i, v := range s.hashHead[:] {
+ if int(v) > delta {
+ s.hashHead[i] = uint32(int(v) - delta)
+ } else {
+ s.hashHead[i] = 0
+ }
+ }
+ }
+ }
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ var window []byte
+ if d.blockStart <= index {
+ window = d.window[d.blockStart:index]
+ }
+ d.blockStart = index
+ //d.w.writeBlock(tok, eof, window)
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
+ return d.w.err
+ }
+ return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine if the block should be stored on no matches, or
+// only huffman encoded.
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ if d.blockStart <= index {
+ window := d.window[d.blockStart:index]
+ // If we removed less than a 64th of all literals
+ // we huffman compress the block.
+ if int(tok.n) > len(window)-int(tok.n>>6) {
+ d.w.writeBlockHuff(eof, window, d.sync)
+ } else {
+ // Write a dynamic huffman block.
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
+ }
+ } else {
+ d.w.writeBlock(tok, eof, nil)
+ }
+ d.blockStart = index
+ return d.w.err
+ }
+ return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+ // Do not fill window if we are in store-only or huffman mode.
+ if d.level <= 0 && d.level > -MinCustomWindowSize {
+ return
+ }
+ if d.fast != nil {
+ // encode the last data, but discard the result
+ if len(b) > maxMatchOffset {
+ b = b[len(b)-maxMatchOffset:]
+ }
+ d.fast.Encode(&d.tokens, b)
+ d.tokens.Reset()
+ return
+ }
+ s := d.state
+ // If we are given too much, cut it.
+ if len(b) > windowSize {
+ b = b[len(b)-windowSize:]
+ }
+ // Add all to window.
+ n := copy(d.window[d.windowEnd:], b)
+
+	// Calculate 256 hashes at a time (more L1 cache hits)
+ loops := (n + 256 - minMatchLength) / 256
+ for j := 0; j < loops; j++ {
+ startindex := j * 256
+ end := startindex + 256 + minMatchLength - 1
+ if end > n {
+ end = n
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+
+ if dstSize <= 0 {
+ continue
+ }
+
+ dst := s.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
+ // Set the head of the hash chain to us.
+ s.hashHead[newH] = uint32(di + s.hashOffset)
+ }
+ }
+ // Update window information.
+ d.windowEnd += n
+ s.index = n
+}
+
+// findMatch tries to find a match starting at pos that is longer than the
+// best match found so far. We only look at d.chain possibilities before giving up.
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
+ minMatchLook := maxMatchLength
+ if lookahead < minMatchLook {
+ minMatchLook = lookahead
+ }
+
+ win := d.window[0 : pos+minMatchLook]
+
+ // We quit when we get a match that's at least nice long
+ nice := len(win) - pos
+ if d.nice < nice {
+ nice = d.nice
+ }
+
+ // If we've got a match that's good enough, only look in 1/4 the chain.
+ tries := d.chain
+ length = minMatchLength - 1
+
+ wEnd := win[pos+length]
+ wPos := win[pos:]
+ minIndex := pos - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+ offset = 0
+
+ if d.chain < 100 {
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+ if n > length {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i <= minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex {
+ break
+ }
+ }
+ return
+ }
+
+ // Minimum gain to accept a match.
+ cGain := 4
+
+ // Some like it higher (CSV), some like it lower (JSON)
+ const baseCost = 3
+	// The base is 4 bytes, with an additional cost.
+ // Matches must be better than this.
+
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+ if n > length {
+ // Calculate gain. Estimate
+ newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
+
+ //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
+ if newGain > cGain {
+ length = n
+ offset = pos - i
+ cGain = newGain
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ }
+ if i <= minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex {
+ break
+ }
+ }
+ return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+ if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.writeBytes(buf)
+ return d.w.err
+}
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+ return hash4u(binary.LittleEndian.Uint32(b), hashBits)
+}
+
+// hash4u returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+ return (u * prime4bytes) >> (32 - h)
+}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4
+func bulkHash4(b []byte, dst []uint32) {
+ if len(b) < 4 {
+ return
+ }
+ hb := binary.LittleEndian.Uint32(b)
+
+ dst[0] = hash4u(hb, hashBits)
+ end := len(b) - 4 + 1
+ for i := 1; i < end; i++ {
+ hb = (hb >> 8) | uint32(b[i+3])<<24
+ dst[i] = hash4u(hb, hashBits)
+ }
+}
+
+func (d *compressor) initDeflate() {
+ d.window = make([]byte, 2*windowSize)
+ d.byteAvailable = false
+ d.err = nil
+ if d.state == nil {
+ return
+ }
+ s := d.state
+ s.index = 0
+ s.hashOffset = 1
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.chainHead = -1
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazy() {
+ s := d.state
+ // Sanity enables additional runtime tests.
+ // It's intended to be used during development
+ // to supplement the currently ad-hoc unit tests.
+ const sanity = debugDeflate
+
+ if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+ if d.windowEnd != s.index && d.chain > 100 {
+ // Get literal huffman coder.
+ if d.h == nil {
+ d.h = newHuffmanEncoder(maxFlateBlockTokens)
+ }
+ var tmp [256]uint16
+ for _, v := range d.window[s.index:d.windowEnd] {
+ tmp[v]++
+ }
+ d.h.generate(tmp[:], 15)
+ }
+
+ s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+
+ for {
+ if sanity && s.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - s.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ return
+ }
+ if sanity && s.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ // Flush current output block if any.
+ if d.byteAvailable {
+ // There is still one pending token that needs to be flushed
+ d.tokens.AddLiteral(d.window[s.index-1])
+ d.byteAvailable = false
+ }
+ if d.tokens.n > 0 {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ return
+ }
+ }
+ if s.index < s.maxInsertIndex {
+ // Update the hash
+ hash := hash4(d.window[s.index:])
+ ch := s.hashHead[hash]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[hash] = uint32(s.index + s.hashOffset)
+ }
+ prevLength := s.length
+ prevOffset := s.offset
+ s.length = minMatchLength - 1
+ s.offset = 0
+ minIndex := s.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
+ s.length = newLength
+ s.offset = newOffset
+ }
+ }
+
+ if prevLength >= minMatchLength && s.length <= prevLength {
+ // No better match, but check for better match at end...
+ //
+ // Skip forward a number of bytes.
+ // Offset of 2 seems to yield best results. 3 is sometimes better.
+ const checkOff = 2
+
+ // Check all, except full length
+ if prevLength < maxMatchLength-checkOff {
+ prevIndex := s.index - 1
+ if prevIndex+prevLength < s.maxInsertIndex {
+ end := lookahead
+ if lookahead > maxMatchLength+checkOff {
+ end = maxMatchLength + checkOff
+ }
+ end += prevIndex
+
+ // Hash at match end.
+ h := hash4(d.window[prevIndex+prevLength:])
+ ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
+ if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
+ length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
+ // It seems like a pure length metric is best.
+ if length > prevLength {
+ prevLength = length
+ prevOffset = prevIndex - ch2
+
+ // Extend back...
+ for i := checkOff - 1; i >= 0; i-- {
+ if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
+ // Emit tokens we "owe"
+ for j := 0; j <= i; j++ {
+ d.tokens.AddLiteral(d.window[prevIndex+j])
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
+ }
+ break
+ } else {
+ prevLength++
+ }
+ }
+ } else if false {
+ // Check one further ahead.
+ // Only rarely better, disabled for now.
+ prevIndex++
+ h := hash4(d.window[prevIndex+prevLength:])
+ ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
+ if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
+ length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
+ // It seems like a pure length metric is best.
+ if length > prevLength+checkOff {
+ prevLength = length
+ prevOffset = prevIndex - ch2
+ prevIndex--
+
+ // Extend back...
+ for i := checkOff; i >= 0; i-- {
+ if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
+ // Emit tokens we "owe"
+ for j := 0; j <= i; j++ {
+ d.tokens.AddLiteral(d.window[prevIndex+j])
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
+ }
+ break
+ } else {
+ prevLength++
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+ d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ newIndex := s.index + prevLength - 1
+ // Calculate missing hashes
+ end := newIndex
+ if end > s.maxInsertIndex {
+ end = s.maxInsertIndex
+ }
+ end += minMatchLength - 1
+ startindex := s.index + 1
+ if startindex > s.maxInsertIndex {
+ startindex = s.maxInsertIndex
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+ if dstSize > 0 {
+ dst := s.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
+ // Set the head of the hash chain to us.
+ s.hashHead[newH] = uint32(di + s.hashOffset)
+ }
+ }
+
+ s.index = newIndex
+ d.byteAvailable = false
+ s.length = minMatchLength - 1
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.ii = 0
+ } else {
+ // Reset, if we got a match this run.
+ if s.length >= minMatchLength {
+ s.ii = 0
+ }
+ // We have a byte waiting. Emit it.
+ if d.byteAvailable {
+ s.ii++
+ d.tokens.AddLiteral(d.window[s.index-1])
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+
+ // If we have a long run of no matches, skip additional bytes
+ // Resets when s.ii overflows after 64KB.
+ if n := int(s.ii) - d.chain; n > 0 {
+ n = 1 + int(n>>6)
+ for j := 0; j < n; j++ {
+ if s.index >= d.windowEnd-1 {
+ break
+ }
+ d.tokens.AddLiteral(d.window[s.index-1])
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ // Index...
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
+ s.index++
+ }
+ // Flush last byte
+ d.tokens.AddLiteral(d.window[s.index-1])
+ d.byteAvailable = false
+ // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ }
+ } else {
+ s.index++
+ d.byteAvailable = true
+ }
+ }
+ }
+}
+
+func (d *compressor) store() {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
+ }
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+ if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+ return
+ }
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeFast() {
+ // We only compress if we have maxStoreBlockSize.
+ if d.windowEnd < len(d.window) {
+ if !d.sync {
+ return
+ }
+ // Handle extremely small sizes.
+ if d.windowEnd < 128 {
+ if d.windowEnd == 0 {
+ return
+ }
+ if d.windowEnd <= 32 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ } else {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+ d.fast.Reset()
+ return
+ }
+ }
+
+ d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
+ // If we made zero matches, store the block as is.
+ if d.tokens.n == 0 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ // If we removed less than 1/16th, huffman compress the block.
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ } else {
+ d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+}
+
+// write will add the input bytes to the stream.
+// Unless an error occurs all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+ if d.err != nil {
+ return 0, d.err
+ }
+ n = len(b)
+ for len(b) > 0 {
+ if d.windowEnd == len(d.window) || d.sync {
+ d.step(d)
+ }
+ b = b[d.fill(d, b):]
+ if d.err != nil {
+ return 0, d.err
+ }
+ }
+ return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+ d.sync = true
+ if d.err != nil {
+ return d.err
+ }
+ d.step(d)
+ if d.err == nil {
+ d.w.writeStoredHeader(0, false)
+ d.w.flush()
+ d.err = d.w.err
+ }
+ d.sync = false
+ return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+ d.w = newHuffmanBitWriter(w)
+
+ switch {
+ case level == NoCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).store
+ case level == ConstantCompression:
+ d.w.logNewTablePenalty = 10
+ d.window = make([]byte, 32<<10)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeHuff
+ case level == DefaultCompression:
+ level = 5
+ fallthrough
+ case level >= 1 && level <= 6:
+ d.w.logNewTablePenalty = 7
+ d.fast = newFastEnc(level)
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeFast
+ case 7 <= level && level <= 9:
+ d.w.logNewTablePenalty = 8
+ d.state = &advancedState{}
+ d.compressionLevel = levels[level]
+ d.initDeflate()
+ d.fill = (*compressor).fillDeflate
+ d.step = (*compressor).deflateLazy
+ case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
+ d.w.logNewTablePenalty = 7
+ d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeFast
+ default:
+ return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+ }
+ d.level = level
+ return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+ d.w.reset(w)
+ d.sync = false
+ d.err = nil
+ // We only need to reset a few things for Snappy.
+ if d.fast != nil {
+ d.fast.Reset()
+ d.windowEnd = 0
+ d.tokens.Reset()
+ return
+ }
+ switch d.compressionLevel.chain {
+ case 0:
+		// level was NoCompression or ConstantCompression.
+ d.windowEnd = 0
+ default:
+ s := d.state
+ s.chainHead = -1
+ for i := range s.hashHead {
+ s.hashHead[i] = 0
+ }
+ for i := range s.hashPrev {
+ s.hashPrev[i] = 0
+ }
+ s.hashOffset = 1
+ s.index, d.windowEnd = 0, 0
+ d.blockStart, d.byteAvailable = 0, false
+ d.tokens.Reset()
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.ii = 0
+ s.maxInsertIndex = 0
+ }
+}
+
+func (d *compressor) close() error {
+ if d.err != nil {
+ return d.err
+ }
+ d.sync = true
+ d.step(d)
+ if d.err != nil {
+ return d.err
+ }
+ if d.w.writeStoredHeader(0, true); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.flush()
+ d.w.reset(nil)
+ return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more.
+// Level 0 (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+ var dw Writer
+ if err := dw.d.init(w, level); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary. The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output. The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+ zw, err := NewWriter(w, level)
+ if err != nil {
+ return nil, err
+ }
+ zw.d.fillWindow(dict)
+ zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+ return zw, err
+}
+
+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = 32
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = windowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+ if windowSize < MinCustomWindowSize {
+		return nil, errors.New("flate: requested window size less than MinCustomWindowSize")
+ }
+ if windowSize > MaxCustomWindowSize {
+ return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
+ }
+ var dw Writer
+ if err := dw.d.init(w, -windowSize); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+ d compressor
+ dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+ return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+ // For more about flushing:
+ // http://www.bolet.org/~pornin/deflate-flush.html
+ return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+ return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+ if len(w.dict) > 0 {
+ // w was created with NewWriterDict
+ w.d.reset(dst)
+ if dst != nil {
+ w.d.fillWindow(w.dict)
+ }
+ } else {
+ // w was created with NewWriter
+ w.d.reset(dst)
+ }
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+ w.dict = dict
+ w.d.reset(dst)
+ w.d.fillWindow(w.dict)
+}
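
A brief usage sketch of the Writer API defined in this file (NewWriter, Write, Flush, Close). It only uses calls documented above and is illustrative, not part of the vendored code.

    package main

    import (
    	"bytes"
    	"fmt"
    	"log"

    	"github.com/klauspost/compress/flate"
    )

    func main() {
    	var buf bytes.Buffer

    	// BestSpeed (level 1) selects one of the specialized fast encoders (levels 1-6).
    	zw, err := flate.NewWriter(&buf, flate.BestSpeed)
    	if err != nil {
    		log.Fatal(err)
    	}
    	if _, err := zw.Write([]byte("hello, hello, hello, world")); err != nil {
    		log.Fatal(err)
    	}
    	// Flush emits a sync marker so a reader can decode everything written so far.
    	if err := zw.Flush(); err != nil {
    		log.Fatal(err)
    	}
    	if err := zw.Close(); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("compressed bytes:", buf.Len())
    }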
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
new file mode 100644
index 000000000..bb36351a5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+// - Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
+//
+// - Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checking of the arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+ hist []byte // Sliding window history
+
+ // Invariant: 0 <= rdPos <= wrPos <= len(hist)
+ wrPos int // Current output position in buffer
+ rdPos int // Have emitted hist[:rdPos] already
+ full bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+ *dd = dictDecoder{hist: dd.hist}
+
+ if cap(dd.hist) < size {
+ dd.hist = make([]byte, size)
+ }
+ dd.hist = dd.hist[:size]
+
+ if len(dict) > len(dd.hist) {
+ dict = dict[len(dict)-len(dd.hist):]
+ }
+ dd.wrPos = copy(dd.hist, dict)
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos = 0
+ dd.full = true
+ }
+ dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+ if dd.full {
+ return len(dd.hist)
+ }
+ return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+ return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+ return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+ return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+ dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+ dd.hist[dd.wrPos] = c
+ dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+ dstBase := dd.wrPos
+ dstPos := dstBase
+ srcPos := dstPos - dist
+ endPos := dstPos + length
+ if endPos > len(dd.hist) {
+ endPos = len(dd.hist)
+ }
+
+ // Copy non-overlapping section after destination position.
+ //
+ // This section is non-overlapping in that the copy length for this section
+ // is always less than or equal to the backwards distance. This can occur
+ // if a distance refers to data that wraps-around in the buffer.
+ // Thus, a backwards copy is performed here; that is, the exact bytes in
+	// the source prior to the copy are placed in the destination.
+ if srcPos < 0 {
+ srcPos += len(dd.hist)
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+ srcPos = 0
+ }
+
+ // Copy possibly overlapping section before destination position.
+ //
+ // This section can overlap if the copy length for this section is larger
+ // than the backwards distance. This is allowed by LZ77 so that repeated
+ // strings can be succinctly represented using (dist, length) pairs.
+	// Thus, a forwards copy is performed here; that is, the bytes copied are
+ // possibly dependent on the resulting bytes in the destination as the copy
+ // progresses along. This is functionally equivalent to the following:
+ //
+ // for i := 0; i < endPos-dstPos; i++ {
+ // dd.hist[dstPos+i] = dd.hist[srcPos+i]
+ // }
+ // dstPos = endPos
+ //
+ for dstPos < endPos {
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+ dstPos := dd.wrPos
+ endPos := dstPos + length
+ if dstPos < dist || endPos > len(dd.hist) {
+ return 0
+ }
+ dstBase := dstPos
+ srcPos := dstPos - dist
+
+ // Copy possibly overlapping section before destination position.
+loop:
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ if dstPos < endPos {
+ goto loop // Avoid for-loop so that this function can be inlined
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+ toRead := dd.hist[dd.rdPos:dd.wrPos]
+ dd.rdPos = dd.wrPos
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos, dd.rdPos = 0, 0
+ dd.full = true
+ }
+ return toRead
+}
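
A schematic sketch of how a decode loop inside this package could drive dictDecoder according to the invariants documented above. The symbol decoding and the emit callback are elided or hypothetical; only the dictDecoder methods are taken from this file.

    // decodeInto is a package-internal sketch, not the real inflate loop.
    func decodeInto(dd *dictDecoder, emit func([]byte)) {
    	dd.init(32*1024, nil) // 32 KiB sliding window, no preset dictionary

    	for {
    		if dd.availWrite() == 0 {
    			// The window is full: hand the finished bytes to the consumer.
    			// readFlush output must be fully consumed before writing again.
    			emit(dd.readFlush())
    		}
    		// Decode the next symbol (elided). For a literal byte b:
    		//     dd.writeByte(b)
    		// For a backward copy of length n at distance dist <= dd.histSize():
    		//     if dd.tryWriteCopy(dist, n) == 0 {
    		//         dd.writeCopy(dist, n) // may copy fewer bytes near the window edge
    		//     }
    		break // sketch only: stop after one pass
    	}
    	emit(dd.readFlush())
    }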
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
new file mode 100644
index 000000000..c8124b5c4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -0,0 +1,193 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+type fastEnc interface {
+ Encode(dst *tokens, src []byte)
+ Reset()
+}
+
+func newFastEnc(level int) fastEnc {
+ switch level {
+ case 1:
+ return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 2:
+ return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 3:
+ return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 4:
+ return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 5:
+ return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 6:
+ return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
+ default:
+ panic("invalid level specified")
+ }
+}
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+ baseMatchOffset = 1 // The smallest match offset
+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
+ maxMatchOffset = 1 << 15 // The largest match offset
+
+ bTableBits = 17 // Bits used in the big tables
+ bTableSize = 1 << bTableBits // Size of the table
+ allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
+)
+
+const (
+ prime3bytes = 506832829
+ prime4bytes = 2654435761
+ prime5bytes = 889523592379
+ prime6bytes = 227718039650203
+ prime7bytes = 58295818150454627
+ prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+func load3232(b []byte, i int32) uint32 {
+ return binary.LittleEndian.Uint32(b[i:])
+}
+
+func load6432(b []byte, i int32) uint64 {
+ return binary.LittleEndian.Uint64(b[i:])
+}
+
+type tableEntry struct {
+ offset int32
+}
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastGen struct {
+ hist []byte
+ cur int32
+}
+
+func (e *fastGen) addBlock(src []byte) int32 {
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.hist = make([]byte, 0, allocHistory)
+ } else {
+ if cap(e.hist) < maxMatchOffset*2 {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - maxMatchOffset
+ // copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:maxMatchOffset]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+type tableEntryPrev struct {
+ Cur tableEntry
+ Prev tableEntry
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
+}
+
+// hashLen returns a hash of the lowest mls bytes of u with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+ switch mls {
+ case 3:
+ return (uint32(u<<8) * prime3bytes) >> (32 - length)
+ case 5:
+ return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+ case 6:
+ return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+ case 7:
+ return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+ case 8:
+ return uint32((u * prime8bytes) >> (64 - length))
+ default:
+ return (uint32(u) * prime4bytes) >> (32 - length)
+ }
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+ // We offset current position so everything will be out of reach.
+ // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+ if e.cur <= bufferReset {
+ e.cur += maxMatchOffset + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+}
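
The fast encoder levels built on fastGen index their match tables with the helpers above; the following is a hedged sketch of that pattern. The function, src, table, and cur names are local to the sketch; only load6432, hashLen, tableBits, tableSize, and tableEntry come from this file.

    // sketchHashInsert shows the hashing pattern: load 8 bytes at s, hash the
    // low bytes into a tableBits-wide index, and record where the sequence was
    // seen. Offsets are stored relative to cur so they survive window shifts.
    // The caller must guarantee s+8 <= len(src). Illustrative only.
    func sketchHashInsert(src []byte, s int32, table *[tableSize]tableEntry, cur int32) {
    	const mls = 5 // number of low bytes to hash; chosen for illustration
    	cv := load6432(src, s)
    	h := hashLen(cv, tableBits, mls)
    	table[h] = tableEntry{offset: s + cur}
    }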
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 000000000..f70594c34
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,1182 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ // The largest offset code.
+ offsetCodeCount = 30
+
+ // The special code used to mark the end of a block.
+ endBlockMarker = 256
+
+ // The first length code.
+ lengthCodesStart = 257
+
+ // The number of codegen codes.
+ codegenCodeCount = 19
+ badCode = 255
+
+ // maxPredefinedTokens is the maximum number of tokens
+ // where we check if fixed size is smaller.
+ maxPredefinedTokens = 250
+
+ // bufferFlushSize indicates the buffer size
+ // after which bytes are flushed to the writer.
+ // Should preferably be a multiple of 6, since
+ // we accumulate 6 bytes between writes to the buffer.
+ bufferFlushSize = 246
+)
+
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
+// The number of extra bits needed by length code X - LENGTH_CODES_START.
+var lengthExtraBits = [32]uint8{
+ /* 257 */ 0, 0, 0,
+ /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+ /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+ /* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - LENGTH_CODES_START.
+var lengthBase = [32]uint8{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+ 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+ 64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
+// offset code word extra bits.
+var offsetExtraBits = [32]int8{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ /* extended window */
+ 14, 14,
+}
+
+var offsetCombined = [32]uint32{}
+
+func init() {
+ var offsetBase = [32]uint32{
+ /* normal deflate */
+ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+ 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+ 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+ 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+ 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+ 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+ /* extended window */
+ 0x008000, 0x00c000,
+ }
+
+ for i := range offsetCombined[:] {
+ // Don't use extended window values...
+ if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
+ continue
+ }
+ offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
+ }
+}
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+type huffmanBitWriter struct {
+ // writer is the underlying writer.
+ // Do not use it directly; use the write method, which ensures
+ // that Write errors are sticky.
+ writer io.Writer
+
+ // Data waiting to be written is bytes[0:nbytes]
+ // and then the low nbits of bits.
+ bits uint64
+ nbits uint8
+ nbytes uint8
+ lastHuffMan bool
+ literalEncoding *huffmanEncoder
+ tmpLitEncoding *huffmanEncoder
+ offsetEncoding *huffmanEncoder
+ codegenEncoding *huffmanEncoder
+ err error
+ lastHeader int
+ // Set between 0 (reused block can be up to 2x the size)
+	// logNewTablePenalty is the shift used to penalize new tables: at 0 a reused
+	// block may be up to 2x the optimal size before a new table is emitted;
+	// larger values shrink the penalty.
+ bytes [256 + 8]byte
+ literalFreq [lengthCodesStart + 32]uint16
+ offsetFreq [32]uint16
+ codegenFreq [codegenCodeCount]uint16
+
+ // codegen must have an extra space for the final symbol.
+ codegen [literalCount + offsetCodeCount + 1]uint8
+}
+
+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a new ('fresh') table by calculating the
+// optimal size and adding a penalty in 'logNewTablePenalty'.
+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
+// is slower both for compression and decompression.
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+ return &huffmanBitWriter{
+ writer: w,
+ literalEncoding: newHuffmanEncoder(literalCount),
+ tmpLitEncoding: newHuffmanEncoder(literalCount),
+ codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+ offsetEncoding: newHuffmanEncoder(offsetCodeCount),
+ }
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+ w.writer = writer
+ w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+ w.lastHeader = 0
+ w.lastHuffMan = false
+}
+
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
+ a := t.offHist[:offsetCodeCount]
+ b := w.offsetEncoding.codes
+ b = b[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
+ }
+ }
+
+ a = t.extraHist[:literalCount-256]
+ b = w.literalEncoding.codes[256:literalCount]
+ b = b[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
+ }
+ }
+
+ a = t.litHist[:256]
+ b = w.literalEncoding.codes[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
+ }
+ }
+ return true
+}
+
+func (w *huffmanBitWriter) flush() {
+ if w.err != nil {
+ w.nbits = 0
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ n := w.nbytes
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ if w.nbits > 8 { // Avoid underflow
+ w.nbits -= 8
+ } else {
+ w.nbits = 0
+ }
+ n++
+ }
+ w.bits = 0
+ w.write(w.bytes[:n])
+ w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
+ w.bits |= uint64(b) << (w.nbits & 63)
+ w.nbits += nb
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+ if w.err != nil {
+ return
+ }
+ n := w.nbytes
+ if w.nbits&7 != 0 {
+ w.err = InternalError("writeBytes with unfinished bits")
+ return
+ }
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ w.nbits -= 8
+ n++
+ }
+ if n != 0 {
+ w.write(w.bytes[:n])
+ }
+ w.nbytes = 0
+ w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array). This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code are written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker
+//
+// numLiterals The number of literals in literalEncoding
+// numOffsets The number of offsets in offsetEncoding
+// litenc, offenc The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+ for i := range w.codegenFreq {
+ w.codegenFreq[i] = 0
+ }
+ // Note that we are using codegen both as a temporary variable for holding
+ // a copy of the frequencies, and as the place where we put the result.
+ // This is fine because the output is always shorter than the input used
+ // so far.
+ codegen := w.codegen[:] // cache
+ // Copy the concatenated code sizes to codegen. Put a marker at the end.
+ cgnl := codegen[:numLiterals]
+ for i := range cgnl {
+ cgnl[i] = litEnc.codes[i].len()
+ }
+
+ cgnl = codegen[numLiterals : numLiterals+numOffsets]
+ for i := range cgnl {
+ cgnl[i] = offEnc.codes[i].len()
+ }
+ codegen[numLiterals+numOffsets] = badCode
+
+ size := codegen[0]
+ count := 1
+ outIndex := 0
+ for inIndex := 1; size != badCode; inIndex++ {
+ // INVARIANT: We have seen "count" copies of size that have not yet
+ // had output generated for them.
+ nextSize := codegen[inIndex]
+ if nextSize == size {
+ count++
+ continue
+ }
+ // We need to generate codegen indicating "count" of size.
+ if size != 0 {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ count--
+ for count >= 3 {
+ n := 6
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 16
+ outIndex++
+ codegen[outIndex] = uint8(n - 3)
+ outIndex++
+ w.codegenFreq[16]++
+ count -= n
+ }
+ } else {
+ for count >= 11 {
+ n := 138
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 18
+ outIndex++
+ codegen[outIndex] = uint8(n - 11)
+ outIndex++
+ w.codegenFreq[18]++
+ count -= n
+ }
+ if count >= 3 {
+ // count >= 3 && count <= 10
+ codegen[outIndex] = 17
+ outIndex++
+ codegen[outIndex] = uint8(count - 3)
+ outIndex++
+ w.codegenFreq[17]++
+ count = 0
+ }
+ }
+ count--
+ for ; count >= 0; count-- {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ }
+ // Set up invariant for next time through the loop.
+ size = nextSize
+ count = 1
+ }
+ // Marker indicating the end of the codegen.
+ codegen[outIndex] = badCode
+}
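
As a worked example of the run-length encoding produced above (values per RFC 1951 section 3.2.7; the concrete input is illustrative only):

    code lengths in : 8 8 8 8 8 0 0 0 0 0 0 0 3
    codegen output  : 8,
                      16 (2 extra bits = 1, repeat the previous length 4 more times),
                      17 (3 extra bits = 4, a run of 7 zeros),
                      3

Codes 0-15 are literal lengths, code 16 repeats the previous length 3-6 times, code 17 emits 3-10 zeros, and code 18 emits 11-138 zeros, matching the 2, 3, and 7 extra bits written in writeDynamicHeader.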
+
+func (w *huffmanBitWriter) codegens() int {
+ numCodegens := len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
+ numCodegens = len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return 3 + 5 + 5 + 4 + (3 * numCodegens) +
+ w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+ int(w.codegenFreq[16])*2 +
+ int(w.codegenFreq[17])*3 +
+ int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicReuseSize returns the size of dynamically encoded data in bits when reusing the given encoders.
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+ size = litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:])
+ return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+ header, numCodegens := w.headerSize()
+ size = header +
+ litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:]) +
+ extraBits
+ return size, numCodegens
+}
+
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+ total := 0
+ for i, n := range w.literalFreq[257:literalCount] {
+ total += int(n) * int(lengthExtraBits[i&31])
+ }
+ for i, n := range w.offsetFreq[:offsetCodeCount] {
+ total += int(n) * int(offsetExtraBits[i&31])
+ }
+ return total
+}
+
+// fixedSize returns the size of data encoded with the fixed Huffman tables, in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+ return 3 +
+ fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+ fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
+ extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the block
+// fits inside a single block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+ if in == nil {
+ return 0, false
+ }
+ if len(in) <= maxStoreBlockSize {
+ return (len(in) + 5) * 8, true
+ }
+ return 0, false
+}
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+ // The function does not get inlined if we "& 63" the shift.
+ w.bits |= c.code64() << (w.nbits & 63)
+ w.nbits += c.len()
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+// writeOutBits will write bits to the buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+
+ // We over-write, but faster...
+ binary.LittleEndian.PutUint64(w.bytes[n:], bits)
+ n += 6
+
+ if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
+ w.write(w.bytes[:n])
+ n = 0
+ }
+
+ w.nbytes = n
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var firstBits int32 = 4
+ if isEof {
+ firstBits = 5
+ }
+ w.writeBits(firstBits, 3)
+ w.writeBits(int32(numLiterals-257), 5)
+ w.writeBits(int32(numOffsets-1), 5)
+ w.writeBits(int32(numCodegens-4), 4)
+
+ for i := 0; i < numCodegens; i++ {
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
+ w.writeBits(int32(value), 3)
+ }
+
+ i := 0
+ for {
+ var codeWord = uint32(w.codegen[i])
+ i++
+ if codeWord == badCode {
+ break
+ }
+ w.writeCode(w.codegenEncoding.codes[codeWord])
+
+ switch codeWord {
+ case 16:
+ w.writeBits(int32(w.codegen[i]), 2)
+ i++
+ case 17:
+ w.writeBits(int32(w.codegen[i]), 3)
+ i++
+ case 18:
+ w.writeBits(int32(w.codegen[i]), 7)
+ i++
+ }
+ }
+}
+
+// writeStoredHeader will write a stored header.
+// If the stored block is only used for EOF,
+// it is replaced with a fixed huffman block.
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
+ if length == 0 && isEof {
+ w.writeFixedHeader(isEof)
+ // EOB: 7 bits, value: 0
+ w.writeBits(0, 7)
+ w.flush()
+ return
+ }
+
+ var flag int32
+ if isEof {
+ flag = 1
+ }
+ w.writeBits(flag, 3)
+ w.flush()
+ w.writeBits(int32(length), 16)
+ w.writeBits(int32(^uint16(length)), 16)
+}
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // Indicate that we are a fixed Huffman block
+ var value int32 = 2
+ if isEof {
+ value = 3
+ }
+ w.writeBits(value, 3)
+}
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens.AddEOB()
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, false)
+ w.generate()
+ var extraBits int
+ storedSize, storable := w.storedSize(input)
+ if storable {
+ extraBits = w.extraBitSize()
+ }
+
+ // Figure out smallest code.
+ // Fixed Huffman baseline.
+ var literalEncoding = fixedLiteralEncoding
+ var offsetEncoding = fixedOffsetEncoding
+ var size = math.MaxInt32
+ if tokens.n < maxPredefinedTokens {
+ size = w.fixedSize(extraBits)
+ }
+
+ // Dynamic Huffman?
+ var numCodegens int
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ if dynamicSize < size {
+ size = dynamicSize
+ literalEncoding = w.literalEncoding
+ offsetEncoding = w.offsetEncoding
+ }
+
+ // Stored bytes?
+ if storable && storedSize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ if literalEncoding == fixedLiteralEncoding {
+ w.writeFixedHeader(eof)
+ } else {
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ }
+
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
+}
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbols used have a disproportionate
+// histogram distribution.
+// If input is supplied and the compression savings are below 1/16th of the
+// input size the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ sync = sync || eof
+ if sync {
+ tokens.AddEOB()
+ }
+
+	// We cannot reuse a pure Huffman table, and must mark it as EOF.
+ if (w.lastHuffMan || eof) && w.lastHeader > 0 {
+ // We will not try to reuse.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+
+ // fillReuse enables filling of empty values.
+ // This will make encodings always reusable without testing.
+	// However, this does not appear to be beneficial in most cases.
+ const fillReuse = false
+
+ // Check if we can reuse...
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+ extraBits := 0
+ ssize, storable := w.storedSize(input)
+
+ const usePrefs = true
+ if storable || w.lastHeader > 0 {
+ extraBits = w.extraBitSize()
+ }
+
+ var size int
+
+ // Check if we should reuse.
+ if w.lastHeader > 0 {
+ // Estimate size for using a new table.
+ // Use the previous header size as the best estimate.
+ newSize := w.lastHeader + tokens.EstimatedBits()
+ newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
+
+ // The estimated size is calculated as an optimal table.
+ // We add a penalty to make it more realistic and re-use a bit more.
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
+
+ // Check if a new table is better.
+ if newSize < reuseSize {
+ // Write the EOB we owe.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ size = newSize
+ w.lastHeader = 0
+ } else {
+ size = reuseSize
+ }
+
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+ }
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ }
+
+ // We want a new block/table
+ if w.lastHeader == 0 {
+ if fillReuse && !sync {
+ w.fillTokens()
+ numLiterals, numOffsets = maxNumLit, maxNumDist
+ } else {
+ w.literalFreq[endBlockMarker] = 1
+ }
+
+ w.generate()
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+
+ var numCodegens int
+ if fillReuse && !sync {
+ // Reindex for accurate size...
+ w.indexTokens(tokens, true)
+ }
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ // Store predefined, if we don't get a reasonable improvement.
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+ // Store bytes, if we don't get an improvement.
+ if storable && ssize <= preSize {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+ }
+
+ if storable && ssize <= size {
+ // Store bytes, if we don't get an improvement.
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Write Huffman table.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ if !sync {
+ w.lastHeader, _ = w.headerSize()
+ }
+ w.lastHuffMan = false
+ }
+
+ if sync {
+ w.lastHeader = 0
+ }
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+func (w *huffmanBitWriter) fillTokens() {
+ for i, v := range w.literalFreq[:literalCount] {
+ if v == 0 {
+ w.literalFreq[i] = 1
+ }
+ }
+ for i, v := range w.offsetFreq[:offsetCodeCount] {
+ if v == 0 {
+ w.offsetFreq[i] = 1
+ }
+ }
+}
+
+// indexTokens indexes a slice of tokens, and updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+ //copy(w.literalFreq[:], t.litHist[:])
+ *(*[256]uint16)(w.literalFreq[:]) = t.litHist
+ //copy(w.literalFreq[256:], t.extraHist[:])
+ *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
+ w.offsetFreq = t.offHist
+
+ if t.n == 0 {
+ return
+ }
+ if filled {
+ return maxNumLit, maxNumDist
+ }
+ // get the number of literals
+ numLiterals = len(w.literalFreq)
+ for w.literalFreq[numLiterals-1] == 0 {
+ numLiterals--
+ }
+ // get the number of offsets
+ numOffsets = len(w.offsetFreq)
+ for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+ numOffsets--
+ }
+ if numOffsets == 0 {
+ // We haven't found a single match. If we want to go with the dynamic encoding,
+ // we should count at least one offset to be sure that the offset huffman tree could be encoded.
+ w.offsetFreq[0] = 1
+ numOffsets = 1
+ }
+ return
+}
+
+func (w *huffmanBitWriter) generate() {
+ w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeTokens writes a slice of tokens to the output.
+// codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+ if w.err != nil {
+ return
+ }
+ if len(tokens) == 0 {
+ return
+ }
+
+ // Only last token should be endBlockMarker.
+ var deferEOB bool
+ if tokens[len(tokens)-1] == endBlockMarker {
+ tokens = tokens[:len(tokens)-1]
+ deferEOB = true
+ }
+
+ // Create slices up to the next power of two to avoid bounds checks.
+ lits := leCodes[:256]
+ offs := oeCodes[:32]
+ lengths := leCodes[lengthCodesStart:]
+ lengths = lengths[:32]
+
+ // Go 1.16 LOVES having these on stack.
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+ for _, t := range tokens {
+ if t < 256 {
+ //w.writeCode(lits[t.literal()])
+ c := lits[t]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ continue
+ }
+
+ // Write the length
+ length := t.length()
+ lengthCode := lengthCode(length) & 31
+ if false {
+ w.writeCode(lengths[lengthCode])
+ } else {
+ // inlined
+ c := lengths[lengthCode]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+
+ if lengthCode >= lengthExtraBitsMinCode {
+ extraLengthBits := lengthExtraBits[lengthCode]
+ //w.writeBits(extraLength, extraLengthBits)
+ extraLength := int32(length - lengthBase[lengthCode])
+ bits |= uint64(extraLength) << (nbits & 63)
+ nbits += extraLengthBits
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+ // Write the offset
+ offset := t.offset()
+ offsetCode := (offset >> 16) & 31
+ if false {
+ w.writeCode(offs[offsetCode])
+ } else {
+ // inlined
+ c := offs[offsetCode]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+
+ if offsetCode >= offsetExtraBitsMinCode {
+ offsetComb := offsetCombined[offsetCode]
+ //w.writeBits(extraOffset, extraOffsetBits)
+ bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+ nbits += uint8(offsetComb)
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+ }
+ // Restore...
+ w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+ if deferEOB {
+ w.writeCode(leCodes[endBlockMarker])
+ }
+}
+
+// huffOffset is a static offset encoder used for huffman only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+ w := newHuffmanBitWriter(nil)
+ w.offsetFreq[0] = 1
+ huffOffset = newHuffmanEncoder(offsetCodeCount)
+ huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// result gains very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ // Clear histogram
+ for i := range w.literalFreq[:] {
+ w.literalFreq[i] = 0
+ }
+ if !w.lastHuffMan {
+ for i := range w.offsetFreq[:] {
+ w.offsetFreq[i] = 0
+ }
+ }
+
+ const numLiterals = endBlockMarker + 1
+ const numOffsets = 1
+
+ // Add everything as literals
+ // We have to estimate the header size.
+ // Assume header is around 70 bytes:
+ // https://stackoverflow.com/a/25454430
+ const guessHeaderSizeBits = 70 * 8
+ histogram(input, w.literalFreq[:numLiterals])
+ ssize, storable := w.storedSize(input)
+ if storable && len(input) > 1024 {
+ // Quick check for incompressible content.
+ abs := float64(0)
+ avg := float64(len(input)) / 256
+ max := float64(len(input) * 2)
+ for _, v := range w.literalFreq[:256] {
+ diff := float64(v) - avg
+ abs += diff * diff
+ if abs > max {
+ break
+ }
+ }
+ if abs < max {
+ if debugDeflate {
+ fmt.Println("stored", abs, "<", max)
+ }
+ // No chance we can compress this...
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ }
+ w.literalFreq[endBlockMarker] = 1
+ w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
+ estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
+ if estBits < math.MaxInt32 {
+ estBits += w.lastHeader
+ if w.lastHeader == 0 {
+ estBits += guessHeaderSizeBits
+ }
+ estBits += estBits >> w.logNewTablePenalty
+ }
+
+ // Store bytes, if we don't get a reasonable improvement.
+ if storable && ssize <= estBits {
+ if debugDeflate {
+ fmt.Println("stored,", ssize, "<=", estBits)
+ }
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ if w.lastHeader > 0 {
+ reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])
+
+ if estBits < reuseSize {
+ if debugDeflate {
+ fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
+ }
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ } else if debugDeflate {
+ fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+ }
+ }
+
+ count := 0
+ if w.lastHeader == 0 {
+ // Use the temp encoding, so swap.
+ w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ numCodegens := w.codegens()
+
+ // Huffman.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHuffMan = true
+ w.lastHeader, _ = w.headerSize()
+ if debugDeflate {
+ count += w.lastHeader
+ fmt.Println("header:", count/8)
+ }
+ }
+
+ encoding := w.literalEncoding.codes[:256]
+ // Go 1.16 LOVES having these on stack. At least 1.5x the speed.
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+ if debugDeflate {
+ count -= int(nbytes)*8 + int(nbits)
+ }
+ // Unroll, write 3 codes/loop.
+ // Fastest number of unrolls.
+ for len(input) > 3 {
+ // We must have at least 48 bits free.
+ if nbits >= 8 {
+ n := nbits >> 3
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ bits >>= (n * 8) & 63
+ nbits -= n * 8
+ nbytes += n
+ }
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ a, b := encoding[input[0]], encoding[input[1]]
+ bits |= a.code64() << (nbits & 63)
+ bits |= b.code64() << ((nbits + a.len()) & 63)
+ c := encoding[input[2]]
+ nbits += b.len() + a.len()
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ input = input[3:]
+ }
+
+ // Remaining...
+ for _, t := range input {
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ bits |= c.code64() << (nbits & 63)
+
+ nbits += c.len()
+ if debugDeflate {
+ count += int(c.len())
+ }
+ }
+ // Restore...
+ w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+ if debugDeflate {
+ nb := count + int(nbytes)*8 + int(nbits)
+ fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
+ }
+ // Flush if needed to have space.
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+
+ if eof || sync {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 000000000..be7b58b47
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "math"
+ "math/bits"
+)
+
+const (
+ maxBitsLimit = 16
+ // number of valid literals
+ literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode uint32
+
+func (h hcode) len() uint8 {
+ return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+ return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+ return h == 0
+}
+
+type huffmanEncoder struct {
+ codes []hcode
+ bitCount [17]int32
+
+ // Allocate a reusable buffer with the longest possible frequency table.
+ // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+ // The largest of these is literalCount, so we allocate for that case.
+ freqcache [literalCount + 1]literalNode
+}
+
+type literalNode struct {
+ literal uint16
+ freq uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+ // Our level. for better printing
+ level int32
+
+ // The frequency of the last node at this level
+ lastFreq int32
+
+ // The frequency of the next character to add to this level
+ nextCharFreq int32
+
+ // The frequency of the next pair (from level below) to add to this level.
+ // Only valid if the "needed" value of the next lower level is 0.
+ nextPairFreq int32
+
+ // The number of chains remaining to generate for this level before moving
+ // up to the next level
+ needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+ *h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+ return hcode(length) | (hcode(code) << 8)
+}
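+
+// As an illustration of the packing used by set and newhcode above (bit
+// length in the low 8 bits, the code itself in the upper bits):
+//
+//	h := newhcode(0b101, 3)
+//	_ = h.len()    // 3, the bit length
+//	_ = h.code64() // 5, the code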
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+ return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+ // Make capacity to next power of two.
+ c := uint(bits.Len32(uint32(size - 1)))
+ return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+//
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+//
+// maxBits The maximum number of bits that should be used to encode any literal.
+//
+// Must be less than 16.
+//
+// return An integer array in which array[i] indicates the number of literals
+//
+// that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+ if maxBits >= maxBitsLimit {
+ panic("flate: maxBits too large")
+ }
+ n := int32(len(list))
+ list = list[0 : n+1]
+ list[n] = maxNode()
+
+ // The tree can't have greater depth than n - 1, no matter what. This
+ // saves a little bit of work in some small cases
+ if maxBits > n-1 {
+ maxBits = n - 1
+ }
+
+ // Create information about each of the levels.
+ // A bogus "Level 0" whose sole purpose is so that
+ // level1.prev.needed==0. This makes level1.nextPairFreq
+ // be a legitimate value that never gets chosen.
+ var levels [maxBitsLimit]levelInfo
+ // leafCounts[i] counts the number of literals at the left
+ // of ancestors of the rightmost node at level i.
+ // leafCounts[i][j] is the number of literals at the left
+ // of the level j ancestor.
+ var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+ // Descending to only have 1 bounds check.
+ l2f := int32(list[2].freq)
+ l1f := int32(list[1].freq)
+ l0f := int32(list[0].freq) + int32(list[1].freq)
+
+ for level := int32(1); level <= maxBits; level++ {
+ // For every level, the first two items are the first two characters.
+ // We initialize the levels as if we had already figured this out.
+ levels[level] = levelInfo{
+ level: level,
+ lastFreq: l1f,
+ nextCharFreq: l2f,
+ nextPairFreq: l0f,
+ }
+ leafCounts[level][level] = 2
+ if level == 1 {
+ levels[level].nextPairFreq = math.MaxInt32
+ }
+ }
+
+ // We need a total of 2*n - 2 items at top level and have already generated 2.
+ levels[maxBits].needed = 2*n - 4
+
+ level := uint32(maxBits)
+ for level < 16 {
+ l := &levels[level]
+ if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+ // We've run out of both leafs and pairs.
+ // End all calculations for this level.
+ // To make sure we never come back to this level or any lower level,
+ // set nextPairFreq impossibly large.
+ l.needed = 0
+ levels[level+1].nextPairFreq = math.MaxInt32
+ level++
+ continue
+ }
+
+ prevFreq := l.lastFreq
+ if l.nextCharFreq < l.nextPairFreq {
+ // The next item on this row is a leaf node.
+ n := leafCounts[level][level] + 1
+ l.lastFreq = l.nextCharFreq
+ // Lower leafCounts are the same as the previous node.
+ leafCounts[level][level] = n
+ e := list[n]
+ if e.literal < math.MaxUint16 {
+ l.nextCharFreq = int32(e.freq)
+ } else {
+ l.nextCharFreq = math.MaxInt32
+ }
+ } else {
+ // The next item on this row is a pair from the previous row.
+ // nextPairFreq isn't valid until we generate two
+ // more values in the level below
+ l.lastFreq = l.nextPairFreq
+ // Take leaf counts from the lower level, except counts[level] remains the same.
+ if true {
+ save := leafCounts[level][level]
+ leafCounts[level] = leafCounts[level-1]
+ leafCounts[level][level] = save
+ } else {
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ }
+ levels[l.level-1].needed = 2
+ }
+
+ if l.needed--; l.needed == 0 {
+ // We've done everything we need to do for this level.
+ // Continue calculating one level up. Fill in nextPairFreq
+ // of that level with the sum of the two nodes we've just calculated on
+ // this level.
+ if l.level == maxBits {
+ // All done!
+ break
+ }
+ levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+ level++
+ } else {
+ // If we stole from below, move down temporarily to replenish it.
+ for levels[level-1].needed > 0 {
+ level--
+ }
+ }
+ }
+
+ // Something is wrong if, at the end, the top level is null or hasn't used
+ // all of the leaves.
+ if leafCounts[maxBits][maxBits] != n {
+ panic("leafCounts[maxBits][maxBits] != n")
+ }
+
+ bitCount := h.bitCount[:maxBits+1]
+ bits := 1
+ counts := &leafCounts[maxBits]
+ for level := maxBits; level > 0; level-- {
+ // chain.leafCount gives the number of literals requiring at least "bits"
+ // bits to encode.
+ bitCount[bits] = counts[level] - counts[level-1]
+ bits++
+ }
+ return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+ code := uint16(0)
+ for n, bits := range bitCount {
+ code <<= 1
+ if n == 0 || bits == 0 {
+ continue
+ }
+ // The literals list[len(list)-bits] .. list[len(list)-1]
+ // are encoded using "bits" bits, and get the values
+ // code, code + 1, .... The code values are
+ // assigned in literal order (not frequency order).
+ chunk := list[len(list)-int(bits):]
+
+ sortByLiteral(chunk)
+ for _, node := range chunk {
+ h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
+ code++
+ }
+ list = list[0 : len(list)-int(bits)]
+ }
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
+ list := h.freqcache[:len(freq)+1]
+ codes := h.codes[:len(freq)]
+ // Number of non-zero literals
+ count := 0
+ // Set list to be the set of all non-zero literals and their frequencies
+ for i, f := range freq {
+ if f != 0 {
+ list[count] = literalNode{uint16(i), f}
+ count++
+ } else {
+ codes[i] = 0
+ }
+ }
+ list[count] = literalNode{}
+
+ list = list[:count]
+ if count <= 2 {
+ // Handle the small cases here, because they are awkward for the general case code. With
+ // two or fewer literals, everything has bit length 1.
+ for i, node := range list {
+ // "list" is in order of increasing literal value.
+ h.codes[node.literal].set(uint16(i), 1)
+ }
+ return
+ }
+ sortByFreq(list)
+
+ // Get the number of literals for each bit count
+ bitCount := h.bitCounts(list, maxBits)
+ // And do the assignment
+ h.assignEncodingAndSize(bitCount, list)
+}
+
+// atLeastOne clamps the result between 1 and 15.
+func atLeastOne(v float32) float32 {
+ if v < 1 {
+ return 1
+ }
+ if v > 15 {
+ return 15
+ }
+ return v
+}
+
+func histogram(b []byte, h []uint16) {
+ if true && len(b) >= 8<<10 {
+ // Split for bigger inputs
+ histogramSplit(b, h)
+ } else {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
+ }
+ }
+}
+
+func histogramSplit(b []byte, h []uint16) {
+ // Tested, and slightly faster than 2-way.
+ // Writing to separate arrays and combining is also slightly slower.
+ h = h[:256]
+ for len(b)&3 != 0 {
+ h[b[0]]++
+ b = b[1:]
+ }
+ n := len(b) / 4
+ x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
+ y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
+ for i, t := range x {
+ v0 := &h[t]
+ v1 := &h[y[i]]
+ v3 := &h[w[i]]
+ v2 := &h[z[i]]
+ *v0++
+ *v1++
+ *v2++
+ *v3++
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
new file mode 100644
index 000000000..6c05ba8c1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// sortByFreq sorts data in increasing order of frequency, breaking ties by
+// literal value. The sort is not guaranteed to be stable.
+func sortByFreq(data []literalNode) {
+ n := len(data)
+ quickSortByFreq(data, 0, n, maxDepth(n))
+}
+
+func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort(data, a, b)
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivotByFreq(data, a, b)
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSortByFreq(data, a, mlo, maxDepth)
+ a = mhi // i.e., quickSortByFreq(data, mhi, b)
+ } else {
+ quickSortByFreq(data, mhi, b, maxDepth)
+ b = mlo // i.e., quickSortByFreq(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form because b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
+ data[i], data[i-6] = data[i-6], data[i]
+ }
+ }
+ insertionSortByFreq(data, a, b)
+ }
+}
+
+func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's ``Ninther,'' median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
+ medianOfThreeSortByFreq(data, m, m-s, m+s)
+ medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
+ }
+ medianOfThreeSortByFreq(data, lo, m, hi-1)
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
+ }
+ b := a
+ for {
+ for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
+ }
+ for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ data[b], data[c-1] = data[c-1], data[b]
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Let's test some points for equality to pivot
+ dups := 0
+ if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
+ data[c], data[hi-1] = data[hi-1], data[c]
+ c++
+ dups++
+ }
+ if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
+ data[m], data[b-1] = data[b-1], data[m]
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
+ }
+ for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ data[a], data[b-1] = data[b-1], data[a]
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ data[pivot], data[b-1] = data[b-1], data[pivot]
+ return b - 1, c
+}
+
+// Insertion sort
+func insertionSortByFreq(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// quickSortByFreq, loosely following Bentley and McIlroy,
+// ``Engineering a Sort Function,'' SP&E November 1993.
+
+// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
new file mode 100644
index 000000000..93f1aea10
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
@@ -0,0 +1,201 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// sortByLiteral sorts data in increasing order of literal value.
+// The sort is not guaranteed to be stable.
+func sortByLiteral(data []literalNode) {
+ n := len(data)
+ quickSort(data, 0, n, maxDepth(n))
+}
+
+func quickSort(data []literalNode, a, b, maxDepth int) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort(data, a, b)
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivot(data, a, b)
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSort(data, a, mlo, maxDepth)
+ a = mhi // i.e., quickSort(data, mhi, b)
+ } else {
+ quickSort(data, mhi, b, maxDepth)
+ b = mlo // i.e., quickSort(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form because b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if data[i].literal < data[i-6].literal {
+ data[i], data[i-6] = data[i-6], data[i]
+ }
+ }
+ insertionSort(data, a, b)
+ }
+}
+func heapSort(data []literalNode, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDown(data, lo, i, first)
+ }
+}
+
+// siftDown implements the heap property on data[lo, hi).
+// first is an offset into the array where the root of the heap lies.
+func siftDown(data []literalNode, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
+ child++
+ }
+ if data[first+root].literal > data[first+child].literal {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's ``Ninther,'' median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThree(data, lo, lo+s, lo+2*s)
+ medianOfThree(data, m, m-s, m+s)
+ medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
+ }
+ medianOfThree(data, lo, m, hi-1)
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && data[a].literal < data[pivot].literal; a++ {
+ }
+ b := a
+ for {
+ for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
+ }
+ for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ data[b], data[c-1] = data[c-1], data[b]
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Let's test some points for equality to pivot
+ dups := 0
+ if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
+ data[c], data[hi-1] = data[hi-1], data[c]
+ c++
+ dups++
+ }
+ if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if data[m].literal > data[pivot].literal { // data[m] = pivot
+ data[m], data[b-1] = data[b-1], data[m]
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
+ }
+ for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ data[a], data[b-1] = data[b-1], data[a]
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ data[pivot], data[b-1] = data[b-1], data[pivot]
+ return b - 1, c
+}
+
+// Insertion sort
+func insertionSort(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// maxDepth returns a threshold at which quicksort should switch
+// to heapsort. It returns 2*ceil(lg(n+1)).
+func maxDepth(n int) int {
+ var depth int
+ for i := n; i > 0; i >>= 1 {
+ depth++
+ }
+ return depth * 2
+}
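+
+// For example, under the definition above maxDepth(12) is 8: halving 12 takes
+// four steps to reach zero, and 2*ceil(lg(13)) is likewise 8.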
+
+// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].literal < data[m1].literal {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 000000000..2f410d64f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,829 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+ "bufio"
+ "compress/flate"
+ "fmt"
+ "io"
+ "math/bits"
+ "sync"
+)
+
+const (
+ maxCodeLen = 16 // max length of Huffman code
+ maxCodeLenMask = 15 // mask for max length of Huffman code
+ // The next three numbers come from the RFC section 3.2.7, with the
+ // additional proviso in section 3.2.5 which implies that distance codes
+ // 30 and 31 should never occur in compressed data.
+ maxNumLit = 286
+ maxNumDist = 30
+ numCodes = 19 // number of codes in Huffman meta-code
+
+ debugDecode = false
+)
+
+// Value of length - 3 and extra bits.
+type lengthExtra struct {
+ length, extra uint8
+}
+
+var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
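+
+// For example, length code 265 maps to decCodeToLen[265-257] = {length: 0x8,
+// extra: 0x1}: a base match length of 8+3 = 11 with one extra bit, covering
+// lengths 11-12 as specified in RFC 1951 section 3.2.5.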
+
+var bitMask32 = [32]uint32{
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+ 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+ 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
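+
+// bitMask32[n] masks off the low n bits; for example bitMask32[5] == 0x1F.
+// The decoders below use it to extract n extra bits from the bit buffer.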
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError = flate.CorruptInputError
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError = flate.ReadError
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError = flate.WriteError
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+ // Reset discards any buffered data and resets the Resetter as if it was
+ // newly initialized with the given reader.
+ Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+//
+// See the following:
+// http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
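+//
+// Sketching the lookup that huffSym performs below, with b holding the next
+// input bits (least-significant bit first), a single symbol decode is roughly:
+//
+//	chunk := h.chunks[b&(huffmanNumChunks-1)]
+//	n := uint(chunk & huffmanCountMask) // bits consumed by this code
+//	if n > huffmanChunkBits {
+//		// Long code: follow the overflow link table.
+//		chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+//		n = uint(chunk & huffmanCountMask)
+//	}
+//	symbol := chunk >> huffmanValueShift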
+
+const (
+ huffmanChunkBits = 9
+ huffmanNumChunks = 1 << huffmanChunkBits
+ huffmanCountMask = 15
+ huffmanValueShift = 4
+)
+
+type huffmanDecoder struct {
+ maxRead int // the maximum number of bits we can read and not overread
+ chunks *[huffmanNumChunks]uint16 // chunks as described above
+ links [][]uint16 // overflow links
+ linkMask uint32 // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+ // Sanity enables additional runtime tests during Huffman
+ // table construction. It's intended to be used during
+ // development to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if h.chunks == nil {
+ h.chunks = new([huffmanNumChunks]uint16)
+ }
+
+ if h.maxRead != 0 {
+ *h = huffmanDecoder{chunks: h.chunks, links: h.links}
+ }
+
+ // Count number of codes of each length,
+ // compute maxRead and max length.
+ var count [maxCodeLen]int
+ var min, max int
+ for _, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ if min == 0 || n < min {
+ min = n
+ }
+ if n > max {
+ max = n
+ }
+ count[n&maxCodeLenMask]++
+ }
+
+ // Empty tree. The decompressor.huffSym function will fail later if the tree
+ // is used. Technically, an empty tree is only valid for the HDIST tree and
+ // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+ // is guaranteed to fail since it will attempt to use the tree to decode the
+ // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+ // guaranteed to fail later since the compressed data section must be
+ // composed of at least one symbol (the end-of-block marker).
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i&maxCodeLenMask] = code
+ code += count[i&maxCodeLenMask]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ return false
+ }
+
+ h.maxRead = min
+
+ if max > huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ if cap(h.links) < huffmanNumChunks-link {
+ h.links = make([][]uint16, huffmanNumChunks-link)
+ } else {
+ h.links = h.links[:huffmanNumChunks-link]
+ }
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(bits.Reverse16(uint16(j)))
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ h.links[off] = make([]uint16, numLinks)
+ }
+ } else {
+ h.links = h.links[:0]
+ }
+
+ for i, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ code := nextcode[n]
+ nextcode[n]++
+ chunk := uint16(i<<huffmanValueShift | n)
+ reverse := int(bits.Reverse16(uint16(code)))
+ reverse >>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// Reader is the actual read interface needed by NewReader.
+// If the passed-in io.Reader does not also have ReadByte,
+// NewReader will introduce its own buffering.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+type step uint8
+
+const (
+ copyData step = iota + 1
+ nextBlock
+ huffmanBytesBuffer
+ huffmanBytesReader
+ huffmanBufioReader
+ huffmanStringsReader
+ huffmanGenericReader
+)
+
+// Decompress state.
+type decompressor struct {
+ // Input source.
+ r Reader
+ roffset int64
+
+ // Huffman decoders for literal/length, distance.
+ h1, h2 huffmanDecoder
+
+ // Length arrays used to define Huffman codes.
+ bits *[maxNumLit + maxNumDist]int
+ codebits *[numCodes]int
+
+ // Output history, buffer.
+ dict dictDecoder
+
+ // Next step in the decompression,
+ // and decompression state.
+ step step
+ stepState int
+ err error
+ toRead []byte
+ hl, hd *huffmanDecoder
+ copyLen int
+ copyDist int
+
+ // Temporary buffer (avoids repeated allocation).
+ buf [4]byte
+
+ // Input bits, in top of b.
+ b uint32
+
+ nb uint
+ final bool
+}
+
+func (f *decompressor) nextBlock() {
+ for f.nb < 1+2 {
+ if f.err = f.moreBits(); f.err != nil {
+ return
+ }
+ }
+ f.final = f.b&1 == 1
+ f.b >>= 1
+ typ := f.b & 3
+ f.b >>= 2
+ f.nb -= 1 + 2
+ switch typ {
+ case 0:
+ f.dataBlock()
+ if debugDecode {
+ fmt.Println("stored block")
+ }
+ case 1:
+ // compressed, fixed Huffman tables
+ f.hl = &fixedHuffmanDecoder
+ f.hd = nil
+ f.huffmanBlockDecoder()
+ if debugDecode {
+ fmt.Println("predefined huffman block")
+ }
+ case 2:
+ // compressed, dynamic Huffman tables
+ if f.err = f.readHuffman(); f.err != nil {
+ break
+ }
+ f.hl = &f.h1
+ f.hd = &f.h2
+ f.huffmanBlockDecoder()
+ if debugDecode {
+ fmt.Println("dynamic huffman block")
+ }
+ default:
+ // 3 is reserved.
+ if debugDecode {
+ fmt.Println("reserved data block encountered")
+ }
+ f.err = CorruptInputError(f.roffset)
+ }
+}
+
+func (f *decompressor) Read(b []byte) (int, error) {
+ for {
+ if len(f.toRead) > 0 {
+ n := copy(b, f.toRead)
+ f.toRead = f.toRead[n:]
+ if len(f.toRead) == 0 {
+ return n, f.err
+ }
+ return n, nil
+ }
+ if f.err != nil {
+ return 0, f.err
+ }
+
+ f.doStep()
+
+ if f.err != nil && len(f.toRead) == 0 {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ }
+ }
+}
+
+// WriteTo implements the io.WriterTo interface for io.Copy and friends.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+ total := int64(0)
+ flushed := false
+ for {
+ if len(f.toRead) > 0 {
+ n, err := w.Write(f.toRead)
+ total += int64(n)
+ if err != nil {
+ f.err = err
+ return total, err
+ }
+ if n != len(f.toRead) {
+ return total, io.ErrShortWrite
+ }
+ f.toRead = f.toRead[:0]
+ }
+ if f.err != nil && flushed {
+ if f.err == io.EOF {
+ return total, nil
+ }
+ return total, f.err
+ }
+ if f.err == nil {
+ f.doStep()
+ }
+ if len(f.toRead) == 0 && f.err != nil && !flushed {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ flushed = true
+ }
+ }
+}
+
+func (f *decompressor) Close() error {
+ if f.err == io.EOF {
+ return nil
+ }
+ return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+ // HLIT[5], HDIST[5], HCLEN[4].
+ for f.nb < 5+5+4 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ nlit := int(f.b&0x1F) + 257
+ if nlit > maxNumLit {
+ if debugDecode {
+ fmt.Println("nlit > maxNumLit", nlit)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ ndist := int(f.b&0x1F) + 1
+ if ndist > maxNumDist {
+ if debugDecode {
+ fmt.Println("ndist > maxNumDist", ndist)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ nclen := int(f.b&0xF) + 4
+ // numCodes is 19, so nclen is always valid.
+ f.b >>= 4
+ f.nb -= 5 + 5 + 4
+
+ // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+ for i := 0; i < nclen; i++ {
+ for f.nb < 3 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ f.codebits[codeOrder[i]] = int(f.b & 0x7)
+ f.b >>= 3
+ f.nb -= 3
+ }
+ for i := nclen; i < len(codeOrder); i++ {
+ f.codebits[codeOrder[i]] = 0
+ }
+ if !f.h1.init(f.codebits[0:]) {
+ if debugDecode {
+ fmt.Println("init codebits failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // HLIT + 257 code lengths, HDIST + 1 code lengths,
+ // using the code length Huffman code.
+ for i, n := 0, nlit+ndist; i < n; {
+ x, err := f.huffSym(&f.h1)
+ if err != nil {
+ return err
+ }
+ if x < 16 {
+ // Actual length.
+ f.bits[i] = x
+ i++
+ continue
+ }
+ // Repeat previous length or zero.
+ var rep int
+ var nb uint
+ var b int
+ switch x {
+ default:
+ return InternalError("unexpected length code")
+ case 16:
+ rep = 3
+ nb = 2
+ if i == 0 {
+ if debugDecode {
+ fmt.Println("i==0")
+ }
+ return CorruptInputError(f.roffset)
+ }
+ b = f.bits[i-1]
+ case 17:
+ rep = 3
+ nb = 3
+ b = 0
+ case 18:
+ rep = 11
+ nb = 7
+ b = 0
+ }
+ for f.nb < nb {
+ if err := f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits:", err)
+ }
+ return err
+ }
+ }
+ rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+ f.b >>= nb & regSizeMaskUint32
+ f.nb -= nb
+ if i+rep > n {
+ if debugDecode {
+ fmt.Println("i+rep > n", i, rep, n)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ for j := 0; j < rep; j++ {
+ f.bits[i] = b
+ i++
+ }
+ }
+
+ if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+ if debugDecode {
+ fmt.Println("init2 failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // As an optimization, we can initialize the maxRead bits to read at a time
+ // for the HLIT tree to the length of the EOB marker since we know that
+ // every block must terminate with one. This preserves the property that
+ // we never read any extra bytes after the end of the DEFLATE stream.
+ if f.h1.maxRead < f.bits[endBlockMarker] {
+ f.h1.maxRead = f.bits[endBlockMarker]
+ }
+ if !f.final {
+ // If not the final block, the smallest block possible is
+ // a predefined table, BTYPE=01, with a single EOB marker.
+ // This will take up 3 + 7 bits.
+ f.h1.maxRead += 10
+ }
+
+ return nil
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+ // Uncompressed.
+ // Discard current half-byte.
+ left := (f.nb) & 7
+ f.nb -= left
+ f.b >>= left
+
+ offBytes := f.nb >> 3
+ // Unfilled values will be overwritten.
+ f.buf[0] = uint8(f.b)
+ f.buf[1] = uint8(f.b >> 8)
+ f.buf[2] = uint8(f.b >> 16)
+ f.buf[3] = uint8(f.b >> 24)
+
+ f.roffset += int64(offBytes)
+ f.nb, f.b = 0, 0
+
+ // Length then ones-complement of length.
+ nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+ f.roffset += int64(nr)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+ n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+ nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+ if nn != ^n {
+ if debugDecode {
+ ncomp := ^n
+ fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ if n == 0 {
+ f.toRead = f.dict.readFlush()
+ f.finishBlock()
+ return
+ }
+
+ f.copyLen = int(n)
+ f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
+func (f *decompressor) copyData() {
+ buf := f.dict.writeSlice()
+ if len(buf) > f.copyLen {
+ buf = buf[:f.copyLen]
+ }
+
+ cnt, err := io.ReadFull(f.r, buf)
+ f.roffset += int64(cnt)
+ f.copyLen -= cnt
+ f.dict.writeMark(cnt)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = copyData
+ return
+ }
+ f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+ if f.final {
+ if f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
+ }
+ f.err = io.EOF
+ }
+ f.step = nextBlock
+}
+
+func (f *decompressor) doStep() {
+ switch f.step {
+ case copyData:
+ f.copyData()
+ case nextBlock:
+ f.nextBlock()
+ case huffmanBytesBuffer:
+ f.huffmanBytesBuffer()
+ case huffmanBytesReader:
+ f.huffmanBytesReader()
+ case huffmanBufioReader:
+ f.huffmanBufioReader()
+ case huffmanStringsReader:
+ f.huffmanStringsReader()
+ case huffmanGenericReader:
+ f.huffmanGenericReader()
+ default:
+ panic("BUG: unexpected step state")
+ }
+}
+
+// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
+func noEOF(e error) error {
+ if e == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return e
+}
+
+func (f *decompressor) moreBits() error {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
+ f.nb += 8
+ return nil
+}
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it to
+ // satisfy the n == 0 check below.
+ n := uint(h.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ return 0, noEOF(err)
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & regSizeMaskUint32)
+ nb += 8
+ }
+ chunk := h.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return 0, f.err
+ }
+ f.b = b >> (n & regSizeMaskUint32)
+ f.nb = nb - n
+ return int(chunk >> huffmanValueShift), nil
+ }
+ }
+}
+
+func makeReader(r io.Reader) Reader {
+ if rr, ok := r.(Reader); ok {
+ return rr
+ }
+ return bufio.NewReader(r)
+}
+
+func fixedHuffmanDecoderInit() {
+ fixedOnce.Do(func() {
+ // These come from the RFC section 3.2.6.
+ var bits [288]int
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ fixedHuffmanDecoder.init(bits[:])
+ })
+}
+
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+ *f = decompressor{
+ r: makeReader(r),
+ bits: f.bits,
+ codebits: f.codebits,
+ h1: f.h1,
+ h2: f.h2,
+ dict: f.dict,
+ step: nextBlock,
+ }
+ f.dict.init(maxMatchOffset, dict)
+ return nil
+}
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
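+//
+// A minimal usage sketch (r is any compressed input and dst any io.Writer;
+// both are placeholders):
+//
+//	zr := NewReader(r)
+//	defer zr.Close()
+//	if _, err := io.Copy(dst, zr); err != nil {
+//		// handle err
+//	}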
+func NewReader(r io.Reader) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = nextBlock
+ f.dict.init(maxMatchOffset, nil)
+ return &f
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary. The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read. NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = nextBlock
+ f.dict.init(maxMatchOffset, dict)
+ return &f
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
new file mode 100644
index 000000000..2b2f993f7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -0,0 +1,1283 @@
+// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "math/bits"
+ "strings"
+)
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed distance
+// encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBytesBuffer() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Buffer)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBytesBuffer
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBytesBuffer // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBytesReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBytesReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBufioReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bufio.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBufioReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBufioReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanStringsReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*strings.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanStringsReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanGenericReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanGenericReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() {
+ switch f.r.(type) {
+ case *bytes.Buffer:
+ f.huffmanBytesBuffer()
+ case *bytes.Reader:
+ f.huffmanBytesReader()
+ case *bufio.Reader:
+ f.huffmanBufioReader()
+ case *strings.Reader:
+ f.huffmanStringsReader()
+ case Reader:
+ f.huffmanGenericReader()
+ default:
+ f.huffmanGenericReader()
+ }
+}
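Aside (not part of the vendored diff): the generated variants above differ only in the concrete type asserted for f.r, so the ReadByte calls in the hot loop avoid an interface dispatch; huffmanBlockDecoder simply selects the variant matching the reader's type. The sketch below mirrors that pattern on a hypothetical countBytes helper.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// countBytes uses a concrete-typed loop when it recognizes the reader,
// echoing how the decompressor picks huffmanBytesReader, huffmanStringsReader,
// and so on, and falls back to a generic buffered loop otherwise.
func countBytes(r io.Reader) (n int, err error) {
	switch rr := r.(type) {
	case *bytes.Reader:
		for {
			if _, err = rr.ReadByte(); err != nil {
				break
			}
			n++
		}
	case *strings.Reader:
		for {
			if _, err = rr.ReadByte(); err != nil {
				break
			}
			n++
		}
	default:
		br := bufio.NewReader(r)
		for {
			if _, err = br.ReadByte(); err != nil {
				break
			}
			n++
		}
	}
	if err == io.EOF {
		err = nil
	}
	return n, err
}

func main() {
	n, _ := countBytes(strings.NewReader("abc"))
	fmt.Println(n) // 3
}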
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 000000000..703b9a89a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,241 @@
+package flate
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL1 struct {
+ fastGen
+ table [tableSize]tableEntry
+}
+
+// EncodeL1 uses a similar algorithm to level 1
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashLen(cv, tableBits, hashBytes)
+ candidate = e.table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hashLen(now, tableBits, hashBytes)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = now
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+ cv = now
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ var l = int32(4)
+ if false {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else {
+ // inlined:
+ a := src[s+4:]
+ b := src[t+4:]
+ for len(a) >= 8 {
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ l += int32(bits.TrailingZeros64(diff) >> 3)
+ break
+ }
+ l += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ if len(a) < 8 {
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ l++
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ // Save the match found
+ if false {
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ } else {
+ // Inlined...
+ xoffset := uint32(s - t - baseMatchOffset)
+ xlength := l
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ dst.extraHist[lengthCodes1[uint8(xl)]]++
+ dst.offHist[oc]++
+ dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ dst.n++
+ }
+ }
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hashLen(x, tableBits, hashBytes)
+ e.table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashLen(x, tableBits, hashBytes)
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
+ cv = x >> 8
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
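Aside (not part of the vendored diff): a simplified sketch of the match-finding idea shared by the levelN encoders — hash a few bytes at each position, remember the position in a table, and when a table hit still matches the current bytes, extend it into an LZ77 (distance, length) pair. findMatches and match are hypothetical names; the real encoders add skip heuristics, a bounded history window, table resets around bufferReset, and token emission.

package main

import (
	"encoding/binary"
	"fmt"
)

type match struct {
	pos, dist, length int
}

func findMatches(src []byte) []match {
	const tableBits = 14
	var table [1 << tableBits]int32 // stores position+1 per hash bucket; 0 means empty
	var out []match

	hash := func(u uint32) uint32 {
		return (u * 2654435761) >> (32 - tableBits) // multiplicative hash of 4 bytes
	}

	for s := 0; s+4 <= len(src); {
		cv := binary.LittleEndian.Uint32(src[s:])
		h := hash(cv)
		cand := int(table[h]) - 1
		table[h] = int32(s + 1)

		if cand >= 0 && binary.LittleEndian.Uint32(src[cand:]) == cv {
			// Verified a 4-byte match; extend it as far as the input allows.
			l := 4
			for s+l < len(src) && src[cand+l] == src[s+l] {
				l++
			}
			out = append(out, match{pos: s, dist: s - cand, length: l})
			s += l
			continue
		}
		s++
	}
	return out
}

func main() {
	fmt.Println(findMatches([]byte("abcdefgh-abcdefgh-abcdefgh")))
}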
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
new file mode 100644
index 000000000..876dfbe30
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -0,0 +1,214 @@
+package flate
+
+import "fmt"
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL2 struct {
+ fastGen
+ table [bTableSize]tableEntry
+}
+
+// EncodeL2 uses a similar algorithm to level 1, but is capable
+// of matching across blocks giving better compression at a small slowdown.
+func (e *fastEncL2) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
+ )
+
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ // When should we start skipping if we haven't found matches in a long while.
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashLen(cv, bTableBits, hashBytes)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = e.table[nextHash]
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hashLen(now, bTableBits, hashBytes)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = now
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ break
+ }
+ cv = now
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // Store every second hash in-between, but offset by 1.
+ for i := s - l + 2; i < s-5; i += 7 {
+ x := load6432(src, i)
+ nextHash := hashLen(x, bTableBits, hashBytes)
+ e.table[nextHash] = tableEntry{offset: e.cur + i}
+ // Skip one
+ x >>= 16
+ nextHash = hashLen(x, bTableBits, hashBytes)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
+ // Skip one
+ x >>= 16
+ nextHash = hashLen(x, bTableBits, hashBytes)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hashLen(x, bTableBits, hashBytes)
+ prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
+ e.table[prevHash] = tableEntry{offset: o}
+ e.table[prevHash2] = tableEntry{offset: o + 1}
+ currHash := hashLen(x>>16, bTableBits, hashBytes)
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
+ cv = x >> 24
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
new file mode 100644
index 000000000..7aa2b72a1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -0,0 +1,241 @@
+package flate
+
+import "fmt"
+
+// fastEncL3
+type fastEncL3 struct {
+ fastGen
+ table [1 << 16]tableEntryPrev
+}
+
+// Encode uses a similar algorithm to level 2, will check up to two candidates.
+func (e *fastEncL3) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ tableBits = 16
+ tableSize = 1 << tableBits
+ hashBytes = 5
+ )
+
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ }
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ e.table[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // Skip if too small.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 7
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashLen(cv, tableBits, hashBytes)
+ s = nextS
+ nextS = s + 1 + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidates := e.table[nextHash]
+ now := load6432(src, nextS)
+
+ // Safe offset distance until s + 4...
+ minOffset := e.cur + s - (maxMatchOffset - 4)
+ e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
+
+ // Check both candidates
+ candidate = candidates.Cur
+ if candidate.offset < minOffset {
+ cv = now
+ // Previous will also be invalid, we have nothing.
+ continue
+ }
+
+ if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
+ break
+ }
+ // Both match and are valid, pick longest.
+ offset := s - (candidate.offset - e.cur)
+ o2 := s - (candidates.Prev.offset - e.cur)
+ l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
+ if l2 > l1 {
+ candidate = candidates.Prev
+ }
+ break
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ break
+ }
+ }
+ cv = now
+ }
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ //
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ t += l
+ // Index first pair after match end.
+ if int(t+8) < len(src) && t > 0 {
+ cv = load6432(src, t)
+ nextHash := hashLen(cv, tableBits, hashBytes)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + t},
+ }
+ }
+ goto emitRemainder
+ }
+
+ // Store every 5th hash in-between.
+ for i := s - l + 2; i < s-5; i += 6 {
+ nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + i}}
+ }
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s.
+ x := load6432(src, s-2)
+ prevHash := hashLen(x, tableBits, hashBytes)
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 2},
+ }
+ x >>= 8
+ prevHash = hashLen(x, tableBits, hashBytes)
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 1},
+ }
+ x >>= 8
+ currHash := hashLen(x, tableBits, hashBytes)
+ candidates := e.table[currHash]
+ cv = x
+ e.table[currHash] = tableEntryPrev{
+ Prev: candidates.Cur,
+ Cur: tableEntry{offset: s + e.cur},
+ }
+
+ // Check both candidates
+ candidate = candidates.Cur
+ minOffset := e.cur + s - (maxMatchOffset - 4)
+
+ if candidate.offset > minOffset {
+ if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ // Found a match...
+ continue
+ }
+ candidate = candidates.Prev
+ if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ // Match at prev...
+ continue
+ }
+ }
+ cv = x >> 8
+ s++
+ break
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
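Aside (not part of the vendored diff): level 3 (and the long-match table of level 5) keeps two candidates per hash bucket — tableEntryPrev's Cur and Prev — so inserting a new entry does not immediately evict a still-useful older offset, and lookups can keep whichever candidate matches longer. The sketch below isolates that bucket behavior; bucket, insert, and pickLonger are hypothetical simplifications.

package main

import "fmt"

type entry struct{ offset int }

// bucket mirrors tableEntryPrev: the most recent entry plus the one before it.
type bucket struct{ Cur, Prev entry }

// insert shifts Cur into Prev, like
// e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: newEntry}.
func (b *bucket) insert(e entry) {
	b.Prev, b.Cur = b.Cur, e
}

// matchLen counts how many bytes starting at src[t:] equal src[s:].
func matchLen(src []byte, t, s int) int {
	n := 0
	for s+n < len(src) && src[t+n] == src[s+n] {
		n++
	}
	return n
}

// pickLonger checks both candidates and keeps whichever matches longer at
// src[s:], the same tie-break the level 3 encoder applies before emitting.
func pickLonger(b bucket, src []byte, s int) (entry, int) {
	best, bestLen := b.Cur, matchLen(src, b.Cur.offset, s)
	if l := matchLen(src, b.Prev.offset, s); l > bestLen {
		best, bestLen = b.Prev, l
	}
	return best, bestLen
}

func main() {
	src := []byte("abcd_abcX_abcd")
	var b bucket
	b.insert(entry{offset: 0}) // "abcd" at 0
	b.insert(entry{offset: 5}) // "abcX" at 5
	best, l := pickLonger(b, src, 10)
	fmt.Println(best.offset, l) // 0 4: the older candidate yields the longer match
}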
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
new file mode 100644
index 000000000..23c08b325
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -0,0 +1,221 @@
+package flate
+
+import "fmt"
+
+type fastEncL4 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntry
+}
+
+func (e *fastEncL4) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.bTable[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ e.bTable[nextHashL] = entry
+
+ t = lCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
+ // We got a long match. Use that.
+ break
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ lCandidate = e.bTable[hash7(next, tableBits)]
+
+ // If the next long is a candidate, check if we should use that instead...
+ lOff := nextS - (lCandidate.offset - e.cur)
+ if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
+ l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
+ if l2 > l1 {
+ s = nextS
+ t = lCandidate.offset - e.cur
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if debugDeflate {
+ if t >= s {
+ panic("s-t")
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
+ e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between
+ if true {
+ i := nextS
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+
+ i += 3
+ for ; i < s-1; i += 3 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ e.bTable[prevHashL] = tableEntry{offset: o}
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
new file mode 100644
index 000000000..1f61ec182
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -0,0 +1,708 @@
+package flate
+
+import "fmt"
+
+type fastEncL5 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ l = e.matchlen(s+4, t+4, src) + 4
+ lCandidate = e.bTable[nextHashL]
+ // Store the next match
+
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // If the next long is a candidate, use that...
+ t2 := lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ if l == 0 {
+ // Extend the 4-byte match as long as possible.
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+
+ // Try to locate a better match by checking the end of best match...
+ if sAt := s + l; l < 30 && sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+			// Allowing 3 to mismatch is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in the "Extend backwards" loop below,
+			// and are still picked up as part of the match if they match.
+ const skipBeginning = 2
+ eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+ t2 := eLong - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
+ if t2 >= 0 && off < maxMatchOffset && off > 0 {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between.
+ if true {
+ const hashEvery = 3
+ i := s - l + 1
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// Do a long at i+1
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ eLong = &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // We only have enough bits for a short entry at i+2
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+
+ // Skip one - otherwise we risk hitting 's'
+ i += 4
+ for ; i < s-1; i += hashEvery {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ eLong := &e.bTable[prevHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
+
+// fastEncL5Window is a level 5 encoder,
+// but with a custom window size.
+type fastEncL5Window struct {
+ hist []byte
+ cur int32
+ maxOffset int32
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ maxMatchOffset := e.maxOffset
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ l = e.matchlen(s+4, t+4, src) + 4
+ lCandidate = e.bTable[nextHashL]
+ // Store the next match
+
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // If the next long is a candidate, use that...
+ t2 := lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ if l == 0 {
+ // Extend the 4-byte match as long as possible.
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+
+ // Try to locate a better match by checking the end of best match...
+ if sAt := s + l; l < 30 && sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+			// Allowing 3 to mismatch is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in the "Extend backwards" loop below,
+			// and are still picked up as part of the match if they match.
+ const skipBeginning = 2
+ eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+ t2 := eLong - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
+ if t2 >= 0 && off < maxMatchOffset && off > 0 {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between.
+ if true {
+ const hashEvery = 3
+ i := s - l + 1
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// Do a long at i+1
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ eLong = &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // We only have enough bits for a short entry at i+2
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+
+ // Skip one - otherwise we risk hitting 's'
+ i += 4
+ for ; i < s-1; i += hashEvery {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ eLong := &e.bTable[prevHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
+
+// Reset the encoding table.
+func (e *fastEncL5Window) Reset() {
+ // We keep the same allocs, since we are compressing the same block sizes.
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+
+ // We offset current position so everything will be out of reach.
+ // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+ if e.cur <= int32(bufferReset) {
+ e.cur += e.maxOffset + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+}
+
+func (e *fastEncL5Window) addBlock(src []byte) int32 {
+ // check if we have space already
+ maxMatchOffset := e.maxOffset
+
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.hist = make([]byte, 0, allocHistory)
+ } else {
+ if cap(e.hist) < int(maxMatchOffset*2) {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - maxMatchOffset
+ copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:maxMatchOffset]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// matchlen returns the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0, and that s < len(src).
+func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > e.maxOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
+ }
+ }
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong returns the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0, and that s < len(src).
+func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > e.maxOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
+ }
+ }
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
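The `fastEncL5Window` variant above exists so a caller can compress against a history window other than the default `maxMatchOffset`. A minimal sketch of requesting a custom window through the vendored gzip wrapper added later in this diff (which internal encoder that selects is not spelled out here); the import path is the vendored one from the file paths above, and the size must lie between `MinCustomWindowSize` and `MaxCustomWindowSize`:

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	// Ask for a 4 KiB history window instead of the default 32 KiB one,
	// trading some compression ratio for a smaller history buffer.
	zw, err := gzip.NewWriterWindow(&buf, 4<<10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write(bytes.Repeat([]byte("netobserv flow record "), 256)); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}
```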
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
new file mode 100644
index 000000000..f1e9d98fa
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -0,0 +1,325 @@
+package flate
+
+import "fmt"
+
+type fastEncL6 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ // Repeat MUST be > 1 and within range
+ repeat := int32(1)
+ for {
+ const skipLog = 7
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ // Calculate hashes of 'next'
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+ // Long candidate matches at least 4 bytes.
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check the previous long candidate as well.
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ // Current value did not match, but check if previous long value does.
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ l = e.matchlen(s+4, t+4, src) + 4
+
+ // Look up next long candidate (at nextS)
+ lCandidate = e.bTable[nextHashL]
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check repeat at s + repOff
+ const repOff = 1
+ t2 := s - repeat + repOff
+ if load3232(src, t2) == uint32(cv>>(8*repOff)) {
+ ml := e.matchlen(s+4+repOff, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ l = ml
+ s += repOff
+ // Not worth checking more.
+ break
+ }
+ }
+
+ // If the next long is a candidate, use that...
+ t2 = lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ // This is ok, but check previous as well.
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+
+ // Try to locate a better match by checking the end-of-match...
+ if sAt := s + l; sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+			// Allowing 3 to mismatch is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in the "Extend backwards" loop below,
+			// and are still picked up as part of the match if they match.
+ const skipBeginning = 2
+ eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
+ // Test current
+ t2 := eLong.Cur.offset - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
+ if off < maxMatchOffset {
+ if off > 0 && t2 >= 0 {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ // Test next:
+ t2 = eLong.Prev.offset - e.cur - l + skipBeginning
+ off := s2 - t2
+ if off > 0 && off < maxMatchOffset && t2 >= 0 {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if false {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ repeat = s - t
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index after match end.
+ for i := nextS + 1; i < int32(len(src))-8; i += 2 {
+ cv := load6432(src, i)
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
+ }
+ goto emitRemainder
+ }
+
+ // Store every long hash in-between and every second short.
+ if true {
+ for i := nextS + 1; i < s-1; i += 2 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ cv = load6432(src, s)
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
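`fastEncL6` is the level 6 encoder. For reference, a minimal sketch of compressing with the vendored flate package at an explicit level; which internal encoder a given level maps to is an implementation detail of this tree:

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	fw, err := flate.NewWriter(&buf, 6) // levels 1-9 are accepted; 6 is a mid/high setting
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fw.Write(bytes.Repeat([]byte("abcdefgh"), 512)); err != nil {
		log.Fatal(err)
	}
	if err := fw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("raw DEFLATE stream: %d bytes", buf.Len())
}
```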
diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go
new file mode 100644
index 000000000..4bd388584
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go
@@ -0,0 +1,16 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package flate
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+// len(a) <= len(b) and len(a) > 0
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
new file mode 100644
index 000000000..0782b86e3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
@@ -0,0 +1,66 @@
+// Copied from S2 implementation.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func matchLen(a []byte, b []byte) int
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+ MOVQ a_base+0(FP), AX
+ MOVQ b_base+24(FP), CX
+ MOVQ a_len+8(FP), DX
+
+ // matchLen
+ XORL SI, SI
+ CMPL DX, $0x08
+ JB matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+ TZCNTQ BX, BX
+#else
+ BSFQ BX, BX
+#endif
+ SHRL $0x03, BX
+ LEAL (SI)(BX*1), SI
+ JMP gen_match_len_end
+
+matchlen_loop_standalone:
+ LEAL -8(DX), DX
+ LEAL 8(SI), SI
+ CMPL DX, $0x08
+ JAE matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+ CMPL DX, $0x04
+ JB matchlen_match2_standalone
+ MOVL (AX)(SI*1), BX
+ CMPL (CX)(SI*1), BX
+ JNE matchlen_match2_standalone
+ LEAL -4(DX), DX
+ LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+ CMPL DX, $0x02
+ JB matchlen_match1_standalone
+ MOVW (AX)(SI*1), BX
+ CMPW (CX)(SI*1), BX
+ JNE matchlen_match1_standalone
+ LEAL -2(DX), DX
+ LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+ CMPL DX, $0x01
+ JB gen_match_len_end
+ MOVB (AX)(SI*1), BL
+ CMPB (CX)(SI*1), BL
+ JNE gen_match_len_end
+ INCL SI
+
+gen_match_len_end:
+ MOVQ SI, ret+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
new file mode 100644
index 000000000..ad5cd814b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
@@ -0,0 +1,33 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package flate
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shortest of the two.
+func matchLen(a, b []byte) (n int) {
+ for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
+ diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+ if diff != 0 {
+ return n + bits.TrailingZeros64(diff)>>3
+ }
+ n += 8
+ }
+
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ n++
+ }
+ return n
+
+}
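The generic `matchLen` above finds the common prefix eight bytes at a time, using the trailing-zero count of the XOR to locate the first differing byte. A standalone sketch of the same trick, runnable outside the package; `commonPrefixLen` is a hypothetical helper, not part of the vendored code:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// commonPrefixLen mirrors the generic matchLen: compare 8 bytes at a time and
// use the trailing zero count of the XOR to find the first differing byte.
func commonPrefixLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(commonPrefixLen([]byte("deflate-stream"), []byte("deflate-static"))) // 10
}
```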
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
new file mode 100644
index 000000000..6ed28061b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
@@ -0,0 +1,37 @@
+package flate
+
+const (
+ // Masks for shifts with register sizes of the shift value.
+ // This can be used to work around the x86 design of shifting by mod register size.
+ // It can be used when a variable shift is always smaller than the register size.
+
+ // reg8SizeMaskX - shift value is 8 bits, shifted is X
+ reg8SizeMask8 = 7
+ reg8SizeMask16 = 15
+ reg8SizeMask32 = 31
+ reg8SizeMask64 = 63
+
+ // reg16SizeMaskX - shift value is 16 bits, shifted is X
+ reg16SizeMask8 = reg8SizeMask8
+ reg16SizeMask16 = reg8SizeMask16
+ reg16SizeMask32 = reg8SizeMask32
+ reg16SizeMask64 = reg8SizeMask64
+
+ // reg32SizeMaskX - shift value is 32 bits, shifted is X
+ reg32SizeMask8 = reg8SizeMask8
+ reg32SizeMask16 = reg8SizeMask16
+ reg32SizeMask32 = reg8SizeMask32
+ reg32SizeMask64 = reg8SizeMask64
+
+ // reg64SizeMaskX - shift value is 64 bits, shifted is X
+ reg64SizeMask8 = reg8SizeMask8
+ reg64SizeMask16 = reg8SizeMask16
+ reg64SizeMask32 = reg8SizeMask32
+ reg64SizeMask64 = reg8SizeMask64
+
+ // regSizeMaskUintX - shift value is uint, shifted is X
+ regSizeMaskUint8 = reg8SizeMask8
+ regSizeMaskUint16 = reg8SizeMask16
+ regSizeMaskUint32 = reg8SizeMask32
+ regSizeMaskUint64 = reg8SizeMask64
+)
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go
new file mode 100644
index 000000000..1b7a2cbd7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go
@@ -0,0 +1,40 @@
+//go:build !amd64
+// +build !amd64
+
+package flate
+
+const (
+ // Masks for shifts with register sizes of the shift value.
+ // This can be used to work around the x86 design of shifting by mod register size.
+ // It can be used when a variable shift is always smaller than the register size.
+
+ // reg8SizeMaskX - shift value is 8 bits, shifted is X
+ reg8SizeMask8 = 0xff
+ reg8SizeMask16 = 0xff
+ reg8SizeMask32 = 0xff
+ reg8SizeMask64 = 0xff
+
+ // reg16SizeMaskX - shift value is 16 bits, shifted is X
+ reg16SizeMask8 = 0xffff
+ reg16SizeMask16 = 0xffff
+ reg16SizeMask32 = 0xffff
+ reg16SizeMask64 = 0xffff
+
+ // reg32SizeMaskX - shift value is 32 bits, shifted is X
+ reg32SizeMask8 = 0xffffffff
+ reg32SizeMask16 = 0xffffffff
+ reg32SizeMask32 = 0xffffffff
+ reg32SizeMask64 = 0xffffffff
+
+ // reg64SizeMaskX - shift value is 64 bits, shifted is X
+ reg64SizeMask8 = 0xffffffffffffffff
+ reg64SizeMask16 = 0xffffffffffffffff
+ reg64SizeMask32 = 0xffffffffffffffff
+ reg64SizeMask64 = 0xffffffffffffffff
+
+ // regSizeMaskUintX - shift value is uint, shifted is X
+ regSizeMaskUint8 = ^uint(0)
+ regSizeMaskUint16 = ^uint(0)
+ regSizeMaskUint32 = ^uint(0)
+ regSizeMaskUint64 = ^uint(0)
+)
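The two regmask files provide per-platform shift masks: on amd64 each mask is the register width minus one, so masking a variable shift count lets the compiler see the shift is always in range and skip the extra handling Go requires for oversized shifts, while on other platforms the masks are all ones and the expression is a no-op. A small sketch of the intended usage pattern; `extractBits` is an illustrative helper and the constant value is the amd64 one:

```go
package main

import "fmt"

// Illustrative value: 63 on amd64 per regmask_amd64.go; ^uint(0) elsewhere.
const regSizeMaskUint64 = 63

// extractBits shifts with the count masked by the register-size mask,
// the pattern the regmask constants are meant for.
func extractBits(v uint64, shift uint) uint64 {
	return v >> (shift & regSizeMaskUint64)
}

func main() {
	fmt.Printf("%#x\n", extractBits(0xdeadbeef, 16)) // 0xdead
}
```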
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
new file mode 100644
index 000000000..f3d4139ef
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -0,0 +1,318 @@
+package flate
+
+import (
+ "io"
+ "math"
+ "sync"
+)
+
+const (
+ maxStatelessBlock = math.MaxInt16
+ // dictionary will be taken from maxStatelessBlock, so limit it.
+ maxStatelessDict = 8 << 10
+
+ slTableBits = 13
+ slTableSize = 1 << slTableBits
+ slTableShift = 32 - slTableBits
+)
+
+type statelessWriter struct {
+ dst io.Writer
+ closed bool
+}
+
+func (s *statelessWriter) Close() error {
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+ // Emit EOF block
+ return StatelessDeflate(s.dst, nil, true, nil)
+}
+
+func (s *statelessWriter) Write(p []byte) (n int, err error) {
+ err = StatelessDeflate(s.dst, p, false, nil)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (s *statelessWriter) Reset(w io.Writer) {
+ s.dst = w
+ s.closed = false
+}
+
+// NewStatelessWriter will do compression but without maintaining any state
+// between Write calls.
+// There will be no memory kept between Write calls,
+// but compression and speed will be suboptimal.
+// Because of this, the size of actual Write calls will affect output size.
+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
+ return &statelessWriter{dst: dst}
+}
+
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+ New: func() interface{} {
+ return newHuffmanBitWriter(nil)
+ },
+}
+
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
+// When it returns, everything will have been flushed.
+// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
+ var dst tokens
+ bw := bitWriterPool.Get().(*huffmanBitWriter)
+ bw.reset(out)
+ defer func() {
+ // don't keep a reference to our output
+ bw.reset(nil)
+ bitWriterPool.Put(bw)
+ }()
+ if eof && len(in) == 0 {
+ // Just write an EOF block.
+ // Could be faster...
+ bw.writeStoredHeader(0, true)
+ bw.flush()
+ return bw.err
+ }
+
+ // Truncate dict
+ if len(dict) > maxStatelessDict {
+ dict = dict[len(dict)-maxStatelessDict:]
+ }
+
+ // For subsequent loops, keep shallow dict reference to avoid alloc+copy.
+ var inDict []byte
+
+ for len(in) > 0 {
+ todo := in
+ if len(inDict) > 0 {
+ if len(todo) > maxStatelessBlock-maxStatelessDict {
+ todo = todo[:maxStatelessBlock-maxStatelessDict]
+ }
+ } else if len(todo) > maxStatelessBlock-len(dict) {
+ todo = todo[:maxStatelessBlock-len(dict)]
+ }
+ inOrg := in
+ in = in[len(todo):]
+ uncompressed := todo
+ if len(dict) > 0 {
+ // combine dict and source
+ bufLen := len(todo) + len(dict)
+ combined := make([]byte, bufLen)
+ copy(combined, dict)
+ copy(combined[len(dict):], todo)
+ todo = combined
+ }
+ // Compress
+ if len(inDict) == 0 {
+ statelessEnc(&dst, todo, int16(len(dict)))
+ } else {
+ statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
+ }
+ isEof := eof && len(in) == 0
+
+ if dst.n == 0 {
+ bw.writeStoredHeader(len(uncompressed), isEof)
+ if bw.err != nil {
+ return bw.err
+ }
+ bw.writeBytes(uncompressed)
+ } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
+ // If we removed less than 1/16th, huffman compress the block.
+ bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
+ } else {
+ bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ }
+ if len(in) > 0 {
+ // Retain a dict if we have more
+ inDict = inOrg[len(uncompressed)-maxStatelessDict:]
+ dict = nil
+ dst.Reset()
+ }
+ if bw.err != nil {
+ return bw.err
+ }
+ }
+ if !eof {
+ // Align, only a stored block can do that.
+ bw.writeStoredHeader(0, false)
+ }
+ bw.flush()
+ return bw.err
+}
+
+func hashSL(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> slTableShift
+}
+
+func load3216(b []byte, i int16) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6416(b []byte, i int16) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ type tableEntry struct {
+ offset int16
+ }
+
+ var table [slTableSize]tableEntry
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src)-int(startAt) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = 0
+ return
+ }
+ // Index until startAt
+ if startAt > 0 {
+ cv := load3232(src, 0)
+ for i := int16(0); i < startAt; i++ {
+ table[hashSL(cv)] = tableEntry{offset: i}
+ cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+ }
+ }
+
+ s := startAt + 1
+ nextEmit := startAt
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int16(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3216(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashSL(cv)
+ candidate = table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit || nextS <= 0 {
+ goto emitRemainder
+ }
+
+ now := load6416(src, nextS)
+ table[nextHash] = tableEntry{offset: s}
+ nextHash = hashSL(uint32(now))
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = table[nextHash]
+ now >>= 8
+ table[nextHash] = tableEntry{offset: s}
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset
+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ // Save the match found
+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6416(src, s-2)
+ o := s - 2
+ prevHash := hashSL(uint32(x))
+ table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashSL(uint32(x))
+ candidate = table[currHash]
+ table[currHash] = tableEntry{offset: o + 2}
+
+ if uint32(x) != load3216(src, candidate.offset) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
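Stateless compression keeps nothing between calls, so each Write is effectively its own block and the size of individual writes directly affects the output size. A minimal sketch of both entry points shown above, `NewStatelessWriter` and the one-shot `StatelessDeflate` with an optional dictionary (at most `maxStatelessDict`, 8 KiB, is used):

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// io.WriteCloser wrapper; no state is kept between Write calls,
	// so larger writes compress better.
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write(bytes.Repeat([]byte("payload "), 256)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // emits the final EOF block
		log.Fatal(err)
	}

	// One-shot call with an optional dictionary presumed to precede the data.
	var buf2 bytes.Buffer
	dict := []byte("payload payload ")
	if err := flate.StatelessDeflate(&buf2, bytes.Repeat([]byte("payload "), 256), true, dict); err != nil {
		log.Fatal(err)
	}
	log.Printf("stateless: %d bytes, with dict: %d bytes", buf.Len(), buf2.Len())
}
```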
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 000000000..d818790c1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,379 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+ // bits 16-22 offsetcode - 5 bits
+ // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
+ // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
+ lengthShift = 22
+	offsetMask          = 1<<lengthShift - 1
+	typeMask            = 3 << 30
+	literalType         = 0 << 30
+	matchType           = 1 << 30
+	matchOffsetOnlyMask = 0xffff
+)
+
+type token uint32
+
+type tokens struct {
+	extraHist [32]uint16 // codes 256->maxnumlit
+ offHist [32]uint16 // offset codes
+ litHist [256]uint16 // codes 0->255
+ nFilled int
+ n uint16 // Must be able to contain maxStoreBlockSize
+ tokens [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+ if t.n == 0 {
+ return
+ }
+ t.n = 0
+ t.nFilled = 0
+ for i := range t.litHist[:] {
+ t.litHist[i] = 0
+ }
+ for i := range t.extraHist[:] {
+ t.extraHist[i] = 0
+ }
+ for i := range t.offHist[:] {
+ t.offHist[i] = 0
+ }
+}
+
+func (t *tokens) Fill() {
+ if t.n == 0 {
+ return
+ }
+ for i, v := range t.litHist[:] {
+ if v == 0 {
+ t.litHist[i] = 1
+ t.nFilled++
+ }
+ }
+ for i, v := range t.extraHist[:literalCount-256] {
+ if v == 0 {
+ t.nFilled++
+ t.extraHist[i] = 1
+ }
+ }
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v == 0 {
+ t.offHist[i] = 1
+ }
+ }
+}
+
+func indexTokens(in []token) tokens {
+ var t tokens
+ t.indexTokens(in)
+ return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+ t.Reset()
+ for _, tok := range in {
+ if tok < matchType {
+ t.AddLiteral(tok.literal())
+ continue
+ }
+ t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
+ }
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+func emitLiteral(dst *tokens, lit []byte) {
+ for _, v := range lit {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+ t.tokens[t.n] = token(lit)
+ t.litHist[lit]++
+ t.n++
+}
+
+// from https://stackoverflow.com/a/28730362
+func mFastLog2(val float32) float32 {
+ ux := int32(math.Float32bits(val))
+ log2 := (float32)(((ux >> 23) & 255) - 128)
+ ux &= -0x7f800001
+ ux += 127 << 23
+ uval := math.Float32frombits(uint32(ux))
+ log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
+ return log2
+}
+
+// EstimatedBits returns a minimum size estimate, in bits, produced by an
+// *optimal* compression of the block.
+func (t *tokens) EstimatedBits() int {
+ shannon := float32(0)
+ bits := int(0)
+ nMatches := 0
+ total := int(t.n) + t.nFilled
+ if total > 0 {
+ invTotal := 1.0 / float32(total)
+ for _, v := range t.litHist[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ }
+ }
+ // Just add 15 for EOB
+ shannon += 15
+ for i, v := range t.extraHist[1 : literalCount-256] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ bits += int(lengthExtraBits[i&31]) * int(v)
+ nMatches += int(v)
+ }
+ }
+ }
+ if nMatches > 0 {
+ invTotal := 1.0 / float32(nMatches)
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ bits += int(offsetExtraBits[i&31]) * int(v)
+ }
+ }
+ }
+ return int(shannon) + bits
+}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+ if debugDeflate {
+ if xlength >= maxMatchLength+baseMatchLength {
+ panic(fmt.Errorf("invalid length: %v", xlength))
+ }
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oCode := offsetCode(xoffset)
+ xoffset |= oCode << 16
+
+ t.extraHist[lengthCodes1[uint8(xlength)]]++
+ t.offHist[oCode&31]++
+	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+	t.n++
+}
+
+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
+// Length should NOT have the base subtracted, only offset should.
+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
+	if debugDeflate {
+		if xlength >= maxMatchLength+baseMatchLength {
+			panic(fmt.Errorf("invalid length: %v", xlength))
+		}
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ // We need to have at least baseMatchLength left over for next loop.
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ t.extraHist[lengthCodes1[uint8(xl)]]++
+ t.offHist[oc&31]++
+		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+		t.n++
+	}
+}
+
+func (t *tokens) AddEOB() {
+	t.tokens[t.n] = token(endBlockMarker)
+	t.extraHist[0]++
+	t.n++
+}
+
+func (t *tokens) Slice() []token {
+	return t.tokens[:t.n]
+}
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint8 { return uint8(t) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint8 { return uint8(t >> lengthShift) }
+
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+ if false {
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[off&255]
+ } else if off>>7 < uint32(len(offsetCodes)) {
+ return offsetCodes[(off>>7)&255] + 14
+ } else {
+ return offsetCodes[(off>>14)&255] + 28
+ }
+ }
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[uint8(off)]
+ }
+ return offsetCodes14[uint8(off>>7)]
+}
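The comment block at the top of token.go describes how a token packs into a uint32: the literal byte or adjusted offset in the low 16 bits, the offset code in bits 16-21, the adjusted length in bits 22-29, and the token type in the top two bits. A small illustration of that layout; the base adjustments of 3 for length and 1 for offset are assumed from the package constants, and the offset-code bits are left out for brevity:

```go
package main

import "fmt"

const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	literalType = 0 << 30
	matchType   = 1 << 30
)

func packLiteral(b byte) uint32 { return literalType | uint32(b) }

// packMatch omits the offset-code bits for brevity; AddMatch additionally
// ORs offsetCode(xoffset)<<16 into the low 22 bits.
func packMatch(xlength, xoffset uint32) uint32 {
	return matchType | xlength<<lengthShift | xoffset
}

func main() {
	m := packMatch(4, 1023) // length 7 (4 + base of 3), offset 1024 (1023 + base of 1)
	fmt.Printf("type=%d xlength=%d xoffset=%d lit=%c\n",
		m>>30, (m>>lengthShift)&0xff, m&0xffff, packLiteral('A')&0xff)
}
```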
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
new file mode 100644
index 000000000..00a0a2c38
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -0,0 +1,380 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+package gzip
+
+import (
+ "bufio"
+ "compress/gzip"
+ "encoding/binary"
+ "hash/crc32"
+ "io"
+ "time"
+
+ "github.com/klauspost/compress/flate"
+)
+
+const (
+ gzipID1 = 0x1f
+ gzipID2 = 0x8b
+ gzipDeflate = 8
+ flagText = 1 << 0
+ flagHdrCrc = 1 << 1
+ flagExtra = 1 << 2
+ flagName = 1 << 3
+ flagComment = 1 << 4
+)
+
+var (
+ // ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+ ErrChecksum = gzip.ErrChecksum
+ // ErrHeader is returned when reading GZIP data that has an invalid header.
+ ErrHeader = gzip.ErrHeader
+)
+
+var le = binary.LittleEndian
+
+// noEOF converts io.EOF to io.ErrUnexpectedEOF.
+func noEOF(err error) error {
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+}
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+//
+// Strings must be UTF-8 encoded and may only contain Unicode code points
+// U+0001 through U+00FF, due to limitations of the GZIP file format.
+type Header struct {
+ Comment string // comment
+ Extra []byte // "extra data"
+ ModTime time.Time // modification time
+ Name string // file name
+ OS byte // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return an ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum. Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+ Header // valid after NewReader or Reader.Reset
+ r flate.Reader
+ br *bufio.Reader
+ decompressor io.ReadCloser
+ digest uint32 // CRC-32, IEEE polynomial (section 8)
+ size uint32 // Uncompressed size (section 2.3.1)
+ buf [512]byte
+ err error
+ multistream bool
+}
+
+// NewReader creates a new Reader reading the given reader.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+//
+// It is the caller's responsibility to call Close on the Reader when done.
+//
+// The Reader.Header fields will be valid in the Reader returned.
+func NewReader(r io.Reader) (*Reader, error) {
+ z := new(Reader)
+ if err := z.Reset(r); err != nil {
+ return nil, err
+ }
+ return z, nil
+}
+
+// Reset discards the Reader z's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) error {
+ *z = Reader{
+ decompressor: z.decompressor,
+ multistream: true,
+ br: z.br,
+ }
+ if rr, ok := r.(flate.Reader); ok {
+ z.r = rr
+ } else {
+ // Reuse if we can.
+ if z.br != nil {
+ z.br.Reset(r)
+ } else {
+ z.br = bufio.NewReader(r)
+ }
+ z.r = z.br
+ }
+ z.Header, z.err = z.readHeader()
+ return z.err
+}
+
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. If the underlying reader implements io.ByteReader,
+// it will be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+ z.multistream = ok
+}
+
+// readString reads a NUL-terminated string from z.r.
+// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
+// will output a string encoded using UTF-8.
+// This method always updates z.digest with the data read.
+func (z *Reader) readString() (string, error) {
+ var err error
+ needConv := false
+ for i := 0; ; i++ {
+ if i >= len(z.buf) {
+ return "", ErrHeader
+ }
+ z.buf[i], err = z.r.ReadByte()
+ if err != nil {
+ return "", err
+ }
+ if z.buf[i] > 0x7f {
+ needConv = true
+ }
+ if z.buf[i] == 0 {
+ // Digest covers the NUL terminator.
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
+
+ // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
+ if needConv {
+ s := make([]rune, 0, i)
+ for _, v := range z.buf[:i] {
+ s = append(s, rune(v))
+ }
+ return string(s), nil
+ }
+ return string(z.buf[:i]), nil
+ }
+ }
+}
+
+// readHeader reads the GZIP header according to section 2.3.1.
+// This method does not set z.err.
+func (z *Reader) readHeader() (hdr Header, err error) {
+ if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
+ // RFC 1952, section 2.2, says the following:
+ // A gzip file consists of a series of "members" (compressed data sets).
+ //
+ // Other than this, the specification does not clarify whether a
+ // "series" is defined as "one or more" or "zero or more". To err on the
+ // side of caution, Go interprets this to mean "zero or more".
+ // Thus, it is okay to return io.EOF here.
+ return hdr, err
+ }
+ if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
+ return hdr, ErrHeader
+ }
+ flg := z.buf[3]
+ hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
+ // z.buf[8] is XFL and is currently ignored.
+ hdr.OS = z.buf[9]
+ z.digest = crc32.ChecksumIEEE(z.buf[:10])
+
+ if flg&flagExtra != 0 {
+ if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+ return hdr, noEOF(err)
+ }
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
+ data := make([]byte, le.Uint16(z.buf[:2]))
+ if _, err = io.ReadFull(z.r, data); err != nil {
+ return hdr, noEOF(err)
+ }
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
+ hdr.Extra = data
+ }
+
+ var s string
+ if flg&flagName != 0 {
+ if s, err = z.readString(); err != nil {
+ return hdr, err
+ }
+ hdr.Name = s
+ }
+
+ if flg&flagComment != 0 {
+ if s, err = z.readString(); err != nil {
+ return hdr, err
+ }
+ hdr.Comment = s
+ }
+
+ if flg&flagHdrCrc != 0 {
+ if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+ return hdr, noEOF(err)
+ }
+ digest := le.Uint16(z.buf[:2])
+ if digest != uint16(z.digest) {
+ return hdr, ErrHeader
+ }
+ }
+
+ // Reserved FLG bits must be zero.
+ if flg>>5 != 0 {
+ return hdr, ErrHeader
+ }
+
+ z.digest = 0
+ if z.decompressor == nil {
+ z.decompressor = flate.NewReader(z.r)
+ } else {
+ z.decompressor.(flate.Resetter).Reset(z.r, nil)
+ }
+ return hdr, nil
+}
+
+// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
+func (z *Reader) Read(p []byte) (n int, err error) {
+ if z.err != nil {
+ return 0, z.err
+ }
+
+ for n == 0 {
+ n, z.err = z.decompressor.Read(p)
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+ z.size += uint32(n)
+ if z.err != io.EOF {
+ // In the normal case we return here.
+ return n, z.err
+ }
+
+ // Finished file; check checksum and size.
+ if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+ z.err = noEOF(err)
+ return n, z.err
+ }
+ digest := le.Uint32(z.buf[:4])
+ size := le.Uint32(z.buf[4:8])
+ if digest != z.digest || size != z.size {
+ z.err = ErrChecksum
+ return n, z.err
+ }
+ z.digest, z.size = 0, 0
+
+ // File is ok; check if there is another.
+ if !z.multistream {
+ return n, io.EOF
+ }
+ z.err = nil // Remove io.EOF
+
+ if _, z.err = z.readHeader(); z.err != nil {
+ return n, z.err
+ }
+ }
+
+ return n, nil
+}
+
+type crcer interface {
+ io.Writer
+ Sum32() uint32
+ Reset()
+}
+type crcUpdater struct {
+ z *Reader
+}
+
+func (c *crcUpdater) Write(p []byte) (int, error) {
+ c.z.digest = crc32.Update(c.z.digest, crc32.IEEETable, p)
+ return len(p), nil
+}
+
+func (c *crcUpdater) Sum32() uint32 {
+ return c.z.digest
+}
+
+func (c *crcUpdater) Reset() {
+ c.z.digest = 0
+}
+
+// WriteTo supports the io.WriterTo interface for io.Copy and friends.
+func (z *Reader) WriteTo(w io.Writer) (int64, error) {
+ total := int64(0)
+ crcWriter := crcer(crc32.NewIEEE())
+ if z.digest != 0 {
+ crcWriter = &crcUpdater{z: z}
+ }
+ for {
+ if z.err != nil {
+ if z.err == io.EOF {
+ return total, nil
+ }
+ return total, z.err
+ }
+
+ // We write both to output and digest.
+ mw := io.MultiWriter(w, crcWriter)
+ n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
+ total += n
+ z.size += uint32(n)
+ if err != nil {
+ z.err = err
+ return total, z.err
+ }
+
+ // Finished file; check checksum + size.
+ if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ z.err = err
+ return total, err
+ }
+ z.digest = crcWriter.Sum32()
+ digest := le.Uint32(z.buf[:4])
+ size := le.Uint32(z.buf[4:8])
+ if digest != z.digest || size != z.size {
+ z.err = ErrChecksum
+ return total, z.err
+ }
+ z.digest, z.size = 0, 0
+
+ // File is ok; check if there is another.
+ if !z.multistream {
+ return total, nil
+ }
+ crcWriter.Reset()
+ z.err = nil // Remove io.EOF
+
+ if _, z.err = z.readHeader(); z.err != nil {
+ if z.err == io.EOF {
+ return total, nil
+ }
+ return total, z.err
+ }
+ }
+}
+
+// Close closes the Reader. It does not close the underlying io.Reader.
+// In order for the GZIP checksum to be verified, the reader must be
+// fully consumed until the io.EOF.
+func (z *Reader) Close() error { return z.decompressor.Close() }
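A minimal round-trip sketch with the vendored gzip package: write one member with a header name, then read it back. Since the Reader implements WriteTo, io.Copy streams the payload without an intermediate buffer:

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/gzip"
)

func main() {
	// Compress something first so the example is self-contained.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Name = "greeting.txt"
	if _, err := zw.Write([]byte("hello gzip\n")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()
	log.Printf("name from header: %s", zr.Name)
	if _, err := io.Copy(os.Stdout, zr); err != nil { // uses Reader.WriteTo
		log.Fatal(err)
	}
}
```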
diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go
new file mode 100644
index 000000000..5bc720593
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gzip.go
@@ -0,0 +1,290 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gzip
+
+import (
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+
+ "github.com/klauspost/compress/flate"
+)
+
+// These constants are copied from the flate package, so that code that imports
+// "compress/gzip" does not also have to import "compress/flate".
+const (
+ NoCompression = flate.NoCompression
+ BestSpeed = flate.BestSpeed
+ BestCompression = flate.BestCompression
+ DefaultCompression = flate.DefaultCompression
+ ConstantCompression = flate.ConstantCompression
+ HuffmanOnly = flate.HuffmanOnly
+
+ // StatelessCompression will do compression but without maintaining any state
+ // between Write calls.
+ // There will be no memory kept between Write calls,
+ // but compression and speed will be suboptimal.
+ // Because of this, the size of actual Write calls will affect output size.
+ StatelessCompression = -3
+)
+
+// A Writer is an io.WriteCloser.
+// Writes to a Writer are compressed and written to w.
+type Writer struct {
+ Header // written at first call to Write, Flush, or Close
+ w io.Writer
+ level int
+ err error
+ compressor *flate.Writer
+ digest uint32 // CRC-32, IEEE polynomial (section 8)
+ size uint32 // Uncompressed size (section 2.3.1)
+ wroteHeader bool
+ closed bool
+ buf [10]byte
+}
+
+// NewWriter returns a new Writer.
+// Writes to the returned writer are compressed and written to w.
+//
+// It is the caller's responsibility to call Close on the WriteCloser when done.
+// Writes may be buffered and not flushed until Close.
+//
+// Callers that wish to set the fields in Writer.Header must do so before
+// the first call to Write, Flush, or Close.
+func NewWriter(w io.Writer) *Writer {
+ z, _ := NewWriterLevel(w, DefaultCompression)
+ return z
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming DefaultCompression.
+//
+// The compression level can be DefaultCompression, NoCompression, or any
+// integer value between BestSpeed and BestCompression inclusive. The error
+// returned will be nil if the level is valid.
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
+ if level < StatelessCompression || level > BestCompression {
+ return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
+ }
+ z := new(Writer)
+ z.init(w, level)
+ return z, nil
+}
+
+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = flate.MinCustomWindowSize
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = flate.MaxCustomWindowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+ if windowSize < MinCustomWindowSize {
+ return nil, errors.New("gzip: requested window size less than MinWindowSize")
+ }
+ if windowSize > MaxCustomWindowSize {
+ return nil, errors.New("gzip: requested window size bigger than MaxCustomWindowSize")
+ }
+
+ z := new(Writer)
+ z.init(w, -windowSize)
+ return z, nil
+}
+
+func (z *Writer) init(w io.Writer, level int) {
+ compressor := z.compressor
+ if level != StatelessCompression {
+ if compressor != nil {
+ compressor.Reset(w)
+ }
+ }
+
+ *z = Writer{
+ Header: Header{
+ OS: 255, // unknown
+ },
+ w: w,
+ level: level,
+ compressor: compressor,
+ }
+}
+
+// Reset discards the Writer z's state and makes it equivalent to the
+// result of its original state from NewWriter or NewWriterLevel, but
+// writing to w instead. This permits reusing a Writer rather than
+// allocating a new one.
+func (z *Writer) Reset(w io.Writer) {
+ z.init(w, z.level)
+}
+
+// writeBytes writes a length-prefixed byte slice to z.w.
+func (z *Writer) writeBytes(b []byte) error {
+ if len(b) > 0xffff {
+ return errors.New("gzip.Write: Extra data is too large")
+ }
+ le.PutUint16(z.buf[:2], uint16(len(b)))
+ _, err := z.w.Write(z.buf[:2])
+ if err != nil {
+ return err
+ }
+ _, err = z.w.Write(b)
+ return err
+}
+
+// writeString writes a UTF-8 string s in GZIP's format to z.w.
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+func (z *Writer) writeString(s string) (err error) {
+ // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
+ needconv := false
+ for _, v := range s {
+ if v == 0 || v > 0xff {
+ return errors.New("gzip.Write: non-Latin-1 header string")
+ }
+ if v > 0x7f {
+ needconv = true
+ }
+ }
+ if needconv {
+ b := make([]byte, 0, len(s))
+ for _, v := range s {
+ b = append(b, byte(v))
+ }
+ _, err = z.w.Write(b)
+ } else {
+ _, err = io.WriteString(z.w, s)
+ }
+ if err != nil {
+ return err
+ }
+ // GZIP strings are NUL-terminated.
+ z.buf[0] = 0
+ _, err = z.w.Write(z.buf[:1])
+ return err
+}
+
+// Write writes a compressed form of p to the underlying io.Writer. The
+// compressed bytes are not necessarily flushed until the Writer is closed.
+func (z *Writer) Write(p []byte) (int, error) {
+ if z.err != nil {
+ return 0, z.err
+ }
+ var n int
+ // Write the GZIP header lazily.
+ if !z.wroteHeader {
+ z.wroteHeader = true
+ z.buf[0] = gzipID1
+ z.buf[1] = gzipID2
+ z.buf[2] = gzipDeflate
+ z.buf[3] = 0
+ if z.Extra != nil {
+ z.buf[3] |= 0x04
+ }
+ if z.Name != "" {
+ z.buf[3] |= 0x08
+ }
+ if z.Comment != "" {
+ z.buf[3] |= 0x10
+ }
+ le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
+ if z.level == BestCompression {
+ z.buf[8] = 2
+ } else if z.level == BestSpeed {
+ z.buf[8] = 4
+ } else {
+ z.buf[8] = 0
+ }
+ z.buf[9] = z.OS
+ n, z.err = z.w.Write(z.buf[:10])
+ if z.err != nil {
+ return n, z.err
+ }
+ if z.Extra != nil {
+ z.err = z.writeBytes(z.Extra)
+ if z.err != nil {
+ return n, z.err
+ }
+ }
+ if z.Name != "" {
+ z.err = z.writeString(z.Name)
+ if z.err != nil {
+ return n, z.err
+ }
+ }
+ if z.Comment != "" {
+ z.err = z.writeString(z.Comment)
+ if z.err != nil {
+ return n, z.err
+ }
+ }
+
+ if z.compressor == nil && z.level != StatelessCompression {
+ z.compressor, _ = flate.NewWriter(z.w, z.level)
+ }
+ }
+ z.size += uint32(len(p))
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
+ if z.level == StatelessCompression {
+ return len(p), flate.StatelessDeflate(z.w, p, false, nil)
+ }
+ n, z.err = z.compressor.Write(p)
+ return n, z.err
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+//
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet. Flush does
+// not return until the data has been written. If the underlying
+// writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (z *Writer) Flush() error {
+ if z.err != nil {
+ return z.err
+ }
+ if z.closed || z.level == StatelessCompression {
+ return nil
+ }
+ if !z.wroteHeader {
+ z.Write(nil)
+ if z.err != nil {
+ return z.err
+ }
+ }
+ z.err = z.compressor.Flush()
+ return z.err
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying
+// io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+ if z.err != nil {
+ return z.err
+ }
+ if z.closed {
+ return nil
+ }
+ z.closed = true
+ if !z.wroteHeader {
+ z.Write(nil)
+ if z.err != nil {
+ return z.err
+ }
+ }
+ if z.level == StatelessCompression {
+ z.err = flate.StatelessDeflate(z.w, nil, true, nil)
+ } else {
+ z.err = z.compressor.Close()
+ }
+ if z.err != nil {
+ return z.err
+ }
+ le.PutUint32(z.buf[:4], z.digest)
+ le.PutUint32(z.buf[4:8], z.size)
+ _, z.err = z.w.Write(z.buf[:8])
+ return z.err
+}
diff --git a/vendor/github.com/klauspost/compress/internal/race/norace.go b/vendor/github.com/klauspost/compress/internal/race/norace.go
new file mode 100644
index 000000000..affbbbb59
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/race/norace.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+package race
+
+func ReadSlice[T any](s []T) {
+}
+
+func WriteSlice[T any](s []T) {
+}
diff --git a/vendor/github.com/klauspost/compress/internal/race/race.go b/vendor/github.com/klauspost/compress/internal/race/race.go
new file mode 100644
index 000000000..f5e240dcd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/race/race.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package race
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+func ReadSlice[T any](s []T) {
+ if len(s) == 0 {
+ return
+ }
+ runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0])))
+}
+
+func WriteSlice[T any](s []T) {
+ if len(s) == 0 {
+ return
+ }
+ runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0])))
+}
diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore
new file mode 100644
index 000000000..3a89c6e3e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/.gitignore
@@ -0,0 +1,15 @@
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE
new file mode 100644
index 000000000..1d2d645bd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md
new file mode 100644
index 000000000..8284bb081
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/README.md
@@ -0,0 +1,1120 @@
+# S2 Compression
+
+S2 is an extension of [Snappy](https://github.com/google/snappy).
+
+S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads.
+
+Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy.
+This means that S2 can seamlessly replace Snappy without converting compressed content.
+
+S2 can produce Snappy-compatible output, faster and better than Snappy itself.
+If you want the full benefit of the changes, you should use S2 without Snappy compatibility.
+
+S2 is designed to have high throughput on content that cannot be compressed.
+This is important, so you don't have to worry about spending CPU cycles on already compressed data.
+
+## Benefits over Snappy
+
+* Better compression
+* Adjustable compression (3 levels)
+* Concurrent stream compression
+* Faster decompression, even for Snappy compatible content
+* Concurrent Snappy/S2 stream decompression
+* Skip forward in compressed stream
+* Random seeking with indexes
+* Compatible with reading Snappy compressed content
+* Smaller block size overhead on incompressible blocks
+* Block concatenation
+* Block Dictionary support
+* Uncompressed stream mode
+* Automatic stream size padding
+* Snappy compatible block compression
+
+## Drawbacks over Snappy
+
+* Not optimized for 32 bit systems
+* Streams use slightly more memory due to larger blocks and concurrency (configurable)
+
+# Usage
+
+Installation: `go get -u github.com/klauspost/compress/s2`
+
+Full package documentation:
+
+[![godoc][1]][2]
+
+[1]: https://godoc.org/github.com/klauspost/compress?status.svg
+[2]: https://godoc.org/github.com/klauspost/compress/s2
+
+## Compression
+
+```Go
+func EncodeStream(src io.Reader, dst io.Writer) error {
+ enc := s2.NewWriter(dst)
+ _, err := io.Copy(enc, src)
+ if err != nil {
+ enc.Close()
+ return err
+ }
+ // Blocks until compression is done.
+ return enc.Close()
+}
+```
+
+You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete.
+
+For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method.
+
+The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2.
+It is possible to flush any buffered data using the `Flush()` method.
+This will block until all data sent to the encoder has been written to the output.
+
+S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader.
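+
+As a rough sketch (the helper name is illustrative, not part of the package), reusing a single `Writer` via `Reset` and feeding it through `ReadFrom` could look like this:
+
+```Go
+// encodeReaderTo reuses an existing encoder for a new source/destination pair.
+func encodeReaderTo(enc *s2.Writer, src io.Reader, dst io.Writer) error {
+	// Point the reused encoder at the new output.
+	enc.Reset(dst)
+	// io.ReaderFrom: consume all input from src.
+	if _, err := enc.ReadFrom(src); err != nil {
+		enc.Close()
+		return err
+	}
+	// Blocks until compression is done.
+	return enc.Close()
+}
+```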
+
+Finally, if you have a single block of data you would like to have encoded as a stream,
+a slightly more efficient approach is to use the `EncodeBuffer` method.
+This will take ownership of the buffer until the stream is closed.
+
+```Go
+func EncodeStream(src []byte, dst io.Writer) error {
+ enc := s2.NewWriter(dst)
+ // The encoder owns the buffer until Flush or Close is called.
+	err := enc.EncodeBuffer(src)
+ if err != nil {
+ enc.Close()
+ return err
+ }
+ // Blocks until compression is done.
+ return enc.Close()
+}
+```
+
+Each call to `EncodeBuffer` will result in discrete blocks being created without buffering,
+so it should only be used a single time per stream.
+If you need to write several blocks, you should use the regular io.Writer interface.
+
+
+## Decompression
+
+```Go
+func DecodeStream(src io.Reader, dst io.Writer) error {
+ dec := s2.NewReader(src)
+ _, err := io.Copy(dst, dec)
+ return err
+}
+```
+
+Similar to the Writer, a Reader can be reused using the `Reset` method.
+
+For the best possible throughput, there is an `EncodeBuffer(buf []byte)` function available.
+However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed.
+
+For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`.
+Do, however, note that these functions (similar to Snappy) do not provide validation of data,
+so data corruption may be undetected. Stream encoding provides CRC checks of data.
+
+It is possible to efficiently skip forward in a compressed stream using the `Skip()` method.
+For big skips the decompressor is able to skip blocks without decompressing them.
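+
+A minimal sketch of skipping ahead in the uncompressed output (the 16MiB figure is just an example):
+
+```Go
+	dec := s2.NewReader(src)
+	// Skip the first 16MiB of uncompressed output without returning it.
+	err := dec.Skip(16 << 20)
+	if err == nil {
+		// Reads continue from the new position.
+		_, err = io.Copy(dst, dec)
+	}
+```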
+
+## Single Blocks
+
+Similar to Snappy, S2 offers single block compression.
+Blocks do not offer the same flexibility and safety as streams,
+but may be preferable for very small payloads, less than 100K.
+
+Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result.
+It is possible to provide a destination buffer.
+If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used.
+If not, a new one will be allocated.
+
+Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression.
+
+Similarly, to decompress a block you can use `dst, err := s2.Decode(nil, src)`.
+Again an optional destination buffer can be supplied.
+The `s2.DecodedLen(src)` can be used to get the minimum capacity needed.
+If that is not satisfied, a new buffer will be allocated.
+
+Block functions always operate on a single goroutine, since they should only be used for small payloads.
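+
+A minimal block round-trip sketch, using a pre-sized destination buffer as described above:
+
+```Go
+	// Destination with capacity MaxEncodedLen avoids an allocation in Encode.
+	buf := make([]byte, 0, s2.MaxEncodedLen(len(src)))
+	encoded := s2.Encode(buf, src)
+
+	// Passing nil lets Decode allocate the exact size it needs.
+	decoded, err := s2.Decode(nil, encoded)
+	if err != nil {
+		// Block decoding has no CRC, so err only signals malformed input.
+	}
+```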
+
+# Commandline tools
+
+Some very simple command-line tools are provided: `s2c` for compression and `s2d` for decompression.
+
+Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases).
+
+Installing them from source requires Go to be installed. To install, use:
+
+`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest`
+
+To build binaries to the current folder use:
+
+`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d`
+
+
+## s2c
+
+```
+Usage: s2c [options] file1 file2
+
+Compresses all files supplied as input separately.
+Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'.
+By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and compressed.
+Only http response code 200 is accepted.
+
+Options:
+ -bench int
+ Run benchmark n times. No output will be written
+ -blocksize string
+ Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M")
+ -c Write all output to stdout. Multiple input files will be concatenated
+ -cpu int
+ Compress using this amount of threads (default 32)
+ -faster
+ Compress faster, but with a minor compression loss
+ -help
+ Display help
+ -index
+ Add seek index (default true)
+ -o string
+ Write output to another file. Single input file only
+ -pad string
+ Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1")
+ -q Don't write any output to terminal, except errors
+ -rm
+ Delete source file(s) after successful compression
+ -safe
+ Do not overwrite output files
+ -slower
+ Compress more, but a lot slower
+ -snappy
+ Generate Snappy compatible output stream
+ -verify
+ Verify written files
+
+```
+
+## s2d
+
+```
+Usage: s2d [options] file1 file2
+
+Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'.
+Output file names have the extension removed. By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and decompressed.
+Extensions on downloaded files are ignored. Only http response code 200 is accepted.
+
+Options:
+ -bench int
+ Run benchmark n times. No output will be written
+ -c Write all output to stdout. Multiple input files will be concatenated
+ -help
+ Display help
+ -o string
+ Write output to another file. Single input file only
+ -offset string
+ Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+ -q Don't write any output to terminal, except errors
+ -rm
+ Delete source file(s) after successful decompression
+ -safe
+ Do not overwrite output files
+ -tail string
+ Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+ -verify
+ Verify files, but do not write output
+```
+
+## s2sx: self-extracting archives
+
+s2sx allows creating self-extracting archives with no dependencies.
+
+By default, executables are created for the same platforms as the host OS,
+but this can be overridden with `-os` and `-arch` parameters.
+
+Extracted files have 0666 permissions, except when the untar option is used.
+
+```
+Usage: s2sx [options] file1 file2
+
+Compresses all files supplied as input separately.
+If files have '.s2' extension they are assumed to be compressed already.
+Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
+If output is big, an additional file with ".more" is written. This must be included as well.
+By default output files will be overwritten.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+Options:
+ -arch string
+ Destination architecture (default "amd64")
+ -c Write all output to stdout. Multiple input files will be concatenated
+ -cpu int
+ Compress using this amount of threads (default 32)
+ -help
+ Display help
+ -max string
+ Maximum executable size. Rest will be written to another file. (default "1G")
+ -os string
+ Destination operating system (default "windows")
+ -q Don't write any output to terminal, except errors
+ -rm
+ Delete source file(s) after successful compression
+ -safe
+ Do not overwrite output files
+ -untar
+ Untar on destination
+```
+
+Available platforms are:
+
+ * darwin-amd64
+ * darwin-arm64
+ * linux-amd64
+ * linux-arm
+ * linux-arm64
+ * linux-mips64
+ * linux-ppc64le
+ * windows-386
+ * windows-amd64
+
+By default, there is a size limit of 1GB for the output executable.
+
+When this is exceeded, the remaining file content is written to a file called
+output+`.more`. This file must be placed alongside the executable for a
+successful extraction.
+
+This file *must* have the same name as the executable, so if the executable is renamed,
+the `.more` file must be renamed as well.
+
+This functionality is disabled with stdin/stdout.
+
+### Self-extracting TAR files
+
+If you wrap a TAR file you can specify `-untar` to make it untar on the destination host.
+
+Files are extracted to the current folder with the path specified in the tar file.
+
+Note that tar files are not validated before they are wrapped.
+
+For security reasons files that move below the root folder are not allowed.
+
+# Performance
+
+This section will focus on comparisons to Snappy.
+This package is solely aimed at replacing Snappy as a high speed compression package.
+If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
+gives better compression, but typically at speeds slightly below "better" mode in this package.
+
+Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation.
+
+Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
+
+A "better" compression mode is also available. This allows trading a bit of speed for a minor compression gain.
+The content compressed in this mode is fully compatible with the standard decoder.
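+
+For illustration, "better" mode is selected like this (a short sketch using the block and stream APIs mentioned in this document):
+
+```Go
+	// Streams: enable "better" compression on the writer.
+	enc := s2.NewWriter(w, s2.WriterBetterCompression())
+
+	// Blocks: use the dedicated EncodeBetter function.
+	encoded := s2.EncodeBetter(nil, src)
+```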
+
+Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
+
+| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
+|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% |
+| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% |
+| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% |
+| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% |
+| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - |
+| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% |
+| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% |
+| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% |
+| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - |
+| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% |
+| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - |
+| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% |
+| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% |
+| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - |
+| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% |
+| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - |
+
+### Legend
+
+* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
+* `S2 Throughput`: Throughput of S2 in MB/s.
+* `S2 % smaller`: How much smaller the S2 output is than the Snappy output, in percent.
+* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
+* `"better" throughput`: Throughput of S2 in "better" mode, in MB/s.
+* `"better" % smaller`: How much smaller the S2 "better" output is than the Snappy output, in percent.
+
+There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
+
+Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size.
+
+The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
+
+Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
+This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above).
+
+## Decompression
+
+S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used.
+
+S2 vs Snappy **decompression** speed. Both operating on single core:
+
+| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
+|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
+| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
+| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
+
+### Legend
+
+* `S2 Throughput`: Decompression speed of S2 encoded content.
+* `Better Throughput`: Decompression speed of S2 "better" encoded content.
+* `vs. Snappy`: Decompression speed relative to Snappy, for the throughput column immediately to its left.
+
+
+While the decompression code hasn't changed, there is a significant speedup in decompression speed.
+S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
+While this reduces compression a bit, it improves decompression speed.
+
+The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
+
+Without assembly, decompression is also very fast; single goroutine decompression speed, no assembly:
+
+| File                           | S2 Speedup    | S2 Throughput |
+|--------------------------------|---------------|---------------|
+| consensus.db.10gb.s2           | 1.84x         | 2289.8 MB/s   |
+| 10gb.tar.s2                    | 1.30x         | 867.07 MB/s   |
+| rawstudio-mint14.tar.s2        | 1.66x         | 1329.65 MB/s  |
+| github-june-2days-2019.json.s2 | 2.36x         | 1831.59 MB/s  |
+| github-ranks-backup.bin.s2     | 1.73x         | 1390.7 MB/s   |
+| enwik9.s2                      | 1.67x         | 681.53 MB/s   |
+| adresser.json.s2               | 3.41x         | 4230.53 MB/s  |
+| silesia.tar.s2                 | 1.52x         | 811.58 MB/s   |
+
+Even though S2 typically compresses better than Snappy, decompression speed is always better.
+
+### Concurrent Stream Decompression
+
+For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent)
+that will decode a full stream using multiple goroutines.
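+
+A minimal sketch, assuming the `DecodeConcurrent(w io.Writer, concurrent int)` signature from the linked documentation:
+
+```Go
+	dec := s2.NewReader(compressed)
+	// Decode the full stream to dst using 4 goroutines.
+	written, err := dec.DecodeConcurrent(dst, 4)
+```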
+
+Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 `, best of 3:
+
+| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` |
+|-------------------------------------------|------------|------------|------------|------------|-------------|
+| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
+| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s |
+| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
+| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
+| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s |
+
+Scaling can be expected to be pretty linear until memory bandwidth is saturated.
+
+For now, `DecodeConcurrent` can only be used for full streams, without seeking or combining with regular reads.
+
+## Block compression
+
+
+When compressing blocks, no concurrent compression is performed, just as with Snappy.
+This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
+
+An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
+In rare, worst-case scenarios, Snappy blocks could be significantly bigger than the input.
+
+### Mixed content blocks
+
+The most reliable benchmark is a wide dataset.
+For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|------------|------------|
+| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
+| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
+| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
+| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
+| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
+| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
+
+S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
+"Better" mode provides the same compression speed as LZ4 with better compression ratio.
+
+When producing Snappy-compatible output, S2 still delivers better throughput (150MB/s more) and better compression.
+
+As can be seen from the other benchmarks, decompression should also be easier on the S2-generated output.
+
+Though they cannot be directly compared due to different decompression speeds, here are the speed/size comparisons for
+other Go compressors:
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|-----------|--------|
+| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
+| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
+| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
+| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
+
+### Standard block compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline and the overall picture is more important.
+
+These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
+
+Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
+
+AMD64 assembly is used for both S2 and Snappy.
+
+| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
+|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
+| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s |
+| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s |
+| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s |
+| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s |
+| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s |
+| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s |
+| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s |
+| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s |
+| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s |
+| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s |
+| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
+| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
+| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
+| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
+| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
+
+
+Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
+
+Decompression speed is better than Snappy, except in one case.
+
+Since payloads are very small the variance in terms of size is rather big, so they should only be seen as a general guideline.
+
+Size is on average around Snappy, but varies on content type.
+In cases where compression is worse, it usually is compensated by a speed boost.
+
+
+### Better compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline and the overall picture is more important.
+
+| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
+|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
+| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s |
+| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s |
+| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s |
+| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s |
+| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s |
+| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s |
+| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s |
+| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s |
+| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s |
+| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s |
+| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s |
+| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s |
+| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s |
+| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s |
+| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s |
+
+
+Except for the mostly incompressible JPEG image, compression is better, usually in the
+double digits in terms of percentage reduction over Snappy.
+
+The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
+to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
+
+This mode aims to provide better compression at the expense of performance and achieves that
+without a huge performance penalty, except on very small blocks.
+
+Decompression speed suffers a little compared to the regular S2 mode,
+but still manages to be close to Snappy in spite of increased compression.
+
+# Best compression mode
+
+S2 offers a "best" compression mode.
+
+This will compress as much as possible with little regard to CPU usage.
+
+It is mainly intended for offline compression, but where decompression speed should still
+be high and the output compatible with other S2 compressed data.
+
+Some examples compared on 16 core CPU, amd64 assembly used:
+
+```
+* enwik10
+Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
+Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
+Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s
+
+* github-june-2days-2019.json
+Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
+Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
+Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s
+
+* nyc-taxi-data-10M.csv
+Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
+Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
+Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s
+
+* 10gb.tar
+Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
+Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
+Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/s
+
+* consensus.db.10gb
+Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
+Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
+Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
+```
+
+Decompression speed should be around the same as using the 'better' compression mode.
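+
+For reference, "best" mode is selected much like the other modes (sketch):
+
+```Go
+	// Blocks:
+	encoded := s2.EncodeBest(nil, src)
+
+	// Streams:
+	enc := s2.NewWriter(w, s2.WriterBestCompression())
+```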
+
+## Dictionaries
+
+*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
+either encoding or decoding. Performance improvements can be expected in the future.*
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup at the beginning of blocks.
+
+The same dictionary *must* be used for both encoding and decoding.
+S2 does not keep track of whether the same dictionary is used,
+and using the wrong dictionary will most often not result in an error when decompressing.
+
+Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
+This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
+and treat the blocks similarly.
+
+Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
+the same usage scenario applies to S2 dictionaries.
+
+> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
+
+S2 further limits the dictionary to only be enabled on the first 64KB of a block.
+This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
+
+### Compression
+
+Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
+and a 64KB dictionary trained with zStandard the following sizes can be achieved.
+
+| | Default | Better | Best |
+|--------------------|------------------|------------------|-----------------------|
+| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
+| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) |
+
+So for highly repetitive content, this case provides an almost 3x reduction in size.
+
+For less uniform data we will use the Go source code tree.
+Compressing First 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
+
+| | Default | Better | Best |
+|--------------------|-------------------|-------------------|-------------------|
+| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
+| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
+| Saving/file | 362 bytes | 428 bytes | 472 bytes |
+
+
+### Creating Dictionaries
+
+There are no tools to create dictionaries in S2.
+However, there are multiple ways to create a useful dictionary:
+
+#### Using a Sample File
+
+If your input is very uniform, you can just use a sample file as the dictionary.
+
+For example in the `github_users_sample_set` above, the average compression only goes up from
+10.49% to 11.48% by using the first file as dictionary compared to using a dedicated dictionary.
+
+```Go
+ // Read a sample
+ sample, err := os.ReadFile("sample.json")
+
+ // Create a dictionary.
+ dict := s2.MakeDict(sample, nil)
+
+ // b := dict.Bytes() will provide a dictionary that can be saved
+ // and reloaded with s2.NewDict(b).
+
+ // To encode:
+ encoded := dict.Encode(nil, file)
+
+ // To decode:
+ decoded, err := dict.Decode(nil, file)
+```
+
+#### Using Zstandard
+
+Zstandard dictionaries can easily be converted to S2 dictionaries.
+
+This can be helpful to generate dictionaries for files that don't have a fixed structure.
+
+
+Example, with training set files placed in `./training-set`:
+
+`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
+
+This will create a 64KB dictionary, which can be converted to an S2 dictionary like this:
+
+```Go
+ // Decode the Zstandard dictionary.
+ insp, err := zstd.InspectDictionary(zdict)
+ if err != nil {
+ panic(err)
+ }
+
+ // We are only interested in the contents.
+ // Assume that files start with "// Copyright (c) 2023".
+ // Search for the longest match for that.
+ // This may save a few bytes.
+ dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
+
+ // b := dict.Bytes() will provide a dictionary that can be saved
+ // and reloaded with s2.NewDict(b).
+
+ // We can now encode using this dictionary
+ encodedWithDict := dict.Encode(nil, payload)
+
+ // To decode content:
+ decoded, err := dict.Decode(nil, encodedWithDict)
+```
+
+It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
+
+This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
+
+Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
+This can be omitted, at the expense of a few bytes.
+
+# Snappy Compatibility
+
+S2 now offers full compatibility with Snappy.
+
+This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
+
+There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
+simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
+This uses "better" mode for all operations.
+If you would like more control, you can use the s2 package as described below:
+
+## Blocks
+
+Snappy compatible blocks can be generated with the S2 encoder.
+Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, reducing memory usage. Replace:
+
+| Snappy | S2 replacement |
+|---------------------------|-----------------------|
+| snappy.Encode(...) | s2.EncodeSnappy(...) |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
+
+`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed, Snappy-compatible output.
+
+`s2.ConcatBlocks` is compatible with snappy blocks.
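+
+A small sketch of the block replacement in practice:
+
+```Go
+	// Any Snappy decoder can read this block.
+	dst := make([]byte, 0, s2.MaxEncodedLen(len(src)))
+	snappyBlock := s2.EncodeSnappy(dst, src)
+```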
+
+Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
+
+| Encoder | Size | MB/s | Reduction |
+|-----------------------|------------|------------|------------|
+| snappy.Encode | 1128706759 | 725.59 | 71.89% |
+| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
+| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** |
+
+## Streams
+
+For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
+All other options are available, but note that the block size limit is different for Snappy.
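+
+For example (sketch):
+
+```Go
+	// Produces a stream readable by Snappy stream decoders.
+	enc := s2.NewWriter(w, s2.WriterSnappyCompat())
+	_, err := io.Copy(enc, r)
+	// Always close to flush remaining data and release resources.
+	cerr := enc.Close()
+	if err == nil {
+		err = cerr
+	}
+```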
+
+Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
+
+| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best |
+|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
+| nyc-taxi-data-10M.csv | 1316042016 - 539.47MB/s | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
+| enwik10 (xml) | 5088294643 - 451.13MB/s | 5175840939 - 9440.69MB/s | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
+| 10gb.tar (mixed) | 6056946612 - 729.73MB/s | 6208571995 - 9978.05MB/s | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
+| github-june-2days-2019.json | 1525176492 - 933.00MB/s | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
+| consensus.db.10gb (db) | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
+
+# Decompression
+
+All decompression functions map directly to equivalent s2 functions.
+
+| Snappy | S2 replacement |
+|------------------------|--------------------|
+| snappy.Decode(...) | s2.Decode(...) |
+| snappy.DecodedLen(...) | s2.DecodedLen(...) |
+| snappy.NewReader(...) | s2.NewReader(...) |
+
+Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
+are also available for Snappy streams.
+
+If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
+on your Reader will reduce memory consumption.
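+
+For example (sketch):
+
+```Go
+	// Snappy streams never exceed 64K blocks, so cap the reader accordingly.
+	dec := s2.NewReader(r, s2.ReaderMaxBlockSize(64<<10))
+```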
+
+# Concatenating blocks and streams
+
+Concatenating streams will concatenate the output of both without recompressing them.
+While this is inefficient in terms of compression, it might be usable in certain scenarios.
+The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
+
+Blocks can be concatenated using the `ConcatBlocks` function.
+
+Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
+Streams with indexes (see below) will currently not work on concatenated streams.
+
+# Stream Seek Index
+
+S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
+
+The index can either be appended to the stream as a skippable block or returned for separate storage.
+
+When the index is appended to a stream it will be skipped by regular decoders,
+so the output remains compatible with other decoders.
+
+## Creating an Index
+
+To automatically add an index to a stream, add `WriterAddIndex()` option to your writer.
+Then the index will be added to the stream when `Close()` is called.
+
+```
+ // Add Index to stream...
+ enc := s2.NewWriter(w, s2.WriterAddIndex())
+ io.Copy(enc, r)
+ enc.Close()
+```
+
+If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
+This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
+
+```
+ // Get index for separate storage...
+ enc := s2.NewWriter(w)
+ io.Copy(enc, r)
+ index, err := enc.CloseIndex()
+```
+
+The `index` can then be used without needing to read from the stream.
+This means the index can be used without needing to seek to the end of the stream,
+or for manually forwarding streams. See below.
+
+Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function.
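+
+A minimal sketch, assuming `IndexStream` returns the serialized index:
+
+```Go
+	// Build an index for an already-written stream.
+	index, err := s2.IndexStream(r)
+```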
+
+## Using Indexes
+
+To use indexes there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available.
+
+Calling ReadSeeker will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader.
+
+If 'random' is specified the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported.
+Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+```
+ dec := s2.NewReader(r)
+ rs, err := dec.ReadSeeker(false, nil)
+ rs.Seek(wantOffset, io.SeekStart)
+```
+
+Get a seeker to seek forward. Since no index is provided, the index is read from the stream.
+This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+A custom index can be specified which will be used if supplied.
+When using a custom index, it will not be read from the input stream.
+
+```
+ dec := s2.NewReader(r)
+ rs, err := dec.ReadSeeker(false, index)
+ rs.Seek(wantOffset, io.SeekStart)
+```
+
+This will read the index from `index`. Since we specify non-random (forward-only) seeking, `r` does not have to be an io.Seeker.
+
+```
+ dec := s2.NewReader(r)
+ rs, err := dec.ReadSeeker(true, index)
+ rs.Seek(wantOffset, io.SeekStart)
+```
+
+Finally, since we specify that we want to do random seeking, `r` must be an io.Seeker.
+
+The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader,
+meaning changes performed to one are reflected in the other.
+
+To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` can be used.
+
+## Manually Forwarding Streams
+
+Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type.
+This can be used for parsing indexes, either separate or in streams.
+
+In some cases it may not be possible to serve a seekable stream.
+This can for instance be an HTTP stream, where the Range request
+is sent at the start of the stream.
+
+With a little bit of extra code it is still possible to use indexes
+to forward to a specific offset with a single forward skip.
+
+It is possible to load the index manually like this:
+```
+ var index s2.Index
+ _, err = index.Load(idxBytes)
+```
+
+This can be used to figure out how much to offset the compressed stream:
+
+```
+ compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+```
+
+The `compressedOffset` is the number of bytes that should be skipped
+from the beginning of the compressed file.
+
+The `uncompressedOffset` will then be the offset of the uncompressed bytes returned
+when decoding from that position. This will always be <= wantOffset.
+
+When creating a decoder it must be specified that it should *not* expect a stream identifier
+at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset`
+we create the decoder like this:
+
+```
+ dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+```
+
+We are not completely done. We still need to forward the stream past the uncompressed bytes we didn't want.
+This is done using the regular "Skip" function:
+
+```
+ err = dec.Skip(wantOffset - uncompressedOffset)
+```
+
+This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
+
+# Compact storage
+
+For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
+a serialized index. If you remove the header, it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
+
+This is expected to save 20 bytes. The headers can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but gives the most compact representation. `RestoreIndexHeaders` returns nil if the headers contain errors.
+
+## Index Format:
+
+Each block is structured as a snappy skippable block, with the chunk ID 0x99.
+
+The block can be read from the front, but contains information so it can be read from the back as well.
+
+Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
+with un-encoded value length of 64 bits, unless other limits are specified.
+
+| Content | Format |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte` | Always 0x99. |
+| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
+| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
+| UncompressedSize, Varint | Total Uncompressed size. |
+| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. |
+| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. |
+| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. |
+| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. |
+| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. |
+| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. |
+| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+
+For regular streams the uncompressed offsets are fully predictable,
+so `HasUncompressedOffsets` allows specifying that compressed blocks all have
+exactly `EstBlockSize` bytes of uncompressed content.
+
+Entries *must* be in order, starting with the lowest offset,
+and there *must* be no uncompressed offset duplicates.
+Entries *may* point to the start of a skippable block,
+but it is then not allowed to also have an entry for the next block since
+that would give an uncompressed offset duplicate.
+
+There is no requirement for all blocks to be represented in the index.
+In fact there is a maximum of 65536 block entries in an index.
+
+The writer can use any method to reduce the number of entries.
+An implicit block start at 0,0 can be assumed.
+
+### Decoding entries:
+
+```
+// Read Uncompressed entries.
+// Each assumes EstBlockSize delta from previous.
+for each entry {
+ uOff = 0
+ if HasUncompressedOffsets == 1 {
+ uOff = ReadVarInt // Read value from stream
+ }
+
+ // Except for the first entry, use previous values.
+ if entryNum == 0 {
+ entry[entryNum].UncompressedOffset = uOff
+ continue
+ }
+
+ // Uncompressed uses previous offset and adds EstBlockSize
+ entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
+}
+
+
+// Guess that the first block will be 50% of uncompressed size.
+// Integer truncating division must be used.
+CompressGuess := EstBlockSize / 2
+
+// Read Compressed entries.
+// Each assumes CompressGuess delta from previous.
+// CompressGuess is adjusted for each value.
+for each entry {
+ cOff = ReadVarInt // Read value from stream
+
+ // Except for the first entry, use previous values.
+ if entryNum == 0 {
+ entry[entryNum].CompressedOffset = cOff
+ continue
+ }
+
+ // Compressed uses previous and our estimate.
+ entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
+
+ // Adjust compressed offset for next loop, integer truncating division must be used.
+ CompressGuess += cOff/2
+}
+```
+
+To decode from any given uncompressed offset `(wantOffset)`:
+
+* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
+* Start decoding from `entry[n-1].CompressedOffset`.
+* Discard `entry[n-1].UncompressedOffset - wantOffset` bytes from the decoded stream.
+
+See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
+
+
+# Format Extensions
+
+* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
+* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
+* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
+
+Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
+
+The length is specified by reading the 3-bit length field in the tag and decoding it using this table:
+
+| Length | Actual Length |
+|--------|----------------------|
+| 0 | 4 |
+| 1 | 5 |
+| 2 | 6 |
+| 3 | 7 |
+| 4 | 8 |
+| 5 | 8 + read 1 byte |
+| 6 | 260 + read 2 bytes |
+| 7 | 65540 + read 3 bytes |
+
+This allows any repeat offset + length to be represented by 2 to 5 bytes.
+It also allows emitting matches longer than 64 bytes with one copy + one repeat instead of several 64-byte copies.
+
+Lengths are stored as little endian values.
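+
+As a minimal sketch of the table above, the repeat length can be derived from the 3-bit length field of the tag plus the extra bytes that follow it (the function and parameter names here are illustrative, not taken from the library):
+
+```
+// repeatLength decodes the length of a repeat operation.
+// code is the 3-bit length field from the tag; extra holds the bytes
+// following the tag (1 to 3 bytes, little endian, depending on code).
+func repeatLength(code uint8, extra []byte) int {
+	switch code {
+	case 5: // one extra byte
+		return 8 + int(extra[0])
+	case 6: // two extra bytes
+		return 260 + (int(extra[0]) | int(extra[1])<<8)
+	case 7: // three extra bytes
+		return 65540 + (int(extra[0]) | int(extra[1])<<8 | int(extra[2])<<16)
+	default: // codes 0-4 map directly to lengths 4-8
+		return int(code) + 4
+	}
+}
+```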
+
+The first copy of a block cannot be a repeat offset, and in streams the offset is reset on every block.
+
+Default streaming block size is 1MB.
+
+# Dictionary Encoding
+
+Dictionary support allows providing a custom dictionary that serves as a lookup at the beginning of blocks.
+
+A dictionary provides an initial repeat value that can be used to point to a common header.
+
+Other than that, the dictionary contains data that can be used for back-references.
+
+Frequently used data should be placed at the *end* of the dictionary, since back-references with offsets below 2048 bytes encode smaller.
+
+## Format
+
+Dictionary *content* must be at least 16 bytes and at most 64KiB (65536 bytes).
+
+Encoding: `[repeat value (uvarint)][dictionary content...]`
+
+Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifies the initial repeat offset.
+This value is an offset into the dictionary content and not a back-reference offset,
+so setting this to 0 will make the repeat value point to the first value of the dictionary.
+
+The value must be less than the dictionary length minus 8.
+
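+As a rough sketch of this layout (`NewDict` in this package performs the equivalent parsing with additional handling; `parseDict` below is only illustrative and assumes `encoding/binary` is imported):
+
+```
+// parseDict splits a serialized dictionary into the initial repeat
+// offset and the dictionary content, rejecting invalid sizes.
+func parseDict(serialized []byte) (repeat int, content []byte, ok bool) {
+	r, n := binary.Uvarint(serialized) // uvarint repeat offset header
+	if n <= 0 {
+		return 0, nil, false
+	}
+	content = serialized[n:]
+	if len(content) < 16 || len(content) > 65536 || int(r) >= len(content)-8 {
+		return 0, nil, false
+	}
+	return int(r), content, true
+}
+```
+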
+## Encoding
+
+From the decoder's point of view, the dictionary content is seen as preceding the decoded output.
+
+`[dictionary content][decoded output]`
+
+Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
+
+Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
+However, if a copy ends at the end of the dictionary the next repeat will point to the start of the decoded buffer, which is allowed.
+
+The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
+
+Once 64KB (65536 bytes) has been encoded or decoded, it is no longer allowed to reference the dictionary,
+neither by copy nor by repeat operations.
+If the boundary is crossed while copying from the dictionary, the operation should complete,
+but the next instruction is not allowed to reference the dictionary.
+
+Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
+There are no checks whether the supplied dictionary is the correct one for a block.
+Because of this, there is no overhead when using a dictionary.
+
+## Example
+
+This is the dictionary content. Elements are separated by `[]`.
+
+Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
+
+The initial repeat offset is set to 10, which is the character `2`.
+
+Encoded: `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
+
+Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
+
+Output: `10 bananas which were brown were added`
+
+
+## Streams
+
+For streams, each block can use the dictionary.
+
+The dictionary cannot currently be provided on the stream.
+
+
+# LICENSE
+
+This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
+
+Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
new file mode 100644
index 000000000..264ffd0a9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -0,0 +1,443 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strconv"
+
+ "github.com/klauspost/compress/internal/race"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("s2: corrupt input")
+ // ErrCRC reports that the input failed CRC validation (streams only)
+ ErrCRC = errors.New("s2: corrupt input, crc mismatch")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("s2: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("s2: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= cap(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+
+ race.WriteSlice(dst)
+ race.ReadSlice(src[s:])
+
+ if s2Decode(dst, src[s:]) != 0 {
+ return nil, ErrCorrupt
+ }
+ return dst, nil
+}
+
+// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func s2DecodeDict(dst, src []byte, dict *Dict) int {
+ if dict == nil {
+ return s2Decode(dst, src)
+ }
+ const debug = false
+ const debugErrs = debug
+
+ if debug {
+ fmt.Println("Starting decode, dst len:", len(dst))
+ }
+ var d, s, length int
+ offset := len(dict.dict) - dict.repeat
+
+ // As long as we can read at least 5 bytes...
+ for s < len(src)-5 {
+ // Removing bounds checks is SLOWER, e.g. when doing
+ // in := src[s:s+5]
+ // Checked on Go 1.18
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ x = uint32(src[s-1])
+ case x == 61:
+ in := src[s : s+3]
+ x = uint32(in[1]) | uint32(in[2])<<8
+ s += 3
+ case x == 62:
+ in := src[s : s+4]
+ // Load as 32 bit and shift down.
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x >>= 8
+ s += 4
+ case x == 63:
+ in := src[s : s+5]
+ x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
+ s += 5
+ }
+ length = int(x) + 1
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debugErrs {
+ fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ length = int(src[s-2]) >> 2 & 0x7
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ length = int(src[s]) + 4
+ s += 1
+ case 6:
+ in := src[s : s+2]
+ length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
+ s += 2
+ case 7:
+ in := src[s : s+3]
+ length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
+ s += 3
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ in := src[s : s+3]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8)
+ length = 1 + int(in[0])>>2
+ s += 3
+
+ case tagCopy4:
+ in := src[s : s+5]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
+ length = 1 + int(in[0])>>2
+ s += 5
+ }
+
+ if offset <= 0 || length > len(dst)-d {
+ if debugErrs {
+ fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ // copy from dict
+ if d < offset {
+ if d > MaxDictSrcOffset {
+ if debugErrs {
+ fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ startOff := len(dict.dict) - offset + d
+ if startOff < 0 || startOff+length > len(dict.dict) {
+ if debugErrs {
+ fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict))
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff)
+ }
+ copy(dst[d:d+length], dict.dict[startOff:])
+ d += length
+ continue
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ // Remaining with extra checks...
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debugErrs {
+ fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(src[s-2]) >> 2 & 0x7
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ s += 1
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-1])) + 4
+ case 6:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
+ case 7:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || length > len(dst)-d {
+ if debugErrs {
+ fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ // copy from dict
+ if d < offset {
+ if d > MaxDictSrcOffset {
+ if debugErrs {
+ fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ rOff := len(dict.dict) - (offset - d)
+ if debug {
+ fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff)
+ }
+ if rOff+length > len(dict.dict) {
+ if debugErrs {
+ fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if rOff < 0 {
+ if debugErrs {
+ fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:d+length], dict.dict[rOff:])
+ d += length
+ continue
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ if d != len(dst) {
+ if debugErrs {
+ fmt.Println("wanted length", len(dst), "got", d)
+ }
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s
new file mode 100644
index 000000000..9b105e03c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s
@@ -0,0 +1,568 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+#define R_TMP0 AX
+#define R_TMP1 BX
+#define R_LEN CX
+#define R_OFF DX
+#define R_SRC SI
+#define R_DST DI
+#define R_DBASE R8
+#define R_DLEN R9
+#define R_DEND R10
+#define R_SBASE R11
+#define R_SLEN R12
+#define R_SEND R13
+#define R_TMP2 R14
+#define R_TMP3 R15
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - R_TMP0 scratch
+// - R_TMP1 scratch
+// - R_LEN length or x (shared)
+// - R_OFF offset
+// - R_SRC &src[s]
+// - R_DST &dst[d]
+// + R_DBASE dst_base
+// + R_DLEN dst_len
+// + R_DEND dst_base + dst_len
+// + R_SBASE src_base
+// + R_SLEN src_len
+// + R_SEND src_base + src_len
+// - R_TMP2 used by doCopy
+// - R_TMP3 used by doCopy
+//
+// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
+// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
+TEXT ·s2Decode(SB), NOSPLIT, $48-56
+ // Initialize R_SRC, R_DST and R_DBASE-R_SEND.
+ MOVQ dst_base+0(FP), R_DBASE
+ MOVQ dst_len+8(FP), R_DLEN
+ MOVQ R_DBASE, R_DST
+ MOVQ R_DBASE, R_DEND
+ ADDQ R_DLEN, R_DEND
+ MOVQ src_base+24(FP), R_SBASE
+ MOVQ src_len+32(FP), R_SLEN
+ MOVQ R_SBASE, R_SRC
+ MOVQ R_SBASE, R_SEND
+ ADDQ R_SLEN, R_SEND
+ XORQ R_OFF, R_OFF
+
+loop:
+ // for s < len(src)
+ CMPQ R_SRC, R_SEND
+ JEQ end
+
+ // R_LEN = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (R_SRC), R_LEN
+ MOVL R_LEN, R_TMP1
+ ANDL $3, R_TMP1
+ CMPL R_TMP1, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, R_LEN
+ CMPL R_LEN, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ R_SRC
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that R_LEN == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // R_LEN can hold 64 bits, so the increment cannot overflow.
+ INCQ R_LEN
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // R_TMP0 = len(dst) - d
+ // R_TMP1 = len(src) - s
+ MOVQ R_DEND, R_TMP0
+ SUBQ R_DST, R_TMP0
+ MOVQ R_SEND, R_TMP1
+ SUBQ R_SRC, R_TMP1
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ R_LEN, $16
+ JGT callMemmove
+ CMPQ R_TMP0, $16
+ JLT callMemmove
+ CMPQ R_TMP1, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R_SRC), X0
+ MOVOU X0, 0(R_DST)
+
+ // d += length
+ // s += length
+ ADDQ R_LEN, R_DST
+ ADDQ R_LEN, R_SRC
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ R_LEN, R_TMP0
+ JGT errCorrupt
+ CMPQ R_LEN, R_TMP1
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ R_DST, 0(SP)
+ MOVQ R_SRC, 8(SP)
+ MOVQ R_LEN, 16(SP)
+ MOVQ R_DST, 24(SP)
+ MOVQ R_SRC, 32(SP)
+ MOVQ R_LEN, 40(SP)
+ MOVQ R_OFF, 48(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R_DBASE-R_SEND.
+ MOVQ 24(SP), R_DST
+ MOVQ 32(SP), R_SRC
+ MOVQ 40(SP), R_LEN
+ MOVQ 48(SP), R_OFF
+ MOVQ dst_base+0(FP), R_DBASE
+ MOVQ dst_len+8(FP), R_DLEN
+ MOVQ R_DBASE, R_DEND
+ ADDQ R_DLEN, R_DEND
+ MOVQ src_base+24(FP), R_SBASE
+ MOVQ src_len+32(FP), R_SLEN
+ MOVQ R_SBASE, R_SEND
+ ADDQ R_SLEN, R_SEND
+
+ // d += length
+ // s += length
+ ADDQ R_LEN, R_DST
+ ADDQ R_LEN, R_SRC
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ R_LEN, R_SRC
+ SUBQ $58, R_SRC
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL R_LEN, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(R_SRC), R_LEN
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(R_SRC), R_LEN
+ JMP doLit
+
+tagLit62Plus:
+ CMPL R_LEN, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ // We read one byte, safe to read one back, since we are just reading tag.
+ // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8
+ MOVL -4(R_SRC), R_LEN
+ SHRL $8, R_LEN
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(R_SRC), R_LEN
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, R_LEN
+ INCQ R_LEN
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(R_SRC), R_OFF
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, R_LEN
+ INCQ R_LEN
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(R_SRC), R_OFF
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - R_TMP1 == src[s] & 0x03
+ // - R_LEN == src[s]
+ CMPQ R_TMP1, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ // length = 4 + int(src[s-2])>>2&0x7
+ MOVBQZX -1(R_SRC), R_TMP1
+ MOVQ R_LEN, R_TMP0
+ SHRQ $2, R_LEN
+ ANDQ $0xe0, R_TMP0
+ ANDQ $7, R_LEN
+ SHLQ $3, R_TMP0
+ ADDQ $4, R_LEN
+ ORQ R_TMP1, R_TMP0
+
+ // check if repeat code, ZF set by ORQ.
+ JZ repeatCode
+
+ // This is a regular copy, transfer our temporary value to R_OFF (offset)
+ MOVQ R_TMP0, R_OFF
+ JMP doCopy
+
+// This is a repeat code.
+repeatCode:
+ // If length < 9, reuse last offset, with the length already calculated.
+ CMPQ R_LEN, $9
+ JL doCopyRepeat
+
+ // Read additional bytes for length.
+ JE repeatLen1
+
+ // Rare, so the extra branch shouldn't hurt too much.
+ CMPQ R_LEN, $10
+ JE repeatLen2
+ JMP repeatLen3
+
+// Read repeat lengths.
+repeatLen1:
+ // s ++
+ ADDQ $1, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = src[s-1] + 8
+ MOVBQZX -1(R_SRC), R_LEN
+ ADDL $8, R_LEN
+ JMP doCopyRepeat
+
+repeatLen2:
+ // s +=2
+ ADDQ $2, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8)
+ MOVWQZX -2(R_SRC), R_LEN
+ ADDL $260, R_LEN
+ JMP doCopyRepeat
+
+repeatLen3:
+ // s +=3
+ ADDQ $3, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16)
+ // Read one byte further back (just part of the tag, shifted out)
+ MOVL -4(R_SRC), R_LEN
+ SHRL $8, R_LEN
+ ADDL $65540, R_LEN
+ JMP doCopyRepeat
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - R_LEN == length && R_LEN > 0
+ // - R_OFF == offset
+
+ // if d < offset { etc }
+ MOVQ R_DST, R_TMP1
+ SUBQ R_DBASE, R_TMP1
+ CMPQ R_TMP1, R_OFF
+ JLT errCorrupt
+
+ // Repeat values can skip the test above, since any offset > 0 will be in dst.
+doCopyRepeat:
+ // if offset <= 0 { etc }
+ CMPQ R_OFF, $0
+ JLE errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R_DEND, R_TMP1
+ SUBQ R_DST, R_TMP1
+ CMPQ R_LEN, R_TMP1
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R_TMP2 = len(dst)-d
+ // - R_TMP3 = &dst[d-offset]
+ MOVQ R_DEND, R_TMP2
+ SUBQ R_DST, R_TMP2
+ MOVQ R_DST, R_TMP3
+ SUBQ R_OFF, R_TMP3
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ R_LEN, $16
+ JGT slowForwardCopy
+ CMPQ R_OFF, $8
+ JLT slowForwardCopy
+ CMPQ R_TMP2, $16
+ JLT slowForwardCopy
+ MOVQ 0(R_TMP3), R_TMP0
+ MOVQ R_TMP0, 0(R_DST)
+ MOVQ 8(R_TMP3), R_TMP1
+ MOVQ R_TMP1, 8(R_DST)
+ ADDQ R_LEN, R_DST
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R_TMP2
+ CMPQ R_LEN, R_TMP2
+ JGT verySlowForwardCopy
+
+ // We want to keep the offset, so we use R_TMP2 from here.
+ MOVQ R_OFF, R_TMP2
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R_TMP3, is unchanged.
+ // }
+ CMPQ R_TMP2, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R_TMP3), R_TMP1
+ MOVQ R_TMP1, (R_DST)
+ SUBQ R_TMP2, R_LEN
+ ADDQ R_TMP2, R_DST
+ ADDQ R_TMP2, R_TMP2
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by R_DST being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ R_DST, R_TMP0
+ ADDQ R_LEN, R_DST
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ R_LEN, $0
+ JLE loop
+ MOVQ (R_TMP3), R_TMP1
+ MOVQ R_TMP1, (R_TMP0)
+ ADDQ $8, R_TMP3
+ ADDQ $8, R_TMP0
+ SUBQ $8, R_LEN
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R_TMP3), R_TMP1
+ MOVB R_TMP1, (R_DST)
+ INCQ R_TMP3
+ INCQ R_DST
+ DECQ R_LEN
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ R_DST, R_DEND
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s
new file mode 100644
index 000000000..78e463f34
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s
@@ -0,0 +1,574 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+#define R_TMP0 R2
+#define R_TMP1 R3
+#define R_LEN R4
+#define R_OFF R5
+#define R_SRC R6
+#define R_DST R7
+#define R_DBASE R8
+#define R_DLEN R9
+#define R_DEND R10
+#define R_SBASE R11
+#define R_SLEN R12
+#define R_SEND R13
+#define R_TMP2 R14
+#define R_TMP3 R15
+
+// TEST_SRC will check that R_SRC is <= R_SEND
+#define TEST_SRC() \
+ CMP R_SEND, R_SRC \
+ BGT errCorrupt
+
+// MOVD R_SRC, R_TMP1
+// SUB R_SBASE, R_TMP1, R_TMP1
+// CMP R_SLEN, R_TMP1
+// BGT errCorrupt
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - R_TMP0 scratch
+// - R_TMP1 scratch
+// - R_LEN length or x
+// - R_OFF offset
+// - R_SRC &src[s]
+// - R_DST &dst[d]
+// + R_DBASE dst_base
+// + R_DLEN dst_len
+// + R_DEND dst_base + dst_len
+// + R_SBASE src_base
+// + R_SLEN src_len
+// + R_SEND src_base + src_len
+// - R_TMP2 used by doCopy
+// - R_TMP3 used by doCopy
+//
+// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
+// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
+TEXT ·s2Decode(SB), NOSPLIT, $56-56
+ // Initialize R_SRC, R_DST and R_DBASE-R_SEND.
+ MOVD dst_base+0(FP), R_DBASE
+ MOVD dst_len+8(FP), R_DLEN
+ MOVD R_DBASE, R_DST
+ MOVD R_DBASE, R_DEND
+ ADD R_DLEN, R_DEND, R_DEND
+ MOVD src_base+24(FP), R_SBASE
+ MOVD src_len+32(FP), R_SLEN
+ MOVD R_SBASE, R_SRC
+ MOVD R_SBASE, R_SEND
+ ADD R_SLEN, R_SEND, R_SEND
+ MOVD $0, R_OFF
+
+loop:
+ // for s < len(src)
+ CMP R_SEND, R_SRC
+ BEQ end
+
+ // R_LEN = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBU (R_SRC), R_LEN
+ MOVW R_LEN, R_TMP1
+ ANDW $3, R_TMP1
+ MOVW $1, R1
+ CMPW R1, R_TMP1
+ BGE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ MOVW $60, R1
+ LSRW $2, R_LEN, R_LEN
+ CMPW R_LEN, R1
+ BLS tagLit60Plus
+
+ // case x < 60:
+ // s++
+ ADD $1, R_SRC, R_SRC
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that R_LEN == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // R_LEN can hold 64 bits, so the increment cannot overflow.
+ ADD $1, R_LEN, R_LEN
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // R_TMP0 = len(dst) - d
+ // R_TMP1 = len(src) - s
+ MOVD R_DEND, R_TMP0
+ SUB R_DST, R_TMP0, R_TMP0
+ MOVD R_SEND, R_TMP1
+ SUB R_SRC, R_TMP1, R_TMP1
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMP $16, R_LEN
+ BGT callMemmove
+ CMP $16, R_TMP0
+ BLT callMemmove
+ CMP $16, R_TMP1
+ BLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ LDP 0(R_SRC), (R_TMP2, R_TMP3)
+ STP (R_TMP2, R_TMP3), 0(R_DST)
+
+ // d += length
+ // s += length
+ ADD R_LEN, R_DST, R_DST
+ ADD R_LEN, R_SRC, R_SRC
+ B loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMP R_TMP0, R_LEN
+ BGT errCorrupt
+ CMP R_TMP1, R_LEN
+ BGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVD R_DST, 8(RSP)
+ MOVD R_SRC, 16(RSP)
+ MOVD R_LEN, 24(RSP)
+ MOVD R_DST, 32(RSP)
+ MOVD R_SRC, 40(RSP)
+ MOVD R_LEN, 48(RSP)
+ MOVD R_OFF, 56(RSP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R_DBASE-R_SEND.
+ MOVD 32(RSP), R_DST
+ MOVD 40(RSP), R_SRC
+ MOVD 48(RSP), R_LEN
+ MOVD 56(RSP), R_OFF
+ MOVD dst_base+0(FP), R_DBASE
+ MOVD dst_len+8(FP), R_DLEN
+ MOVD R_DBASE, R_DEND
+ ADD R_DLEN, R_DEND, R_DEND
+ MOVD src_base+24(FP), R_SBASE
+ MOVD src_len+32(FP), R_SLEN
+ MOVD R_SBASE, R_SEND
+ ADD R_SLEN, R_SEND, R_SEND
+
+ // d += length
+ // s += length
+ ADD R_LEN, R_DST, R_DST
+ ADD R_LEN, R_SRC, R_SRC
+ B loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADD R_LEN, R_SRC, R_SRC
+ SUB $58, R_SRC, R_SRC
+ TEST_SRC()
+
+ // case x == 60:
+ MOVW $61, R1
+ CMPW R1, R_LEN
+ BEQ tagLit61
+ BGT tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBU -1(R_SRC), R_LEN
+ B doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVHU -2(R_SRC), R_LEN
+ B doLit
+
+tagLit62Plus:
+ CMPW $62, R_LEN
+ BHI tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVHU -3(R_SRC), R_LEN
+ MOVBU -1(R_SRC), R_TMP1
+ ORR R_TMP1<<16, R_LEN
+ B doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVWU -4(R_SRC), R_LEN
+ B doLit
+
+ // The code above handles literal tags.
+ // ----------------------------------------
+ // The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADD $5, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVD R_SRC, R_TMP1
+ SUB R_SBASE, R_TMP1, R_TMP1
+ CMP R_SLEN, R_TMP1
+ BGT errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ MOVD $1, R1
+ ADD R_LEN>>2, R1, R_LEN
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVWU -4(R_SRC), R_OFF
+ B doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADD $3, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // length = 1 + int(src[s-3])>>2
+ MOVD $1, R1
+ ADD R_LEN>>2, R1, R_LEN
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVHU -2(R_SRC), R_OFF
+ B doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - R_TMP1 == src[s] & 0x03
+ // - R_LEN == src[s]
+ CMP $2, R_TMP1
+ BEQ tagCopy2
+ BGT tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADD $2, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ // Calculate offset in R_TMP0 in case it is a repeat.
+ MOVD R_LEN, R_TMP0
+ AND $0xe0, R_TMP0
+ MOVBU -1(R_SRC), R_TMP1
+ ORR R_TMP0<<3, R_TMP1, R_TMP0
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ MOVD $7, R1
+ AND R_LEN>>2, R1, R_LEN
+ ADD $4, R_LEN, R_LEN
+
+ // check if repeat code with offset 0.
+ CMP $0, R_TMP0
+ BEQ repeatCode
+
+ // This is a regular copy, transfer our temporary value to R_OFF (offset)
+ MOVD R_TMP0, R_OFF
+ B doCopy
+
+ // This is a repeat code.
+repeatCode:
+ // If length < 9, reuse last offset, with the length already calculated.
+ CMP $9, R_LEN
+ BLT doCopyRepeat
+ BEQ repeatLen1
+ CMP $10, R_LEN
+ BEQ repeatLen2
+
+repeatLen3:
+ // s +=3
+ ADD $3, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540
+ MOVBU -1(R_SRC), R_TMP0
+ MOVHU -3(R_SRC), R_LEN
+ ORR R_TMP0<<16, R_LEN, R_LEN
+ ADD $65540, R_LEN, R_LEN
+ B doCopyRepeat
+
+repeatLen2:
+ // s +=2
+ ADD $2, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260
+ MOVHU -2(R_SRC), R_LEN
+ ADD $260, R_LEN, R_LEN
+ B doCopyRepeat
+
+repeatLen1:
+ // s +=1
+ ADD $1, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // length = src[s-1] + 8
+ MOVBU -1(R_SRC), R_LEN
+ ADD $8, R_LEN, R_LEN
+ B doCopyRepeat
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - R_LEN == length && R_LEN > 0
+ // - R_OFF == offset
+
+ // if d < offset { etc }
+ MOVD R_DST, R_TMP1
+ SUB R_DBASE, R_TMP1, R_TMP1
+ CMP R_OFF, R_TMP1
+ BLT errCorrupt
+
+ // Repeat values can skip the test above, since any offset > 0 will be in dst.
+doCopyRepeat:
+
+ // if offset <= 0 { etc }
+ CMP $0, R_OFF
+ BLE errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVD R_DEND, R_TMP1
+ SUB R_DST, R_TMP1, R_TMP1
+ CMP R_TMP1, R_LEN
+ BGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R_TMP2 = len(dst)-d
+ // - R_TMP3 = &dst[d-offset]
+ MOVD R_DEND, R_TMP2
+ SUB R_DST, R_TMP2, R_TMP2
+ MOVD R_DST, R_TMP3
+ SUB R_OFF, R_TMP3, R_TMP3
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMP $16, R_LEN
+ BGT slowForwardCopy
+ CMP $8, R_OFF
+ BLT slowForwardCopy
+ CMP $16, R_TMP2
+ BLT slowForwardCopy
+ MOVD 0(R_TMP3), R_TMP0
+ MOVD R_TMP0, 0(R_DST)
+ MOVD 8(R_TMP3), R_TMP1
+ MOVD R_TMP1, 8(R_DST)
+ ADD R_LEN, R_DST, R_DST
+ B loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUB $10, R_TMP2, R_TMP2
+ CMP R_TMP2, R_LEN
+ BGT verySlowForwardCopy
+
+ // We want to keep the offset, so we use R_TMP2 from here.
+ MOVD R_OFF, R_TMP2
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R_TMP3, is unchanged.
+ // }
+ CMP $8, R_TMP2
+ BGE fixUpSlowForwardCopy
+ MOVD (R_TMP3), R_TMP1
+ MOVD R_TMP1, (R_DST)
+ SUB R_TMP2, R_LEN, R_LEN
+ ADD R_TMP2, R_DST, R_DST
+ ADD R_TMP2, R_TMP2, R_TMP2
+ B makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by R_DST being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVD R_DST, R_TMP0
+ ADD R_LEN, R_DST, R_DST
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ MOVD $0, R1
+ CMP R1, R_LEN
+ BLE loop
+ MOVD (R_TMP3), R_TMP1
+ MOVD R_TMP1, (R_TMP0)
+ ADD $8, R_TMP3, R_TMP3
+ ADD $8, R_TMP0, R_TMP0
+ SUB $8, R_LEN, R_LEN
+ B finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R_TMP3), R_TMP1
+ MOVB R_TMP1, (R_DST)
+ ADD $1, R_TMP3, R_TMP3
+ ADD $1, R_DST, R_DST
+ SUB $1, R_LEN, R_LEN
+ CBNZ R_LEN, verySlowForwardCopy
+ B loop
+
+ // The code above handles copy tags.
+ // ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMP R_DEND, R_DST
+ BNE errCorrupt
+
+ // return 0
+ MOVD $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVD $1, R_TMP0
+ MOVD R_TMP0, ret+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go
new file mode 100644
index 000000000..cb3576edd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (amd64 || arm64) && !appengine && gc && !noasm
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package s2
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func s2Decode(dst, src []byte) int
diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go
new file mode 100644
index 000000000..2cb55c2c7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_other.go
@@ -0,0 +1,292 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !arm64) || appengine || !gc || noasm
+// +build !amd64,!arm64 appengine !gc noasm
+
+package s2
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func s2Decode(dst, src []byte) int {
+ const debug = false
+ if debug {
+ fmt.Println("Starting decode, dst len:", len(dst))
+ }
+ var d, s, length int
+ offset := 0
+
+ // As long as we can read at least 5 bytes...
+ for s < len(src)-5 {
+ // Removing bounds checks is SLOWER, e.g. when doing
+ // in := src[s:s+5]
+ // Checked on Go 1.18
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ x = uint32(src[s-1])
+ case x == 61:
+ in := src[s : s+3]
+ x = uint32(in[1]) | uint32(in[2])<<8
+ s += 3
+ case x == 62:
+ in := src[s : s+4]
+ // Load as 32 bit and shift down.
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x >>= 8
+ s += 4
+ case x == 63:
+ in := src[s : s+5]
+ x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
+ s += 5
+ }
+ length = int(x) + 1
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debug {
+ fmt.Println("corrupt: lit size", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ length = int(src[s-2]) >> 2 & 0x7
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ length = int(src[s]) + 4
+ s += 1
+ case 6:
+ in := src[s : s+2]
+ length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
+ s += 2
+ case 7:
+ in := src[s : s+3]
+ length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
+ s += 3
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ in := src[s : s+3]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8)
+ length = 1 + int(in[0])>>2
+ s += 3
+
+ case tagCopy4:
+ in := src[s : s+5]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
+ length = 1 + int(in[0])>>2
+ s += 5
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ if debug {
+ fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
+ }
+
+ return decodeErrCodeCorrupt
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ // Remaining with extra checks...
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debug {
+ fmt.Println("corrupt: lit size", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = int(src[s-2]) >> 2 & 0x7
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ s += 1
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-1])) + 4
+ case 6:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
+ case 7:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ if debug {
+ fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go
new file mode 100644
index 000000000..f125ad096
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/dict.go
@@ -0,0 +1,350 @@
+// Copyright (c) 2022+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "sync"
+)
+
+const (
+ // MinDictSize is the minimum dictionary size when repeat has been read.
+ MinDictSize = 16
+
+ // MaxDictSize is the maximum dictionary size when repeat has been read.
+ MaxDictSize = 65536
+
+ // MaxDictSrcOffset is the maximum offset where a dictionary entry can start.
+ MaxDictSrcOffset = 65535
+)
+
+// Dict contains a dictionary that can be used for encoding and decoding s2 blocks.
+type Dict struct {
+ dict []byte
+ repeat int // Repeat as index of dict
+
+ fast, better, best sync.Once
+ fastTable *[1 << 14]uint16
+
+ betterTableShort *[1 << 14]uint16
+ betterTableLong *[1 << 17]uint16
+
+ bestTableShort *[1 << 16]uint32
+ bestTableLong *[1 << 19]uint32
+}
+
+// NewDict will read a dictionary.
+// It will return nil if the dictionary is invalid.
+func NewDict(dict []byte) *Dict {
+ if len(dict) == 0 {
+ return nil
+ }
+ var d Dict
+ // Repeat is the first value of the dict
+ r, n := binary.Uvarint(dict)
+ if n <= 0 {
+ return nil
+ }
+ dict = dict[n:]
+ d.dict = dict
+ if cap(d.dict) < len(d.dict)+16 {
+ d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
+ }
+ if len(dict) < MinDictSize || len(dict) > MaxDictSize {
+ return nil
+ }
+ d.repeat = int(r)
+ if d.repeat > len(dict) {
+ return nil
+ }
+ return &d
+}
+
+// Bytes will return a serialized version of the dictionary.
+// The output can be sent to NewDict.
+func (d *Dict) Bytes() []byte {
+ dst := make([]byte, binary.MaxVarintLen16+len(d.dict))
+ return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...)
+}
+
+// MakeDict will create a dictionary.
+// 'data' must be at least MinDictSize.
+// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used.
+// If searchStart is set, the initial repeat value is set to the last
+// match of this content.
+// If no match is found, progressively shorter matches are attempted.
+// This content should match the typical start of a block.
+// If at least 4 bytes cannot be matched, repeat is set to the start of the block.
+func MakeDict(data []byte, searchStart []byte) *Dict {
+ if len(data) == 0 {
+ return nil
+ }
+ if len(data) > MaxDictSize {
+ data = data[len(data)-MaxDictSize:]
+ }
+ var d Dict
+ dict := data
+ d.dict = dict
+ if cap(d.dict) < len(d.dict)+16 {
+ d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
+ }
+ if len(dict) < MinDictSize {
+ return nil
+ }
+
+ // Find the longest match possible, last entry if multiple.
+ for s := len(searchStart); s > 4; s-- {
+ if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 {
+ d.repeat = idx
+ break
+ }
+ }
+
+ return &d
+}
+
+// MakeDictManual will create a dictionary.
+// 'data' must be at least MinDictSize and less than or equal to MaxDictSize.
+// A manual first repeat index into data must be provided.
+// It must be less than len(data)-8.
+func MakeDictManual(data []byte, firstIdx uint16) *Dict {
+ if len(data) < MinDictSize || int(firstIdx) >= len(data)-8 || len(data) > MaxDictSize {
+ return nil
+ }
+ var d Dict
+ dict := data
+ d.dict = dict
+ if cap(d.dict) < len(d.dict)+16 {
+ d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
+ }
+
+ d.repeat = int(firstIdx)
+ return &d
+}
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockDictGo(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) EncodeBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockBetterDict(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// EncodeBest returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBest compresses as well as reasonably possible, but with a
+// large speed decrease.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) EncodeBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockBest(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func (d *Dict) Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= cap(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ if s2DecodeDict(dst, src[s:], d) != 0 {
+ return nil, ErrCorrupt
+ }
+ return dst, nil
+}
+
+func (d *Dict) initFast() {
+ d.fast.Do(func() {
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint16
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8-2; i += 3 {
+ x0 := load64(d.dict, i)
+ h0 := hash6(x0, tableBits)
+ h1 := hash6(x0>>8, tableBits)
+ h2 := hash6(x0>>16, tableBits)
+ table[h0] = uint16(i)
+ table[h1] = uint16(i + 1)
+ table[h2] = uint16(i + 2)
+ }
+ d.fastTable = &table
+ })
+}
+
+func (d *Dict) initBetter() {
+ d.better.Do(func() {
+ const (
+ // Long hash matches.
+ lTableBits = 17
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint16
+ var sTable [maxSTableSize]uint16
+
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8; i++ {
+ cv := load64(d.dict, i)
+ lTable[hash7(cv, lTableBits)] = uint16(i)
+ sTable[hash4(cv, sTableBits)] = uint16(i)
+ }
+ d.betterTableShort = &sTable
+ d.betterTableLong = &lTable
+ })
+}
+
+func (d *Dict) initBest() {
+ d.best.Do(func() {
+ const (
+ // Long hash matches.
+ lTableBits = 19
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 16
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8; i++ {
+ cv := load64(d.dict, i)
+ hashL := hash8(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL := lTable[hashL]
+ candidateS := sTable[hashS]
+ lTable[hashL] = uint32(i) | candidateL<<16
+ sTable[hashS] = uint32(i) | candidateS<<16
+ }
+ d.bestTableShort = &sTable
+ d.bestTableLong = &lTable
+ })
+}
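A minimal usage sketch of the dictionary API introduced in dict.go above (MakeDict, Dict.Encode, Dict.Decode). The import path matches the vendored package; the sample payload is made up for illustration:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Build a dictionary from content that resembles future blocks.
	sample := bytes.Repeat([]byte("netobserv flow record "), 8)
	dict := s2.MakeDict(sample, nil)
	if dict == nil {
		panic("dictionary rejected (size out of bounds)")
	}
	src := []byte("netobserv flow record netobserv flow record netobserv flow record")
	enc := dict.Encode(nil, src)
	dec, err := dict.Decode(nil, enc)
	if err != nil || !bytes.Equal(dec, src) {
		panic("roundtrip failed")
	}
	fmt.Printf("src=%d bytes encoded=%d bytes\n", len(src), len(enc))
}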
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
new file mode 100644
index 000000000..0c9088adf
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -0,0 +1,393 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "math"
+ "math/bits"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlock(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EstimateBlockSize will perform a very fast compression
+// without outputting the result and return the compressed output size.
+// The function returns -1 if no improvement could be achieved.
+// Using actual compression will most often produce better compression than the estimate.
+func EstimateBlockSize(src []byte) (d int) {
+ if len(src) <= inputMargin || int64(len(src)) > 0xffffffff {
+ return -1
+ }
+ if len(src) <= 1024 {
+ d = calcBlockSizeSmall(src)
+ } else {
+ d = calcBlockSize(src)
+ }
+
+ if d == 0 {
+ return -1
+ }
+ // Size of the varint encoded block size.
+ d += (bits.Len64(uint64(len(src))) + 7) / 7
+
+ if d >= len(src) {
+ return -1
+ }
+ return d
+}
+
+// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlockBetter(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeBest returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBest compresses as well as reasonably possible, but with a
+// large speed decrease.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlockBest(dst[d:], src, nil)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeSnappy returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeSnappy(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeSnappyBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockBetterSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeSnappyBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockBestSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination.
+// If the destination is nil or too small, a new slice will be allocated.
+// The blocks are not validated, so garbage in = garbage out.
+// dst may not overlap block data.
+// Any data in dst is preserved as is, so it will not be considered a block.
+func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) {
+ totalSize := uint64(0)
+ compSize := 0
+ for _, b := range blocks {
+ l, hdr, err := decodedLen(b)
+ if err != nil {
+ return nil, err
+ }
+ totalSize += uint64(l)
+ compSize += len(b) - hdr
+ }
+ if totalSize == 0 {
+ dst = append(dst, 0)
+ return dst, nil
+ }
+ if totalSize > math.MaxUint32 {
+ return nil, ErrTooLarge
+ }
+ var tmp [binary.MaxVarintLen32]byte
+ hdrSize := binary.PutUvarint(tmp[:], totalSize)
+ wantSize := hdrSize + compSize
+
+ if cap(dst)-len(dst) < wantSize {
+ dst = append(make([]byte, 0, wantSize+len(dst)), dst...)
+ }
+ dst = append(dst, tmp[:hdrSize]...)
+ for _, b := range blocks {
+ _, hdr, err := decodedLen(b)
+ if err != nil {
+ return nil, err
+ }
+ dst = append(dst, b[hdr:]...)
+ }
+ return dst, nil
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 8
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// will be accepted by the encoder.
+const minNonLiteralBlockSize = 32
+
+const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits)
+
+// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size.
+// Blocks this big are highly discouraged, though.
+// Half the size on 32 bit systems.
+const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+// 32 bit platforms will have lower thresholds for rejecting big content.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if intReduction == 1 {
+ // 32 bits
+ if n > math.MaxInt32 {
+ // Also includes negative.
+ return -1
+ }
+ } else if n > 0xffffffff {
+ // 64 bits
+ // Also includes negative.
+ return -1
+ }
+ // Size of the varint encoded block size.
+ n = n + uint64((bits.Len64(n)+7)/7)
+
+ // Add maximum size of encoding block as literals.
+ n += uint64(literalExtraSize(int64(srcLen)))
+ if intReduction == 1 {
+ // 32 bits
+ if n > math.MaxInt32 {
+ return -1
+ }
+ } else if n > 0xffffffff {
+ // 64 bits
+ // Also includes negative.
+ return -1
+ }
+ return int(n)
+}
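A small sketch of the block-level API in encode.go above: MaxEncodedLen sizes the worst-case destination, and Encode falls back to an uncompressed literal block when the input does not shrink. The input data here is hypothetical:

package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := []byte("flow sample, flow sample, flow sample, flow sample")
	n := s2.MaxEncodedLen(len(src))
	if n < 0 {
		panic("input too large for a single block")
	}
	dst := make([]byte, n) // worst-case size, can be reused across calls
	enc := s2.Encode(dst, src)
	fmt.Printf("worst case %d bytes, actual %d bytes\n", n, len(enc))
}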
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
new file mode 100644
index 000000000..997704569
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -0,0 +1,1068 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
+
+func load32(b []byte, i int) uint32 {
+ return binary.LittleEndian.Uint32(b[i:])
+}
+
+func load64(b []byte, i int) uint64 {
+ return binary.LittleEndian.Uint64(b[i:])
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+ const prime6bytes = 227718039650203
+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+func encodeGo(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlockGo(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockGo(dst, src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+
+ debug = false
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(base-nextEmit) > dstLimit {
+ return 0
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if nextEmit > 0 {
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ } else {
+ // First match, cannot be repeat.
+ d += emitCopy(dst[d:], repeat, s-base)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopy(dst[d:], repeat, s-base)
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if debug && s == candidate {
+ panic("s == candidate")
+ }
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+func encodeBlockSnappyGo(dst, src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ // Bail if we exceed the maximum size.
+ if d+(base-nextEmit) > dstLimit {
+ return 0
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeat(dst[d:], repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeat(dst[d:], repeat, s-base)
+ if false {
+ // Validate match.
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockDictGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ maxAhead = 8 // maximum bytes ahead without checking sLimit
+
+ debug = false
+ )
+ dict.initFast()
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if sLimit > MaxDictSrcOffset-maxAhead {
+ sLimit = MaxDictSrcOffset - maxAhead
+ }
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form can start with a dict entry (copy or repeat).
+ s := 0
+
+ // Convert dict repeat to offset
+ repeat := len(dict.dict) - dict.repeat
+ cv := load64(src, 0)
+
+ // While in dict
+searchDict:
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ if nextS > sLimit {
+ if debug {
+ fmt.Println("slimit reached", s, nextS)
+ }
+ break searchDict
+ }
+ candidateDict := int(dict.fastTable[hash0])
+ candidateDict2 := int(dict.fastTable[hash1])
+ candidate2 := int(table[hash1])
+ candidate := int(table[hash0])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+
+ if repeat > s {
+ candidate := len(dict.dict) - repeat + s
+ if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) {
+ // Extend back
+ base := s
+ for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ // Bail if we exceed the maximum size.
+ if d+(base-nextEmit) > dstLimit {
+ return 0
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ s += 4
+ candidate += 4
+ for candidate < len(dict.dict)-8 && s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ d += emitRepeat(dst[d:], repeat, s-base)
+ if debug {
+ fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ cv = load64(src, s)
+ continue
+ }
+ } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ if nextEmit > 0 {
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ } else {
+ // First match, cannot be repeat.
+ d += emitCopy(dst[d:], repeat, s-base)
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ if debug {
+ fmt.Println("emitted reg repeat", s-base, "s:", s)
+ }
+ cv = load64(src, s)
+ continue searchDict
+ }
+ if s == 0 {
+ cv = load64(src, nextS)
+ s = nextS
+ continue searchDict
+ }
+ // Start with table. These matches will always be closer.
+ if uint32(cv) == load32(src, candidate) {
+ goto emitMatch
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ goto emitMatch
+ }
+
+ // Check dict. Dicts have longer offsets, so we want longer matches.
+ if cv == load64(dict.dict, candidateDict) {
+ table[hash2] = uint32(s + 2)
+ goto emitDict
+ }
+
+ candidateDict = int(dict.fastTable[hash2])
+ // Check if upper 7 bytes match
+ if candidateDict2 >= 1 {
+ if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) {
+ table[hash2] = uint32(s + 2)
+ candidateDict = candidateDict2
+ s++
+ goto emitDict
+ }
+ }
+
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ goto emitMatch
+ }
+ if candidateDict >= 2 {
+ // Check if upper 6 bytes match
+ if cv^load64(dict.dict, candidateDict-2) < (1 << 16) {
+ s += 2
+ goto emitDict
+ }
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ continue searchDict
+
+ emitDict:
+ {
+ if debug {
+ if load32(dict.dict, candidateDict) != load32(src, s) {
+ panic("dict emit mismatch")
+ }
+ }
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] {
+ candidateDict--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = s + (len(dict.dict)) - candidateDict
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateDict += 4
+ for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateDict += 8
+ }
+
+ // Matches longer than 64 are split.
+ if s <= sLimit || s-base < 8 {
+ d += emitCopy(dst[d:], repeat, s-base)
+ } else {
+ // Split to ensure we don't start a copy within next block
+ d += emitCopy(dst[d:], repeat, 4)
+ d += emitRepeat(dst[d:], repeat, s-base-4)
+ }
+ if false {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := dict.dict[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+
+ // Index and continue loop to try new candidate.
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>8, tableBits)
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s - 1)
+ cv = load64(src, s)
+ }
+ continue
+ }
+ emitMatch:
+
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopy(dst[d:], repeat, s-base)
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if debug && s == candidate {
+ panic("s == candidate")
+ }
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+ // Search without dict:
+ if repeat > s {
+ repeat = 0
+ }
+
+ // No more dict
+ sLimit = len(src) - inputMargin
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ if debug {
+ fmt.Println("non-dict matching at", s, "repeat:", repeat)
+ }
+ cv = load64(src, s)
+ if debug {
+ fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
+ }
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ // Bail if we exceed the maximum size.
+ if d+(base-nextEmit) > dstLimit {
+ return 0
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if nextEmit > 0 {
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ } else {
+ // First match, cannot be repeat.
+ d += emitCopy(dst[d:], repeat, s-base)
+ }
+ if debug {
+ fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopy(dst[d:], repeat, s-base)
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if debug && s == candidate {
+ panic("s == candidate")
+ }
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", len(src)-nextEmit, "literals")
+ }
+ }
+ return d
+}
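The match finder in encode_all.go above indexes positions by hashing the low six bytes of the input into a 14-bit table. A standalone sketch of that lookup, with the hash constant copied from hash6 above and everything else illustrative:

package main

import (
	"encoding/binary"
	"fmt"
)

// hash6 mirrors the multiplicative hash above: keep the low 6 bytes,
// multiply by a prime, and take the top h bits as the table slot.
func hash6(u uint64, h uint8) uint32 {
	const prime6bytes = 227718039650203
	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}

func main() {
	src := []byte("flow sample ... flow sample ...!")
	const tableBits = 14
	var table [1 << tableBits]uint32
	// Start at 1, as the encoder does, so slot value 0 means "empty".
	for s := 1; s+8 <= len(src); s++ {
		cv := binary.LittleEndian.Uint64(src[s:])
		h := hash6(cv, tableBits)
		if c := table[h]; c != 0 && binary.LittleEndian.Uint32(src[c:]) == uint32(cv) {
			fmt.Printf("candidate match: s=%d candidate=%d\n", s, c)
		}
		table[h] = uint32(s)
	}
}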
diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
new file mode 100644
index 000000000..4f45206a4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
@@ -0,0 +1,162 @@
+//go:build !appengine && !noasm && gc
+// +build !appengine,!noasm,gc
+
+package s2
+
+import "github.com/klauspost/compress/internal/race"
+
+const hasAmd64Asm = true
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+
+ if len(src) >= 4<<20 {
+ return encodeBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeBlockAsm4MB(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBlockAsm8B(dst, src)
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetter(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+
+ if len(src) > 4<<20 {
+ return encodeBetterBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeBetterBlockAsm4MB(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeBetterBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeBetterBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBetterBlockAsm8B(dst, src)
+}
+
+// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockSnappy(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+ if len(src) >= 64<<10 {
+ return encodeSnappyBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeSnappyBlockAsm64K(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeSnappyBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeSnappyBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeSnappyBlockAsm8B(dst, src)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+ if len(src) >= 64<<10 {
+ return encodeSnappyBetterBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeSnappyBetterBlockAsm64K(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeSnappyBetterBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeSnappyBetterBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeSnappyBetterBlockAsm8B(dst, src)
+}
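The amd64 wrappers above pick an assembly variant by input size, using smaller hash tables for smaller blocks. A rough sketch of that threshold pattern; the helper name and output strings are illustrative, not part of the package:

package main

import "fmt"

// variantFor mirrors the size thresholds used by encodeBlock above:
// smaller inputs are routed to variants with smaller hash tables.
func variantFor(srcLen int) string {
	const (
		minNonLiteralBlockSize = 32
		limit8B                = 512      // below this: 8-bit table
		limit10B               = 4 << 10  // below this: 10-bit table
		limit12B               = 16 << 10 // below this: 12-bit table
	)
	switch {
	case srcLen < minNonLiteralBlockSize:
		return "too small to match; caller emits literals"
	case srcLen < limit8B:
		return "encodeBlockAsm8B"
	case srcLen < limit10B:
		return "encodeBlockAsm10B"
	case srcLen < limit12B:
		return "encodeBlockAsm12B"
	case srcLen < 4<<20:
		return "encodeBlockAsm4MB"
	default:
		return "encodeBlockAsm"
	}
}

func main() {
	for _, n := range []int{16, 256, 2 << 10, 8 << 10, 1 << 20, 8 << 20} {
		fmt.Printf("%8d bytes -> %s\n", n, variantFor(n))
	}
}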
diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go
new file mode 100644
index 000000000..47bac7423
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_best.go
@@ -0,0 +1,796 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+)
+
+// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBest(dst, src []byte, dict *Dict) (d int) {
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 19
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 16
+ maxSTableSize = 1 << sTableBits
+
+ inputMargin = 8 + 2
+
+ debug = false
+ )
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ sLimitDict := len(src) - inputMargin
+ if sLimitDict > MaxDictSrcOffset-inputMargin {
+ sLimitDict = MaxDictSrcOffset - inputMargin
+ }
+
+ var lTable [maxLTableSize]uint64
+ var sTable [maxSTableSize]uint64
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ repeat := 1
+ if dict != nil {
+ dict.initBest()
+ s = 0
+ repeat = len(dict.dict) - dict.repeat
+ }
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ const lowbitMask = 0xffffffff
+ getCur := func(x uint64) int {
+ return int(x & lowbitMask)
+ }
+ getPrev := func(x uint64) int {
+ return int(x >> 32)
+ }
+ const maxSkip = 64
+
+ for {
+ type match struct {
+ offset int
+ s int
+ length int
+ score int
+ rep, dict bool
+ }
+ var best match
+ for {
+ // Next src position to check
+ nextS := (s-nextEmit)>>8 + 1
+ if nextS > maxSkip {
+ nextS = s + maxSkip
+ } else {
+ nextS += s
+ }
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ if dict != nil && s >= MaxDictSrcOffset {
+ dict = nil
+ if repeat > s {
+ repeat = math.MinInt32
+ }
+ }
+ hashL := hash8(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL := lTable[hashL]
+ candidateS := sTable[hashS]
+
+ score := func(m match) int {
+				// Matches that start later are penalized, since the bytes before them must be emitted as literals.
+ score := m.length - m.s
+ if nextEmit == m.s {
+ // If we do not have to emit literals, we save 1 byte
+ score++
+ }
+ offset := m.s - m.offset
+ if m.rep {
+ return score - emitRepeatSize(offset, m.length)
+ }
+ return score - emitCopySize(offset, m.length)
+ }
+
+ matchAt := func(offset, s int, first uint32, rep bool) match {
+ if best.length != 0 && best.s-best.offset == s-offset {
+ // Don't retest if we have the same offset.
+ return match{offset: offset, s: s}
+ }
+ if load32(src, offset) != first {
+ return match{offset: offset, s: s}
+ }
+ m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
+ s += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ m.length -= offset
+ m.score = score(m)
+ if m.score <= -m.s {
+ // Eliminate if no savings, we might find a better one.
+ m.length = 0
+ }
+ return m
+ }
+ matchDict := func(candidate, s int, first uint32, rep bool) match {
+ if s >= MaxDictSrcOffset {
+ return match{offset: candidate, s: s}
+ }
+				// Calculate offset as if dict and src were one contiguous array
+ offset := -len(dict.dict) + candidate
+ if best.length != 0 && best.s-best.offset == s-offset && !rep {
+ // Don't retest if we have the same offset.
+ return match{offset: offset, s: s}
+ }
+
+ if load32(dict.dict, candidate) != first {
+ return match{offset: offset, s: s}
+ }
+ m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true}
+ s += 4
+ if !rep {
+ for s < sLimitDict && m.length < len(dict.dict) {
+ if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
+ if src[s] == dict.dict[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ } else {
+ for s < len(src) && m.length < len(dict.dict) {
+ if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
+ if src[s] == dict.dict[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ }
+ m.length -= candidate
+ m.score = score(m)
+ if m.score <= -m.s {
+ // Eliminate if no savings, we might find a better one.
+ m.length = 0
+ }
+ return m
+ }
+
+ bestOf := func(a, b match) match {
+ if b.length == 0 {
+ return a
+ }
+ if a.length == 0 {
+ return b
+ }
+ as := a.score + b.s
+ bs := b.score + a.s
+ if as >= bs {
+ return a
+ }
+ return b
+ }
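+ // Note: comparing a.score+b.s against b.score+a.s is equivalent to comparing
+ // a.score-a.s against b.score-b.s, so a match that starts later is penalized
+ // once more on top of the start penalty already included in its score.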
+
+ if s > 0 {
+ best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
+ }
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false))
+ }
+ {
+ if (dict == nil || repeat <= s) && repeat > 0 {
+ best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
+ } else if s-repeat < -4 && dict != nil {
+ candidate := len(dict.dict) - (repeat - s)
+ best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
+ candidate++
+ best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true))
+ }
+
+ if best.length > 0 {
+ hashS := hash4(cv>>8, sTableBits)
+ // s+1
+ nextShort := sTable[hashS]
+ s := s + 1
+ cv := load64(src, s)
+ hashL := hash8(cv, lTableBits)
+ nextLong := lTable[hashL]
+ best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
+
+ // Dict at + 1
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ }
+
+ // s+2
+ if true {
+ hashS := hash4(cv>>8, sTableBits)
+
+ nextShort = sTable[hashS]
+ s++
+ cv = load64(src, s)
+ hashL := hash8(cv, lTableBits)
+ nextLong = lTable[hashL]
+
+ if (dict == nil || repeat <= s) && repeat > 0 {
+ // Repeat at + 2
+ best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true))
+ } else if repeat-s > 4 && dict != nil {
+ candidate := len(dict.dict) - (repeat - s)
+ best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
+ }
+ best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
+
+ // Dict at +2
+ // Very small gain
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ }
+ }
+ // Search for a match at the end of the current best match and see if that is better.
+ // Allow some bytes at the beginning to mismatch; the sweet spot is around 1-2
+ // bytes, but it depends on the input.
+ // The skipped bytes are tested by the backwards extension later and are still
+ // picked up as part of the match if they happen to match.
+ const skipBeginning = 2
+ const skipEnd = 1
+ if sAt := best.s + best.length - skipEnd; sAt < sLimit {
+
+ sBack := best.s + skipBeginning - skipEnd
+ backL := best.length - skipBeginning
+ // Load initial values
+ cv = load64(src, sBack)
+
+ // Grab candidates...
+ next := lTable[hash8(load64(src, sAt), lTableBits)]
+
+ if checkAt := getCur(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ if checkAt := getPrev(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ // Disabled: Extremely small gain
+ if false {
+ next = sTable[hash4(load64(src, sAt), sTableBits)]
+ if checkAt := getCur(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ if checkAt := getPrev(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ }
+ }
+ }
+ }
+
+ // Update table
+ lTable[hashL] = uint64(s) | candidateL<<32
+ sTable[hashS] = uint64(s) | candidateS<<32
+
+ if best.length > 0 {
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards, not needed for repeats...
+ s = best.s
+ if !best.rep && !best.dict {
+ for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
+ best.offset--
+ best.length++
+ s--
+ }
+ }
+ if false && best.offset >= s {
+ panic(fmt.Errorf("t %d >= s %d", best.offset, s))
+ }
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := s - best.offset
+ s += best.length
+
+ if offset > 65535 && s-base <= 5 && !best.rep {
+ // Bail if the match is equal to or worse than the cost of encoding it.
+ s = best.s + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+ if debug && nextEmit != base {
+ fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base)
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if best.rep {
+ if nextEmit > 0 || best.dict {
+ if debug {
+ fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], offset, best.length)
+ } else {
+ // First match without dict cannot be a repeat.
+ if debug {
+ fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
+ d += emitCopy(dst[d:], offset, best.length)
+ }
+ } else {
+ if debug {
+ fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
+ d += emitCopy(dst[d:], offset, best.length)
+ }
+ repeat = offset
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+ // Fill tables...
+ for i := best.s + 1; i < s; i++ {
+ cv0 := load64(src, i)
+ long0 := hash8(cv0, lTableBits)
+ short0 := hash4(cv0, sTableBits)
+ lTable[long0] = uint64(i) | lTable[long0]<<32
+ sTable[short0] = uint64(i) | sTable[short0]<<32
+ }
+ cv = load64(src, s)
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", len(src)-nextEmit, "literals")
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBestSnappy(dst, src []byte) (d int) {
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 19
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 16
+ maxSTableSize = 1 << sTableBits
+
+ inputMargin = 8 + 2
+ )
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ var lTable [maxLTableSize]uint64
+ var sTable [maxSTableSize]uint64
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+ const lowbitMask = 0xffffffff
+ getCur := func(x uint64) int {
+ return int(x & lowbitMask)
+ }
+ getPrev := func(x uint64) int {
+ return int(x >> 32)
+ }
+ const maxSkip = 64
+
+ for {
+ type match struct {
+ offset int
+ s int
+ length int
+ score int
+ }
+ var best match
+ for {
+ // Next src position to check
+ nextS := (s-nextEmit)>>8 + 1
+ if nextS > maxSkip {
+ nextS = s + maxSkip
+ } else {
+ nextS += s
+ }
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash8(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL := lTable[hashL]
+ candidateS := sTable[hashS]
+
+ score := func(m match) int {
+ // Matches that start further forward are penalized, since the bytes before the match must be emitted as literals.
+ score := m.length - m.s
+ if nextEmit == m.s {
+ // If we do not have to emit literals, we save 1 byte
+ score++
+ }
+ offset := m.s - m.offset
+
+ return score - emitCopyNoRepeatSize(offset, m.length)
+ }
+
+ matchAt := func(offset, s int, first uint32) match {
+ if best.length != 0 && best.s-best.offset == s-offset {
+ // Don't retest if we have the same offset.
+ return match{offset: offset, s: s}
+ }
+ if load32(src, offset) != first {
+ return match{offset: offset, s: s}
+ }
+ m := match{offset: offset, s: s, length: 4 + offset}
+ s += 4
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ m.length -= offset
+ m.score = score(m)
+ if m.score <= -m.s {
+ // Eliminate if there are no savings; we might find a better one.
+ m.length = 0
+ }
+ return m
+ }
+
+ bestOf := func(a, b match) match {
+ if b.length == 0 {
+ return a
+ }
+ if a.length == 0 {
+ return b
+ }
+ as := a.score + b.s
+ bs := b.score + a.s
+ if as >= bs {
+ return a
+ }
+ return b
+ }
+
+ best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv)))
+ best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv)))
+
+ {
+ best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
+ if best.length > 0 {
+ // s+1
+ nextShort := sTable[hash4(cv>>8, sTableBits)]
+ s := s + 1
+ cv := load64(src, s)
+ nextLong := lTable[hash8(cv, lTableBits)]
+ best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
+ best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
+ // Repeat at + 2
+ best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
+
+ // s+2
+ if true {
+ nextShort = sTable[hash4(cv>>8, sTableBits)]
+ s++
+ cv = load64(src, s)
+ nextLong = lTable[hash8(cv, lTableBits)]
+ best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
+ best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
+ }
+ // Search for a match at best match end, see if that is better.
+ if sAt := best.s + best.length; sAt < sLimit {
+ sBack := best.s
+ backL := best.length
+ // Load initial values
+ cv = load64(src, sBack)
+ // Search for mismatch
+ next := lTable[hash8(load64(src, sAt), lTableBits)]
+ //next := sTable[hash4(load64(src, sAt), sTableBits)]
+
+ if checkAt := getCur(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
+ }
+ if checkAt := getPrev(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
+ }
+ }
+ }
+ }
+
+ // Update table
+ lTable[hashL] = uint64(s) | candidateL<<32
+ sTable[hashS] = uint64(s) | candidateS<<32
+
+ if best.length > 0 {
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards, not needed for repeats...
+ s = best.s
+ if true {
+ for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
+ best.offset--
+ best.length++
+ s--
+ }
+ }
+ if false && best.offset >= s {
+ panic(fmt.Errorf("t %d >= s %d", best.offset, s))
+ }
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := s - best.offset
+
+ s += best.length
+
+ if offset > 65535 && s-base <= 5 {
+ // Bail if the match is equal to or worse than the cost of encoding it.
+ s = best.s + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ d += emitCopyNoRepeat(dst[d:], offset, best.length)
+ repeat = offset
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+ // Fill tables...
+ for i := best.s + 1; i < s; i++ {
+ cv0 := load64(src, i)
+ long0 := hash8(cv0, lTableBits)
+ short0 := hash4(cv0, sTableBits)
+ lTable[long0] = uint64(i) | lTable[long0]<<32
+ sTable[short0] = uint64(i) | sTable[short0]<<32
+ }
+ cv = load64(src, s)
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// emitCopySize returns the number of bytes needed to encode a copy with the given offset and length.
+//
+// It assumes that:
+//
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
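+//
+// For instance (illustrative values), offset 1000 with length 20 needs the
+// 3-byte form, while offset 1000 with length 8 fits the 2-byte form.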
+func emitCopySize(offset, length int) int {
+ if offset >= 65536 {
+ i := 0
+ if length > 64 {
+ length -= 64
+ if length >= 4 {
+ // Emit remaining as repeats
+ return 5 + emitRepeatSize(offset, length)
+ }
+ i = 5
+ }
+ if length == 0 {
+ return i
+ }
+ return i + 5
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ if offset < 2048 {
+ // Emit 8 bytes, then rest as repeats...
+ return 2 + emitRepeatSize(offset, length-8)
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return 3 + emitRepeatSize(offset, length-60)
+ }
+ if length >= 12 || offset >= 2048 {
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ return 2
+}
+
+// emitCopyNoRepeatSize returns the number of bytes needed to encode a copy with the given offset and length without using repeat codes.
+//
+// It assumes that:
+//
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+func emitCopyNoRepeatSize(offset, length int) int {
+ if offset >= 65536 {
+ return 5 + 5*(length/64)
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // The remainder is counted as additional 3-byte copies; at least 4 bytes remain.
+ return 3 + 3*(length/60)
+ }
+ if length >= 12 || offset >= 2048 {
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ return 2
+}
+
+// emitRepeatSize returns the number of bytes required to encode a repeat.
+// Length must be at least 4 and < 1<<24
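+//
+// For instance (illustrative values), length 6 needs 2 bytes and length 20
+// needs 3 bytes, matching what emitRepeat writes for the same arguments.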
+func emitRepeatSize(offset, length int) int {
+ // Repeat offset, make length cheaper
+ if length <= 4+4 || (length < 8+4 && offset < 2048) {
+ return 2
+ }
+ if length < (1<<8)+4+4 {
+ return 3
+ }
+ if length < (1<<16)+(1<<8)+4 {
+ return 4
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= (1 << 16) - 4
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ }
+ if left > 0 {
+ return 5 + emitRepeatSize(offset, left)
+ }
+ return 5
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go
new file mode 100644
index 000000000..544cb1e17
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_better.go
@@ -0,0 +1,1106 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "fmt"
+ "math/bits"
+)
+
+// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4(u uint64, h uint8) uint32 {
+ const prime4bytes = 2654435761
+ return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash5(u uint64, h uint8) uint32 {
+ const prime5bytes = 889523592379
+ return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+ const prime7bytes = 58295818150454627
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+ const prime8bytes = 0xcf1bbcdcb7a56463
+ return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
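+
+// The hash functions above are multiplicative: the input bytes are multiplied
+// by a large odd constant and only the top h bits of the product are kept, so
+// small differences in the input are spread into the selected bits.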
+
+// encodeBlockBetterGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterGo(dst, src []byte) (d int) {
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 17
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 6
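+ // In other words, encoding is abandoned (0 is returned) unless the output
+ // ends up at least len(src)/32+6 bytes shorter than the input.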
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We initialize repeat to 0, so we never match on first attempt
+ repeat := 0
+
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = s + (s-nextEmit)>>7 + 1
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If long matches at least 8 bytes, use that.
+ if cv == valLong {
+ break
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ break
+ }
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ // Minimum length of a repeat. Tested with various values.
+ // While 4-5 offers improvements in some cases, 6 reduces
+ // regressions significantly.
+ const wantRepeatBytes = 6
+ const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
+ if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + wantRepeatBytes + checkRep
+ s += wantRepeatBytes + checkRep
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidate] {
+ s++
+ candidate++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ // The long candidate matches at least 4 bytes and was hashed on 7, so it likely extends; take it.
+ if uint32(cv) == uint32(valLong) {
+ break
+ }
+
+ // Check our short candidate
+ if uint32(cv) == uint32(valShort) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ break
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal to or worse than the cost of encoding it.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if repeat == offset {
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ d += emitCopy(dst[d:], offset, s-base)
+ repeat = offset
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ // lTable could be postponed, but very minor difference.
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index large values sparsely in between.
+ // We do two runs starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
+ index0 += 2
+ index2 += 2
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 16
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We initialize repeat to 0, so we never match on first attempt
+ repeat := 0
+ const maxSkip = 100
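+ // The step between probed positions grows by one for every 128 bytes since
+ // the last emit and is capped at maxSkip, so incompressible stretches are
+ // skipped over quickly.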
+
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = (s-nextEmit)>>7 + 1
+ if nextS > maxSkip {
+ nextS = s + maxSkip
+ } else {
+ nextS += s
+ }
+
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ if uint32(cv) == load32(src, candidateL) {
+ break
+ }
+
+ // Check our short candidate
+ if uint32(cv) == load32(src, candidateS) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ break
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal to or worse than the cost of encoding it.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ d += emitCopyNoRepeat(dst[d:], offset, s-base)
+ repeat = offset
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index large values sparsely in between.
+ // We do two runs starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
+ index0 += 2
+ index2 += 2
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) {
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 17
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+
+ maxAhead = 8 // maximum bytes ahead without checking sLimit
+
+ debug = false
+ )
+
+ sLimit := len(src) - inputMargin
+ if sLimit > MaxDictSrcOffset-maxAhead {
+ sLimit = MaxDictSrcOffset - maxAhead
+ }
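+ // While the dictionary can still be referenced, the search is confined to
+ // the first MaxDictSrcOffset-maxAhead bytes; once s passes that point the
+ // code falls through to the regular, dict-free loop below with the full
+ // sLimit restored.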
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ dict.initBetter()
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 0
+ cv := load64(src, s)
+
+ // Initialize repeat from the repeat offset stored in the dictionary.
+ repeat := len(dict.dict) - dict.repeat
+
+ // While in dict
+searchDict:
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = s + (s-nextEmit)>>7 + 1
+ if nextS > sLimit {
+ break searchDict
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ dictL := int(dict.betterTableLong[hashL])
+ dictS := int(dict.betterTableShort[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If long matches at least 8 bytes, use that.
+ if s != 0 {
+ if cv == valLong {
+ goto emitMatch
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ goto emitMatch
+ }
+ }
+
+ // Check dict repeat.
+ if repeat >= s+4 {
+ candidate := len(dict.dict) - repeat + s
+ if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) {
+ // Extend back
+ base := s
+ for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ s += 4
+ candidate += 4
+ for candidate < len(dict.dict)-8 && s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ d += emitRepeat(dst[d:], repeat, s-base)
+ if debug {
+ fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ cv = load64(src, s)
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
+ continue
+ }
+ }
+ // Don't try to find a match at s == 0.
+ if s == 0 {
+ cv = load64(src, nextS)
+ s = nextS
+ continue
+ }
+
+ // The long candidate matches at least 4 bytes and was hashed on 7, so it likely extends; take it.
+ if uint32(cv) == uint32(valLong) {
+ goto emitMatch
+ }
+
+ // Long dict...
+ if uint32(cv) == load32(dict.dict, dictL) {
+ candidateL = dictL
+ goto emitDict
+ }
+
+ // Check our short candidate
+ if uint32(cv) == uint32(valShort) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ goto emitMatch
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ goto emitMatch
+ }
+ if uint32(cv) == load32(dict.dict, dictS) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ goto emitMatch
+ }
+ candidateL = dictS
+ goto emitDict
+ }
+ cv = load64(src, nextS)
+ s = nextS
+ }
+ emitDict:
+ {
+ if debug {
+ if load32(dict.dict, candidateL) != load32(src, s) {
+ panic("dict emit mismatch")
+ }
+ }
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ offset := s + (len(dict.dict)) - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if repeat == offset {
+ if debug {
+ fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
+ }
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ if debug {
+ fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
+ }
+ // Matches longer than 64 are split.
+ if s <= sLimit || s-base < 8 {
+ d += emitCopy(dst[d:], offset, s-base)
+ } else {
+ // Split to ensure we don't start a copy within the next block.
+ d += emitCopy(dst[d:], offset, 4)
+ d += emitRepeat(dst[d:], offset, s-base-4)
+ }
+ repeat = offset
+ }
+ if false {
+ // Validate match.
+ if s <= candidateL {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := dict.dict[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index every second long position in between.
+ for index0 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ index0 += 2
+ index1 -= 2
+ }
+ }
+ continue
+ }
+ emitMatch:
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal to or worse than the cost of encoding it.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ if repeat == offset {
+ if debug {
+ fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s)
+ }
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ if debug {
+ fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s)
+ }
+ d += emitCopy(dst[d:], offset, s-base)
+ repeat = offset
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index large values sparsely in between.
+ // We do two runs starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
+ index0 += 2
+ index2 += 2
+ }
+ }
+
+ // Search without dict:
+ if repeat > s {
+ repeat = 0
+ }
+
+ // No more dict
+ sLimit = len(src) - inputMargin
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ if debug {
+ fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
+ }
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = s + (s-nextEmit)>>7 + 1
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If long matches at least 8 bytes, use that.
+ if cv == valLong {
+ break
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ break
+ }
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ // Minimum length of a repeat. Tested with various values.
+ // While 4-5 offers improvements in some cases, 6 reduces
+ // regressions significantly.
+ const wantRepeatBytes = 6
+ const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
+ if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + wantRepeatBytes + checkRep
+ s += wantRepeatBytes + checkRep
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidate] {
+ s++
+ candidate++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ // The long candidate matches at least 4 bytes and was hashed on 7, so it likely extends; take it.
+ if uint32(cv) == uint32(valLong) {
+ break
+ }
+
+ // Check our short candidate
+ if uint32(cv) == uint32(valShort) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ break
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal to or worse than the cost of encoding it.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if repeat == offset {
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ d += emitCopy(dst[d:], offset, s-base)
+ repeat = offset
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index large values sparsely in between.
+ // We do two runs starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
+ index0 += 2
+ index2 += 2
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
new file mode 100644
index 000000000..6b393c34d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -0,0 +1,729 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package s2
+
+import (
+ "bytes"
+ "math/bits"
+)
+
+const hasAmd64Asm = false
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src))
+func encodeBlock(dst, src []byte) (d int) {
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBlockGo(dst, src)
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockBetter(dst, src []byte) (d int) {
+ return encodeBlockBetterGo(dst, src)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+ return encodeBlockBetterSnappyGo(dst, src)
+}
+
+// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockSnappy(dst, src []byte) (d int) {
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBlockSnappyGo(dst, src)
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
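+//
+// The tag byte stores tagLiteral in its low two bits. Literals of up to 60
+// bytes store len(lit)-1 in the upper six bits of the tag; longer literals use
+// the codes 60-63 followed by 1-4 little-endian bytes holding len(lit)-1.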
+func emitLiteral(dst, lit []byte) int {
+ if len(lit) == 0 {
+ return 0
+ }
+ const num = 63<<2 | tagLiteral
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[1] = uint8(n)
+ dst[0] = 60<<2 | tagLiteral
+ i = 2
+ case n < 1<<16:
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 61<<2 | tagLiteral
+ i = 3
+ case n < 1<<24:
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 62<<2 | tagLiteral
+ i = 4
+ default:
+ dst[4] = uint8(n >> 24)
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 63<<2 | tagLiteral
+ i = 5
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitRepeat writes a repeat chunk and returns the number of bytes written.
+// Length must be at least 4 and < 1<<24
+func emitRepeat(dst []byte, offset, length int) int {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ return 2
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ return 2
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ return 3
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ return 4
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ return 5 + emitRepeat(dst[5:], offset, left)
+ }
+ return 5
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
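+//
+// Offsets below 65536 use the 2- and 3-byte tagCopy1/tagCopy2 forms; larger
+// offsets use the 5-byte tagCopy4 form. Lengths above 64 are split, with the
+// remainder usually emitted through emitRepeat.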
+func emitCopy(dst []byte, offset, length int) int {
+ if offset >= 65536 {
+ i := 0
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 5 bytes.
+ dst[4] = uint8(offset >> 24)
+ dst[3] = uint8(offset >> 16)
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 63<<2 | tagCopy4
+ length -= 64
+ if length >= 4 {
+ // Emit remaining as repeats
+ return 5 + emitRepeat(dst[5:], offset, length)
+ }
+ i = 5
+ }
+ if length == 0 {
+ return i
+ }
+ // Emit a copy, offset encoded as 4 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy4
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ dst[i+3] = uint8(offset >> 16)
+ dst[i+4] = uint8(offset >> 24)
+ return i + 5
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return off + emitRepeat(dst[off:], offset, length)
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ return 2
+}
+
+// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+func emitCopyNoRepeat(dst []byte, offset, length int) int {
+ if offset >= 65536 {
+ i := 0
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 5 bytes.
+ dst[4] = uint8(offset >> 24)
+ dst[3] = uint8(offset >> 16)
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 63<<2 | tagCopy4
+ length -= 64
+ if length >= 4 {
+ // Emit the remainder as additional copies.
+ return 5 + emitCopyNoRepeat(dst[5:], offset, length)
+ }
+ i = 5
+ }
+ if length == 0 {
+ return i
+ }
+ // Emit a copy, offset encoded as 4 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy4
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ dst[i+3] = uint8(offset >> 16)
+ dst[i+4] = uint8(offset >> 24)
+ return i + 5
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit the remainder as additional copies (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ // Emit the remainder as additional copies; at least 4 bytes remain.
+ return 3 + emitCopyNoRepeat(dst[3:], offset, length)
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ return 2
+}
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+// len(a) <= len(b)
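+//
+// For example, matchLen([]byte("gopher"), []byte("gophers")) returns 6.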
+func matchLen(a []byte, b []byte) int {
+ b = b[:len(a)]
+ var checked int
+ if len(a) > 4 {
+ // Try 4 bytes first
+ if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+ return bits.TrailingZeros32(diff) >> 3
+ }
+ // Switch to 8 byte matching.
+ checked = 4
+ a = a[4:]
+ b = b[4:]
+ for len(a) >= 8 {
+ b = b[:len(a)]
+ if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+ return checked + (bits.TrailingZeros64(diff) >> 3)
+ }
+ checked += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ }
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ return int(i) + checked
+ }
+ }
+ return len(a) + checked
+}
+
+// calcBlockSize returns the encoded size of src without writing any output.
+// len(src) must be > inputMargin.
+func calcBlockSize(src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 13
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteralSize(src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteralSize(src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ if false {
+ // Validate match.
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteralSize(src[nextEmit:])
+ }
+ return d
+}
+
+// calcBlockSizeSmall is calcBlockSize with a smaller hash table.
+// len(src) must be > inputMargin.
+func calcBlockSizeSmall(src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 9
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteralSize(src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteralSize(src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ if false {
+ // Validate match.
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Bail if we have already exceeded the output size limit.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteralSize(src[nextEmit:])
+ }
+ return d
+}
+
+// emitLiteralSize returns the number of bytes needed to emit the literal lit:
+// the literal tag header (1 to 5 bytes) plus len(lit) bytes of payload.
+//
+// It assumes that:
+//
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+func emitLiteralSize(lit []byte) int {
+ if len(lit) == 0 {
+ return 0
+ }
+ switch {
+ case len(lit) <= 60:
+ return len(lit) + 1
+ case len(lit) <= 1<<8:
+ return len(lit) + 2
+ case len(lit) <= 1<<16:
+ return len(lit) + 3
+ case len(lit) <= 1<<24:
+ return len(lit) + 4
+ default:
+ return len(lit) + 5
+ }
+}
+
+func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4BlockAsm should be unreachable")
+}
+
+func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4BlockSnappyAsm should be unreachable")
+}
+
+func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4sBlockAsm should be unreachable")
+}
+
+func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4sBlockSnappyAsm should be unreachable")
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
new file mode 100644
index 000000000..297e41501
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -0,0 +1,228 @@
+// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+package s2
+
+func _dummy_()
+
+// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm(dst []byte, src []byte) int
+
+// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4194304 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm4MB(dst []byte, src []byte) int
+
+// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 16383 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm12B(dst []byte, src []byte) int
+
+// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4095 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm10B(dst []byte, src []byte) int
+
+// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 511 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm8B(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4194304 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 16383 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4095 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 511 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 65535 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 16383 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4095 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 511 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 65535 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 16383 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4095 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 511 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+
+// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func calcBlockSize(src []byte) int
+
+// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 1024 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func calcBlockSizeSmall(src []byte) int
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes with margin of 0 bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+//
+//go:noescape
+func emitLiteral(dst []byte, lit []byte) int
+
+// emitRepeat writes a repeat chunk and returns the number of bytes written.
+// Length must be at least 4 and < 1<<32
+//
+//go:noescape
+func emitRepeat(dst []byte, offset int, length int) int
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+//
+//go:noescape
+func emitCopy(dst []byte, offset int, length int) int
+
+// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+//
+//go:noescape
+func emitCopyNoRepeat(dst []byte, offset int, length int) int
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+// len(a) <= len(b)
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
+
+// cvtLZ4Block converts an LZ4 block to S2
+//
+//go:noescape
+func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4sBlock converts an LZ4s block to S2
+//
+//go:noescape
+func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4Block converts an LZ4 block to Snappy
+//
+//go:noescape
+func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4sBlock converts an LZ4s block to Snappy
+//
+//go:noescape
+func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
new file mode 100644
index 000000000..2ff5b3340
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -0,0 +1,21277 @@
+// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func _dummy_()
+TEXT ·_dummy_(SB), $0
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+ RET
+
+// func encodeBlockAsm(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm(SB), $65560-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm
+
+repeat_extend_back_loop_encodeBlockAsm:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm
+
+repeat_extend_back_end_encodeBlockAsm:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 5(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeBlockAsm:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_repeat_emit_encodeBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_repeat_emit_encodeBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm
+
+four_bytes_repeat_emit_encodeBlockAsm:
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm
+
+three_bytes_repeat_emit_encodeBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm
+
+two_bytes_repeat_emit_encodeBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm
+ JMP memmove_long_repeat_emit_encodeBlockAsm
+
+one_byte_repeat_emit_encodeBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm
+
+memmove_long_repeat_emit_encodeBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_repeat_extend_encodeBlockAsm:
+ CMPL R8, $0x10
+ JB matchlen_match8_repeat_extend_encodeBlockAsm
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm
+ XORQ 8(BX)(R11*1), R12
+ JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm
+
+matchlen_bsf_16repeat_extend_encodeBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm
+
+matchlen_match8_repeat_extend_encodeBlockAsm:
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ JMP matchlen_match4_repeat_extend_encodeBlockAsm
+
+matchlen_bsf_8_repeat_extend_encodeBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm
+
+matchlen_match4_repeat_extend_encodeBlockAsm:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm
+ JB repeat_extend_forward_end_encodeBlockAsm
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm
+
+matchlen_match1_repeat_extend_encodeBlockAsm:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm
+
+ // emitRepeat
+emit_repeat_again_match_repeat_encodeBlockAsm:
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_match_repeat_encodeBlockAsm
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm
+ CMPL BX, $0x00010100
+ JB repeat_four_match_repeat_encodeBlockAsm
+ CMPL BX, $0x0100ffff
+ JB repeat_five_match_repeat_encodeBlockAsm
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_repeat_encodeBlockAsm
+
+repeat_five_match_repeat_encodeBlockAsm:
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_match_repeat_encodeBlockAsm:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_match_repeat_encodeBlockAsm:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_match_repeat_encodeBlockAsm:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_match_repeat_encodeBlockAsm:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_as_copy_encodeBlockAsm:
+ // emitCopy
+ CMPL SI, $0x00010000
+ JB two_byte_offset_repeat_as_copy_encodeBlockAsm
+ CMPL BX, $0x40
+ JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm
+ MOVB $0xff, (AX)
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JB four_bytes_remain_repeat_as_copy_encodeBlockAsm
+
+ // emitRepeat
+emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy:
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy
+ CMPL BX, $0x0100ffff
+ JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy
+
+repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy:
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+four_bytes_remain_repeat_as_copy_encodeBlockAsm:
+ TESTL BX, BX
+ JZ repeat_end_emit_encodeBlockAsm
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+two_byte_offset_repeat_as_copy_encodeBlockAsm:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ MOVL SI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL BX, $0x0100ffff
+ JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+long_offset_short_repeat_as_copy_encodeBlockAsm:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ CMPL BX, $0x0100ffff
+ JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short
+
+repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm
+
+no_repeat_found_encodeBlockAsm:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm
+
+candidate3_match_encodeBlockAsm:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm
+
+candidate2_match_encodeBlockAsm:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm
+
+match_extend_back_loop_encodeBlockAsm:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm
+ JMP match_extend_back_loop_encodeBlockAsm
+
+match_extend_back_end_encodeBlockAsm:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm
+ CMPL DI, $0x00010000
+ JB three_bytes_match_emit_encodeBlockAsm
+ CMPL DI, $0x01000000
+ JB four_bytes_match_emit_encodeBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_encodeBlockAsm
+
+four_bytes_match_emit_encodeBlockAsm:
+ MOVL DI, R9
+ SHRL $0x10, R9
+ MOVB $0xf8, (AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeBlockAsm
+
+three_bytes_match_emit_encodeBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm
+
+two_bytes_match_emit_encodeBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm
+ JMP memmove_long_match_emit_encodeBlockAsm
+
+one_byte_match_emit_encodeBlockAsm:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm
+
+memmove_long_match_emit_encodeBlockAsm:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm:
+match_nolit_loop_encodeBlockAsm:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeBlockAsm:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeBlockAsm
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeBlockAsm
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeBlockAsm
+
+matchlen_bsf_16match_nolit_encodeBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeBlockAsm
+
+matchlen_match8_match_nolit_encodeBlockAsm:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeBlockAsm
+
+matchlen_bsf_8_match_nolit_encodeBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm
+
+matchlen_match4_match_nolit_encodeBlockAsm:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm
+ JB match_nolit_end_encodeBlockAsm
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm
+
+matchlen_match1_match_nolit_encodeBlockAsm:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBlockAsm
+ CMPL R9, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBlockAsm
+ MOVB $0xff, (AX)
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JB four_bytes_remain_match_nolit_encodeBlockAsm
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy:
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm_emit_copy
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm_emit_copy
+ CMPL R9, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBlockAsm_emit_copy
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy:
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+four_bytes_remain_match_nolit_encodeBlockAsm:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_encodeBlockAsm
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+two_byte_offset_match_nolit_encodeBlockAsm:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ MOVL BX, DI
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL R9, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+long_offset_short_match_nolit_encodeBlockAsm:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short:
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short
+ CMPL R9, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy_short:
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy_short:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+two_byte_offset_short_match_nolit_encodeBlockAsm:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBlockAsm
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+emit_copy_three_match_nolit_encodeBlockAsm:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm
+ INCL CX
+ JMP search_loop_encodeBlockAsm
+
+emit_remainder_encodeBlockAsm:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm
+
+four_bytes_emit_remainder_encodeBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm
+
+three_bytes_emit_remainder_encodeBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm
+
+two_bytes_emit_remainder_encodeBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm
+ JMP memmove_long_emit_remainder_encodeBlockAsm
+
+one_byte_emit_remainder_encodeBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm
+
+memmove_long_emit_remainder_encodeBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBlockAsm4MB(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm4MB(SB), $65560-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm4MB:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm4MB
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm4MB:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm4MB
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm4MB
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm4MB
+
+repeat_extend_back_loop_encodeBlockAsm4MB:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm4MB
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm4MB
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm4MB
+
+repeat_extend_back_end_encodeBlockAsm4MB:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 4(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeBlockAsm4MB:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm4MB
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm4MB
+ CMPL BX, $0x00010000
+ JB three_bytes_repeat_emit_encodeBlockAsm4MB
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm4MB
+
+three_bytes_repeat_emit_encodeBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm4MB
+
+two_bytes_repeat_emit_encodeBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm4MB
+ JMP memmove_long_repeat_emit_encodeBlockAsm4MB
+
+one_byte_repeat_emit_encodeBlockAsm4MB:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm4MB:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB
+
+memmove_long_repeat_emit_encodeBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm4MB:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB:
+ CMPL R8, $0x10
+ JB matchlen_match8_repeat_extend_encodeBlockAsm4MB
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB
+ XORQ 8(BX)(R11*1), R12
+ JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm4MB
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB
+
+matchlen_bsf_16repeat_extend_encodeBlockAsm4MB:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_match8_repeat_extend_encodeBlockAsm4MB:
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm4MB
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ JMP matchlen_match4_repeat_extend_encodeBlockAsm4MB
+
+matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_match4_repeat_extend_encodeBlockAsm4MB:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm4MB
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm4MB:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ JB repeat_extend_forward_end_encodeBlockAsm4MB
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_match1_repeat_extend_encodeBlockAsm4MB:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm4MB
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm4MB:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm4MB
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm4MB
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_match_repeat_encodeBlockAsm4MB
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm4MB
+ CMPL BX, $0x00010100
+ JB repeat_four_match_repeat_encodeBlockAsm4MB
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_match_repeat_encodeBlockAsm4MB:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_match_repeat_encodeBlockAsm4MB:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_match_repeat_encodeBlockAsm4MB:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_match_repeat_encodeBlockAsm4MB:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_as_copy_encodeBlockAsm4MB:
+ // emitCopy
+ CMPL SI, $0x00010000
+ JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB
+ CMPL BX, $0x40
+ JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
+ MOVB $0xff, (AX)
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB:
+ TESTL BX, BX
+ JZ repeat_end_emit_encodeBlockAsm4MB
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+two_byte_offset_repeat_as_copy_encodeBlockAsm4MB:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+long_offset_short_repeat_as_copy_encodeBlockAsm4MB:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm4MB:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm4MB:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm4MB
+
+no_repeat_found_encodeBlockAsm4MB:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm4MB
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm4MB
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm4MB
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm4MB
+
+candidate3_match_encodeBlockAsm4MB:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm4MB
+
+candidate2_match_encodeBlockAsm4MB:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm4MB:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm4MB
+
+match_extend_back_loop_encodeBlockAsm4MB:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm4MB
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm4MB
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm4MB
+ JMP match_extend_back_loop_encodeBlockAsm4MB
+
+match_extend_back_end_encodeBlockAsm4MB:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 4(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm4MB:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm4MB
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm4MB
+ CMPL DI, $0x00010000
+ JB three_bytes_match_emit_encodeBlockAsm4MB
+ MOVL DI, R9
+ SHRL $0x10, R9
+ MOVB $0xf8, (AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeBlockAsm4MB
+
+three_bytes_match_emit_encodeBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm4MB
+
+two_bytes_match_emit_encodeBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm4MB
+ JMP memmove_long_match_emit_encodeBlockAsm4MB
+
+one_byte_match_emit_encodeBlockAsm4MB:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm4MB:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm4MB:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm4MB
+
+memmove_long_match_emit_encodeBlockAsm4MB:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm4MB:
+match_nolit_loop_encodeBlockAsm4MB:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeBlockAsm4MB:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeBlockAsm4MB
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeBlockAsm4MB
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeBlockAsm4MB
+
+matchlen_bsf_16match_nolit_encodeBlockAsm4MB:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeBlockAsm4MB
+
+matchlen_match8_match_nolit_encodeBlockAsm4MB:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm4MB
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeBlockAsm4MB
+
+matchlen_bsf_8_match_nolit_encodeBlockAsm4MB:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBlockAsm4MB:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm4MB
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm4MB:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm4MB
+ JB match_nolit_end_encodeBlockAsm4MB
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm4MB
+
+matchlen_match1_match_nolit_encodeBlockAsm4MB:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm4MB
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm4MB:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBlockAsm4MB
+ CMPL R9, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB
+ MOVB $0xff, (AX)
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JB four_bytes_remain_match_nolit_encodeBlockAsm4MB
+
+ // emitRepeat
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+four_bytes_remain_match_nolit_encodeBlockAsm4MB:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_encodeBlockAsm4MB
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+two_byte_offset_match_nolit_encodeBlockAsm4MB:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm4MB
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+long_offset_short_match_nolit_encodeBlockAsm4MB:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+two_byte_offset_short_match_nolit_encodeBlockAsm4MB:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+emit_copy_three_match_nolit_encodeBlockAsm4MB:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm4MB:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm4MB
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm4MB:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm4MB
+ INCL CX
+ JMP search_loop_encodeBlockAsm4MB
+
+emit_remainder_encodeBlockAsm4MB:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 4(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm4MB:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm4MB
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm4MB
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBlockAsm4MB
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm4MB
+
+three_bytes_emit_remainder_encodeBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm4MB
+
+two_bytes_emit_remainder_encodeBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm4MB
+ JMP memmove_long_emit_remainder_encodeBlockAsm4MB
+
+one_byte_emit_remainder_encodeBlockAsm4MB:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm4MB:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm4MB:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB
+
+memmove_long_emit_remainder_encodeBlockAsm4MB:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm4MB:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm12B(SB), $16408-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000080, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm12B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm12B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm12B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm12B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x18, R10
+ IMULQ R8, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm12B
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm12B
+
+repeat_extend_back_loop_encodeBlockAsm12B:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm12B
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm12B
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm12B
+
+repeat_extend_back_end_encodeBlockAsm12B:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 3(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeBlockAsm12B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm12B
+ JB three_bytes_repeat_emit_encodeBlockAsm12B
+
+three_bytes_repeat_emit_encodeBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm12B
+
+two_bytes_repeat_emit_encodeBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm12B
+ JMP memmove_long_repeat_emit_encodeBlockAsm12B
+
+one_byte_repeat_emit_encodeBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm12B
+
+memmove_long_repeat_emit_encodeBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm12B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_repeat_extend_encodeBlockAsm12B:
+ CMPL R8, $0x10
+ JB matchlen_match8_repeat_extend_encodeBlockAsm12B
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B
+ XORQ 8(BX)(R11*1), R12
+ JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm12B
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm12B
+
+matchlen_bsf_16repeat_extend_encodeBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_match8_repeat_extend_encodeBlockAsm12B:
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm12B
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ JMP matchlen_match4_repeat_extend_encodeBlockAsm12B
+
+matchlen_bsf_8_repeat_extend_encodeBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeBlockAsm12B:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm12B
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm12B:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm12B
+ JB repeat_extend_forward_end_encodeBlockAsm12B
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_match1_repeat_extend_encodeBlockAsm12B:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm12B
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm12B:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm12B
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm12B
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_match_repeat_encodeBlockAsm12B
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm12B:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm12B
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_three_match_repeat_encodeBlockAsm12B:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_match_repeat_encodeBlockAsm12B:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_offset_match_repeat_encodeBlockAsm12B:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_as_copy_encodeBlockAsm12B:
+ // emitCopy
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm12B:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm12B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm12B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm12B
+
+no_repeat_found_encodeBlockAsm12B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm12B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm12B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm12B
+
+candidate3_match_encodeBlockAsm12B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm12B
+
+candidate2_match_encodeBlockAsm12B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm12B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm12B
+
+match_extend_back_loop_encodeBlockAsm12B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm12B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm12B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm12B
+ JMP match_extend_back_loop_encodeBlockAsm12B
+
+match_extend_back_end_encodeBlockAsm12B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm12B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm12B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm12B
+ JB three_bytes_match_emit_encodeBlockAsm12B
+
+three_bytes_match_emit_encodeBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm12B
+
+two_bytes_match_emit_encodeBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm12B
+ JMP memmove_long_match_emit_encodeBlockAsm12B
+
+one_byte_match_emit_encodeBlockAsm12B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm12B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm12B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm12B
+
+memmove_long_match_emit_encodeBlockAsm12B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm12B:
+match_nolit_loop_encodeBlockAsm12B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeBlockAsm12B:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeBlockAsm12B
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeBlockAsm12B
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeBlockAsm12B
+
+matchlen_bsf_16match_nolit_encodeBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeBlockAsm12B
+
+matchlen_match8_match_nolit_encodeBlockAsm12B:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm12B
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeBlockAsm12B
+
+matchlen_bsf_8_match_nolit_encodeBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm12B
+
+matchlen_match4_match_nolit_encodeBlockAsm12B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm12B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm12B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm12B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm12B
+ JB match_nolit_end_encodeBlockAsm12B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm12B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm12B
+
+matchlen_match1_match_nolit_encodeBlockAsm12B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm12B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm12B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm12B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+long_offset_short_match_nolit_encodeBlockAsm12B:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeBlockAsm12B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm12B
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBlockAsm12B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+emit_copy_three_match_nolit_encodeBlockAsm12B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm12B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm12B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm12B:
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x18, DI
+ IMULQ R8, DI
+ SHRQ $0x34, DI
+ SHLQ $0x18, BX
+ IMULQ R8, BX
+ SHRQ $0x34, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm12B
+ INCL CX
+ JMP search_loop_encodeBlockAsm12B
+
+emit_remainder_encodeBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm12B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm12B
+ JB three_bytes_emit_remainder_encodeBlockAsm12B
+
+three_bytes_emit_remainder_encodeBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm12B
+
+two_bytes_emit_remainder_encodeBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm12B
+ JMP memmove_long_emit_remainder_encodeBlockAsm12B
+
+one_byte_emit_remainder_encodeBlockAsm12B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm12B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm12B
+
+memmove_long_emit_remainder_encodeBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm12B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBlockAsm10B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm10B(SB), $4120-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000020, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm10B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm10B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm10B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm10B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm10B
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm10B
+
+repeat_extend_back_loop_encodeBlockAsm10B:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm10B
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm10B
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm10B
+
+repeat_extend_back_end_encodeBlockAsm10B:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 3(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeBlockAsm10B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm10B
+ JB three_bytes_repeat_emit_encodeBlockAsm10B
+
+three_bytes_repeat_emit_encodeBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm10B
+
+two_bytes_repeat_emit_encodeBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm10B
+ JMP memmove_long_repeat_emit_encodeBlockAsm10B
+
+one_byte_repeat_emit_encodeBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm10B
+
+memmove_long_repeat_emit_encodeBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm10B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_repeat_extend_encodeBlockAsm10B:
+ CMPL R8, $0x10
+ JB matchlen_match8_repeat_extend_encodeBlockAsm10B
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B
+ XORQ 8(BX)(R11*1), R12
+ JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm10B
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm10B
+
+matchlen_bsf_16repeat_extend_encodeBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm10B
+
+matchlen_match8_repeat_extend_encodeBlockAsm10B:
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm10B
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ JMP matchlen_match4_repeat_extend_encodeBlockAsm10B
+
+matchlen_bsf_8_repeat_extend_encodeBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeBlockAsm10B:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm10B
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm10B:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm10B
+ JB repeat_extend_forward_end_encodeBlockAsm10B
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm10B
+
+matchlen_match1_repeat_extend_encodeBlockAsm10B:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm10B
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm10B:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm10B
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm10B
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_match_repeat_encodeBlockAsm10B
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm10B:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm10B
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_three_match_repeat_encodeBlockAsm10B:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_match_repeat_encodeBlockAsm10B:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_offset_match_repeat_encodeBlockAsm10B:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_as_copy_encodeBlockAsm10B:
+ // emitCopy
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm10B:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm10B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm10B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm10B
+
+no_repeat_found_encodeBlockAsm10B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm10B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm10B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm10B
+
+candidate3_match_encodeBlockAsm10B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm10B
+
+candidate2_match_encodeBlockAsm10B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm10B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm10B
+
+match_extend_back_loop_encodeBlockAsm10B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm10B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm10B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm10B
+ JMP match_extend_back_loop_encodeBlockAsm10B
+
+match_extend_back_end_encodeBlockAsm10B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm10B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm10B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm10B
+ JB three_bytes_match_emit_encodeBlockAsm10B
+
+three_bytes_match_emit_encodeBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm10B
+
+two_bytes_match_emit_encodeBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm10B
+ JMP memmove_long_match_emit_encodeBlockAsm10B
+
+one_byte_match_emit_encodeBlockAsm10B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm10B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm10B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm10B
+
+memmove_long_match_emit_encodeBlockAsm10B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm10B:
+match_nolit_loop_encodeBlockAsm10B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeBlockAsm10B:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeBlockAsm10B
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeBlockAsm10B
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeBlockAsm10B
+
+matchlen_bsf_16match_nolit_encodeBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeBlockAsm10B
+
+matchlen_match8_match_nolit_encodeBlockAsm10B:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm10B
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeBlockAsm10B
+
+matchlen_bsf_8_match_nolit_encodeBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm10B
+
+matchlen_match4_match_nolit_encodeBlockAsm10B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm10B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm10B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm10B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm10B
+ JB match_nolit_end_encodeBlockAsm10B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm10B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm10B
+
+matchlen_match1_match_nolit_encodeBlockAsm10B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm10B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm10B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm10B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+long_offset_short_match_nolit_encodeBlockAsm10B:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeBlockAsm10B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm10B
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBlockAsm10B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+emit_copy_three_match_nolit_encodeBlockAsm10B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm10B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm10B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm10B:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x36, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x36, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm10B
+ INCL CX
+ JMP search_loop_encodeBlockAsm10B
+
+emit_remainder_encodeBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm10B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm10B
+ JB three_bytes_emit_remainder_encodeBlockAsm10B
+
+three_bytes_emit_remainder_encodeBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm10B
+
+two_bytes_emit_remainder_encodeBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm10B
+ JMP memmove_long_emit_remainder_encodeBlockAsm10B
+
+one_byte_emit_remainder_encodeBlockAsm10B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm10B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm10B
+
+memmove_long_emit_remainder_encodeBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm10B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBlockAsm8B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm8B(SB), $1048-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000008, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm8B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm8B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm8B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm8B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm8B
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm8B
+
+repeat_extend_back_loop_encodeBlockAsm8B:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm8B
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm8B
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm8B
+
+repeat_extend_back_end_encodeBlockAsm8B:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 3(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeBlockAsm8B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm8B
+ JB three_bytes_repeat_emit_encodeBlockAsm8B
+
+three_bytes_repeat_emit_encodeBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm8B
+
+two_bytes_repeat_emit_encodeBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm8B
+ JMP memmove_long_repeat_emit_encodeBlockAsm8B
+
+one_byte_repeat_emit_encodeBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm8B
+
+memmove_long_repeat_emit_encodeBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm8B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_repeat_extend_encodeBlockAsm8B:
+ CMPL R8, $0x10
+ JB matchlen_match8_repeat_extend_encodeBlockAsm8B
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B
+ XORQ 8(BX)(R11*1), R12
+ JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm8B
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm8B
+
+matchlen_bsf_16repeat_extend_encodeBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm8B
+
+matchlen_match8_repeat_extend_encodeBlockAsm8B:
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm8B
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ JMP matchlen_match4_repeat_extend_encodeBlockAsm8B
+
+matchlen_bsf_8_repeat_extend_encodeBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeBlockAsm8B:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm8B
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm8B:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm8B
+ JB repeat_extend_forward_end_encodeBlockAsm8B
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm8B
+
+matchlen_match1_repeat_extend_encodeBlockAsm8B:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm8B
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm8B:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm8B
+
+ // emitRepeat
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm8B
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm8B:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm8B
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_three_match_repeat_encodeBlockAsm8B:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_two_match_repeat_encodeBlockAsm8B:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_as_copy_encodeBlockAsm8B:
+ // emitCopy
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm8B:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm8B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm8B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm8B
+
+no_repeat_found_encodeBlockAsm8B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm8B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm8B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm8B
+
+candidate3_match_encodeBlockAsm8B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm8B
+
+candidate2_match_encodeBlockAsm8B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm8B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm8B
+
+match_extend_back_loop_encodeBlockAsm8B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm8B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm8B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm8B
+ JMP match_extend_back_loop_encodeBlockAsm8B
+
+match_extend_back_end_encodeBlockAsm8B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm8B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm8B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm8B
+ JB three_bytes_match_emit_encodeBlockAsm8B
+
+three_bytes_match_emit_encodeBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm8B
+
+two_bytes_match_emit_encodeBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm8B
+ JMP memmove_long_match_emit_encodeBlockAsm8B
+
+one_byte_match_emit_encodeBlockAsm8B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm8B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm8B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm8B
+
+memmove_long_match_emit_encodeBlockAsm8B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm8B:
+match_nolit_loop_encodeBlockAsm8B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeBlockAsm8B:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeBlockAsm8B
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeBlockAsm8B
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeBlockAsm8B
+
+matchlen_bsf_16match_nolit_encodeBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeBlockAsm8B
+
+matchlen_match8_match_nolit_encodeBlockAsm8B:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm8B
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeBlockAsm8B
+
+matchlen_bsf_8_match_nolit_encodeBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBlockAsm8B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm8B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm8B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm8B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm8B
+ JB match_nolit_end_encodeBlockAsm8B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm8B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm8B
+
+matchlen_match1_match_nolit_encodeBlockAsm8B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm8B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm8B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm8B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ MOVL R9, BX
+ LEAL -4(R9), R9
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+long_offset_short_match_nolit_encodeBlockAsm8B:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R9, BX
+ LEAL -4(R9), R9
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeBlockAsm8B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm8B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+emit_copy_three_match_nolit_encodeBlockAsm8B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm8B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm8B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm8B:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x38, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x38, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm8B
+ INCL CX
+ JMP search_loop_encodeBlockAsm8B
+
+emit_remainder_encodeBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm8B
+ JB three_bytes_emit_remainder_encodeBlockAsm8B
+
+three_bytes_emit_remainder_encodeBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm8B
+
+two_bytes_emit_remainder_encodeBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeBlockAsm8B
+
+one_byte_emit_remainder_encodeBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm8B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm8B
+
+memmove_long_emit_remainder_encodeBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm8B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm(SB), $589848-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00001200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
+ JBE check_maxskip_ok_encodeBetterBlockAsm
+ LEAL 100(CX), BX
+ JMP check_maxskip_cont_encodeBetterBlockAsm
+
+check_maxskip_ok_encodeBetterBlockAsm:
+ LEAL 1(CX)(BX*1), BX
+
+check_maxskip_cont_encodeBetterBlockAsm:
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm
+
+no_short_found_encodeBetterBlockAsm:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm
+
+candidateS_match_encodeBetterBlockAsm:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm
+
+match_extend_back_loop_encodeBetterBlockAsm:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm
+ JMP match_extend_back_loop_encodeBetterBlockAsm
+
+match_extend_back_end_encodeBetterBlockAsm:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeBetterBlockAsm:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeBetterBlockAsm
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm
+
+matchlen_bsf_16match_nolit_encodeBetterBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match8_match_nolit_encodeBetterBlockAsm:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeBetterBlockAsm
+
+matchlen_bsf_8_match_nolit_encodeBetterBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ JB match_nolit_end_encodeBetterBlockAsm
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm
+ CMPL R11, $0x01
+ JA match_length_ok_encodeBetterBlockAsm
+ CMPL DI, $0x0000ffff
+ JBE match_length_ok_encodeBetterBlockAsm
+ MOVL 20(SP), CX
+ INCL CX
+ JMP search_loop_encodeBetterBlockAsm
+
+match_length_ok_encodeBetterBlockAsm:
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_encodeBetterBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_match_emit_encodeBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+four_bytes_match_emit_encodeBetterBlockAsm:
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+three_bytes_match_emit_encodeBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+two_bytes_match_emit_encodeBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+one_byte_match_emit_encodeBetterBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm
+
+memmove_long_match_emit_encodeBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL DI, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBetterBlockAsm
+ CMPL R11, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm
+ MOVB $0xff, (AX)
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
+ ADDQ $0x05, AX
+ CMPL R11, $0x04
+ JB four_bytes_remain_match_nolit_encodeBetterBlockAsm
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy:
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL R11, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+four_bytes_remain_match_nolit_encodeBetterBlockAsm:
+ TESTL R11, R11
+ JZ match_nolit_emitcopy_end_encodeBetterBlockAsm
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+two_byte_offset_match_nolit_encodeBetterBlockAsm:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ MOVL DI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R11, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+long_offset_short_match_nolit_encodeBetterBlockAsm:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL R11, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+match_is_repeat_encodeBetterBlockAsm:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_match_emit_repeat_encodeBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+four_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm:
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL R11, $0x0100ffff
+ JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm
+
+repeat_five_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm:
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm
+
+emit_remainder_encodeBetterBlockAsm:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+four_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+three_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+two_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+one_byte_emit_remainder_encodeBetterBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm
+
+memmove_long_emit_remainder_encodeBetterBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00001200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm4MB:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm4MB
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm4MB:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
+ JBE check_maxskip_ok_encodeBetterBlockAsm4MB
+ LEAL 100(CX), BX
+ JMP check_maxskip_cont_encodeBetterBlockAsm4MB
+
+check_maxskip_ok_encodeBetterBlockAsm4MB:
+ LEAL 1(CX)(BX*1), BX
+
+check_maxskip_cont_encodeBetterBlockAsm4MB:
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm4MB
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm4MB
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm4MB
+
+no_short_found_encodeBetterBlockAsm4MB:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm4MB
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm4MB
+
+candidateS_match_encodeBetterBlockAsm4MB:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm4MB:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm4MB
+
+match_extend_back_loop_encodeBetterBlockAsm4MB:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm4MB
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm4MB
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm4MB
+ JMP match_extend_back_loop_encodeBetterBlockAsm4MB
+
+match_extend_back_end_encodeBetterBlockAsm4MB:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 4(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm4MB:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeBetterBlockAsm4MB
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB
+
+matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match8_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
+
+matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ JB match_nolit_end_encodeBetterBlockAsm4MB
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm4MB
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm4MB:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm4MB
+ CMPL R11, $0x01
+ JA match_length_ok_encodeBetterBlockAsm4MB
+ CMPL DI, $0x0000ffff
+ JBE match_length_ok_encodeBetterBlockAsm4MB
+ MOVL 20(SP), CX
+ INCL CX
+ JMP search_loop_encodeBetterBlockAsm4MB
+
+match_length_ok_encodeBetterBlockAsm4MB:
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm4MB
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm4MB
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_encodeBetterBlockAsm4MB
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+three_bytes_match_emit_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+two_bytes_match_emit_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm4MB
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+one_byte_match_emit_encodeBetterBlockAsm4MB:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm4MB:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB
+
+memmove_long_match_emit_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm4MB:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL DI, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB
+ CMPL R11, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
+ MOVB $0xff, (AX)
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
+ ADDQ $0x05, AX
+ CMPL R11, $0x04
+ JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB:
+ TESTL R11, R11
+ JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+two_byte_offset_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+long_offset_short_match_nolit_encodeBetterBlockAsm4MB:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm4MB:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+match_is_repeat_encodeBetterBlockAsm4MB:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm4MB:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm4MB
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm4MB:
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm4MB:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm4MB
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm4MB
+
+emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 4(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm4MB:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm4MB
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm4MB
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+three_bytes_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+two_bytes_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm4MB
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+one_byte_emit_remainder_encodeBetterBlockAsm4MB:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
+
+memmove_long_emit_remainder_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm12B(SB), $81944-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000280, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm12B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm12B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm12B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm12B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 65560(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 65560(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm12B
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm12B
+
+no_short_found_encodeBetterBlockAsm12B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm12B
+
+candidateS_match_encodeBetterBlockAsm12B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm12B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm12B
+
+match_extend_back_loop_encodeBetterBlockAsm12B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm12B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm12B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm12B
+ JMP match_extend_back_loop_encodeBetterBlockAsm12B
+
+match_extend_back_end_encodeBetterBlockAsm12B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm12B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeBetterBlockAsm12B
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B
+
+matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm12B
+
+matchlen_match8_match_nolit_encodeBetterBlockAsm12B:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeBetterBlockAsm12B
+
+matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm12B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ JB match_nolit_end_encodeBetterBlockAsm12B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm12B
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm12B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm12B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm12B
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm12B
+ JB three_bytes_match_emit_encodeBetterBlockAsm12B
+
+three_bytes_match_emit_encodeBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm12B
+
+two_bytes_match_emit_encodeBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm12B
+ JMP memmove_long_match_emit_encodeBetterBlockAsm12B
+
+one_byte_match_emit_encodeBetterBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B
+
+memmove_long_match_emit_encodeBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm12B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm12B:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm12B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+match_is_repeat_encodeBetterBlockAsm12B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm12B
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm12B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm12B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm12B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x32, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x34, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 65560(SP)(R10*4)
+ MOVL R13, 65560(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm12B:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm12B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm12B
+
+emit_remainder_encodeBetterBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm12B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm12B
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm12B
+
+three_bytes_emit_remainder_encodeBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
+
+two_bytes_emit_remainder_encodeBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm12B
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
+
+one_byte_emit_remainder_encodeBetterBlockAsm12B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
+
+memmove_long_emit_remainder_encodeBetterBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm12B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm10B(SB), $20504-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x000000a0, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm10B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm10B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm10B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm10B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 16408(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 16408(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm10B
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm10B
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm10B
+
+no_short_found_encodeBetterBlockAsm10B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm10B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm10B
+
+candidateS_match_encodeBetterBlockAsm10B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm10B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm10B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm10B
+
+match_extend_back_loop_encodeBetterBlockAsm10B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm10B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm10B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm10B
+ JMP match_extend_back_loop_encodeBetterBlockAsm10B
+
+match_extend_back_end_encodeBetterBlockAsm10B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm10B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeBetterBlockAsm10B
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B
+
+matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm10B
+
+matchlen_match8_match_nolit_encodeBetterBlockAsm10B:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeBetterBlockAsm10B
+
+matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm10B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ JB match_nolit_end_encodeBetterBlockAsm10B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm10B
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm10B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm10B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm10B
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm10B
+ JB three_bytes_match_emit_encodeBetterBlockAsm10B
+
+three_bytes_match_emit_encodeBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm10B
+
+two_bytes_match_emit_encodeBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm10B
+ JMP memmove_long_match_emit_encodeBetterBlockAsm10B
+
+one_byte_match_emit_encodeBetterBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B
+
+memmove_long_match_emit_encodeBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm10B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm10B:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm10B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+match_is_repeat_encodeBetterBlockAsm10B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm10B
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm10B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm10B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm10B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x34, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x36, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 16408(SP)(R10*4)
+ MOVL R13, 16408(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm10B:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm10B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm10B
+
+emit_remainder_encodeBetterBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm10B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm10B
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm10B
+
+three_bytes_emit_remainder_encodeBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
+
+two_bytes_emit_remainder_encodeBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm10B
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
+
+one_byte_emit_remainder_encodeBetterBlockAsm10B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
+
+memmove_long_emit_remainder_encodeBetterBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm10B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm8B(SB), $5144-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000028, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm8B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm8B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm8B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm8B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 4120(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 4120(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm8B
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm8B
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm8B
+
+no_short_found_encodeBetterBlockAsm8B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm8B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm8B
+
+candidateS_match_encodeBetterBlockAsm8B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm8B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm8B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm8B
+
+match_extend_back_loop_encodeBetterBlockAsm8B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm8B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm8B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm8B
+ JMP match_extend_back_loop_encodeBetterBlockAsm8B
+
+match_extend_back_end_encodeBetterBlockAsm8B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm8B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeBetterBlockAsm8B
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B
+
+matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm8B
+
+matchlen_match8_match_nolit_encodeBetterBlockAsm8B:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeBetterBlockAsm8B
+
+matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ JB match_nolit_end_encodeBetterBlockAsm8B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm8B
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm8B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm8B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm8B
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm8B
+ JB three_bytes_match_emit_encodeBetterBlockAsm8B
+
+three_bytes_match_emit_encodeBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm8B
+
+two_bytes_match_emit_encodeBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm8B
+ JMP memmove_long_match_emit_encodeBetterBlockAsm8B
+
+one_byte_match_emit_encodeBetterBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B
+
+memmove_long_match_emit_encodeBetterBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm8B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm8B:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm8B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm8B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+match_is_repeat_encodeBetterBlockAsm8B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm8B
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm8B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4
+ CMPQ DI, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4:
+ MOVL (R8), R9
+ MOVL R9, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (R8), R9
+ MOVL -4(R8)(DI*1), R8
+ MOVL R9, (AX)
+ MOVL R8, -4(AX)(DI*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm8B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R12
+ SUBQ R9, R12
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R8)(R12*1), R9
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R9
+ ADDQ $0x20, R12
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R12*1), X4
+ MOVOU -16(R8)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ DI, R12
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm8B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm8B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm8B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x38, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x36, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x38, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 4120(SP)(R10*4)
+ MOVL R13, 4120(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm8B:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm8B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm8B
+
+emit_remainder_encodeBetterBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm8B
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm8B
+
+three_bytes_emit_remainder_encodeBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
+
+two_bytes_emit_remainder_encodeBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
+
+one_byte_emit_remainder_encodeBetterBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
+
+memmove_long_emit_remainder_encodeBetterBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm8B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm(SB), $65560-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm
+
+repeat_extend_back_loop_encodeSnappyBlockAsm:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm
+
+repeat_extend_back_end_encodeSnappyBlockAsm:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 5(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeSnappyBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeSnappyBlockAsm:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_repeat_emit_encodeSnappyBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
+
+four_bytes_repeat_emit_encodeSnappyBlockAsm:
+ MOVL BX, R9
+ SHRL $0x10, R9
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R9, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
+
+one_byte_repeat_emit_encodeSnappyBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+
+matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm:
+ CMPL DI, $0x10
+ JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm
+ XORQ 8(BX)(R10*1), R11
+ JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
+ JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm
+
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm
+
+matchlen_match8_repeat_extend_encodeSnappyBlockAsm:
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm
+
+matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+ CMPL SI, $0x00010000
+ JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
+
+four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm:
+ CMPL BX, $0x40
+ JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
+ MOVB $0xff, (AX)
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
+ JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm
+
+four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm:
+ TESTL BX, BX
+ JZ repeat_end_emit_encodeSnappyBlockAsm
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm
+
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm
+
+no_repeat_found_encodeSnappyBlockAsm:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm
+
+candidate3_match_encodeSnappyBlockAsm:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm
+
+candidate2_match_encodeSnappyBlockAsm:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm
+
+match_extend_back_loop_encodeSnappyBlockAsm:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm
+ JMP match_extend_back_loop_encodeSnappyBlockAsm
+
+match_extend_back_end_encodeSnappyBlockAsm:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm
+ CMPL DI, $0x00010000
+ JB three_bytes_match_emit_encodeSnappyBlockAsm
+ CMPL DI, $0x01000000
+ JB four_bytes_match_emit_encodeSnappyBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm
+
+four_bytes_match_emit_encodeSnappyBlockAsm:
+ MOVL DI, R9
+ SHRL $0x10, R9
+ MOVB $0xf8, (AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm
+
+three_bytes_match_emit_encodeSnappyBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm
+
+two_bytes_match_emit_encodeSnappyBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm
+
+one_byte_match_emit_encodeSnappyBlockAsm:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm
+
+memmove_long_match_emit_encodeSnappyBlockAsm:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm:
+match_nolit_loop_encodeSnappyBlockAsm:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBlockAsm
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm
+
+matchlen_bsf_16match_nolit_encodeSnappyBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm
+
+matchlen_match8_match_nolit_encodeSnappyBlockAsm:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm
+
+matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ JB match_nolit_end_encodeSnappyBlockAsm
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JB two_byte_offset_match_nolit_encodeSnappyBlockAsm
+
+four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm:
+ CMPL R9, $0x40
+ JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm
+ MOVB $0xff, (AX)
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm
+ JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm
+
+four_bytes_remain_match_nolit_encodeSnappyBlockAsm:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
+
+two_byte_offset_match_nolit_encodeSnappyBlockAsm:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm
+
+emit_remainder_encodeSnappyBlockAsm:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeSnappyBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
+
+four_bytes_emit_remainder_encodeSnappyBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
+
+one_byte_emit_remainder_encodeSnappyBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm64K:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm64K
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm64K:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm64K
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm64K
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm64K
+
+repeat_extend_back_loop_encodeSnappyBlockAsm64K:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm64K
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm64K
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K
+
+repeat_extend_back_end_encodeSnappyBlockAsm64K:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 3(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeSnappyBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeSnappyBlockAsm64K:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm64K
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm64K
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
+
+one_byte_repeat_emit_encodeSnappyBlockAsm64K:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+
+matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x10
+ JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K
+ XORQ 8(BX)(R10*1), R11
+ JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
+ JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K
+
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
+
+matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K:
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm64K
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm64K:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm64K
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm64K:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm64K
+
+no_repeat_found_encodeSnappyBlockAsm64K:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm64K
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm64K
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm64K
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm64K
+
+candidate3_match_encodeSnappyBlockAsm64K:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm64K
+
+candidate2_match_encodeSnappyBlockAsm64K:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm64K:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm64K
+
+match_extend_back_loop_encodeSnappyBlockAsm64K:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm64K
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm64K
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm64K
+ JMP match_extend_back_loop_encodeSnappyBlockAsm64K
+
+match_extend_back_end_encodeSnappyBlockAsm64K:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm64K:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm64K
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm64K
+ JB three_bytes_match_emit_encodeSnappyBlockAsm64K
+
+three_bytes_match_emit_encodeSnappyBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
+
+two_bytes_match_emit_encodeSnappyBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm64K
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
+
+one_byte_match_emit_encodeSnappyBlockAsm64K:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm64K:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K
+
+memmove_long_match_emit_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm64K:
+match_nolit_loop_encodeSnappyBlockAsm64K:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBlockAsm64K
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K
+
+matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm64K
+
+matchlen_match8_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
+
+matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm64K
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ JB match_nolit_end_encodeSnappyBlockAsm64K
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm64K
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm64K
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm64K:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm64K:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm64K:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm64K
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm64K:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm64K
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm64K
+
+emit_remainder_encodeSnappyBlockAsm64K:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm64K:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm64K
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm64K
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm64K
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
+
+one_byte_emit_remainder_encodeSnappyBlockAsm64K:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000080, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm12B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm12B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm12B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm12B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x18, R10
+ IMULQ R8, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm12B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm12B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm12B:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm12B
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm12B
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B
+
+repeat_extend_back_end_encodeSnappyBlockAsm12B:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 3(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeSnappyBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeSnappyBlockAsm12B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm12B
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+
+matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x10
+ JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
+ XORQ 8(BX)(R10*1), R11
+ JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
+ JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B
+
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
+
+matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm12B
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm12B:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm12B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm12B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm12B
+
+no_repeat_found_encodeSnappyBlockAsm12B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm12B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm12B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm12B
+
+candidate3_match_encodeSnappyBlockAsm12B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm12B
+
+candidate2_match_encodeSnappyBlockAsm12B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm12B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm12B
+
+match_extend_back_loop_encodeSnappyBlockAsm12B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm12B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm12B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm12B
+ JMP match_extend_back_loop_encodeSnappyBlockAsm12B
+
+match_extend_back_end_encodeSnappyBlockAsm12B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm12B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm12B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm12B
+ JB three_bytes_match_emit_encodeSnappyBlockAsm12B
+
+three_bytes_match_emit_encodeSnappyBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
+
+two_bytes_match_emit_encodeSnappyBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm12B
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
+
+one_byte_match_emit_encodeSnappyBlockAsm12B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm12B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B
+
+memmove_long_match_emit_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm12B:
+match_nolit_loop_encodeSnappyBlockAsm12B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBlockAsm12B
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B
+
+matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match8_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
+
+matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ JB match_nolit_end_encodeSnappyBlockAsm12B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm12B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm12B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm12B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm12B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm12B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm12B:
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x18, DI
+ IMULQ R8, DI
+ SHRQ $0x34, DI
+ SHLQ $0x18, BX
+ IMULQ R8, BX
+ SHRQ $0x34, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm12B
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm12B
+
+emit_remainder_encodeSnappyBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm12B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm12B
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm12B
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
+
+one_byte_emit_remainder_encodeSnappyBlockAsm12B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
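+// Smaller variant of the same generated encoder: the 4120-byte frame holds a
+// 1024-entry (4 KiB) hash table, and hashing uses the 0x9e3779b1 multiplier
+// with a 10-bit table index.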
+TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000020, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm10B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm10B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm10B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm10B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm10B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm10B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm10B:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm10B
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm10B
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B
+
+repeat_extend_back_end_encodeSnappyBlockAsm10B:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 3(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeSnappyBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeSnappyBlockAsm10B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm10B
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+
+matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x10
+ JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
+ XORQ 8(BX)(R10*1), R11
+ JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
+ JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B
+
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
+
+matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm10B
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm10B:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm10B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm10B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm10B
+
+no_repeat_found_encodeSnappyBlockAsm10B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm10B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm10B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm10B
+
+candidate3_match_encodeSnappyBlockAsm10B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm10B
+
+candidate2_match_encodeSnappyBlockAsm10B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm10B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm10B
+
+match_extend_back_loop_encodeSnappyBlockAsm10B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm10B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm10B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm10B
+ JMP match_extend_back_loop_encodeSnappyBlockAsm10B
+
+match_extend_back_end_encodeSnappyBlockAsm10B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm10B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm10B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm10B
+ JB three_bytes_match_emit_encodeSnappyBlockAsm10B
+
+three_bytes_match_emit_encodeSnappyBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
+
+two_bytes_match_emit_encodeSnappyBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm10B
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
+
+one_byte_match_emit_encodeSnappyBlockAsm10B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm10B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B
+
+memmove_long_match_emit_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm10B:
+match_nolit_loop_encodeSnappyBlockAsm10B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBlockAsm10B
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B
+
+matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match8_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
+
+matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ JB match_nolit_end_encodeSnappyBlockAsm10B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm10B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm10B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm10B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm10B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm10B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm10B:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x36, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x36, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm10B
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm10B
+
+emit_remainder_encodeSnappyBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm10B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm10B
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm10B
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
+
+one_byte_emit_remainder_encodeSnappyBlockAsm10B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
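+// Smallest variant: the 1048-byte frame holds a 256-entry (1 KiB) hash table
+// (8-bit index), and the search loop advances with a skip shift of 4 instead of 5.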
+TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000008, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm8B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm8B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm8B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm8B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm8B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm8B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm8B:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm8B
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm8B
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B
+
+repeat_extend_back_end_encodeSnappyBlockAsm8B:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 3(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_encodeSnappyBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+repeat_dst_size_check_encodeSnappyBlockAsm8B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm8B
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+
+matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x10
+ JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
+ XORQ 8(BX)(R10*1), R11
+ JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
+ JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B
+
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
+
+matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm8B
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm8B:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm8B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm8B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm8B
+
+no_repeat_found_encodeSnappyBlockAsm8B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm8B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm8B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm8B
+
+candidate3_match_encodeSnappyBlockAsm8B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm8B
+
+candidate2_match_encodeSnappyBlockAsm8B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm8B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm8B
+
+match_extend_back_loop_encodeSnappyBlockAsm8B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm8B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm8B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm8B
+ JMP match_extend_back_loop_encodeSnappyBlockAsm8B
+
+match_extend_back_end_encodeSnappyBlockAsm8B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm8B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm8B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm8B
+ JB three_bytes_match_emit_encodeSnappyBlockAsm8B
+
+three_bytes_match_emit_encodeSnappyBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
+
+two_bytes_match_emit_encodeSnappyBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm8B
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
+
+one_byte_match_emit_encodeSnappyBlockAsm8B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm8B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B
+
+memmove_long_match_emit_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm8B:
+match_nolit_loop_encodeSnappyBlockAsm8B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBlockAsm8B
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B
+
+matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm8B
+
+matchlen_match8_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
+
+matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm8B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ JB match_nolit_end_encodeSnappyBlockAsm8B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm8B
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm8B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm8B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm8B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm8B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm8B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm8B:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x38, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x38, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm8B
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm8B
+
+emit_remainder_encodeSnappyBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm8B
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
+
+one_byte_emit_remainder_encodeSnappyBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00001200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBetterBlockAsm:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBetterBlockAsm:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
+ JBE check_maxskip_ok_encodeSnappyBetterBlockAsm
+ LEAL 100(CX), BX
+ JMP check_maxskip_cont_encodeSnappyBetterBlockAsm
+
+check_maxskip_ok_encodeSnappyBetterBlockAsm:
+ LEAL 1(CX)(BX*1), BX
+
+check_maxskip_cont_encodeSnappyBetterBlockAsm:
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm
+
+no_short_found_encodeSnappyBetterBlockAsm:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm
+
+candidateS_match_encodeSnappyBetterBlockAsm:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm
+
+match_extend_back_loop_encodeSnappyBetterBlockAsm:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm
+
+match_extend_back_end_encodeSnappyBetterBlockAsm:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm
+
+matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm
+
+matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
+
+matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ JB match_nolit_end_encodeSnappyBetterBlockAsm
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeSnappyBetterBlockAsm:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL R11, $0x01
+ JA match_length_ok_encodeSnappyBetterBlockAsm
+ CMPL DI, $0x0000ffff
+ JBE match_length_ok_encodeSnappyBetterBlockAsm
+ MOVL 20(SP), CX
+ INCL CX
+ JMP search_loop_encodeSnappyBetterBlockAsm
+
+match_length_ok_encodeSnappyBetterBlockAsm:
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_match_emit_encodeSnappyBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
+
+four_bytes_match_emit_encodeSnappyBetterBlockAsm:
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL DI, $0x00010000
+ JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
+
+four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R11, $0x40
+ JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
+ MOVB $0xff, (AX)
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
+ ADDQ $0x05, AX
+ CMPL R11, $0x04
+ JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
+ JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm
+
+four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm:
+ TESTL R11, R11
+ JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
+
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm:
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm
+
+emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
+
+four_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000a00, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBetterBlockAsm64K:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm64K
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBetterBlockAsm64K:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm64K
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x30, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 262168(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 262168(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm64K
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm64K
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm64K
+
+no_short_found_encodeSnappyBetterBlockAsm64K:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm64K
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm64K
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm64K
+
+candidateS_match_encodeSnappyBetterBlockAsm64K:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x30, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm64K
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm64K:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
+
+match_extend_back_loop_encodeSnappyBetterBlockAsm64K:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K
+
+match_extend_back_end_encodeSnappyBetterBlockAsm64K:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm64K:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K
+
+matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
+
+matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ JB match_nolit_end_encodeSnappyBetterBlockAsm64K
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeSnappyBetterBlockAsm64K:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm64K
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm64K
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm64K:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm64K:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm64K:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm64K
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K:
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x30, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x30, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 262168(SP)(R10*4)
+ MOVL R13, 262168(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm64K:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm64K
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x30, R9
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x30, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm64K
+
+emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm64K:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000280, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBetterBlockAsm12B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm12B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBetterBlockAsm12B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm12B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 65560(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 65560(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm12B
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm12B
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm12B
+
+no_short_found_encodeSnappyBetterBlockAsm12B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm12B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm12B
+
+candidateS_match_encodeSnappyBetterBlockAsm12B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm12B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm12B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
+
+match_extend_back_loop_encodeSnappyBetterBlockAsm12B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B
+
+match_extend_back_end_encodeSnappyBetterBlockAsm12B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm12B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B
+
+matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
+
+matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm12B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeSnappyBetterBlockAsm12B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm12B
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm12B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x32, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x34, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 65560(SP)(R10*4)
+ MOVL R13, 65560(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm12B:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm12B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm12B
+
+emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
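+// encodeSnappyBetterBlockAsm10B writes a Snappy-compatible encoding of src to dst
+// using the "better" dual hash-table match search (long and short hashes kept on the
+// stack), returning the number of bytes written, or 0 when the output would exceed
+// the size bound derived from len(src) at function entry.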
+TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x000000a0, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBetterBlockAsm10B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm10B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBetterBlockAsm10B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm10B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 16408(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 16408(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm10B
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm10B
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm10B
+
+no_short_found_encodeSnappyBetterBlockAsm10B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm10B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm10B
+
+candidateS_match_encodeSnappyBetterBlockAsm10B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm10B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm10B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
+
+match_extend_back_loop_encodeSnappyBetterBlockAsm10B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B
+
+match_extend_back_end_encodeSnappyBetterBlockAsm10B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm10B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B
+
+matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
+
+matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm10B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeSnappyBetterBlockAsm10B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm10B
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm10B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x34, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x36, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 16408(SP)(R10*4)
+ MOVL R13, 16408(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm10B:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm10B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm10B
+
+emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
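+// encodeSnappyBetterBlockAsm8B follows the same scheme as the 10B variant above but
+// with smaller hash tables for shorter input blocks; the return convention is the
+// same (bytes written, or 0 if the output would exceed the computed bound).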
+TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000028, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBetterBlockAsm8B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm8B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBetterBlockAsm8B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm8B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 4120(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 4120(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm8B
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm8B
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm8B
+
+no_short_found_encodeSnappyBetterBlockAsm8B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm8B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm8B
+
+candidateS_match_encodeSnappyBetterBlockAsm8B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm8B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm8B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
+
+match_extend_back_loop_encodeSnappyBetterBlockAsm8B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B
+
+match_extend_back_end_encodeSnappyBetterBlockAsm8B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm8B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+
+matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL DI, $0x10
+ JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVQ (R8)(R11*1), R10
+ MOVQ 8(R8)(R11*1), R12
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B
+ XORQ 8(R9)(R11*1), R12
+ JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL -16(DI), DI
+ LEAL 16(R11), R11
+ JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B
+
+matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R12, R12
+
+#else
+ BSFQ R12, R12
+
+#endif
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
+
+matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm8B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeSnappyBetterBlockAsm8B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm8B
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm8B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x38, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x36, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x38, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 4120(SP)(R10*4)
+ MOVL R13, 4120(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm8B:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm8B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm8B
+
+emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func calcBlockSize(src []byte) int
+// Requires: BMI, SSE2
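+// calcBlockSize runs the block encoder's match search over src but only accumulates
+// the would-be output size in AX instead of writing compressed data; it returns the
+// encoded size in bytes, or 0 when the check against the internal size bound fails.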
+TEXT ·calcBlockSize(SB), $32792-32
+ XORQ AX, AX
+ MOVQ $0x00000100, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_calcBlockSize:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_calcBlockSize
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+8(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+0(FP), DX
+
+search_loop_calcBlockSize:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_calcBlockSize
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x33, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x33, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x33, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_calcBlockSize
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_calcBlockSize
+
+repeat_extend_back_loop_calcBlockSize:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_calcBlockSize
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_calcBlockSize
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_calcBlockSize
+
+repeat_extend_back_end_calcBlockSize:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 5(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+repeat_dst_size_check_calcBlockSize:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_calcBlockSize
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_calcBlockSize
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_calcBlockSize
+ CMPL BX, $0x00010000
+ JB three_bytes_repeat_emit_calcBlockSize
+ CMPL BX, $0x01000000
+ JB four_bytes_repeat_emit_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+four_bytes_repeat_emit_calcBlockSize:
+ ADDQ $0x04, AX
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+three_bytes_repeat_emit_calcBlockSize:
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+two_bytes_repeat_emit_calcBlockSize:
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_calcBlockSize
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+one_byte_repeat_emit_calcBlockSize:
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_calcBlockSize:
+ LEAQ (AX)(DI*1), AX
+ JMP emit_literal_done_repeat_emit_calcBlockSize
+
+memmove_long_repeat_emit_calcBlockSize:
+ LEAQ (AX)(DI*1), AX
+
+emit_literal_done_repeat_emit_calcBlockSize:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+8(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+
+matchlen_loopback_16_repeat_extend_calcBlockSize:
+ CMPL DI, $0x10
+ JB matchlen_match8_repeat_extend_calcBlockSize
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_calcBlockSize
+ XORQ 8(BX)(R10*1), R11
+ JNZ matchlen_bsf_16repeat_extend_calcBlockSize
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
+ JMP matchlen_loopback_16_repeat_extend_calcBlockSize
+
+matchlen_bsf_16repeat_extend_calcBlockSize:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
+ JMP repeat_extend_forward_end_calcBlockSize
+
+matchlen_match8_repeat_extend_calcBlockSize:
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_calcBlockSize
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_calcBlockSize
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ JMP matchlen_match4_repeat_extend_calcBlockSize
+
+matchlen_bsf_8_repeat_extend_calcBlockSize:
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_calcBlockSize
+
+matchlen_match4_repeat_extend_calcBlockSize:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_calcBlockSize
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_calcBlockSize
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_calcBlockSize:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_calcBlockSize
+ JB repeat_extend_forward_end_calcBlockSize
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_calcBlockSize
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_calcBlockSize
+
+matchlen_match1_repeat_extend_calcBlockSize:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_calcBlockSize
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_calcBlockSize:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+ CMPL SI, $0x00010000
+ JB two_byte_offset_repeat_as_copy_calcBlockSize
+
+four_bytes_loop_back_repeat_as_copy_calcBlockSize:
+ CMPL BX, $0x40
+ JBE four_bytes_remain_repeat_as_copy_calcBlockSize
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JB four_bytes_remain_repeat_as_copy_calcBlockSize
+ JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize
+
+four_bytes_remain_repeat_as_copy_calcBlockSize:
+ TESTL BX, BX
+ JZ repeat_end_emit_calcBlockSize
+ XORL BX, BX
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_calcBlockSize
+
+two_byte_offset_repeat_as_copy_calcBlockSize:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_calcBlockSize
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_calcBlockSize
+
+two_byte_offset_short_repeat_as_copy_calcBlockSize:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_calcBlockSize
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_calcBlockSize
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_calcBlockSize
+
+emit_copy_three_repeat_as_copy_calcBlockSize:
+ ADDQ $0x03, AX
+
+repeat_end_emit_calcBlockSize:
+ MOVL CX, 12(SP)
+ JMP search_loop_calcBlockSize
+
+no_repeat_found_calcBlockSize:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_calcBlockSize
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_calcBlockSize
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_calcBlockSize
+ MOVL 20(SP), CX
+ JMP search_loop_calcBlockSize
+
+candidate3_match_calcBlockSize:
+ ADDL $0x02, CX
+ JMP candidate_match_calcBlockSize
+
+candidate2_match_calcBlockSize:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_calcBlockSize:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_calcBlockSize
+
+match_extend_back_loop_calcBlockSize:
+ CMPL CX, SI
+ JBE match_extend_back_end_calcBlockSize
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_calcBlockSize
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_calcBlockSize
+ JMP match_extend_back_loop_calcBlockSize
+
+match_extend_back_end_calcBlockSize:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_dst_size_check_calcBlockSize:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_calcBlockSize
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
+ JB one_byte_match_emit_calcBlockSize
+ CMPL SI, $0x00000100
+ JB two_bytes_match_emit_calcBlockSize
+ CMPL SI, $0x00010000
+ JB three_bytes_match_emit_calcBlockSize
+ CMPL SI, $0x01000000
+ JB four_bytes_match_emit_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+four_bytes_match_emit_calcBlockSize:
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+three_bytes_match_emit_calcBlockSize:
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+two_bytes_match_emit_calcBlockSize:
+ ADDQ $0x02, AX
+ CMPL SI, $0x40
+ JB memmove_match_emit_calcBlockSize
+ JMP memmove_long_match_emit_calcBlockSize
+
+one_byte_match_emit_calcBlockSize:
+ ADDQ $0x01, AX
+
+memmove_match_emit_calcBlockSize:
+ LEAQ (AX)(R8*1), AX
+ JMP emit_literal_done_match_emit_calcBlockSize
+
+memmove_long_match_emit_calcBlockSize:
+ LEAQ (AX)(R8*1), AX
+
+emit_literal_done_match_emit_calcBlockSize:
+match_nolit_loop_calcBlockSize:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+8(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_calcBlockSize:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_calcBlockSize
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_calcBlockSize
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_calcBlockSize
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_calcBlockSize
+
+matchlen_bsf_16match_nolit_calcBlockSize:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_calcBlockSize
+
+matchlen_match8_match_nolit_calcBlockSize:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_calcBlockSize
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_calcBlockSize
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_calcBlockSize
+
+matchlen_bsf_8_match_nolit_calcBlockSize:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_calcBlockSize
+
+matchlen_match4_match_nolit_calcBlockSize:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_calcBlockSize
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_calcBlockSize
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_calcBlockSize:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_calcBlockSize
+ JB match_nolit_end_calcBlockSize
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_calcBlockSize
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_calcBlockSize
+
+matchlen_match1_match_nolit_calcBlockSize:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_calcBlockSize
+ LEAL 1(R9), R9
+
+match_nolit_end_calcBlockSize:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JB two_byte_offset_match_nolit_calcBlockSize
+
+four_bytes_loop_back_match_nolit_calcBlockSize:
+ CMPL R9, $0x40
+ JBE four_bytes_remain_match_nolit_calcBlockSize
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JB four_bytes_remain_match_nolit_calcBlockSize
+ JMP four_bytes_loop_back_match_nolit_calcBlockSize
+
+four_bytes_remain_match_nolit_calcBlockSize:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_calcBlockSize
+ XORL BX, BX
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_calcBlockSize
+
+two_byte_offset_match_nolit_calcBlockSize:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_calcBlockSize
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_calcBlockSize
+
+two_byte_offset_short_match_nolit_calcBlockSize:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_calcBlockSize
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_calcBlockSize
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_calcBlockSize
+
+emit_copy_three_match_nolit_calcBlockSize:
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_calcBlockSize:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_calcBlockSize
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_nolit_dst_ok_calcBlockSize:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x33, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x33, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_calcBlockSize
+ INCL CX
+ JMP search_loop_calcBlockSize
+
+emit_remainder_calcBlockSize:
+ MOVQ src_len+8(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+emit_remainder_ok_calcBlockSize:
+ MOVQ src_len+8(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_calcBlockSize
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), CX
+ CMPL CX, $0x3c
+ JB one_byte_emit_remainder_calcBlockSize
+ CMPL CX, $0x00000100
+ JB two_bytes_emit_remainder_calcBlockSize
+ CMPL CX, $0x00010000
+ JB three_bytes_emit_remainder_calcBlockSize
+ CMPL CX, $0x01000000
+ JB four_bytes_emit_remainder_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+four_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+three_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+two_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x02, AX
+ CMPL CX, $0x40
+ JB memmove_emit_remainder_calcBlockSize
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+one_byte_emit_remainder_calcBlockSize:
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_calcBlockSize:
+ LEAQ (AX)(SI*1), AX
+ JMP emit_literal_done_emit_remainder_calcBlockSize
+
+memmove_long_emit_remainder_calcBlockSize:
+ LEAQ (AX)(SI*1), AX
+
+emit_literal_done_emit_remainder_calcBlockSize:
+ MOVQ AX, ret+24(FP)
+ RET
+
+// func calcBlockSizeSmall(src []byte) int
+// Requires: BMI, SSE2
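+// calcBlockSizeSmall is the same size-only pass with a smaller hash table, intended
+// for short inputs; it likewise returns 0 when the internal size check fails.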
+TEXT ·calcBlockSizeSmall(SB), $2072-32
+ XORQ AX, AX
+ MOVQ $0x00000010, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_calcBlockSizeSmall:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_calcBlockSizeSmall
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+8(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+0(FP), DX
+
+search_loop_calcBlockSizeSmall:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_calcBlockSizeSmall
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x37, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x37, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x37, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_calcBlockSizeSmall
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_calcBlockSizeSmall
+
+repeat_extend_back_loop_calcBlockSizeSmall:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_calcBlockSizeSmall
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_calcBlockSizeSmall
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_calcBlockSizeSmall
+
+repeat_extend_back_end_calcBlockSizeSmall:
+ MOVL SI, BX
+ SUBL 12(SP), BX
+ LEAQ 3(AX)(BX*1), BX
+ CMPQ BX, (SP)
+ JB repeat_dst_size_check_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+repeat_dst_size_check_calcBlockSizeSmall:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_calcBlockSizeSmall
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_calcBlockSizeSmall
+ JB three_bytes_repeat_emit_calcBlockSizeSmall
+
+three_bytes_repeat_emit_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_calcBlockSizeSmall
+
+two_bytes_repeat_emit_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_calcBlockSizeSmall
+ JMP memmove_long_repeat_emit_calcBlockSizeSmall
+
+one_byte_repeat_emit_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_calcBlockSizeSmall:
+ LEAQ (AX)(DI*1), AX
+ JMP emit_literal_done_repeat_emit_calcBlockSizeSmall
+
+memmove_long_repeat_emit_calcBlockSizeSmall:
+ LEAQ (AX)(DI*1), AX
+
+emit_literal_done_repeat_emit_calcBlockSizeSmall:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+8(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+
+matchlen_loopback_16_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x10
+ JB matchlen_match8_repeat_extend_calcBlockSizeSmall
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall
+ XORQ 8(BX)(R10*1), R11
+ JNZ matchlen_bsf_16repeat_extend_calcBlockSizeSmall
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
+ JMP matchlen_loopback_16_repeat_extend_calcBlockSizeSmall
+
+matchlen_bsf_16repeat_extend_calcBlockSizeSmall:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
+ JMP repeat_extend_forward_end_calcBlockSizeSmall
+
+matchlen_match8_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_calcBlockSizeSmall
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ JMP matchlen_match4_repeat_extend_calcBlockSizeSmall
+
+matchlen_bsf_8_repeat_extend_calcBlockSizeSmall:
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_calcBlockSizeSmall
+
+matchlen_match4_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_calcBlockSizeSmall
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_calcBlockSizeSmall
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_calcBlockSizeSmall
+ JB repeat_extend_forward_end_calcBlockSizeSmall
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_calcBlockSizeSmall
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_calcBlockSizeSmall
+
+matchlen_match1_repeat_extend_calcBlockSizeSmall:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_calcBlockSizeSmall
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_calcBlockSizeSmall:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_calcBlockSizeSmall:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall
+
+two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall:
+ MOVL BX, SI
+ SHLL $0x02, SI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_calcBlockSizeSmall
+
+emit_copy_three_repeat_as_copy_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+
+repeat_end_emit_calcBlockSizeSmall:
+ MOVL CX, 12(SP)
+ JMP search_loop_calcBlockSizeSmall
+
+no_repeat_found_calcBlockSizeSmall:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_calcBlockSizeSmall
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_calcBlockSizeSmall
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_calcBlockSizeSmall
+ MOVL 20(SP), CX
+ JMP search_loop_calcBlockSizeSmall
+
+candidate3_match_calcBlockSizeSmall:
+ ADDL $0x02, CX
+ JMP candidate_match_calcBlockSizeSmall
+
+candidate2_match_calcBlockSizeSmall:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_calcBlockSizeSmall:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_calcBlockSizeSmall
+
+match_extend_back_loop_calcBlockSizeSmall:
+ CMPL CX, SI
+ JBE match_extend_back_end_calcBlockSizeSmall
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_calcBlockSizeSmall
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_calcBlockSizeSmall
+ JMP match_extend_back_loop_calcBlockSizeSmall
+
+match_extend_back_end_calcBlockSizeSmall:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_dst_size_check_calcBlockSizeSmall:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_calcBlockSizeSmall
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
+ JB one_byte_match_emit_calcBlockSizeSmall
+ CMPL SI, $0x00000100
+ JB two_bytes_match_emit_calcBlockSizeSmall
+ JB three_bytes_match_emit_calcBlockSizeSmall
+
+three_bytes_match_emit_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_calcBlockSizeSmall
+
+two_bytes_match_emit_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL SI, $0x40
+ JB memmove_match_emit_calcBlockSizeSmall
+ JMP memmove_long_match_emit_calcBlockSizeSmall
+
+one_byte_match_emit_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_match_emit_calcBlockSizeSmall:
+ LEAQ (AX)(R8*1), AX
+ JMP emit_literal_done_match_emit_calcBlockSizeSmall
+
+memmove_long_match_emit_calcBlockSizeSmall:
+ LEAQ (AX)(R8*1), AX
+
+emit_literal_done_match_emit_calcBlockSizeSmall:
+match_nolit_loop_calcBlockSizeSmall:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+8(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+
+matchlen_loopback_16_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x10
+ JB matchlen_match8_match_nolit_calcBlockSizeSmall
+ MOVQ (DI)(R9*1), R8
+ MOVQ 8(DI)(R9*1), R10
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall
+ XORQ 8(BX)(R9*1), R10
+ JNZ matchlen_bsf_16match_nolit_calcBlockSizeSmall
+ LEAL -16(SI), SI
+ LEAL 16(R9), R9
+ JMP matchlen_loopback_16_match_nolit_calcBlockSizeSmall
+
+matchlen_bsf_16match_nolit_calcBlockSizeSmall:
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL 8(R9)(R10*1), R9
+ JMP match_nolit_end_calcBlockSizeSmall
+
+matchlen_match8_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_calcBlockSizeSmall
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ JMP matchlen_match4_match_nolit_calcBlockSizeSmall
+
+matchlen_bsf_8_match_nolit_calcBlockSizeSmall:
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_calcBlockSizeSmall
+
+matchlen_match4_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_calcBlockSizeSmall
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_calcBlockSizeSmall
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_calcBlockSizeSmall
+ JB match_nolit_end_calcBlockSizeSmall
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_calcBlockSizeSmall
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_calcBlockSizeSmall
+
+matchlen_match1_match_nolit_calcBlockSizeSmall:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_calcBlockSizeSmall
+ LEAL 1(R9), R9
+
+match_nolit_end_calcBlockSizeSmall:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_calcBlockSizeSmall:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_calcBlockSizeSmall
+
+two_byte_offset_short_match_nolit_calcBlockSizeSmall:
+ MOVL R9, BX
+ SHLL $0x02, BX
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_calcBlockSizeSmall
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_calcBlockSizeSmall
+
+emit_copy_three_match_nolit_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_calcBlockSizeSmall:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_calcBlockSizeSmall
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_nolit_dst_ok_calcBlockSizeSmall:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x37, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x37, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_calcBlockSizeSmall
+ INCL CX
+ JMP search_loop_calcBlockSizeSmall
+
+emit_remainder_calcBlockSizeSmall:
+ MOVQ src_len+8(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+emit_remainder_ok_calcBlockSizeSmall:
+ MOVQ src_len+8(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), CX
+ CMPL CX, $0x3c
+ JB one_byte_emit_remainder_calcBlockSizeSmall
+ CMPL CX, $0x00000100
+ JB two_bytes_emit_remainder_calcBlockSizeSmall
+ JB three_bytes_emit_remainder_calcBlockSizeSmall
+
+three_bytes_emit_remainder_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_calcBlockSizeSmall
+
+two_bytes_emit_remainder_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL CX, $0x40
+ JB memmove_emit_remainder_calcBlockSizeSmall
+ JMP memmove_long_emit_remainder_calcBlockSizeSmall
+
+one_byte_emit_remainder_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_calcBlockSizeSmall:
+ LEAQ (AX)(SI*1), AX
+ JMP emit_literal_done_emit_remainder_calcBlockSizeSmall
+
+memmove_long_emit_remainder_calcBlockSizeSmall:
+ LEAQ (AX)(SI*1), AX
+
+emit_literal_done_emit_remainder_calcBlockSizeSmall:
+ MOVQ AX, ret+24(FP)
+ RET
+
+// func emitLiteral(dst []byte, lit []byte) int
+// Requires: SSE2
+TEXT ·emitLiteral(SB), NOSPLIT, $0-56
+ MOVQ lit_len+32(FP), DX
+ MOVQ dst_base+0(FP), AX
+ MOVQ lit_base+24(FP), CX
+ TESTQ DX, DX
+ JZ emit_literal_end_standalone_skip
+ MOVL DX, BX
+ LEAL -1(DX), SI
+ CMPL SI, $0x3c
+ JB one_byte_standalone
+ CMPL SI, $0x00000100
+ JB two_bytes_standalone
+ CMPL SI, $0x00010000
+ JB three_bytes_standalone
+ CMPL SI, $0x01000000
+ JB four_bytes_standalone
+ MOVB $0xfc, (AX)
+ MOVL SI, 1(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP memmove_long_standalone
+
+four_bytes_standalone:
+ MOVL SI, DI
+ SHRL $0x10, DI
+ MOVB $0xf8, (AX)
+ MOVW SI, 1(AX)
+ MOVB DI, 3(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP memmove_long_standalone
+
+three_bytes_standalone:
+ MOVB $0xf4, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP memmove_long_standalone
+
+two_bytes_standalone:
+ MOVB $0xf0, (AX)
+ MOVB SI, 1(AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ CMPL SI, $0x40
+ JB memmove_standalone
+ JMP memmove_long_standalone
+
+one_byte_standalone:
+ SHLB $0x02, SI
+ MOVB SI, (AX)
+ ADDQ $0x01, BX
+ ADDQ $0x01, AX
+
+memmove_standalone:
+ // genMemMoveShort
+ CMPQ DX, $0x03
+ JB emit_lit_memmove_standalone_memmove_move_1or2
+ JE emit_lit_memmove_standalone_memmove_move_3
+ CMPQ DX, $0x08
+ JB emit_lit_memmove_standalone_memmove_move_4through7
+ CMPQ DX, $0x10
+ JBE emit_lit_memmove_standalone_memmove_move_8through16
+ CMPQ DX, $0x20
+ JBE emit_lit_memmove_standalone_memmove_move_17through32
+ JMP emit_lit_memmove_standalone_memmove_move_33through64
+
+emit_lit_memmove_standalone_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(DX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(DX*1)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(DX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(DX*1)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(DX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(DX*1)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(DX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DX*1)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(DX*1), X2
+ MOVOU -16(CX)(DX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DX*1)
+ MOVOU X3, -16(AX)(DX*1)
+ JMP emit_literal_end_standalone
+ JMP emit_literal_end_standalone
+
+memmove_long_standalone:
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(DX*1), X2
+ MOVOU -16(CX)(DX*1), X3
+ MOVQ DX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_standalonelarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_standalonelarge_big_loop_back
+
+emit_lit_memmove_long_standalonelarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ DX, R8
+ JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DX*1)
+ MOVOU X3, -16(AX)(DX*1)
+ JMP emit_literal_end_standalone
+ JMP emit_literal_end_standalone
+
+emit_literal_end_standalone_skip:
+ XORQ BX, BX
+
+emit_literal_end_standalone:
+ MOVQ BX, ret+48(FP)
+ RET
+
+// func emitRepeat(dst []byte, offset int, length int) int
+TEXT ·emitRepeat(SB), NOSPLIT, $0-48
+ XORQ BX, BX
+ MOVQ dst_base+0(FP), AX
+ MOVQ offset+24(FP), CX
+ MOVQ length+32(FP), DX
+
+ // emitRepeat
+emit_repeat_again_standalone:
+ MOVL DX, SI
+ LEAL -4(DX), DX
+ CMPL SI, $0x08
+ JBE repeat_two_standalone
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_standalone
+ CMPL CX, $0x00000800
+ JB repeat_two_offset_standalone
+
+cant_repeat_two_offset_standalone:
+ CMPL DX, $0x00000104
+ JB repeat_three_standalone
+ CMPL DX, $0x00010100
+ JB repeat_four_standalone
+ CMPL DX, $0x0100ffff
+ JB repeat_five_standalone
+ LEAL -16842747(DX), DX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ ADDQ $0x05, BX
+ JMP emit_repeat_again_standalone
+
+repeat_five_standalone:
+ LEAL -65536(DX), DX
+ MOVL DX, CX
+ MOVW $0x001d, (AX)
+ MOVW DX, 2(AX)
+ SARL $0x10, CX
+ MOVB CL, 4(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_repeat_end
+
+repeat_four_standalone:
+ LEAL -256(DX), DX
+ MOVW $0x0019, (AX)
+ MOVW DX, 2(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP gen_emit_repeat_end
+
+repeat_three_standalone:
+ LEAL -4(DX), DX
+ MOVW $0x0015, (AX)
+ MOVB DL, 2(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP gen_emit_repeat_end
+
+repeat_two_standalone:
+ SHLL $0x02, DX
+ ORL $0x01, DX
+ MOVW DX, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_repeat_end
+
+repeat_two_offset_standalone:
+ XORQ SI, SI
+ LEAL 1(SI)(DX*4), DX
+ MOVB CL, 1(AX)
+ SARL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, DX
+ MOVB DL, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+
+gen_emit_repeat_end:
+ MOVQ BX, ret+40(FP)
+ RET
+
+// func emitCopy(dst []byte, offset int, length int) int
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ XORQ BX, BX
+ MOVQ dst_base+0(FP), AX
+ MOVQ offset+24(FP), CX
+ MOVQ length+32(FP), DX
+
+ // emitCopy
+ CMPL CX, $0x00010000
+ JB two_byte_offset_standalone
+ CMPL DX, $0x40
+ JBE four_bytes_remain_standalone
+ MOVB $0xff, (AX)
+ MOVL CX, 1(AX)
+ LEAL -64(DX), DX
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ CMPL DX, $0x04
+ JB four_bytes_remain_standalone
+
+ // emitRepeat
+emit_repeat_again_standalone_emit_copy:
+ MOVL DX, SI
+ LEAL -4(DX), DX
+ CMPL SI, $0x08
+ JBE repeat_two_standalone_emit_copy
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_standalone_emit_copy
+ CMPL CX, $0x00000800
+ JB repeat_two_offset_standalone_emit_copy
+
+cant_repeat_two_offset_standalone_emit_copy:
+ CMPL DX, $0x00000104
+ JB repeat_three_standalone_emit_copy
+ CMPL DX, $0x00010100
+ JB repeat_four_standalone_emit_copy
+ CMPL DX, $0x0100ffff
+ JB repeat_five_standalone_emit_copy
+ LEAL -16842747(DX), DX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ ADDQ $0x05, BX
+ JMP emit_repeat_again_standalone_emit_copy
+
+repeat_five_standalone_emit_copy:
+ LEAL -65536(DX), DX
+ MOVL DX, CX
+ MOVW $0x001d, (AX)
+ MOVW DX, 2(AX)
+ SARL $0x10, CX
+ MOVB CL, 4(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy:
+ LEAL -256(DX), DX
+ MOVW $0x0019, (AX)
+ MOVW DX, 2(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy:
+ LEAL -4(DX), DX
+ MOVW $0x0015, (AX)
+ MOVB DL, 2(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy:
+ SHLL $0x02, DX
+ ORL $0x01, DX
+ MOVW DX, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+repeat_two_offset_standalone_emit_copy:
+ XORQ SI, SI
+ LEAL 1(SI)(DX*4), DX
+ MOVB CL, 1(AX)
+ SARL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, DX
+ MOVB DL, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+four_bytes_remain_standalone:
+ TESTL DX, DX
+ JZ gen_emit_copy_end
+ XORL SI, SI
+ LEAL -1(SI)(DX*4), DX
+ MOVB DL, (AX)
+ MOVL CX, 1(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end
+
+two_byte_offset_standalone:
+ CMPL DX, $0x40
+ JBE two_byte_offset_short_standalone
+ CMPL CX, $0x00000800
+ JAE long_offset_short_standalone
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB CL, 1(AX)
+ MOVL CX, DI
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ SUBL $0x08, DX
+
+ // emitRepeat
+ LEAL -4(DX), DX
+ JMP cant_repeat_two_offset_standalone_emit_copy_short_2b
+
+emit_repeat_again_standalone_emit_copy_short_2b:
+ MOVL DX, SI
+ LEAL -4(DX), DX
+ CMPL SI, $0x08
+ JBE repeat_two_standalone_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_standalone_emit_copy_short_2b
+ CMPL CX, $0x00000800
+ JB repeat_two_offset_standalone_emit_copy_short_2b
+
+cant_repeat_two_offset_standalone_emit_copy_short_2b:
+ CMPL DX, $0x00000104
+ JB repeat_three_standalone_emit_copy_short_2b
+ CMPL DX, $0x00010100
+ JB repeat_four_standalone_emit_copy_short_2b
+ CMPL DX, $0x0100ffff
+ JB repeat_five_standalone_emit_copy_short_2b
+ LEAL -16842747(DX), DX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ ADDQ $0x05, BX
+ JMP emit_repeat_again_standalone_emit_copy_short_2b
+
+repeat_five_standalone_emit_copy_short_2b:
+ LEAL -65536(DX), DX
+ MOVL DX, CX
+ MOVW $0x001d, (AX)
+ MOVW DX, 2(AX)
+ SARL $0x10, CX
+ MOVB CL, 4(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy_short_2b:
+ LEAL -256(DX), DX
+ MOVW $0x0019, (AX)
+ MOVW DX, 2(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy_short_2b:
+ LEAL -4(DX), DX
+ MOVW $0x0015, (AX)
+ MOVB DL, 2(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy_short_2b:
+ SHLL $0x02, DX
+ ORL $0x01, DX
+ MOVW DX, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+repeat_two_offset_standalone_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(DX*4), DX
+ MOVB CL, 1(AX)
+ SARL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, DX
+ MOVB DL, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+long_offset_short_standalone:
+ MOVB $0xee, (AX)
+ MOVW CX, 1(AX)
+ LEAL -60(DX), DX
+ ADDQ $0x03, AX
+ ADDQ $0x03, BX
+
+ // emitRepeat
+emit_repeat_again_standalone_emit_copy_short:
+ MOVL DX, SI
+ LEAL -4(DX), DX
+ CMPL SI, $0x08
+ JBE repeat_two_standalone_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_standalone_emit_copy_short
+ CMPL CX, $0x00000800
+ JB repeat_two_offset_standalone_emit_copy_short
+
+cant_repeat_two_offset_standalone_emit_copy_short:
+ CMPL DX, $0x00000104
+ JB repeat_three_standalone_emit_copy_short
+ CMPL DX, $0x00010100
+ JB repeat_four_standalone_emit_copy_short
+ CMPL DX, $0x0100ffff
+ JB repeat_five_standalone_emit_copy_short
+ LEAL -16842747(DX), DX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ ADDQ $0x05, BX
+ JMP emit_repeat_again_standalone_emit_copy_short
+
+repeat_five_standalone_emit_copy_short:
+ LEAL -65536(DX), DX
+ MOVL DX, CX
+ MOVW $0x001d, (AX)
+ MOVW DX, 2(AX)
+ SARL $0x10, CX
+ MOVB CL, 4(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy_short:
+ LEAL -256(DX), DX
+ MOVW $0x0019, (AX)
+ MOVW DX, 2(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy_short:
+ LEAL -4(DX), DX
+ MOVW $0x0015, (AX)
+ MOVB DL, 2(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy_short:
+ SHLL $0x02, DX
+ ORL $0x01, DX
+ MOVW DX, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+repeat_two_offset_standalone_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(DX*4), DX
+ MOVB CL, 1(AX)
+ SARL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, DX
+ MOVB DL, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+two_byte_offset_short_standalone:
+ MOVL DX, SI
+ SHLL $0x02, SI
+ CMPL DX, $0x0c
+ JAE emit_copy_three_standalone
+ CMPL CX, $0x00000800
+ JAE emit_copy_three_standalone
+ LEAL -15(SI), SI
+ MOVB CL, 1(AX)
+ SHRL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+emit_copy_three_standalone:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW CX, 1(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+
+gen_emit_copy_end:
+ MOVQ BX, ret+40(FP)
+ RET
+
+// func emitCopyNoRepeat(dst []byte, offset int, length int) int
+TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48
+ XORQ BX, BX
+ MOVQ dst_base+0(FP), AX
+ MOVQ offset+24(FP), CX
+ MOVQ length+32(FP), DX
+
+ // emitCopy
+ CMPL CX, $0x00010000
+ JB two_byte_offset_standalone_snappy
+
+four_bytes_loop_back_standalone_snappy:
+ CMPL DX, $0x40
+ JBE four_bytes_remain_standalone_snappy
+ MOVB $0xff, (AX)
+ MOVL CX, 1(AX)
+ LEAL -64(DX), DX
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ CMPL DX, $0x04
+ JB four_bytes_remain_standalone_snappy
+ JMP four_bytes_loop_back_standalone_snappy
+
+four_bytes_remain_standalone_snappy:
+ TESTL DX, DX
+ JZ gen_emit_copy_end_snappy
+ XORL SI, SI
+ LEAL -1(SI)(DX*4), DX
+ MOVB DL, (AX)
+ MOVL CX, 1(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end_snappy
+
+two_byte_offset_standalone_snappy:
+ CMPL DX, $0x40
+ JBE two_byte_offset_short_standalone_snappy
+ MOVB $0xee, (AX)
+ MOVW CX, 1(AX)
+ LEAL -60(DX), DX
+ ADDQ $0x03, AX
+ ADDQ $0x03, BX
+ JMP two_byte_offset_standalone_snappy
+
+two_byte_offset_short_standalone_snappy:
+ MOVL DX, SI
+ SHLL $0x02, SI
+ CMPL DX, $0x0c
+ JAE emit_copy_three_standalone_snappy
+ CMPL CX, $0x00000800
+ JAE emit_copy_three_standalone_snappy
+ LEAL -15(SI), SI
+ MOVB CL, 1(AX)
+ SHRL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end_snappy
+
+emit_copy_three_standalone_snappy:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW CX, 1(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+
+gen_emit_copy_end_snappy:
+ MOVQ BX, ret+40(FP)
+ RET
+
+// func matchLen(a []byte, b []byte) int
+// Requires: BMI
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+ MOVQ a_base+0(FP), AX
+ MOVQ b_base+24(FP), CX
+ MOVQ a_len+8(FP), DX
+
+ // matchLen
+ XORL SI, SI
+
+matchlen_loopback_16_standalone:
+ CMPL DX, $0x10
+ JB matchlen_match8_standalone
+ MOVQ (AX)(SI*1), BX
+ MOVQ 8(AX)(SI*1), DI
+ XORQ (CX)(SI*1), BX
+ JNZ matchlen_bsf_8_standalone
+ XORQ 8(CX)(SI*1), DI
+ JNZ matchlen_bsf_16standalone
+ LEAL -16(DX), DX
+ LEAL 16(SI), SI
+ JMP matchlen_loopback_16_standalone
+
+matchlen_bsf_16standalone:
+#ifdef GOAMD64_v3
+ TZCNTQ DI, DI
+
+#else
+ BSFQ DI, DI
+
+#endif
+ SARQ $0x03, DI
+ LEAL 8(SI)(DI*1), SI
+ JMP gen_match_len_end
+
+matchlen_match8_standalone:
+ CMPL DX, $0x08
+ JB matchlen_match4_standalone
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JNZ matchlen_bsf_8_standalone
+ LEAL -8(DX), DX
+ LEAL 8(SI), SI
+ JMP matchlen_match4_standalone
+
+matchlen_bsf_8_standalone:
+#ifdef GOAMD64_v3
+ TZCNTQ BX, BX
+
+#else
+ BSFQ BX, BX
+
+#endif
+ SARQ $0x03, BX
+ LEAL (SI)(BX*1), SI
+ JMP gen_match_len_end
+
+matchlen_match4_standalone:
+ CMPL DX, $0x04
+ JB matchlen_match2_standalone
+ MOVL (AX)(SI*1), BX
+ CMPL (CX)(SI*1), BX
+ JNE matchlen_match2_standalone
+ LEAL -4(DX), DX
+ LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+ CMPL DX, $0x01
+ JE matchlen_match1_standalone
+ JB gen_match_len_end
+ MOVW (AX)(SI*1), BX
+ CMPW (CX)(SI*1), BX
+ JNE matchlen_match1_standalone
+ LEAL 2(SI), SI
+ SUBL $0x02, DX
+ JZ gen_match_len_end
+
+matchlen_match1_standalone:
+ MOVB (AX)(SI*1), BL
+ CMPB (CX)(SI*1), BL
+ JNE gen_match_len_end
+ LEAL 1(SI), SI
+
+gen_match_len_end:
+ MOVQ SI, ret+48(FP)
+ RET
+
+// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+ XORQ DI, DI
+
+lz4_s2_loop:
+ CMPQ DX, BX
+ JAE lz4_s2_corrupt
+ CMPQ AX, CX
+ JAE lz4_s2_dstfull
+ MOVBQZX (DX), R8
+ MOVQ R8, R9
+ MOVQ R8, R10
+ SHRQ $0x04, R9
+ ANDQ $0x0f, R10
+ CMPQ R8, $0xf0
+ JB lz4_s2_ll_end
+
+lz4_s2_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4_s2_corrupt
+ MOVBQZX (DX), R8
+ ADDQ R8, R9
+ CMPQ R8, $0xff
+ JEQ lz4_s2_ll_loop
+
+lz4_s2_ll_end:
+ LEAQ (DX)(R9*1), R8
+ ADDQ $0x04, R10
+ CMPQ R8, BX
+ JAE lz4_s2_corrupt
+ INCQ DX
+ INCQ R8
+ TESTQ R9, R9
+ JZ lz4_s2_lits_done
+ LEAQ (AX)(R9*1), R11
+ CMPQ R11, CX
+ JAE lz4_s2_dstfull
+ ADDQ R9, SI
+ LEAL -1(R9), R11
+ CMPL R11, $0x3c
+ JB one_byte_lz4_s2
+ CMPL R11, $0x00000100
+ JB two_bytes_lz4_s2
+ CMPL R11, $0x00010000
+ JB three_bytes_lz4_s2
+ CMPL R11, $0x01000000
+ JB four_bytes_lz4_s2
+ MOVB $0xfc, (AX)
+ MOVL R11, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_lz4_s2
+
+four_bytes_lz4_s2:
+ MOVL R11, R12
+ SHRL $0x10, R12
+ MOVB $0xf8, (AX)
+ MOVW R11, 1(AX)
+ MOVB R12, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4_s2
+
+three_bytes_lz4_s2:
+ MOVB $0xf4, (AX)
+ MOVW R11, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_lz4_s2
+
+two_bytes_lz4_s2:
+ MOVB $0xf0, (AX)
+ MOVB R11, 1(AX)
+ ADDQ $0x02, AX
+ CMPL R11, $0x40
+ JB memmove_lz4_s2
+ JMP memmove_long_lz4_s2
+
+one_byte_lz4_s2:
+ SHLB $0x02, R11
+ MOVB R11, (AX)
+ ADDQ $0x01, AX
+
+memmove_lz4_s2:
+ LEAQ (AX)(R9*1), R11
+
+ // genMemMoveShort
+ CMPQ R9, $0x08
+ JBE emit_lit_memmove_lz4_s2_memmove_move_8
+ CMPQ R9, $0x10
+ JBE emit_lit_memmove_lz4_s2_memmove_move_8through16
+ CMPQ R9, $0x20
+ JBE emit_lit_memmove_lz4_s2_memmove_move_17through32
+ JMP emit_lit_memmove_lz4_s2_memmove_move_33through64
+
+emit_lit_memmove_lz4_s2_memmove_move_8:
+ MOVQ (DX), R12
+ MOVQ R12, (AX)
+ JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_8through16:
+ MOVQ (DX), R12
+ MOVQ -8(DX)(R9*1), DX
+ MOVQ R12, (AX)
+ MOVQ DX, -8(AX)(R9*1)
+ JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R9*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R9*1)
+ JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R9*1), X2
+ MOVOU -16(DX)(R9*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R9*1)
+ MOVOU X3, -16(AX)(R9*1)
+
+memmove_end_copy_lz4_s2:
+ MOVQ R11, AX
+ JMP lz4_s2_lits_emit_done
+
+memmove_long_lz4_s2:
+ LEAQ (AX)(R9*1), R11
+
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R9*1), X2
+ MOVOU -16(DX)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ AX, R12
+ ANDL $0x0000001f, R12
+ MOVQ $0x00000040, R14
+ SUBQ R12, R14
+ DECQ R13
+ JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32
+ LEAQ -32(DX)(R14*1), R12
+ LEAQ -32(AX)(R14*1), R15
+
+emit_lit_memmove_long_lz4_s2large_big_loop_back:
+ MOVOU (R12), X4
+ MOVOU 16(R12), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R12
+ ADDQ $0x20, R14
+ DECQ R13
+ JNA emit_lit_memmove_long_lz4_s2large_big_loop_back
+
+emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32:
+ MOVOU -32(DX)(R14*1), X4
+ MOVOU -16(DX)(R14*1), X5
+ MOVOA X4, -32(AX)(R14*1)
+ MOVOA X5, -16(AX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
+ JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R9*1)
+ MOVOU X3, -16(AX)(R9*1)
+ MOVQ R11, AX
+
+lz4_s2_lits_emit_done:
+ MOVQ R8, DX
+
+lz4_s2_lits_done:
+ CMPQ DX, BX
+ JNE lz4_s2_match
+ CMPQ R10, $0x04
+ JEQ lz4_s2_done
+ JMP lz4_s2_corrupt
+
+lz4_s2_match:
+ LEAQ 2(DX), R8
+ CMPQ R8, BX
+ JAE lz4_s2_corrupt
+ MOVWQZX (DX), R9
+ MOVQ R8, DX
+ TESTQ R9, R9
+ JZ lz4_s2_corrupt
+ CMPQ R9, SI
+ JA lz4_s2_corrupt
+ CMPQ R10, $0x13
+ JNE lz4_s2_ml_done
+
+lz4_s2_ml_loop:
+ MOVBQZX (DX), R8
+ INCQ DX
+ ADDQ R8, R10
+ CMPQ DX, BX
+ JAE lz4_s2_corrupt
+ CMPQ R8, $0xff
+ JEQ lz4_s2_ml_loop
+
+lz4_s2_ml_done:
+ ADDQ R10, SI
+ CMPQ R9, DI
+ JNE lz4_s2_docopy
+
+ // emitRepeat
+emit_repeat_again_lz4_s2:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2
+
+cant_repeat_two_offset_lz4_s2:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2
+
+repeat_five_lz4_s2:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4_s2_loop
+
+repeat_four_lz4_s2:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4_s2_loop
+
+repeat_three_lz4_s2:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4_s2_loop
+
+repeat_two_lz4_s2:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_s2_loop
+
+repeat_two_offset_lz4_s2:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_s2_loop
+
+lz4_s2_docopy:
+ MOVQ R9, DI
+
+ // emitCopy
+ CMPL R10, $0x40
+ JBE two_byte_offset_short_lz4_s2
+ CMPL R9, $0x00000800
+ JAE long_offset_short_lz4_s2
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB R9, 1(AX)
+ MOVL R9, R11
+ SHRL $0x08, R11
+ SHLL $0x05, R11
+ ORL R11, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+
+emit_repeat_again_lz4_s2_emit_copy_short_2b:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2_emit_copy_short_2b
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2_emit_copy_short_2b
+
+cant_repeat_two_offset_lz4_s2_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2_emit_copy_short_2b
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2_emit_copy_short_2b
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2_emit_copy_short_2b
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2_emit_copy_short_2b
+
+repeat_five_lz4_s2_emit_copy_short_2b:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4_s2_loop
+
+repeat_four_lz4_s2_emit_copy_short_2b:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4_s2_loop
+
+repeat_three_lz4_s2_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4_s2_loop
+
+repeat_two_lz4_s2_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_s2_loop
+
+repeat_two_offset_lz4_s2_emit_copy_short_2b:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_s2_loop
+
+long_offset_short_lz4_s2:
+ MOVB $0xee, (AX)
+ MOVW R9, 1(AX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_lz4_s2_emit_copy_short:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2_emit_copy_short
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2_emit_copy_short
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2_emit_copy_short
+
+cant_repeat_two_offset_lz4_s2_emit_copy_short:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2_emit_copy_short
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2_emit_copy_short
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2_emit_copy_short
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2_emit_copy_short
+
+repeat_five_lz4_s2_emit_copy_short:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4_s2_loop
+
+repeat_four_lz4_s2_emit_copy_short:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4_s2_loop
+
+repeat_three_lz4_s2_emit_copy_short:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4_s2_loop
+
+repeat_two_lz4_s2_emit_copy_short:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_s2_loop
+
+repeat_two_offset_lz4_s2_emit_copy_short:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_s2_loop
+
+two_byte_offset_short_lz4_s2:
+ MOVL R10, R8
+ SHLL $0x02, R8
+ CMPL R10, $0x0c
+ JAE emit_copy_three_lz4_s2
+ CMPL R9, $0x00000800
+ JAE emit_copy_three_lz4_s2
+ LEAL -15(R8), R8
+ MOVB R9, 1(AX)
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_s2_loop
+
+emit_copy_three_lz4_s2:
+ LEAL -2(R8), R8
+ MOVB R8, (AX)
+ MOVW R9, 1(AX)
+ ADDQ $0x03, AX
+ JMP lz4_s2_loop
+
+lz4_s2_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
+ RET
+
+lz4_s2_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+lz4_s2_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+ XORQ DI, DI
+
+lz4s_s2_loop:
+ CMPQ DX, BX
+ JAE lz4s_s2_corrupt
+ CMPQ AX, CX
+ JAE lz4s_s2_dstfull
+ MOVBQZX (DX), R8
+ MOVQ R8, R9
+ MOVQ R8, R10
+ SHRQ $0x04, R9
+ ANDQ $0x0f, R10
+ CMPQ R8, $0xf0
+ JB lz4s_s2_ll_end
+
+lz4s_s2_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4s_s2_corrupt
+ MOVBQZX (DX), R8
+ ADDQ R8, R9
+ CMPQ R8, $0xff
+ JEQ lz4s_s2_ll_loop
+
+lz4s_s2_ll_end:
+ LEAQ (DX)(R9*1), R8
+ ADDQ $0x03, R10
+ CMPQ R8, BX
+ JAE lz4s_s2_corrupt
+ INCQ DX
+ INCQ R8
+ TESTQ R9, R9
+ JZ lz4s_s2_lits_done
+ LEAQ (AX)(R9*1), R11
+ CMPQ R11, CX
+ JAE lz4s_s2_dstfull
+ ADDQ R9, SI
+ LEAL -1(R9), R11
+ CMPL R11, $0x3c
+ JB one_byte_lz4s_s2
+ CMPL R11, $0x00000100
+ JB two_bytes_lz4s_s2
+ CMPL R11, $0x00010000
+ JB three_bytes_lz4s_s2
+ CMPL R11, $0x01000000
+ JB four_bytes_lz4s_s2
+ MOVB $0xfc, (AX)
+ MOVL R11, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_lz4s_s2
+
+four_bytes_lz4s_s2:
+ MOVL R11, R12
+ SHRL $0x10, R12
+ MOVB $0xf8, (AX)
+ MOVW R11, 1(AX)
+ MOVB R12, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4s_s2
+
+three_bytes_lz4s_s2:
+ MOVB $0xf4, (AX)
+ MOVW R11, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_lz4s_s2
+
+two_bytes_lz4s_s2:
+ MOVB $0xf0, (AX)
+ MOVB R11, 1(AX)
+ ADDQ $0x02, AX
+ CMPL R11, $0x40
+ JB memmove_lz4s_s2
+ JMP memmove_long_lz4s_s2
+
+one_byte_lz4s_s2:
+ SHLB $0x02, R11
+ MOVB R11, (AX)
+ ADDQ $0x01, AX
+
+memmove_lz4s_s2:
+ LEAQ (AX)(R9*1), R11
+
+ // genMemMoveShort
+ CMPQ R9, $0x08
+ JBE emit_lit_memmove_lz4s_s2_memmove_move_8
+ CMPQ R9, $0x10
+ JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16
+ CMPQ R9, $0x20
+ JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32
+ JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64
+
+emit_lit_memmove_lz4s_s2_memmove_move_8:
+ MOVQ (DX), R12
+ MOVQ R12, (AX)
+ JMP memmove_end_copy_lz4s_s2
+
+emit_lit_memmove_lz4s_s2_memmove_move_8through16:
+ MOVQ (DX), R12
+ MOVQ -8(DX)(R9*1), DX
+ MOVQ R12, (AX)
+ MOVQ DX, -8(AX)(R9*1)
+ JMP memmove_end_copy_lz4s_s2
+
+emit_lit_memmove_lz4s_s2_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R9*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R9*1)
+ JMP memmove_end_copy_lz4s_s2
+
+emit_lit_memmove_lz4s_s2_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R9*1), X2
+ MOVOU -16(DX)(R9*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R9*1)
+ MOVOU X3, -16(AX)(R9*1)
+
+memmove_end_copy_lz4s_s2:
+ MOVQ R11, AX
+ JMP lz4s_s2_lits_emit_done
+
+memmove_long_lz4s_s2:
+ LEAQ (AX)(R9*1), R11
+
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R9*1), X2
+ MOVOU -16(DX)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ AX, R12
+ ANDL $0x0000001f, R12
+ MOVQ $0x00000040, R14
+ SUBQ R12, R14
+ DECQ R13
+ JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32
+ LEAQ -32(DX)(R14*1), R12
+ LEAQ -32(AX)(R14*1), R15
+
+emit_lit_memmove_long_lz4s_s2large_big_loop_back:
+ MOVOU (R12), X4
+ MOVOU 16(R12), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R12
+ ADDQ $0x20, R14
+ DECQ R13
+ JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back
+
+emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32:
+ MOVOU -32(DX)(R14*1), X4
+ MOVOU -16(DX)(R14*1), X5
+ MOVOA X4, -32(AX)(R14*1)
+ MOVOA X5, -16(AX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
+ JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R9*1)
+ MOVOU X3, -16(AX)(R9*1)
+ MOVQ R11, AX
+
+lz4s_s2_lits_emit_done:
+ MOVQ R8, DX
+
+lz4s_s2_lits_done:
+ CMPQ DX, BX
+ JNE lz4s_s2_match
+ CMPQ R10, $0x03
+ JEQ lz4s_s2_done
+ JMP lz4s_s2_corrupt
+
+lz4s_s2_match:
+ CMPQ R10, $0x03
+ JEQ lz4s_s2_loop
+ LEAQ 2(DX), R8
+ CMPQ R8, BX
+ JAE lz4s_s2_corrupt
+ MOVWQZX (DX), R9
+ MOVQ R8, DX
+ TESTQ R9, R9
+ JZ lz4s_s2_corrupt
+ CMPQ R9, SI
+ JA lz4s_s2_corrupt
+ CMPQ R10, $0x12
+ JNE lz4s_s2_ml_done
+
+lz4s_s2_ml_loop:
+ MOVBQZX (DX), R8
+ INCQ DX
+ ADDQ R8, R10
+ CMPQ DX, BX
+ JAE lz4s_s2_corrupt
+ CMPQ R8, $0xff
+ JEQ lz4s_s2_ml_loop
+
+lz4s_s2_ml_done:
+ ADDQ R10, SI
+ CMPQ R9, DI
+ JNE lz4s_s2_docopy
+
+ // emitRepeat
+emit_repeat_again_lz4_s2:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2
+
+cant_repeat_two_offset_lz4_s2:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2
+
+repeat_five_lz4_s2:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4s_s2_loop
+
+repeat_four_lz4_s2:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4s_s2_loop
+
+repeat_three_lz4_s2:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_s2_loop
+
+repeat_two_lz4_s2:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+repeat_two_offset_lz4_s2:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+lz4s_s2_docopy:
+ MOVQ R9, DI
+
+ // emitCopy
+ CMPL R10, $0x40
+ JBE two_byte_offset_short_lz4_s2
+ CMPL R9, $0x00000800
+ JAE long_offset_short_lz4_s2
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB R9, 1(AX)
+ MOVL R9, R11
+ SHRL $0x08, R11
+ SHLL $0x05, R11
+ ORL R11, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+
+emit_repeat_again_lz4_s2_emit_copy_short_2b:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2_emit_copy_short_2b
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2_emit_copy_short_2b
+
+cant_repeat_two_offset_lz4_s2_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2_emit_copy_short_2b
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2_emit_copy_short_2b
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2_emit_copy_short_2b
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2_emit_copy_short_2b
+
+repeat_five_lz4_s2_emit_copy_short_2b:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4s_s2_loop
+
+repeat_four_lz4_s2_emit_copy_short_2b:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4s_s2_loop
+
+repeat_three_lz4_s2_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_s2_loop
+
+repeat_two_lz4_s2_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+repeat_two_offset_lz4_s2_emit_copy_short_2b:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+long_offset_short_lz4_s2:
+ MOVB $0xee, (AX)
+ MOVW R9, 1(AX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_lz4_s2_emit_copy_short:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2_emit_copy_short
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2_emit_copy_short
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2_emit_copy_short
+
+cant_repeat_two_offset_lz4_s2_emit_copy_short:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2_emit_copy_short
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2_emit_copy_short
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2_emit_copy_short
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2_emit_copy_short
+
+repeat_five_lz4_s2_emit_copy_short:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4s_s2_loop
+
+repeat_four_lz4_s2_emit_copy_short:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4s_s2_loop
+
+repeat_three_lz4_s2_emit_copy_short:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_s2_loop
+
+repeat_two_lz4_s2_emit_copy_short:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+repeat_two_offset_lz4_s2_emit_copy_short:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+two_byte_offset_short_lz4_s2:
+ MOVL R10, R8
+ SHLL $0x02, R8
+ CMPL R10, $0x0c
+ JAE emit_copy_three_lz4_s2
+ CMPL R9, $0x00000800
+ JAE emit_copy_three_lz4_s2
+ LEAL -15(R8), R8
+ MOVB R9, 1(AX)
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+emit_copy_three_lz4_s2:
+ LEAL -2(R8), R8
+ MOVB R8, (AX)
+ MOVW R9, 1(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_s2_loop
+
+lz4s_s2_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
+ RET
+
+lz4s_s2_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+lz4s_s2_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+
+lz4_snappy_loop:
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ CMPQ AX, CX
+ JAE lz4_snappy_dstfull
+ MOVBQZX (DX), DI
+ MOVQ DI, R8
+ MOVQ DI, R9
+ SHRQ $0x04, R8
+ ANDQ $0x0f, R9
+ CMPQ DI, $0xf0
+ JB lz4_snappy_ll_end
+
+lz4_snappy_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ MOVBQZX (DX), DI
+ ADDQ DI, R8
+ CMPQ DI, $0xff
+ JEQ lz4_snappy_ll_loop
+
+lz4_snappy_ll_end:
+ LEAQ (DX)(R8*1), DI
+ ADDQ $0x04, R9
+ CMPQ DI, BX
+ JAE lz4_snappy_corrupt
+ INCQ DX
+ INCQ DI
+ TESTQ R8, R8
+ JZ lz4_snappy_lits_done
+ LEAQ (AX)(R8*1), R10
+ CMPQ R10, CX
+ JAE lz4_snappy_dstfull
+ ADDQ R8, SI
+ LEAL -1(R8), R10
+ CMPL R10, $0x3c
+ JB one_byte_lz4_snappy
+ CMPL R10, $0x00000100
+ JB two_bytes_lz4_snappy
+ CMPL R10, $0x00010000
+ JB three_bytes_lz4_snappy
+ CMPL R10, $0x01000000
+ JB four_bytes_lz4_snappy
+ MOVB $0xfc, (AX)
+ MOVL R10, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_lz4_snappy
+
+four_bytes_lz4_snappy:
+ MOVL R10, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (AX)
+ MOVW R10, 1(AX)
+ MOVB R11, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4_snappy
+
+three_bytes_lz4_snappy:
+ MOVB $0xf4, (AX)
+ MOVW R10, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_lz4_snappy
+
+two_bytes_lz4_snappy:
+ MOVB $0xf0, (AX)
+ MOVB R10, 1(AX)
+ ADDQ $0x02, AX
+ CMPL R10, $0x40
+ JB memmove_lz4_snappy
+ JMP memmove_long_lz4_snappy
+
+one_byte_lz4_snappy:
+ SHLB $0x02, R10
+ MOVB R10, (AX)
+ ADDQ $0x01, AX
+
+memmove_lz4_snappy:
+ LEAQ (AX)(R8*1), R10
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_lz4_snappy_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32
+ JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64
+
+emit_lit_memmove_lz4_snappy_memmove_move_8:
+ MOVQ (DX), R11
+ MOVQ R11, (AX)
+ JMP memmove_end_copy_lz4_snappy
+
+emit_lit_memmove_lz4_snappy_memmove_move_8through16:
+ MOVQ (DX), R11
+ MOVQ -8(DX)(R8*1), DX
+ MOVQ R11, (AX)
+ MOVQ DX, -8(AX)(R8*1)
+ JMP memmove_end_copy_lz4_snappy
+
+emit_lit_memmove_lz4_snappy_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_lz4_snappy
+
+emit_lit_memmove_lz4_snappy_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_lz4_snappy:
+ MOVQ R10, AX
+ JMP lz4_snappy_lits_emit_done
+
+memmove_long_lz4_snappy:
+ LEAQ (AX)(R8*1), R10
+
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
+ JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32
+ LEAQ -32(DX)(R13*1), R11
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_lz4_snappylarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back
+
+emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32:
+ MOVOU -32(DX)(R13*1), X4
+ MOVOU -16(DX)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ R10, AX
+
+lz4_snappy_lits_emit_done:
+ MOVQ DI, DX
+
+lz4_snappy_lits_done:
+ CMPQ DX, BX
+ JNE lz4_snappy_match
+ CMPQ R9, $0x04
+ JEQ lz4_snappy_done
+ JMP lz4_snappy_corrupt
+
+lz4_snappy_match:
+ LEAQ 2(DX), DI
+ CMPQ DI, BX
+ JAE lz4_snappy_corrupt
+ MOVWQZX (DX), R8
+ MOVQ DI, DX
+ TESTQ R8, R8
+ JZ lz4_snappy_corrupt
+ CMPQ R8, SI
+ JA lz4_snappy_corrupt
+ CMPQ R9, $0x13
+ JNE lz4_snappy_ml_done
+
+lz4_snappy_ml_loop:
+ MOVBQZX (DX), DI
+ INCQ DX
+ ADDQ DI, R9
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ CMPQ DI, $0xff
+ JEQ lz4_snappy_ml_loop
+
+lz4_snappy_ml_done:
+ ADDQ R9, SI
+
+ // emitCopy
+two_byte_offset_lz4_s2:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_lz4_s2
+ MOVB $0xee, (AX)
+ MOVW R8, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ CMPQ AX, CX
+ JAE lz4_snappy_loop
+ JMP two_byte_offset_lz4_s2
+
+two_byte_offset_short_lz4_s2:
+ MOVL R9, DI
+ SHLL $0x02, DI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_lz4_s2
+ CMPL R8, $0x00000800
+ JAE emit_copy_three_lz4_s2
+ LEAL -15(DI), DI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_snappy_loop
+
+emit_copy_three_lz4_s2:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW R8, 1(AX)
+ ADDQ $0x03, AX
+ JMP lz4_snappy_loop
+
+lz4_snappy_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
+ RET
+
+lz4_snappy_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+lz4_snappy_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+
+lz4s_snappy_loop:
+ CMPQ DX, BX
+ JAE lz4s_snappy_corrupt
+ CMPQ AX, CX
+ JAE lz4s_snappy_dstfull
+ MOVBQZX (DX), DI
+ MOVQ DI, R8
+ MOVQ DI, R9
+ SHRQ $0x04, R8
+ ANDQ $0x0f, R9
+ CMPQ DI, $0xf0
+ JB lz4s_snappy_ll_end
+
+lz4s_snappy_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4s_snappy_corrupt
+ MOVBQZX (DX), DI
+ ADDQ DI, R8
+ CMPQ DI, $0xff
+ JEQ lz4s_snappy_ll_loop
+
+lz4s_snappy_ll_end:
+ LEAQ (DX)(R8*1), DI
+ ADDQ $0x03, R9
+ CMPQ DI, BX
+ JAE lz4s_snappy_corrupt
+ INCQ DX
+ INCQ DI
+ TESTQ R8, R8
+ JZ lz4s_snappy_lits_done
+ LEAQ (AX)(R8*1), R10
+ CMPQ R10, CX
+ JAE lz4s_snappy_dstfull
+ ADDQ R8, SI
+ LEAL -1(R8), R10
+ CMPL R10, $0x3c
+ JB one_byte_lz4s_snappy
+ CMPL R10, $0x00000100
+ JB two_bytes_lz4s_snappy
+ CMPL R10, $0x00010000
+ JB three_bytes_lz4s_snappy
+ CMPL R10, $0x01000000
+ JB four_bytes_lz4s_snappy
+ MOVB $0xfc, (AX)
+ MOVL R10, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_lz4s_snappy
+
+four_bytes_lz4s_snappy:
+ MOVL R10, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (AX)
+ MOVW R10, 1(AX)
+ MOVB R11, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4s_snappy
+
+three_bytes_lz4s_snappy:
+ MOVB $0xf4, (AX)
+ MOVW R10, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_lz4s_snappy
+
+two_bytes_lz4s_snappy:
+ MOVB $0xf0, (AX)
+ MOVB R10, 1(AX)
+ ADDQ $0x02, AX
+ CMPL R10, $0x40
+ JB memmove_lz4s_snappy
+ JMP memmove_long_lz4s_snappy
+
+one_byte_lz4s_snappy:
+ SHLB $0x02, R10
+ MOVB R10, (AX)
+ ADDQ $0x01, AX
+
+memmove_lz4s_snappy:
+ LEAQ (AX)(R8*1), R10
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_lz4s_snappy_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32
+ JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64
+
+emit_lit_memmove_lz4s_snappy_memmove_move_8:
+ MOVQ (DX), R11
+ MOVQ R11, (AX)
+ JMP memmove_end_copy_lz4s_snappy
+
+emit_lit_memmove_lz4s_snappy_memmove_move_8through16:
+ MOVQ (DX), R11
+ MOVQ -8(DX)(R8*1), DX
+ MOVQ R11, (AX)
+ MOVQ DX, -8(AX)(R8*1)
+ JMP memmove_end_copy_lz4s_snappy
+
+emit_lit_memmove_lz4s_snappy_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_lz4s_snappy
+
+emit_lit_memmove_lz4s_snappy_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_lz4s_snappy:
+ MOVQ R10, AX
+ JMP lz4s_snappy_lits_emit_done
+
+memmove_long_lz4s_snappy:
+ LEAQ (AX)(R8*1), R10
+
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
+ JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32
+ LEAQ -32(DX)(R13*1), R11
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_lz4s_snappylarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back
+
+emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32:
+ MOVOU -32(DX)(R13*1), X4
+ MOVOU -16(DX)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ R10, AX
+
+lz4s_snappy_lits_emit_done:
+ MOVQ DI, DX
+
+lz4s_snappy_lits_done:
+ CMPQ DX, BX
+ JNE lz4s_snappy_match
+ CMPQ R9, $0x03
+ JEQ lz4s_snappy_done
+ JMP lz4s_snappy_corrupt
+
+lz4s_snappy_match:
+ CMPQ R9, $0x03
+ JEQ lz4s_snappy_loop
+ LEAQ 2(DX), DI
+ CMPQ DI, BX
+ JAE lz4s_snappy_corrupt
+ MOVWQZX (DX), R8
+ MOVQ DI, DX
+ TESTQ R8, R8
+ JZ lz4s_snappy_corrupt
+ CMPQ R8, SI
+ JA lz4s_snappy_corrupt
+ CMPQ R9, $0x12
+ JNE lz4s_snappy_ml_done
+
+lz4s_snappy_ml_loop:
+ MOVBQZX (DX), DI
+ INCQ DX
+ ADDQ DI, R9
+ CMPQ DX, BX
+ JAE lz4s_snappy_corrupt
+ CMPQ DI, $0xff
+ JEQ lz4s_snappy_ml_loop
+
+lz4s_snappy_ml_done:
+ ADDQ R9, SI
+
+ // emitCopy
+two_byte_offset_lz4_s2:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_lz4_s2
+ MOVB $0xee, (AX)
+ MOVW R8, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ CMPQ AX, CX
+ JAE lz4s_snappy_loop
+ JMP two_byte_offset_lz4_s2
+
+two_byte_offset_short_lz4_s2:
+ MOVL R9, DI
+ SHLL $0x02, DI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_lz4_s2
+ CMPL R8, $0x00000800
+ JAE emit_copy_three_lz4_s2
+ LEAL -15(DI), DI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_snappy_loop
+
+emit_copy_three_lz4_s2:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW R8, 1(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_snappy_loop
+
+lz4s_snappy_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
+ RET
+
+lz4s_snappy_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+lz4s_snappy_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go
new file mode 100644
index 000000000..4229957b9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/index.go
@@ -0,0 +1,602 @@
+// Copyright (c) 2022+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+)
+
+const (
+ S2IndexHeader = "s2idx\x00"
+ S2IndexTrailer = "\x00xdi2s"
+ maxIndexEntries = 1 << 16
+ // If distance is less than this, we do not add the entry.
+ minIndexDist = 1 << 20
+)
+
+// Index represents an S2/Snappy index.
+type Index struct {
+ TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown.
+ TotalCompressed int64 // Total Compressed size if known. Will be -1 if unknown.
+ info []struct {
+ compressedOffset int64
+ uncompressedOffset int64
+ }
+ estBlockUncomp int64
+}
+
+func (i *Index) reset(maxBlock int) {
+ i.estBlockUncomp = int64(maxBlock)
+ i.TotalCompressed = -1
+ i.TotalUncompressed = -1
+ if len(i.info) > 0 {
+ i.info = i.info[:0]
+ }
+}
+
+// allocInfos will allocate an empty slice of infos.
+func (i *Index) allocInfos(n int) {
+ if n > maxIndexEntries {
+ panic("n > maxIndexEntries")
+ }
+ i.info = make([]struct {
+ compressedOffset int64
+ uncompressedOffset int64
+ }, 0, n)
+}
+
+// add an uncompressed and compressed pair.
+// Entries must be sent in order.
+func (i *Index) add(compressedOffset, uncompressedOffset int64) error {
+ if i == nil {
+ return nil
+ }
+ lastIdx := len(i.info) - 1
+ if lastIdx >= 0 {
+ latest := i.info[lastIdx]
+ if latest.uncompressedOffset == uncompressedOffset {
+ // Uncompressed didn't change, don't add entry,
+ // but update start index.
+ latest.compressedOffset = compressedOffset
+ i.info[lastIdx] = latest
+ return nil
+ }
+ if latest.uncompressedOffset > uncompressedOffset {
+ return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
+ }
+ if latest.compressedOffset > compressedOffset {
+ return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
+ }
+ if latest.uncompressedOffset+minIndexDist > uncompressedOffset {
+ // Only add entry if distance is large enough.
+ return nil
+ }
+ }
+ i.info = append(i.info, struct {
+ compressedOffset int64
+ uncompressedOffset int64
+ }{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset})
+ return nil
+}
+
+// Find the offset at or before the wanted (uncompressed) offset.
+// If offset is 0 or positive it is the offset from the beginning of the file.
+// If the uncompressed size is known, the offset must be within the file.
+// If an offset outside the file is requested io.ErrUnexpectedEOF is returned.
+// If the offset is negative, it is interpreted as the distance from the end of the file,
+// where -1 represents the last byte.
+// If offset from the end of the file is requested, but size is unknown,
+// ErrUnsupported will be returned.
+func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) {
+ if i.TotalUncompressed < 0 {
+ return 0, 0, ErrCorrupt
+ }
+ if offset < 0 {
+ offset = i.TotalUncompressed + offset
+ if offset < 0 {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ }
+ if offset > i.TotalUncompressed {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ if len(i.info) > 200 {
+ n := sort.Search(len(i.info), func(n int) bool {
+ return i.info[n].uncompressedOffset > offset
+ })
+ if n == 0 {
+ n = 1
+ }
+ return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil
+ }
+ for _, info := range i.info {
+ if info.uncompressedOffset > offset {
+ break
+ }
+ compressedOff = info.compressedOffset
+ uncompressedOff = info.uncompressedOffset
+ }
+ return compressedOff, uncompressedOff, nil
+}
+
+// reduce to stay below maxIndexEntries
+func (i *Index) reduce() {
+ if len(i.info) < maxIndexEntries && i.estBlockUncomp >= minIndexDist {
+ return
+ }
+
+ // Algorithm, keep 1, remove removeN entries...
+ removeN := (len(i.info) + 1) / maxIndexEntries
+ src := i.info
+ j := 0
+
+ // Each block should be at least 1MB, but don't reduce below 1000 entries.
+ for i.estBlockUncomp*(int64(removeN)+1) < minIndexDist && len(i.info)/(removeN+1) > 1000 {
+ removeN++
+ }
+ for idx := 0; idx < len(src); idx++ {
+ i.info[j] = src[idx]
+ j++
+ idx += removeN
+ }
+ i.info = i.info[:j]
+ // Update maxblock estimate.
+ i.estBlockUncomp += i.estBlockUncomp * int64(removeN)
+}
+
+func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte {
+ i.reduce()
+ var tmp [binary.MaxVarintLen64]byte
+
+ initSize := len(b)
+ // We make the start a skippable header+size.
+ b = append(b, ChunkTypeIndex, 0, 0, 0)
+ b = append(b, []byte(S2IndexHeader)...)
+ // Total Uncompressed size
+ n := binary.PutVarint(tmp[:], uncompTotal)
+ b = append(b, tmp[:n]...)
+ // Total Compressed size
+ n = binary.PutVarint(tmp[:], compTotal)
+ b = append(b, tmp[:n]...)
+ // Put EstBlockUncomp size
+ n = binary.PutVarint(tmp[:], i.estBlockUncomp)
+ b = append(b, tmp[:n]...)
+ // Put length
+ n = binary.PutVarint(tmp[:], int64(len(i.info)))
+ b = append(b, tmp[:n]...)
+
+ // Check if we should add uncompressed offsets
+ var hasUncompressed byte
+ for idx, info := range i.info {
+ if idx == 0 {
+ if info.uncompressedOffset != 0 {
+ hasUncompressed = 1
+ break
+ }
+ continue
+ }
+ if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp {
+ hasUncompressed = 1
+ break
+ }
+ }
+ b = append(b, hasUncompressed)
+
+ // Add each entry
+ if hasUncompressed == 1 {
+ for idx, info := range i.info {
+ uOff := info.uncompressedOffset
+ if idx > 0 {
+ prev := i.info[idx-1]
+ uOff -= prev.uncompressedOffset + (i.estBlockUncomp)
+ }
+ n = binary.PutVarint(tmp[:], uOff)
+ b = append(b, tmp[:n]...)
+ }
+ }
+
+ // Initial compressed size estimate.
+ cPredict := i.estBlockUncomp / 2
+
+ for idx, info := range i.info {
+ cOff := info.compressedOffset
+ if idx > 0 {
+ prev := i.info[idx-1]
+ cOff -= prev.compressedOffset + cPredict
+ // Update compressed size prediction, with half the error.
+ cPredict += cOff / 2
+ }
+ n = binary.PutVarint(tmp[:], cOff)
+ b = append(b, tmp[:n]...)
+ }
+
+ // Add Total Size.
+ // Stored as fixed size for easier reading.
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer)))
+ b = append(b, tmp[:4]...)
+ // Trailer
+ b = append(b, []byte(S2IndexTrailer)...)
+
+ // Update size
+ chunkLen := len(b) - initSize - skippableFrameHeader
+ b[initSize+1] = uint8(chunkLen >> 0)
+ b[initSize+2] = uint8(chunkLen >> 8)
+ b[initSize+3] = uint8(chunkLen >> 16)
+ //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal)
+ return b
+}
+
+// Load a binary index.
+// A zero value Index can be used or a previous one can be reused.
+func (i *Index) Load(b []byte) ([]byte, error) {
+ if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) {
+ return b, io.ErrUnexpectedEOF
+ }
+ if b[0] != ChunkTypeIndex {
+ return b, ErrCorrupt
+ }
+ chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
+ b = b[4:]
+
+ // Validate we have enough...
+ if len(b) < chunkLen {
+ return b, io.ErrUnexpectedEOF
+ }
+ if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
+ return b, ErrUnsupported
+ }
+ b = b[len(S2IndexHeader):]
+
+ // Total Uncompressed
+ if v, n := binary.Varint(b); n <= 0 || v < 0 {
+ return b, ErrCorrupt
+ } else {
+ i.TotalUncompressed = v
+ b = b[n:]
+ }
+
+ // Total Compressed
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ i.TotalCompressed = v
+ b = b[n:]
+ }
+
+ // Read EstBlockUncomp
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ if v < 0 {
+ return b, ErrCorrupt
+ }
+ i.estBlockUncomp = v
+ b = b[n:]
+ }
+
+ var entries int
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ if v < 0 || v > maxIndexEntries {
+ return b, ErrCorrupt
+ }
+ entries = int(v)
+ b = b[n:]
+ }
+ if cap(i.info) < entries {
+ i.allocInfos(entries)
+ }
+ i.info = i.info[:entries]
+
+ if len(b) < 1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ hasUncompressed := b[0]
+ b = b[1:]
+ if hasUncompressed&1 != hasUncompressed {
+ return b, ErrCorrupt
+ }
+
+ // Add each uncompressed entry
+ for idx := range i.info {
+ var uOff int64
+ if hasUncompressed != 0 {
+ // Load delta
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ uOff = v
+ b = b[n:]
+ }
+ }
+
+ if idx > 0 {
+ prev := i.info[idx-1].uncompressedOffset
+ uOff += prev + (i.estBlockUncomp)
+ if uOff <= prev {
+ return b, ErrCorrupt
+ }
+ }
+ if uOff < 0 {
+ return b, ErrCorrupt
+ }
+ i.info[idx].uncompressedOffset = uOff
+ }
+
+ // Initial compressed size estimate.
+ cPredict := i.estBlockUncomp / 2
+
+ // Add each compressed entry
+ for idx := range i.info {
+ var cOff int64
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ cOff = v
+ b = b[n:]
+ }
+
+ if idx > 0 {
+ // Update compressed size prediction, with half the error.
+ cPredictNew := cPredict + cOff/2
+
+ prev := i.info[idx-1].compressedOffset
+ cOff += prev + cPredict
+ if cOff <= prev {
+ return b, ErrCorrupt
+ }
+ cPredict = cPredictNew
+ }
+ if cOff < 0 {
+ return b, ErrCorrupt
+ }
+ i.info[idx].compressedOffset = cOff
+ }
+ if len(b) < 4+len(S2IndexTrailer) {
+ return b, io.ErrUnexpectedEOF
+ }
+ // Skip size...
+ b = b[4:]
+
+ // Check trailer...
+ if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
+ return b, ErrCorrupt
+ }
+ return b[len(S2IndexTrailer):], nil
+}
+
+// LoadStream will load an index from the end of the supplied stream.
+// ErrUnsupported will be returned if the signature cannot be found.
+// ErrCorrupt will be returned if unexpected values are found.
+// io.ErrUnexpectedEOF is returned if there are too few bytes.
+// IO errors are returned as-is.
+func (i *Index) LoadStream(rs io.ReadSeeker) error {
+ // Go to end.
+ _, err := rs.Seek(-10, io.SeekEnd)
+ if err != nil {
+ return err
+ }
+ var tmp [10]byte
+ _, err = io.ReadFull(rs, tmp[:])
+ if err != nil {
+ return err
+ }
+ // Check trailer...
+ if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
+ return ErrUnsupported
+ }
+ sz := binary.LittleEndian.Uint32(tmp[:4])
+ if sz > maxChunkSize+skippableFrameHeader {
+ return ErrCorrupt
+ }
+ _, err = rs.Seek(-int64(sz), io.SeekEnd)
+ if err != nil {
+ return err
+ }
+
+ // Read index.
+ buf := make([]byte, sz)
+ _, err = io.ReadFull(rs, buf)
+ if err != nil {
+ return err
+ }
+ _, err = i.Load(buf)
+ return err
+}
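+
+// An illustrative sketch of loading an index that was appended to a seekable
+// stream (the file handle `f` and printing the JSON form are assumptions for
+// the example, not part of this file):
+//
+//	var idx Index
+//	if err := idx.LoadStream(f); err != nil {
+//		return err
+//	}
+//	fmt.Printf("%s\n", idx.JSON())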
+
+// IndexStream will return an index for a stream.
+// The stream structure will be checked, but
+// data within blocks is not verified.
+// The returned index can either be appended to the end of the stream
+// or stored separately.
+func IndexStream(r io.Reader) ([]byte, error) {
+ var i Index
+ var buf [maxChunkSize]byte
+ var readHeader bool
+ for {
+ _, err := io.ReadFull(r, buf[:4])
+ if err != nil {
+ if err == io.EOF {
+ return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil
+ }
+ return nil, err
+ }
+ // Start of this chunk.
+ startChunk := i.TotalCompressed
+ i.TotalCompressed += 4
+
+ chunkType := buf[0]
+ if !readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ return nil, ErrCorrupt
+ }
+ readHeader = true
+ }
+ chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16
+ if chunkLen < checksumSize {
+ return nil, ErrCorrupt
+ }
+
+ i.TotalCompressed += int64(chunkLen)
+ _, err = io.ReadFull(r, buf[:chunkLen])
+ if err != nil {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ // Skip checksum.
+ dLen, err := DecodedLen(buf[checksumSize:])
+ if err != nil {
+ return nil, err
+ }
+ if dLen > maxBlockSize {
+ return nil, ErrCorrupt
+ }
+ if i.estBlockUncomp == 0 {
+ // Use first block for estimate...
+ i.estBlockUncomp = int64(dLen)
+ }
+ err = i.add(startChunk, i.TotalUncompressed)
+ if err != nil {
+ return nil, err
+ }
+ i.TotalUncompressed += int64(dLen)
+ continue
+ case chunkTypeUncompressedData:
+ n2 := chunkLen - checksumSize
+ if n2 > maxBlockSize {
+ return nil, ErrCorrupt
+ }
+ if i.estBlockUncomp == 0 {
+ // Use first block for estimate...
+ i.estBlockUncomp = int64(n2)
+ }
+ err = i.add(startChunk, i.TotalUncompressed)
+ if err != nil {
+ return nil, err
+ }
+ i.TotalUncompressed += int64(n2)
+ continue
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ return nil, ErrCorrupt
+ }
+
+ if string(buf[:len(magicBody)]) != magicBody {
+ if string(buf[:len(magicBody)]) != magicBodySnappy {
+ return nil, ErrCorrupt
+ }
+ }
+
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ return nil, ErrUnsupported
+ }
+ if chunkLen > maxChunkSize {
+ return nil, ErrUnsupported
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ }
+}
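+
+// An illustrative sketch of building an index for an existing stream (the
+// file names and error handling are assumptions for the example):
+//
+//	in, err := os.Open("stream.s2") // an existing S2/Snappy framed stream
+//	if err != nil {
+//		return err
+//	}
+//	defer in.Close()
+//	idx, err := IndexStream(in)
+//	if err != nil {
+//		return err
+//	}
+//	// idx can now be appended to the stream or stored separately,
+//	// for example in "stream.s2.index".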
+
+// JSON returns the index as JSON text.
+func (i *Index) JSON() []byte {
+ type offset struct {
+ CompressedOffset int64 `json:"compressed"`
+ UncompressedOffset int64 `json:"uncompressed"`
+ }
+ x := struct {
+ TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown.
+ TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown.
+ Offsets []offset `json:"offsets"`
+ EstBlockUncomp int64 `json:"est_block_uncompressed"`
+ }{
+ TotalUncompressed: i.TotalUncompressed,
+ TotalCompressed: i.TotalCompressed,
+ EstBlockUncomp: i.estBlockUncomp,
+ }
+ for _, v := range i.info {
+ x.Offsets = append(x.Offsets, offset{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
+ }
+ b, _ := json.MarshalIndent(x, "", " ")
+ return b
+}
+
+// RemoveIndexHeaders will trim all headers and trailers from a given index.
+// This is expected to save 20 bytes.
+// These can be restored using RestoreIndexHeaders.
+// This removes a layer of security, but is the most compact representation.
+// Returns nil if the headers contain errors.
+// The returned slice references the provided slice.
+func RemoveIndexHeaders(b []byte) []byte {
+ const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4
+ if len(b) <= save {
+ return nil
+ }
+ if b[0] != ChunkTypeIndex {
+ return nil
+ }
+ chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
+ b = b[4:]
+
+ // Validate we have enough...
+ if len(b) < chunkLen {
+ return nil
+ }
+ b = b[:chunkLen]
+
+ if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
+ return nil
+ }
+ b = b[len(S2IndexHeader):]
+ if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) {
+ return nil
+ }
+ b = bytes.TrimSuffix(b, []byte(S2IndexTrailer))
+
+ if len(b) < 4 {
+ return nil
+ }
+ return b[:len(b)-4]
+}
+
+// RestoreIndexHeaders will restore index headers removed by RemoveIndexHeaders.
+// No error checking is performed on the input.
+// If a 0 length slice is sent, it is returned without modification.
+func RestoreIndexHeaders(in []byte) []byte {
+ if len(in) == 0 {
+ return in
+ }
+ b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4)
+ b = append(b, ChunkTypeIndex, 0, 0, 0)
+ b = append(b, []byte(S2IndexHeader)...)
+ b = append(b, in...)
+
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer)))
+ b = append(b, tmp[:4]...)
+ // Trailer
+ b = append(b, []byte(S2IndexTrailer)...)
+
+ chunkLen := len(b) - skippableFrameHeader
+ b[1] = uint8(chunkLen >> 0)
+ b[2] = uint8(chunkLen >> 8)
+ b[3] = uint8(chunkLen >> 16)
+ return b
+}
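+
+// An illustrative round trip, as a sketch (the `full` input is assumed to be a
+// complete serialized index as produced by appendTo):
+//
+//	compact := RemoveIndexHeaders(full)      // strip header/trailer, ~20 bytes smaller
+//	restored := RestoreIndexHeaders(compact) // add the framing back
+//	var idx Index
+//	_, err := idx.Load(restored)             // the restored index loads as usual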
diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go
new file mode 100644
index 000000000..46ed908e3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go
@@ -0,0 +1,585 @@
+// Copyright (c) 2022 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+)
+
+// LZ4Converter provides conversion from LZ4 blocks as defined here:
+// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md
+type LZ4Converter struct {
+}
+
+// ErrDstTooSmall is returned when provided destination is too small.
+var ErrDstTooSmall = errors.New("s2: destination too small")
+
+// ConvertBlock will convert an LZ4 block and append it as an S2
+// block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const inline = true
+ const lz4MinMatch = 4
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4BlockAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var lastOffset uint16
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return dst[:d], 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return dst[:d], 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if s == len(src) && ml == lz4MinMatch {
+ break
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if offset == lastOffset {
+ if debug {
+ fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitRepeat16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ d += 2
+ break
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ d += 3
+ break
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ d += 4
+ break
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ d += 5 + emitRepeat16(dst[5:], offset, left)
+ break
+ }
+ d += 5
+ break
+ }
+ }
+ } else {
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitCopy16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ d += off + emitRepeat16(dst[off:], offset, length)
+ break
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ }
+ lastOffset = offset
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
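+
+// An illustrative usage sketch (the `lz4Block` input and the `maxDecoded`
+// sizing are assumptions for the example):
+//
+//	var c LZ4Converter
+//	dst := make([]byte, 0, MaxEncodedLen(maxDecoded))
+//	s2Block, decodedSize, err := c.ConvertBlock(dst, lz4Block)
+//	if err != nil {
+//		return err
+//	}
+//	_ = decodedSize // number of bytes the converted block decompresses to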
+
+// ConvertBlockSnappy will convert an LZ4 block and append it
+// as a Snappy block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const lz4MinMatch = 4
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ // Use assembly when possible
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return nil, 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if s == len(src) && ml == lz4MinMatch {
+ break
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ length := ml
+ // d += emitCopyNoRepeat(dst[d:], int(offset), ml)
+ for length > 0 {
+ if d >= dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = 63<<2 | tagCopy2
+ length -= 64
+ d += 3
+ continue
+ }
+ if length >= 12 || offset >= 2048 || length < 4 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
+
+// emitRepeat16 writes a repeat chunk and returns the number of bytes written.
+// Length must be at least 4 and < 1<<24.
+func emitRepeat16(dst []byte, offset uint16, length int) int {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ return 2
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ return 2
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ return 3
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ return 4
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ return 5 + emitRepeat16(dst[5:], offset, left)
+ }
+ return 5
+}
+
+// emitCopy16 writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint16
+// 4 <= length && length <= math.MaxUint32
+func emitCopy16(dst []byte, offset uint16, length int) int {
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return off + emitRepeat16(dst[off:], offset, length)
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ return 2
+}
+
+// emitLiteralGo writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+func emitLiteralGo(dst, lit []byte) int {
+ if len(lit) == 0 {
+ return 0
+ }
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[1] = uint8(n)
+ dst[0] = 60<<2 | tagLiteral
+ i = 2
+ case n < 1<<16:
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 61<<2 | tagLiteral
+ i = 3
+ case n < 1<<24:
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 62<<2 | tagLiteral
+ i = 4
+ default:
+ dst[4] = uint8(n >> 24)
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 63<<2 | tagLiteral
+ i = 5
+ }
+ return i + copy(dst[i:], lit)
+}
diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go
new file mode 100644
index 000000000..000f39719
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go
@@ -0,0 +1,467 @@
+// Copyright (c) 2022 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+// LZ4sConverter provides conversion from LZ4s.
+// (Intel modified LZ4 Blocks)
+// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf
+// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format.
+// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData.
+// The LZ4s block returned by the Intel® QAT hardware can be used by an external
+// software post-processing to generate other compressed data formats.
+// The differences between the LZ4 and LZ4s block formats are as follows. The LZ4s block format uses
+// the same high-level formatting as the LZ4 block format, with the following encoding changes:
+// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte.
+// ONLY "Min match of 4 bytes" is supported.
+type LZ4sConverter struct {
+}
+
+// ConvertBlock will convert an LZ4s block and append it as an S2
+// block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const inline = true
+ const lz4MinMatch = 3
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4sBlockAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var lastOffset uint16
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return dst[:d], 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return dst[:d], 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if ml == lz4MinMatch {
+ if s == len(src) {
+ break
+ }
+ // 0 bytes.
+ continue
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if offset == lastOffset {
+ if debug {
+ fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitRepeat16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ d += 2
+ break
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ d += 3
+ break
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ d += 4
+ break
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ d += 5 + emitRepeat16(dst[5:], offset, left)
+ break
+ }
+ d += 5
+ break
+ }
+ }
+ } else {
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitCopy16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ d += off + emitRepeat16(dst[off:], offset, length)
+ break
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ }
+ lastOffset = offset
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
+
+// ConvertBlockSnappy will convert an LZ4s block and append it
+// as a Snappy block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const lz4MinMatch = 3
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ // Use assembly when possible
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return nil, 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if ml == lz4MinMatch {
+ if s == len(src) {
+ break
+ }
+ // 0 bytes.
+ continue
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ length := ml
+ // d += emitCopyNoRepeat(dst[d:], int(offset), ml)
+ for length > 0 {
+ if d >= dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = 63<<2 | tagCopy2
+ length -= 64
+ d += 3
+ continue
+ }
+ if length >= 12 || offset >= 2048 || length < 4 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go
new file mode 100644
index 000000000..8372d752f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/reader.go
@@ -0,0 +1,1075 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "runtime"
+ "sync"
+)
+
+// ErrCantSeek is returned if the stream cannot be seeked.
+type ErrCantSeek struct {
+ Reason string
+}
+
+// Error returns the error as string.
+func (e ErrCantSeek) Error() string {
+ return fmt.Sprintf("s2: Can't seek because %s", e.Reason)
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
+func NewReader(r io.Reader, opts ...ReaderOption) *Reader {
+ nr := Reader{
+ r: r,
+ maxBlock: maxBlockSize,
+ }
+ for _, opt := range opts {
+ if err := opt(&nr); err != nil {
+ nr.err = err
+ return &nr
+ }
+ }
+ nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize
+ if nr.lazyBuf > 0 {
+ nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize)
+ } else {
+ nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize)
+ }
+ nr.readHeader = nr.ignoreStreamID
+ nr.paramsOK = true
+ return &nr
+}
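+
+// A minimal decompression sketch (the `src` and `dst` streams and the 64KB
+// block limit are assumptions for the example):
+//
+//	dec := NewReader(src, ReaderMaxBlockSize(64<<10))
+//	if _, err := io.Copy(dst, dec); err != nil {
+//		return err
+//	}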
+
+// ReaderOption is an option for creating a decoder.
+type ReaderOption func(*Reader) error
+
+// ReaderMaxBlockSize allows controlling allocations if the stream
+// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
+// Blocks must be this size or smaller to decompress,
+// otherwise the decoder will return ErrUnsupported.
+//
+// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
+//
+// Default is the maximum limit of 4MB.
+func ReaderMaxBlockSize(blockSize int) ReaderOption {
+ return func(r *Reader) error {
+ if blockSize > maxBlockSize || blockSize <= 0 {
+ return errors.New("s2: block size too large. Must be <= 4MB and > 0")
+ }
+ if r.lazyBuf == 0 && blockSize < defaultBlockSize {
+ r.lazyBuf = blockSize
+ }
+ r.maxBlock = blockSize
+ return nil
+ }
+}
+
+// ReaderAllocBlock allows controlling the upfront stream allocation,
+// so that buffers are not allocated for frames bigger than this initially.
+// If a bigger frame is seen, a larger buffer will be allocated for it.
+//
+// Default is 1MB, which is the default output size.
+func ReaderAllocBlock(blockSize int) ReaderOption {
+ return func(r *Reader) error {
+ if blockSize > maxBlockSize || blockSize < 1024 {
+ return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
+ }
+ r.lazyBuf = blockSize
+ return nil
+ }
+}
+
+// ReaderIgnoreStreamIdentifier will make the reader skip the expected
+// stream identifier at the beginning of the stream.
+// This can be used when serving a stream that has been forwarded to a specific point.
+func ReaderIgnoreStreamIdentifier() ReaderOption {
+ return func(r *Reader) error {
+ r.ignoreStreamID = true
+ return nil
+ }
+}
+
+// ReaderSkippableCB will register a callback for chunks with the specified ID.
+// ID must be a Reserved skippable chunk ID, 0x80-0xfd (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported; the latest one provided will be used.
+// You can peek the stream, triggering the callback, by doing a Read with a 0
+// byte buffer.
+func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
+ return func(r *Reader) error {
+ if id < 0x80 || id > 0xfd {
+ return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
+ }
+ r.skippableCB[id-0x80] = fn
+ return nil
+ }
+}
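+
+// An illustrative sketch of registering a callback for skippable chunks
+// (chunk ID 0x80 and the callback body are assumptions for the example):
+//
+//	dec := NewReader(src, ReaderSkippableCB(0x80, func(sr io.Reader) error {
+//		meta, err := ioutil.ReadAll(sr)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Printf("skippable chunk 0x80: %d bytes\n", len(meta))
+//		return nil
+//	}))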
+
+// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
+func ReaderIgnoreCRC() ReaderOption {
+ return func(r *Reader) error {
+ r.ignoreCRC = true
+ return nil
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ skippableCB [0xff - 0x80]func(r io.Reader) error
+ blockStart int64 // Uncompressed offset at start of current.
+ index *Index
+
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ // maximum block size allowed.
+ maxBlock int
+ // maximum expected buffer size.
+ maxBufSize int
+ // alloc a buffer this size if > 0.
+ lazyBuf int
+ readHeader bool
+ paramsOK bool
+ snappyFrame bool
+ ignoreStreamID bool
+ ignoreCRC bool
+}
+
+// GetBufferCapacity returns the capacity of the internal buffer.
+// This might be useful to know when reusing the same reader in combination
+// with the lazy buffer option.
+func (r *Reader) GetBufferCapacity() int {
+ return cap(r.buf)
+}
+
+// ensureBufferSize will ensure that the buffer can take at least n bytes.
+// If false is returned the buffer exceeds maximum allowed size.
+func (r *Reader) ensureBufferSize(n int) bool {
+ if n > r.maxBufSize {
+ r.err = ErrCorrupt
+ return false
+ }
+ if cap(r.buf) >= n {
+ return true
+ }
+ // Realloc buffer.
+ r.buf = make([]byte, n)
+ return true
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ if !r.paramsOK {
+ return
+ }
+ r.index = nil
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.blockStart = 0
+ r.readHeader = r.ignoreStreamID
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// skippable will skip n bytes.
+// If the supplied reader supports seeking that is used.
+// tmp is used as a temporary buffer for reading.
+// The supplied slice does not need to be the size of the read.
+func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) {
+ if id < 0x80 {
+ r.err = fmt.Errorf("internal error: skippable id < 0x80")
+ return false
+ }
+ if fn := r.skippableCB[id-0x80]; fn != nil {
+ rd := io.LimitReader(r.r, int64(n))
+ r.err = fn(rd)
+ if r.err != nil {
+ return false
+ }
+ _, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp)
+ return r.err == nil
+ }
+ if rs, ok := r.r.(io.ReadSeeker); ok {
+ _, err := rs.Seek(int64(n), io.SeekCurrent)
+ if err == nil {
+ return true
+ }
+ if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ return false
+ }
+ }
+ for n > 0 {
+ if n < len(tmp) {
+ tmp = tmp[:n]
+ }
+ if _, r.err = io.ReadFull(r.r, tmp); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ n -= len(tmp)
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.ensureBufferSize(chunkLen) {
+ if r.err == nil {
+ r.err = ErrUnsupported
+ }
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+
+ if n > len(r.decoded) {
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.decoded = make([]byte, n)
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
+ r.err = ErrCRC
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.ensureBufferSize(chunkLen) {
+ if r.err == nil {
+ r.err = ErrUnsupported
+ }
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.decoded = make([]byte, n)
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
+ r.err = ErrCRC
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ if string(r.buf[:len(magicBody)]) != magicBody {
+ if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+ r.err = ErrCorrupt
+ return 0, r.err
+ } else {
+ r.snappyFrame = true
+ }
+ } else {
+ r.snappyFrame = false
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ // fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if chunkLen > maxChunkSize {
+ // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
+ if !r.skippable(r.buf, chunkLen, false, chunkType) {
+ return 0, r.err
+ }
+ }
+}
+
+// DecodeConcurrent will decode the full stream to w.
+// This function should not be combined with reading, seeking or other operations.
+// Up to 'concurrent' goroutines will be used.
+// If <= 0, runtime.NumCPU will be used.
+// On success, the number of decompressed bytes is returned along with a nil error.
+// This is mainly intended for bigger streams.
+func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) {
+ if r.i > 0 || r.j > 0 || r.blockStart > 0 {
+		return 0, errors.New("DecodeConcurrent called after reading from the stream")
+ }
+ if concurrent <= 0 {
+ concurrent = runtime.NumCPU()
+ }
+
+ // Write to output
+ var errMu sync.Mutex
+ var aErr error
+ setErr := func(e error) (ok bool) {
+ errMu.Lock()
+ defer errMu.Unlock()
+ if e == nil {
+ return aErr == nil
+ }
+ if aErr == nil {
+ aErr = e
+ }
+ return false
+ }
+ hasErr := func() (ok bool) {
+ errMu.Lock()
+ v := aErr != nil
+ errMu.Unlock()
+ return v
+ }
+
+ var aWritten int64
+ toRead := make(chan []byte, concurrent)
+ writtenBlocks := make(chan []byte, concurrent)
+ queue := make(chan chan []byte, concurrent)
+ reUse := make(chan chan []byte, concurrent)
+ for i := 0; i < concurrent; i++ {
+ toRead <- make([]byte, 0, r.maxBufSize)
+ writtenBlocks <- make([]byte, 0, r.maxBufSize)
+ reUse <- make(chan []byte, 1)
+ }
+ // Writer
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for toWrite := range queue {
+ entry := <-toWrite
+ reUse <- toWrite
+ if hasErr() || entry == nil {
+ if entry != nil {
+ writtenBlocks <- entry
+ }
+ continue
+ }
+ if hasErr() {
+ writtenBlocks <- entry
+ continue
+ }
+ n, err := w.Write(entry)
+ want := len(entry)
+ writtenBlocks <- entry
+ if err != nil {
+ setErr(err)
+ continue
+ }
+ if n != want {
+ setErr(io.ErrShortWrite)
+ continue
+ }
+ aWritten += int64(n)
+ }
+ }()
+
+ defer func() {
+ if r.err != nil {
+ setErr(r.err)
+ } else if err != nil {
+ setErr(err)
+ }
+ close(queue)
+ wg.Wait()
+ if err == nil {
+ err = aErr
+ }
+ written = aWritten
+ }()
+
+ // Reader
+ for !hasErr() {
+ if !r.readFull(r.buf[:4], true) {
+ if r.err == io.EOF {
+ r.err = nil
+ }
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if chunkLen > r.maxBufSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ orgBuf := <-toRead
+ buf := orgBuf[:chunkLen]
+
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ wg.Add(1)
+
+ decoded := <-writtenBlocks
+ entry := <-reUse
+ queue <- entry
+ go func() {
+ defer wg.Done()
+ decoded = decoded[:n]
+ _, err := Decode(decoded, buf)
+ toRead <- orgBuf
+ if err != nil {
+ writtenBlocks <- decoded
+ setErr(err)
+ entry <- nil
+ return
+ }
+ if !r.ignoreCRC && crc(decoded) != checksum {
+ writtenBlocks <- decoded
+ setErr(ErrCRC)
+ entry <- nil
+ return
+ }
+ entry <- decoded
+ }()
+ continue
+
+ case chunkTypeUncompressedData:
+
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if chunkLen > r.maxBufSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ // Grab write buffer
+ orgBuf := <-writtenBlocks
+ buf := orgBuf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read content.
+ n := chunkLen - checksumSize
+
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ // Read uncompressed
+ buf = orgBuf[:n]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+
+ if !r.ignoreCRC && crc(buf) != checksum {
+ r.err = ErrCRC
+ return 0, r.err
+ }
+ entry := <-reUse
+ queue <- entry
+ entry <- buf
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ if string(r.buf[:len(magicBody)]) != magicBody {
+ if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+ r.err = ErrCorrupt
+ return 0, r.err
+ } else {
+ r.snappyFrame = true
+ }
+ } else {
+ r.snappyFrame = false
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ // fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if chunkLen > maxChunkSize {
+ // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
+ if !r.skippable(r.buf, chunkLen, false, chunkType) {
+ return 0, r.err
+ }
+ }
+ return 0, r.err
+}
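+
+// An illustrative usage sketch (the `in` and `out` streams are assumptions;
+// passing 0 lets the decoder use runtime.NumCPU goroutines):
+//
+//	dec := NewReader(in)
+//	written, err := dec.DecodeConcurrent(out, 0)
+//	if err != nil {
+//		return err
+//	}
+//	_ = written // total number of decompressed bytes written to out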
+
+// Skip will skip n bytes forward in the decompressed output.
+// For larger skips this consumes less CPU and is faster than reading output and discarding it.
+// CRC is not checked on skipped blocks.
+// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped.
+// If a decoding error is encountered subsequent calls to Read will also fail.
+func (r *Reader) Skip(n int64) error {
+ if n < 0 {
+ return errors.New("attempted negative skip")
+ }
+ if r.err != nil {
+ return r.err
+ }
+
+ for n > 0 {
+ if r.i < r.j {
+ // Skip in buffer.
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ left := int64(r.j - r.i)
+ if left >= n {
+ tmp := int64(r.i) + n
+ if tmp > math.MaxInt32 {
+ return errors.New("s2: internal overflow in skip")
+ }
+ r.i = int(tmp)
+ return nil
+ }
+ n -= int64(r.j - r.i)
+ r.i = r.j
+ }
+
+ // Buffer empty; read blocks until we have content.
+ if !r.readFull(r.buf[:4], true) {
+ if r.err == io.EOF {
+ r.err = io.ErrUnexpectedEOF
+ }
+ return r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if !r.ensureBufferSize(chunkLen) {
+ if r.err == nil {
+ r.err = ErrUnsupported
+ }
+ return r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ dLen, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return r.err
+ }
+ if dLen > r.maxBlock {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ // Check if destination is within this block
+ if int64(dLen) > n {
+ if len(r.decoded) < dLen {
+ r.decoded = make([]byte, dLen)
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return r.err
+ }
+ if crc(r.decoded[:dLen]) != checksum {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ } else {
+ // Skip block completely
+ n -= int64(dLen)
+ r.blockStart += int64(dLen)
+ dLen = 0
+ }
+ r.i, r.j = 0, dLen
+ continue
+ case chunkTypeUncompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if !r.ensureBufferSize(chunkLen) {
+ if r.err != nil {
+ r.err = ErrUnsupported
+ }
+ return r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n2 := chunkLen - checksumSize
+ if n2 > len(r.decoded) {
+ if n2 > r.maxBlock {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ r.decoded = make([]byte, n2)
+ }
+ if !r.readFull(r.decoded[:n2], false) {
+ return r.err
+ }
+ if int64(n2) < n {
+ if crc(r.decoded[:n2]) != checksum {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ }
+ r.i, r.j = 0, n2
+ continue
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return r.err
+ }
+ if string(r.buf[:len(magicBody)]) != magicBody {
+ if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ }
+
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return r.err
+ }
+ if chunkLen > maxChunkSize {
+ r.err = ErrUnsupported
+ return r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.skippable(r.buf, chunkLen, false, chunkType) {
+ return r.err
+ }
+ }
+ return nil
+}
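+
+// An illustrative sketch of skipping decompressed output without reading it
+// (the `src` stream and the 1MB offset are assumptions for the example):
+//
+//	dec := NewReader(src)
+//	if err := dec.Skip(1 << 20); err != nil { // skip the first 1MB of output
+//		return err
+//	}
+//	// Subsequent reads continue from the new position.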
+
+// ReadSeeker provides random or forward seeking in compressed content.
+// See Reader.ReadSeeker
+type ReadSeeker struct {
+ *Reader
+ readAtMu sync.Mutex
+}
+
+// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
+// compatible version of the reader.
+// If 'random' is specified the returned io.Seeker can be used for
+// random seeking, otherwise only forward seeking is supported.
+// Enabling random seeking requires the original input to support
+// the io.Seeker interface.
+// A custom index can be specified which will be used if supplied.
+// When using a custom index, it will not be read from the input stream.
+// The ReadAt position will affect regular reads and the current position of Seek.
+// So using Read after ReadAt will continue from where the ReadAt stopped.
+// No functions should be used concurrently.
+// The returned ReadSeeker contains a shallow reference to the existing Reader,
+// meaning changes performed to one are reflected in the other.
+func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
+ // Read index if provided.
+ if len(index) != 0 {
+ if r.index == nil {
+ r.index = &Index{}
+ }
+ if _, err := r.index.Load(index); err != nil {
+ return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()}
+ }
+ }
+
+ // Check if input is seekable
+ rs, ok := r.r.(io.ReadSeeker)
+ if !ok {
+ if !random {
+ return &ReadSeeker{Reader: r}, nil
+ }
+ return nil, ErrCantSeek{Reason: "input stream isn't seekable"}
+ }
+
+ if r.index != nil {
+ // Seekable and index, ok...
+ return &ReadSeeker{Reader: r}, nil
+ }
+
+ // Load from stream.
+ r.index = &Index{}
+
+ // Read current position.
+ pos, err := rs.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()}
+ }
+ err = r.index.LoadStream(rs)
+ if err != nil {
+ if err == ErrUnsupported {
+ // If we don't require random seeking, reset input and return.
+ if !random {
+ _, err = rs.Seek(pos, io.SeekStart)
+ if err != nil {
+ return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()}
+ }
+ r.index = nil
+ return &ReadSeeker{Reader: r}, nil
+ }
+ return nil, ErrCantSeek{Reason: "input stream does not contain an index"}
+ }
+ return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()}
+ }
+
+ // reset position.
+ _, err = rs.Seek(pos, io.SeekStart)
+ if err != nil {
+ return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()}
+ }
+ return &ReadSeeker{Reader: r}, nil
+}
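+
+// An illustrative random-access sketch (the seekable input `f`, buffer size,
+// and offset are assumptions for the example; passing a nil index makes the
+// reader look for one at the end of the stream):
+//
+//	dec := NewReader(f)
+//	rs, err := dec.ReadSeeker(true, nil)
+//	if err != nil {
+//		return err
+//	}
+//	buf := make([]byte, 1<<20)
+//	_, err = rs.ReadAt(buf, 5<<30) // read 1MB starting at uncompressed offset 5GB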
+
+// Seek allows seeking in compressed data.
+func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ if r.err != nil {
+ if !errors.Is(r.err, io.EOF) {
+ return 0, r.err
+ }
+ // Reset on EOF
+ r.err = nil
+ }
+
+ // Calculate absolute offset.
+ absOffset := offset
+
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ absOffset = r.blockStart + int64(r.i) + offset
+ case io.SeekEnd:
+ if r.index == nil {
+ return 0, ErrUnsupported
+ }
+ absOffset = r.index.TotalUncompressed + offset
+ default:
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ if absOffset < 0 {
+ return 0, errors.New("seek before start of file")
+ }
+
+ if !r.readHeader {
+ // Make sure we read the header.
+ _, r.err = r.Read([]byte{})
+ if r.err != nil {
+ return 0, r.err
+ }
+ }
+
+ // If we are inside current block no need to seek.
+ // This includes no offset changes.
+ if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) {
+ r.i = int(absOffset - r.blockStart)
+ return r.blockStart + int64(r.i), nil
+ }
+
+ rs, ok := r.r.(io.ReadSeeker)
+ if r.index == nil || !ok {
+ currOffset := r.blockStart + int64(r.i)
+ if absOffset >= currOffset {
+ err := r.Skip(absOffset - currOffset)
+ return r.blockStart + int64(r.i), err
+ }
+ return 0, ErrUnsupported
+ }
+
+ // We can seek and we have an index.
+ c, u, err := r.index.Find(absOffset)
+ if err != nil {
+ return r.blockStart + int64(r.i), err
+ }
+
+ // Seek to next block
+ _, err = rs.Seek(c, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ r.i = r.j // Remove rest of current block.
+ r.blockStart = u - int64(r.j) // Adjust current block start for accounting.
+ if u < absOffset {
+ // Forward inside block
+ return absOffset, r.Skip(absOffset - u)
+ }
+ if u > absOffset {
+ return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
+ }
+ return absOffset, nil
+}
+
+// ReadAt reads len(p) bytes into p starting at offset off in the
+// underlying input source. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered.
+//
+// When ReadAt returns n < len(p), it returns a non-nil error
+// explaining why more bytes were not returned. In this respect,
+// ReadAt is stricter than Read.
+//
+// Even if ReadAt returns n < len(p), it may use all of p as scratch
+// space during the call. If some data is available but not len(p) bytes,
+// ReadAt blocks until either all the data is available or an error occurs.
+// In this respect ReadAt is different from Read.
+//
+// If the n = len(p) bytes returned by ReadAt are at the end of the
+// input source, ReadAt may return either err == EOF or err == nil.
+//
+// If ReadAt is reading from an input source with a seek offset,
+// ReadAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of ReadAt can execute parallel ReadAt calls on the
+// same input source. This is however not recommended.
+func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
+ r.readAtMu.Lock()
+ defer r.readAtMu.Unlock()
+ _, err := r.Seek(offset, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+ n := 0
+ for n < len(p) {
+ n2, err := r.Read(p[n:])
+ if err != nil {
+ // This will include io.EOF
+ return n + n2, err
+ }
+ n += n2
+ }
+ return n, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ if r.i < r.j {
+ c := r.decoded[r.i]
+ r.i++
+ return c, nil
+ }
+ var tmp [1]byte
+ for i := 0; i < 10; i++ {
+ n, err := r.Read(tmp[:])
+ if err != nil {
+ return 0, err
+ }
+ if n == 1 {
+ return tmp[0], nil
+ }
+ }
+ return 0, io.ErrNoProgress
+}
+
+// SkippableCB will register a callback for chunks with the specified ID.
+// ID must be a reserved skippable chunk ID, 0x80-0xfd (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported; the latest one set will be used.
+// Sending a nil function will disable previous callbacks.
+// You can peek the stream, triggering the callback, by doing a Read with a 0
+// byte buffer.
+func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
+ if id < 0x80 || id >= chunkTypePadding {
+		return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
+ }
+ r.skippableCB[id-0x80] = fn
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go
new file mode 100644
index 000000000..cbd1ed64d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/s2.go
@@ -0,0 +1,151 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2 implements the S2 compression format.
+//
+// S2 is an extension of Snappy. Similar to Snappy, S2 is aimed at high throughput,
+// which is why it features concurrent compression for bigger payloads.
+//
+// Decoding is compatible with Snappy compressed content,
+// but content compressed with S2 cannot be decompressed by Snappy.
+//
+// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2
+//
+// There are actually two S2 formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as an S2 stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// A "better" compression option is available. This will trade some compression
+// speed
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// Blocks do not offer much data protection, so it is up to you to
+// add data validation of decompressed blocks.
+//
+// Streams perform CRC validation of the decompressed data.
+// Stream compression will also be performed on multiple CPU cores concurrently,
+// significantly improving throughput.
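+//
+// A minimal stream round trip might look like the following (editor's sketch;
+// it uses only the Reader and Writer declared in this package):
+//
+//	var buf bytes.Buffer
+//	enc := NewWriter(&buf)
+//	_, _ = enc.Write([]byte("hello s2"))
+//	_ = enc.Close() // Close finishes the stream.
+//	dec := NewReader(&buf)
+//	out, _ := io.ReadAll(dec) // out now holds "hello s2"
+//	_ = out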
+package s2
+
+import (
+ "bytes"
+ "hash/crc32"
+
+ "github.com/klauspost/compress/internal/race"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
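+
+// Editor's note, worked example (not from the upstream docs): the 5 literal
+// bytes "hello" encode as the block
+//
+//	0x05                decoded length (varint 5)
+//	0x10                literal tag: l=0, m=4, so 1+m=5 literal bytes follow
+//	'h' 'e' 'l' 'l' 'o' the literal bytes
+//
+// No copy tags are needed for such a short input.
+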
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
+ magicBodySnappy = "sNaPpY"
+ magicBody = "S2sTwO"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock.
+ //
+ // For the framing format (Writer type instead of Encode function),
+ // this is the maximum uncompressed size of a block.
+ maxBlockSize = 4 << 20
+
+	// minBlockSize is the minimum block size setting when creating a writer.
+ minBlockSize = 4 << 10
+
+ skippableFrameHeader = 4
+ maxChunkSize = 1<<24 - 1 // 16777215
+
+ // Default block size
+ defaultBlockSize = 1 << 20
+
+ // maxSnappyBlockSize is the maximum snappy block size.
+ maxSnappyBlockSize = 1 << 16
+
+ obufHeaderLen = checksumSize + chunkHeaderSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ ChunkTypeIndex = 0x99
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var (
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+ magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes.
+ magicChunkBytes = []byte(magicChunk) // Can be passed to functions where it escapes.
+)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ race.ReadSlice(b)
+
+ c := crc32.Update(0, crcTable, b)
+ return c>>15 | c<<17 + 0xa282ead8
+}
+
+// literalExtraSize returns the extra size of encoding n literals.
+// n should be >= 0 and <= math.MaxUint32.
+func literalExtraSize(n int64) int64 {
+ if n == 0 {
+ return 0
+ }
+ switch {
+ case n < 60:
+ return 1
+ case n < 1<<8:
+ return 2
+ case n < 1<<16:
+ return 3
+ case n < 1<<24:
+ return 4
+ default:
+ return 5
+ }
+}
+
+type byter interface {
+ Bytes() []byte
+}
+
+var _ byter = &bytes.Buffer{}
diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go
new file mode 100644
index 000000000..0a46f2b98
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/writer.go
@@ -0,0 +1,1039 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "sync"
+
+ "github.com/klauspost/compress/internal/race"
+)
+
+const (
+ levelUncompressed = iota + 1
+ levelFast
+ levelBetter
+ levelBest
+)
+
+// NewWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// Users must call Close to guarantee all data has been forwarded to
+// the underlying io.Writer and that resources are released.
+// They may also call Flush zero or more times before calling Close.
+func NewWriter(w io.Writer, opts ...WriterOption) *Writer {
+ w2 := Writer{
+ blockSize: defaultBlockSize,
+ concurrency: runtime.GOMAXPROCS(0),
+ randSrc: rand.Reader,
+ level: levelFast,
+ }
+ for _, opt := range opts {
+ if err := opt(&w2); err != nil {
+ w2.errState = err
+ return &w2
+ }
+ }
+ w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize)
+ w2.paramsOK = true
+ w2.ibuf = make([]byte, 0, w2.blockSize)
+ w2.buffers.New = func() interface{} {
+ return make([]byte, w2.obufLen)
+ }
+ w2.Reset(w)
+ return &w2
+}
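+
+// Editor's note: a typical caller-side construction with options might look
+// like this sketch (all options referenced here are defined later in this file):
+//
+//	enc := s2.NewWriter(w,
+//		s2.WriterBlockSize(1<<20),
+//		s2.WriterConcurrency(runtime.GOMAXPROCS(0)),
+//		s2.WriterAddIndex(),
+//	)
+//	defer enc.Close() // Close finishes the stream and appends the index.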
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ errMu sync.Mutex
+ errState error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ ibuf []byte
+
+ blockSize int
+ obufLen int
+ concurrency int
+ written int64
+ uncompWritten int64 // Bytes sent to compression
+ output chan chan result
+ buffers sync.Pool
+ pad int
+
+ writer io.Writer
+ randSrc io.Reader
+ writerWg sync.WaitGroup
+ index Index
+ customEnc func(dst, src []byte) int
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+ paramsOK bool
+ snappy bool
+ flushOnWrite bool
+ appendIndex bool
+ level uint8
+}
+
+type result struct {
+ b []byte
+ // Uncompressed start offset
+ startOffset int64
+}
+
+// err returns the previously set error.
+// If no error has been set and err is not nil, the error state is set to err.
+func (w *Writer) err(err error) error {
+ w.errMu.Lock()
+ errSet := w.errState
+ if errSet == nil && err != nil {
+ w.errState = err
+ errSet = err
+ }
+ w.errMu.Unlock()
+ return errSet
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to w.
+// This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ if !w.paramsOK {
+ return
+ }
+ // Close previous writer, if any.
+ if w.output != nil {
+ close(w.output)
+ w.writerWg.Wait()
+ w.output = nil
+ }
+ w.errState = nil
+ w.ibuf = w.ibuf[:0]
+ w.wroteStreamHeader = false
+ w.written = 0
+ w.writer = writer
+ w.uncompWritten = 0
+ w.index.reset(w.blockSize)
+
+ // If we didn't get a writer, stop here.
+ if writer == nil {
+ return
+ }
+ // If no concurrency requested, don't spin up writer goroutine.
+ if w.concurrency == 1 {
+ return
+ }
+
+ toWrite := make(chan chan result, w.concurrency)
+ w.output = toWrite
+ w.writerWg.Add(1)
+
+ // Start a writer goroutine that will write all output in order.
+ go func() {
+ defer w.writerWg.Done()
+
+ // Get a queued write.
+ for write := range toWrite {
+ // Wait for the data to be available.
+ input := <-write
+ in := input.b
+ if len(in) > 0 {
+ if w.err(nil) == nil {
+ // Don't expose data from previous buffers.
+ toWrite := in[:len(in):len(in)]
+ // Write to output.
+ n, err := writer.Write(toWrite)
+ if err == nil && n != len(toWrite) {
+ err = io.ErrShortBuffer
+ }
+ _ = w.err(err)
+ w.err(w.index.add(w.written, input.startOffset))
+ w.written += int64(n)
+ }
+ }
+ if cap(in) >= w.obufLen {
+ w.buffers.Put(in)
+ }
+ // close the incoming write request.
+ // This can be used for synchronizing flushes.
+ close(write)
+ }
+ }()
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if err := w.err(nil); err != nil {
+ return 0, err
+ }
+ if w.flushOnWrite {
+ return w.write(p)
+ }
+ // If we exceed the input buffer size, start writing
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if err := w.err(nil); err != nil {
+ return nRet, err
+ }
+ // p should always be able to fit into w.ibuf now.
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+// ReadFrom implements the io.ReaderFrom interface.
+// Using this is typically more efficient since it avoids a memory copy.
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ if err := w.err(nil); err != nil {
+ return 0, err
+ }
+ if len(w.ibuf) > 0 {
+ err := w.AsyncFlush()
+ if err != nil {
+ return 0, err
+ }
+ }
+ if br, ok := r.(byter); ok {
+ buf := br.Bytes()
+ if err := w.EncodeBuffer(buf); err != nil {
+ return 0, err
+ }
+ return int64(len(buf)), w.AsyncFlush()
+ }
+ for {
+ inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen]
+ n2, err := io.ReadFull(r, inbuf[obufHeaderLen:])
+ if err != nil {
+ if err == io.ErrUnexpectedEOF {
+ err = io.EOF
+ }
+ if err != io.EOF {
+ return n, w.err(err)
+ }
+ }
+ if n2 == 0 {
+ if cap(inbuf) >= w.obufLen {
+ w.buffers.Put(inbuf)
+ }
+ break
+ }
+ n += int64(n2)
+ err2 := w.writeFull(inbuf[:n2+obufHeaderLen])
+ if w.err(err2) != nil {
+ break
+ }
+
+ if err != nil {
+ // We got EOF and wrote everything
+ break
+ }
+ }
+
+ return n, w.err(nil)
+}
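+
+// Editor's note (sketch): because Writer implements io.ReaderFrom, a plain
+// io.Copy into the Writer routes through ReadFrom and avoids an extra copy.
+// Here enc is an *s2.Writer and src is any io.Reader:
+//
+//	if _, err := io.Copy(enc, src); err != nil {
+//		// handle error
+//	}
+//	// Call Flush or Close afterwards to finish the stream.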
+
+// AddSkippableBlock will add a skippable block to the stream.
+// The ID must be 0x80-0xfe (inclusive).
+// Length of the skippable block must be <= 16777215 bytes.
+func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
+ if err := w.err(nil); err != nil {
+ return err
+ }
+ if len(data) == 0 {
+ return nil
+ }
+ if id < 0x80 || id > chunkTypePadding {
+ return fmt.Errorf("invalid skippable block id %x", id)
+ }
+ if len(data) > maxChunkSize {
+		return fmt.Errorf("skippable block exceeds maximum size")
+ }
+ var header [4]byte
+ chunkLen := len(data)
+ header[0] = id
+ header[1] = uint8(chunkLen >> 0)
+ header[2] = uint8(chunkLen >> 8)
+ header[3] = uint8(chunkLen >> 16)
+ if w.concurrency == 1 {
+ write := func(b []byte) error {
+ n, err := w.writer.Write(b)
+ if err = w.err(err); err != nil {
+ return err
+ }
+ if n != len(b) {
+ return w.err(io.ErrShortWrite)
+ }
+ w.written += int64(n)
+ return w.err(nil)
+ }
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ if w.snappy {
+ if err := write([]byte(magicChunkSnappy)); err != nil {
+ return err
+ }
+ } else {
+ if err := write([]byte(magicChunk)); err != nil {
+ return err
+ }
+ }
+ }
+ if err := write(header[:]); err != nil {
+ return err
+ }
+ return write(data)
+ }
+
+ // Create output...
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ hWriter := make(chan result)
+ w.output <- hWriter
+ if w.snappy {
+ hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
+ } else {
+ hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
+ }
+ }
+
+ // Copy input.
+ inbuf := w.buffers.Get().([]byte)[:4]
+ copy(inbuf, header[:])
+ inbuf = append(inbuf, data...)
+
+ output := make(chan result, 1)
+ // Queue output.
+ w.output <- output
+ output <- result{startOffset: w.uncompWritten, b: inbuf}
+
+ return nil
+}
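+
+// Editor's note: a sketch of how a skippable block written here can be read
+// back with (*Reader).SkippableCB; ID 0x80 is an arbitrary value from the
+// reserved range:
+//
+//	_ = enc.AddSkippableBlock(0x80, []byte("side channel"))
+//
+//	dec := s2.NewReader(r)
+//	_ = dec.SkippableCB(0x80, func(sr io.Reader) error {
+//		payload, err := io.ReadAll(sr) // payload holds "side channel"
+//		_ = payload
+//		return err
+//	})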
+
+// EncodeBuffer will add a buffer to the stream.
+// This is the fastest way to encode a stream,
+// but the input buffer cannot be written to by the caller
+// until Flush or Close has been called when concurrency != 1.
+//
+// If you cannot control that, use the regular Write function.
+//
+// Note that input is not buffered.
+// This means that each write will result in discrete blocks being created.
+// For buffered writes, use the regular Write function.
+func (w *Writer) EncodeBuffer(buf []byte) (err error) {
+ if err := w.err(nil); err != nil {
+ return err
+ }
+
+ if w.flushOnWrite {
+ _, err := w.write(buf)
+ return err
+ }
+ // Flush queued data first.
+ if len(w.ibuf) > 0 {
+ err := w.AsyncFlush()
+ if err != nil {
+ return err
+ }
+ }
+ if w.concurrency == 1 {
+ _, err := w.writeSync(buf)
+ return err
+ }
+
+ // Spawn goroutine and write block to output channel.
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ hWriter := make(chan result)
+ w.output <- hWriter
+ if w.snappy {
+ hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
+ } else {
+ hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
+ }
+ }
+
+ for len(buf) > 0 {
+ // Cut input.
+ uncompressed := buf
+ if len(uncompressed) > w.blockSize {
+ uncompressed = uncompressed[:w.blockSize]
+ }
+ buf = buf[len(uncompressed):]
+ // Get an output buffer.
+ obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
+ race.WriteSlice(obuf)
+
+ output := make(chan result)
+ // Queue output now, so we keep order.
+ w.output <- output
+ res := result{
+ startOffset: w.uncompWritten,
+ }
+ w.uncompWritten += int64(len(uncompressed))
+ go func() {
+ race.ReadSlice(uncompressed)
+
+ checksum := crc(uncompressed)
+
+ // Set to uncompressed.
+ chunkType := uint8(chunkTypeUncompressedData)
+ chunkLen := 4 + len(uncompressed)
+
+ // Attempt compressing.
+ n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
+ n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)
+
+ // Check if we should use this, or store as uncompressed instead.
+ if n2 > 0 {
+ chunkType = uint8(chunkTypeCompressedData)
+ chunkLen = 4 + n + n2
+ obuf = obuf[:obufHeaderLen+n+n2]
+ } else {
+ // copy uncompressed
+ copy(obuf[obufHeaderLen:], uncompressed)
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ obuf[0] = chunkType
+ obuf[1] = uint8(chunkLen >> 0)
+ obuf[2] = uint8(chunkLen >> 8)
+ obuf[3] = uint8(chunkLen >> 16)
+ obuf[4] = uint8(checksum >> 0)
+ obuf[5] = uint8(checksum >> 8)
+ obuf[6] = uint8(checksum >> 16)
+ obuf[7] = uint8(checksum >> 24)
+
+ // Queue final output.
+ res.b = obuf
+ output <- res
+ }()
+ }
+ return nil
+}
+
+func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
+ if w.customEnc != nil {
+ if ret := w.customEnc(obuf, uncompressed); ret >= 0 {
+ return ret
+ }
+ }
+ if w.snappy {
+ switch w.level {
+ case levelFast:
+ return encodeBlockSnappy(obuf, uncompressed)
+ case levelBetter:
+ return encodeBlockBetterSnappy(obuf, uncompressed)
+ case levelBest:
+ return encodeBlockBestSnappy(obuf, uncompressed)
+ }
+ return 0
+ }
+ switch w.level {
+ case levelFast:
+ return encodeBlock(obuf, uncompressed)
+ case levelBetter:
+ return encodeBlockBetter(obuf, uncompressed)
+ case levelBest:
+ return encodeBlockBest(obuf, uncompressed, nil)
+ }
+ return 0
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if err := w.err(nil); err != nil {
+ return 0, err
+ }
+ if w.concurrency == 1 {
+ return w.writeSync(p)
+ }
+
+ // Spawn goroutine and write block to output channel.
+ for len(p) > 0 {
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ hWriter := make(chan result)
+ w.output <- hWriter
+ if w.snappy {
+ hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
+ } else {
+ hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
+ }
+ }
+
+ var uncompressed []byte
+ if len(p) > w.blockSize {
+ uncompressed, p = p[:w.blockSize], p[w.blockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+
+ // Copy input.
+ // If the block is incompressible, this is used for the result.
+ inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
+ obuf := w.buffers.Get().([]byte)[:w.obufLen]
+ copy(inbuf[obufHeaderLen:], uncompressed)
+ uncompressed = inbuf[obufHeaderLen:]
+
+ output := make(chan result)
+ // Queue output now, so we keep order.
+ w.output <- output
+ res := result{
+ startOffset: w.uncompWritten,
+ }
+ w.uncompWritten += int64(len(uncompressed))
+
+ go func() {
+ checksum := crc(uncompressed)
+
+ // Set to uncompressed.
+ chunkType := uint8(chunkTypeUncompressedData)
+ chunkLen := 4 + len(uncompressed)
+
+ // Attempt compressing.
+ n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
+ n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)
+
+ // Check if we should use this, or store as uncompressed instead.
+ if n2 > 0 {
+ chunkType = uint8(chunkTypeCompressedData)
+ chunkLen = 4 + n + n2
+ obuf = obuf[:obufHeaderLen+n+n2]
+ } else {
+ // Use input as output.
+ obuf, inbuf = inbuf, obuf
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ obuf[0] = chunkType
+ obuf[1] = uint8(chunkLen >> 0)
+ obuf[2] = uint8(chunkLen >> 8)
+ obuf[3] = uint8(chunkLen >> 16)
+ obuf[4] = uint8(checksum >> 0)
+ obuf[5] = uint8(checksum >> 8)
+ obuf[6] = uint8(checksum >> 16)
+ obuf[7] = uint8(checksum >> 24)
+
+ // Queue final output.
+ res.b = obuf
+ output <- res
+
+ // Put unused buffer back in pool.
+ w.buffers.Put(inbuf)
+ }()
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// writeFull is a special version of write that will always write the full buffer.
+// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer.
+// The data will be written as a single block.
+// The caller is not allowed to use inbuf after this function has been called.
+func (w *Writer) writeFull(inbuf []byte) (errRet error) {
+ if err := w.err(nil); err != nil {
+ return err
+ }
+
+ if w.concurrency == 1 {
+ _, err := w.writeSync(inbuf[obufHeaderLen:])
+ if cap(inbuf) >= w.obufLen {
+ w.buffers.Put(inbuf)
+ }
+ return err
+ }
+
+ // Spawn goroutine and write block to output channel.
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ hWriter := make(chan result)
+ w.output <- hWriter
+ if w.snappy {
+ hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
+ } else {
+ hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
+ }
+ }
+
+ // Get an output buffer.
+ obuf := w.buffers.Get().([]byte)[:w.obufLen]
+ uncompressed := inbuf[obufHeaderLen:]
+
+ output := make(chan result)
+ // Queue output now, so we keep order.
+ w.output <- output
+ res := result{
+ startOffset: w.uncompWritten,
+ }
+ w.uncompWritten += int64(len(uncompressed))
+
+ go func() {
+ checksum := crc(uncompressed)
+
+ // Set to uncompressed.
+ chunkType := uint8(chunkTypeUncompressedData)
+ chunkLen := 4 + len(uncompressed)
+
+ // Attempt compressing.
+ n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
+ n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)
+
+ // Check if we should use this, or store as uncompressed instead.
+ if n2 > 0 {
+ chunkType = uint8(chunkTypeCompressedData)
+ chunkLen = 4 + n + n2
+ obuf = obuf[:obufHeaderLen+n+n2]
+ } else {
+ // Use input as output.
+ obuf, inbuf = inbuf, obuf
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ obuf[0] = chunkType
+ obuf[1] = uint8(chunkLen >> 0)
+ obuf[2] = uint8(chunkLen >> 8)
+ obuf[3] = uint8(chunkLen >> 16)
+ obuf[4] = uint8(checksum >> 0)
+ obuf[5] = uint8(checksum >> 8)
+ obuf[6] = uint8(checksum >> 16)
+ obuf[7] = uint8(checksum >> 24)
+
+ // Queue final output.
+ res.b = obuf
+ output <- res
+
+ // Put unused buffer back in pool.
+ w.buffers.Put(inbuf)
+ }()
+ return nil
+}
+
+func (w *Writer) writeSync(p []byte) (nRet int, errRet error) {
+ if err := w.err(nil); err != nil {
+ return 0, err
+ }
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ var n int
+ var err error
+ if w.snappy {
+ n, err = w.writer.Write(magicChunkSnappyBytes)
+ } else {
+ n, err = w.writer.Write(magicChunkBytes)
+ }
+ if err != nil {
+ return 0, w.err(err)
+ }
+ if n != len(magicChunk) {
+ return 0, w.err(io.ErrShortWrite)
+ }
+ w.written += int64(n)
+ }
+
+ for len(p) > 0 {
+ var uncompressed []byte
+ if len(p) > w.blockSize {
+ uncompressed, p = p[:w.blockSize], p[w.blockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+
+ obuf := w.buffers.Get().([]byte)[:w.obufLen]
+ checksum := crc(uncompressed)
+
+ // Set to uncompressed.
+ chunkType := uint8(chunkTypeUncompressedData)
+ chunkLen := 4 + len(uncompressed)
+
+ // Attempt compressing.
+ n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
+ n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)
+
+ if n2 > 0 {
+ chunkType = uint8(chunkTypeCompressedData)
+ chunkLen = 4 + n + n2
+ obuf = obuf[:obufHeaderLen+n+n2]
+ } else {
+ obuf = obuf[:8]
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ obuf[0] = chunkType
+ obuf[1] = uint8(chunkLen >> 0)
+ obuf[2] = uint8(chunkLen >> 8)
+ obuf[3] = uint8(chunkLen >> 16)
+ obuf[4] = uint8(checksum >> 0)
+ obuf[5] = uint8(checksum >> 8)
+ obuf[6] = uint8(checksum >> 16)
+ obuf[7] = uint8(checksum >> 24)
+
+ n, err := w.writer.Write(obuf)
+ if err != nil {
+ return 0, w.err(err)
+ }
+ if n != len(obuf) {
+ return 0, w.err(io.ErrShortWrite)
+ }
+ w.err(w.index.add(w.written, w.uncompWritten))
+ w.written += int64(n)
+ w.uncompWritten += int64(len(uncompressed))
+
+ if chunkType == chunkTypeUncompressedData {
+ // Write uncompressed data.
+ n, err := w.writer.Write(uncompressed)
+ if err != nil {
+ return 0, w.err(err)
+ }
+ if n != len(uncompressed) {
+ return 0, w.err(io.ErrShortWrite)
+ }
+ w.written += int64(n)
+ }
+ w.buffers.Put(obuf)
+ // Queue final output.
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// AsyncFlush writes any buffered bytes to a block and starts compressing it.
+// It does not wait for the output to be written, as Flush() does.
+func (w *Writer) AsyncFlush() error {
+ if err := w.err(nil); err != nil {
+ return err
+ }
+
+ // Queue any data still in input buffer.
+ if len(w.ibuf) != 0 {
+ if !w.wroteStreamHeader {
+ _, err := w.writeSync(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err(err)
+ } else {
+ _, err := w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ err = w.err(err)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return w.err(nil)
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+// This does not apply padding.
+func (w *Writer) Flush() error {
+ if err := w.AsyncFlush(); err != nil {
+ return err
+ }
+ if w.output == nil {
+ return w.err(nil)
+ }
+
+ // Send empty buffer
+ res := make(chan result)
+ w.output <- res
+ // Block until this has been picked up.
+ res <- result{b: nil, startOffset: w.uncompWritten}
+ // When it is closed, we have flushed.
+ <-res
+ return w.err(nil)
+}
+
+// Close calls Flush and then closes the Writer.
+// Calling Close multiple times is ok,
+// but calling CloseIndex afterwards will not return the index.
+func (w *Writer) Close() error {
+ _, err := w.closeIndex(w.appendIndex)
+ return err
+}
+
+// CloseIndex calls Close and returns an index on first call.
+// This is not required if you are only adding an index to a stream.
+func (w *Writer) CloseIndex() ([]byte, error) {
+ return w.closeIndex(true)
+}
+
+func (w *Writer) closeIndex(idx bool) ([]byte, error) {
+ err := w.Flush()
+ if w.output != nil {
+ close(w.output)
+ w.writerWg.Wait()
+ w.output = nil
+ }
+
+ var index []byte
+ if w.err(err) == nil && w.writer != nil {
+ // Create index.
+ if idx {
+ compSize := int64(-1)
+ if w.pad <= 1 {
+ compSize = w.written
+ }
+ index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize)
+ // Count as written for padding.
+ if w.appendIndex {
+ w.written += int64(len(index))
+ }
+ }
+
+ if w.pad > 1 {
+ tmp := w.ibuf[:0]
+ if len(index) > 0 {
+ // Allocate another buffer.
+ tmp = w.buffers.Get().([]byte)[:0]
+ defer w.buffers.Put(tmp)
+ }
+ add := calcSkippableFrame(w.written, int64(w.pad))
+ frame, err := skippableFrame(tmp, add, w.randSrc)
+ if err = w.err(err); err != nil {
+ return nil, err
+ }
+ n, err2 := w.writer.Write(frame)
+ if err2 == nil && n != len(frame) {
+ err2 = io.ErrShortWrite
+ }
+ _ = w.err(err2)
+ }
+ if len(index) > 0 && w.appendIndex {
+ n, err2 := w.writer.Write(index)
+ if err2 == nil && n != len(index) {
+ err2 = io.ErrShortWrite
+ }
+ _ = w.err(err2)
+ }
+ }
+ err = w.err(errClosed)
+ if err == errClosed {
+ return index, nil
+ }
+ return nil, err
+}
+
+// calcSkippableFrame will return the total size to be added so that written
+// becomes divisible by wantMultiple.
+// The returned value is either 0 or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+ if wantMultiple <= 0 {
+ panic("wantMultiple <= 0")
+ }
+ if written < 0 {
+ panic("written < 0")
+ }
+ leftOver := written % wantMultiple
+ if leftOver == 0 {
+ return 0
+ }
+ toAdd := wantMultiple - leftOver
+ for toAdd < skippableFrameHeader {
+ toAdd += wantMultiple
+ }
+ return int(toAdd)
+}
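+
+// Editor's note, worked example: with written=1000 and wantMultiple=4096,
+// leftOver is 1000, so toAdd = 4096-1000 = 3096, which already exceeds
+// skippableFrameHeader (4) and is returned unchanged.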
+
+// skippableFrame will add a skippable frame with a total size of 'total' bytes.
+// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+ if total == 0 {
+ return dst, nil
+ }
+ if total < skippableFrameHeader {
+ return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
+ }
+ if int64(total) >= maxBlockSize+skippableFrameHeader {
+ return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
+ }
+ // Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)"
+ dst = append(dst, chunkTypePadding)
+ f := uint32(total - skippableFrameHeader)
+ // Add chunk length.
+ dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16))
+ // Add data
+ start := len(dst)
+ dst = append(dst, make([]byte, f)...)
+ _, err := io.ReadFull(r, dst[start:])
+ return dst, err
+}
+
+var errClosed = errors.New("s2: Writer is closed")
+
+// WriterOption is an option for creating an encoder.
+type WriterOption func(*Writer) error
+
+// WriterConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WriterConcurrency(n int) WriterOption {
+ return func(w *Writer) error {
+ if n <= 0 {
+ return errors.New("concurrency must be at least 1")
+ }
+ w.concurrency = n
+ return nil
+ }
+}
+
+// WriterAddIndex will append an index to the end of a stream
+// when it is closed.
+func WriterAddIndex() WriterOption {
+ return func(w *Writer) error {
+ w.appendIndex = true
+ return nil
+ }
+}
+
+// WriterBetterCompression will enable better compression.
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+func WriterBetterCompression() WriterOption {
+ return func(w *Writer) error {
+ w.level = levelBetter
+ return nil
+ }
+}
+
+// WriterBestCompression will enable best compression.
+// EncodeBest compresses better than Encode but typically with a
+// big speed decrease on compression.
+func WriterBestCompression() WriterOption {
+ return func(w *Writer) error {
+ w.level = levelBest
+ return nil
+ }
+}
+
+// WriterUncompressed will bypass compression.
+// The stream will be written as uncompressed blocks only.
+// If concurrency is > 1, CRC and output will still be done asynchronously.
+func WriterUncompressed() WriterOption {
+ return func(w *Writer) error {
+ w.level = levelUncompressed
+ return nil
+ }
+}
+
+// WriterBlockSize allows overriding the default block size.
+// Blocks will be this size or smaller.
+// Minimum size is 4KB and maximum size is 4MB.
+//
+// Bigger blocks may give bigger throughput on systems with many cores,
+// and will increase compression slightly, but it will limit the possible
+// concurrency for smaller payloads for both encoding and decoding.
+// Default block size is 1MB.
+//
+// When writing Snappy compatible output using WriterSnappyCompat,
+// the maximum block size is 64KB.
+func WriterBlockSize(n int) WriterOption {
+ return func(w *Writer) error {
+ if w.snappy && n > maxSnappyBlockSize || n < minBlockSize {
+			return errors.New("s2: block size too large. Must be <= 64K and >= 4KB for snappy compatible output")
+ }
+ if n > maxBlockSize || n < minBlockSize {
+ return errors.New("s2: block size too large. Must be <= 4MB and >=4KB")
+ }
+ w.blockSize = n
+ return nil
+ }
+}
+
+// WriterPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 4MB.
+// The padded area will be filled with data from crypto/rand.Reader.
+// The padding will be applied whenever Close is called on the writer.
+func WriterPadding(n int) WriterOption {
+ return func(w *Writer) error {
+ if n <= 0 {
+ return fmt.Errorf("s2: padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ w.pad = 0
+ }
+ if n > maxBlockSize {
+			return fmt.Errorf("s2: padding must be at most 4MB")
+ }
+ w.pad = n
+ return nil
+ }
+}
+
+// WriterPaddingSrc will get random data for padding from the supplied source.
+// By default crypto/rand is used.
+func WriterPaddingSrc(reader io.Reader) WriterOption {
+ return func(w *Writer) error {
+ w.randSrc = reader
+ return nil
+ }
+}
+
+// WriterSnappyCompat will write snappy compatible output.
+// The output can be decompressed using either snappy or s2.
+// If the block size is more than 64KB, it is reduced to fit within the Snappy limit.
+func WriterSnappyCompat() WriterOption {
+ return func(w *Writer) error {
+ w.snappy = true
+ if w.blockSize > 64<<10 {
+ // We choose 8 bytes less than 64K, since that will make literal emits slightly more effective.
+ // And allows us to skip some size checks.
+ w.blockSize = (64 << 10) - 8
+ }
+ return nil
+ }
+}
+
+// WriterFlushOnWrite will compress blocks on each call to the Write function.
+//
+// This is quite inefficient as block size will depend on the write size.
+//
+// Use WriterConcurrency(1) to also make sure that output is flushed
+// when Write calls return; otherwise, output is written when compression is done.
+func WriterFlushOnWrite() WriterOption {
+ return func(w *Writer) error {
+ w.flushOnWrite = true
+ return nil
+ }
+}
+
+// WriterCustomEncoder allows overriding the encoder for blocks on the stream.
+// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
+// Block size (initial varint) should not be added by the encoder.
+// Returning a value of 0 indicates the block could not be compressed and will be stored uncompressed.
+// Returning a negative value indicates that compression should be attempted with the built-in encoders instead.
+// The function should expect to be called concurrently.
+func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
+ return func(w *Writer) error {
+ w.customEnc = fn
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore
new file mode 100644
index 000000000..042091d9b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS
new file mode 100644
index 000000000..52ccb5a93
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS
@@ -0,0 +1,18 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Amazon.com, Inc
+Damian Gryski
+Eric Buth
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Klaus Post
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
new file mode 100644
index 000000000..ea6524ddd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
@@ -0,0 +1,41 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Alex Legg
+Damian Gryski
+Eric Buth
+Jan Mercl <0xjnml@gmail.com>
+Jonathan Swinney
+Kai Backman
+Klaus Post
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE
new file mode 100644
index 000000000..6050c10f4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/snappy/README.md b/vendor/github.com/klauspost/compress/snappy/README.md
new file mode 100644
index 000000000..8271bbd09
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/README.md
@@ -0,0 +1,17 @@
+# snappy
+
+The Snappy compression format in the Go programming language.
+
+This is a drop-in replacement for `github.com/golang/snappy`.
+
+It provides a full, compatible replacement of the Snappy package by simply changing imports.
+
+See [Snappy Compatibility](https://github.com/klauspost/compress/tree/master/s2#snappy-compatibility) in the S2 documentation.
+
+"Better" compression mode is used. For buffered streams concurrent compression is used.
+
+For more options use the [s2 package](https://pkg.go.dev/github.com/klauspost/compress/s2).
+
+# usage
+
+Replace imports `github.com/golang/snappy` with `github.com/klauspost/compress/snappy`.
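+
+A minimal round trip with the block format might look like this (editor's sketch, using only the `Encode`/`Decode` functions documented by the package):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/compress/snappy"
+)
+
+func main() {
+	src := []byte("hello snappy")
+
+	// Encode handles the Snappy block format; a nil dst lets the package allocate.
+	compressed := snappy.Encode(nil, src)
+
+	// Decode reverses it.
+	decoded, err := snappy.Decode(nil, compressed)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(decoded))
+}
+```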
diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go
new file mode 100644
index 000000000..89f1fa234
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/decode.go
@@ -0,0 +1,60 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "io"
+
+ "github.com/klauspost/compress/s2"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = s2.ErrCorrupt
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = s2.ErrTooLarge
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = s2.ErrUnsupported
+)
+
+const (
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ return s2.DecodedLen(src)
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Decode handles the Snappy block format, not the Snappy stream format.
+func Decode(dst, src []byte) ([]byte, error) {
+ return s2.Decode(dst, src)
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return s2.NewReader(r, s2.ReaderMaxBlockSize(maxBlockSize))
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+//
+// Reader handles the Snappy stream format, not the Snappy block format.
+type Reader = s2.Reader
diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go
new file mode 100644
index 000000000..e8bd72c18
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/encode.go
@@ -0,0 +1,59 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "io"
+
+ "github.com/klauspost/compress/s2"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Encode handles the Snappy block format, not the Snappy stream format.
+func Encode(dst, src []byte) []byte {
+ return s2.EncodeSnappyBetter(dst, src)
+}
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ return s2.MaxEncodedLen(srcLen)
+}
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression(), s2.WriterFlushOnWrite(), s2.WriterConcurrency(1))
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression())
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+//
+// Writer handles the Snappy stream format, not the Snappy block format.
+type Writer = s2.Writer
diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go
new file mode 100644
index 000000000..398cdc95a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/snappy.go
@@ -0,0 +1,46 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
diff --git a/vendor/github.com/klauspost/cpuid/v2/.gitignore b/vendor/github.com/klauspost/cpuid/v2/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
new file mode 100644
index 000000000..944cc0007
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
@@ -0,0 +1,74 @@
+# This is an example goreleaser.yaml file with some sane defaults.
+# Make sure to check the documentation at http://goreleaser.com
+
+builds:
+ -
+ id: "cpuid"
+ binary: cpuid
+ main: ./cmd/cpuid/main.go
+ env:
+ - CGO_ENABLED=0
+ flags:
+ - -ldflags=-s -w
+ goos:
+ - aix
+ - linux
+ - freebsd
+ - netbsd
+ - windows
+ - darwin
+ goarch:
+ - 386
+ - amd64
+ - arm64
+ goarm:
+ - 7
+
+archives:
+ -
+ id: cpuid
+ name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
+ replacements:
+ aix: AIX
+ darwin: OSX
+ linux: Linux
+ windows: Windows
+ 386: i386
+ amd64: x86_64
+ freebsd: FreeBSD
+ netbsd: NetBSD
+ format_overrides:
+ - goos: windows
+ format: zip
+ files:
+ - LICENSE
+checksum:
+ name_template: 'checksums.txt'
+snapshot:
+ name_template: "{{ .Tag }}-next"
+changelog:
+ sort: asc
+ filters:
+ exclude:
+ - '^doc:'
+ - '^docs:'
+ - '^test:'
+ - '^tests:'
+ - '^Update\sREADME.md'
+
+nfpms:
+ -
+ file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+ vendor: Klaus Post
+ homepage: https://github.com/klauspost/cpuid
+ maintainer: Klaus Post
+ description: CPUID Tool
+ license: BSD 3-Clause
+ formats:
+ - deb
+ - rpm
+ replacements:
+ darwin: Darwin
+ linux: Linux
+ freebsd: FreeBSD
+ amd64: x86_64
diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt
new file mode 100644
index 000000000..2ef4714f7
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt
@@ -0,0 +1,35 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2015- Klaus Post & Contributors.
+Email: klauspost@gmail.com
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
diff --git a/vendor/github.com/klauspost/cpuid/v2/LICENSE b/vendor/github.com/klauspost/cpuid/v2/LICENSE
new file mode 100644
index 000000000..5cec7ee94
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
new file mode 100644
index 000000000..21508edbd
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -0,0 +1,498 @@
+# cpuid
+Package cpuid provides information about the CPU running the current program.
+
+CPU features are detected on startup, and kept for fast access through the life of the application.
+Currently x86 / x64 (AMD64/i386) and ARM (ARM64) are supported, and no external C (cgo) code is used, which should make the library very easy to use.
+
+You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+
+Package home: https://github.com/klauspost/cpuid
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2)
+[![Go](https://github.com/klauspost/cpuid/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/cpuid/actions/workflows/go.yml)
+
+## installing
+
+`go get -u github.com/klauspost/cpuid/v2` using modules.
+Drop `v2` for others.
+
+Installing binary:
+
+`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest`
+
+Or download binaries from release page: https://github.com/klauspost/cpuid/releases
+
+### Homebrew
+
+For macOS/Linux users, you can install via [brew](https://brew.sh/)
+
+```sh
+$ brew install cpuid
+```
+
+## example
+
+```Go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ . "github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+ // Print basic CPU information:
+ fmt.Println("Name:", CPU.BrandName)
+ fmt.Println("PhysicalCores:", CPU.PhysicalCores)
+ fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore)
+ fmt.Println("LogicalCores:", CPU.LogicalCores)
+ fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID)
+ fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ","))
+ fmt.Println("Cacheline bytes:", CPU.CacheLine)
+ fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes")
+ fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
+ fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes")
+ fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes")
+ fmt.Println("Frequency", CPU.Hz, "hz")
+
+ // Test if we have these specific features:
+ if CPU.Supports(SSE, SSE2) {
+ fmt.Println("We have Streaming SIMD 2 Extensions")
+ }
+}
+```
+
+Sample output:
+```
+>go run main.go
+Name: AMD Ryzen 9 3950X 16-Core Processor
+PhysicalCores: 16
+ThreadsPerCore: 2
+LogicalCores: 32
+Family 23 Model: 113 Vendor ID: AMD
+Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3
+Cacheline bytes: 64
+L1 Data Cache: 32768 bytes
+L1 Instruction Cache: 32768 bytes
+L2 Cache: 524288 bytes
+L3 Cache: 16777216 bytes
+Frequency 0 hz
+We have Streaming SIMD 2 Extensions
+```
+
+# usage
+
+The `cpuid.CPU` variable provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
+A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
+
+To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc.
+This can be used with `cpuid.CPU.HasAll(f)` to quickly test whether all features are supported.
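+
+For example, a minimal sketch that pre-combines a feature set once and then tests it (the combined set here is roughly the x86-64-v1 baseline):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+// Combine the features once; HasAll can then test them in near constant time.
+var baseline = cpuid.CombineFeatures(cpuid.CMOV, cpuid.CMPXCHG8, cpuid.X87, cpuid.FXSR, cpuid.MMX, cpuid.SYSCALL, cpuid.SSE, cpuid.SSE2)
+
+func main() {
+	if cpuid.CPU.HasAll(baseline) {
+		fmt.Println("all baseline features present")
+	}
+	// Single-feature check; Has is usually inlined by the compiler.
+	if cpuid.CPU.Has(cpuid.AVX2) {
+		fmt.Println("AVX2 available")
+	}
+}
+```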
+
+Note that for some cpu/os combinations some features will not be detected.
+`amd64` has rather good support and should work reliably on all platforms.
+
+Note that hypervisors may not pass all CPU features through to the guest OS,
+so even if your host supports a feature it may not be visible on guests.
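+
+For example, a small sketch that checks the hypervisor bit before drawing conclusions from a missing feature:
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+	// VM reports whether the HYPERVISOR CPUID bit is set.
+	if cpuid.CPU.VM() {
+		fmt.Println("running under a hypervisor; host CPU features may be hidden")
+	}
+}
+```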
+
+## arm64 feature detection
+
+Not all operating systems expose ARM features directly,
+and there is no safe way to detect them on the rest.
+
+Currently `arm64/linux` and `arm64/freebsd` should be quite reliable.
+`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected.
+
+`DetectARM()` can be used if you are able to control your deployment.
+It will detect CPU features, but may crash if the OS doesn't intercept the calls.
+A `-cpu.arm` flag for detecting unsafe ARM features can be added; see the flags section below.
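+
+For example, a minimal sketch that forces ARM detection when you control the deployment (it may crash if the OS does not handle the feature-register reads):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+	// Only call this when the OS is known to trap user-space reads of the
+	// ID registers (for example recent arm64 Linux kernels).
+	cpuid.DetectARM()
+
+	if cpuid.CPU.Supports(cpuid.ASIMD, cpuid.AESARM) {
+		fmt.Println("NEON and AES instructions available")
+	}
+}
+```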
+
+Note that currently only features are detected on ARM;
+no additional information is available.
+
+## flags
+
+It is possible to add flags that affect CPU detection.
+
+For this, the `Flags()` function is provided.
+
+This must be called *before* `flag.Parse()`, and `Detect()` must be called after the flags have been parsed.
+
+This means that any detection used in `init()` functions will not contain these flags.
+
+Example:
+
+```Go
+package main
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+
+ "github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+ cpuid.Flags()
+ flag.Parse()
+ cpuid.Detect()
+
+ // Test if we have these specific features:
+ if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) {
+ fmt.Println("We have Streaming SIMD 2 Extensions")
+ }
+}
+```
+
+## commandline
+
+Download as binary from: https://github.com/klauspost/cpuid/releases
+
+Install from source:
+
+`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest`
+
+### Example
+
+```
+λ cpuid
+Name: AMD Ryzen 9 3950X 16-Core Processor
+Vendor String: AuthenticAMD
+Vendor ID: AMD
+PhysicalCores: 16
+Threads Per Core: 2
+Logical Cores: 32
+CPU Family 23 Model: 113
+Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CLZERO,CMOV,CMPXCHG8,CPBOOST,CX16,F16C,FMA3,FXSR,FXSROPT,HTT,HYPERVISOR,LAHF,LZCNT,MCAOVERFLOW,MMX,MMXEXT,MOVBE,NX,OSXSAVE,POPCNT,RDRAND,RDSEED,RDTSCP,SCE,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3,SUCCOR,X87,XSAVE
+Microarchitecture level: 3
+Cacheline bytes: 64
+L1 Instruction Cache: 32768 bytes
+L1 Data Cache: 32768 bytes
+L2 Cache: 524288 bytes
+L3 Cache: 16777216 bytes
+
+```
+### JSON Output:
+
+```
+λ cpuid --json
+{
+ "BrandName": "AMD Ryzen 9 3950X 16-Core Processor",
+ "VendorID": 2,
+ "VendorString": "AuthenticAMD",
+ "PhysicalCores": 16,
+ "ThreadsPerCore": 2,
+ "LogicalCores": 32,
+ "Family": 23,
+ "Model": 113,
+ "CacheLine": 64,
+ "Hz": 0,
+ "BoostFreq": 0,
+ "Cache": {
+ "L1I": 32768,
+ "L1D": 32768,
+ "L2": 524288,
+ "L3": 16777216
+ },
+ "SGX": {
+ "Available": false,
+ "LaunchControl": false,
+ "SGX1Supported": false,
+ "SGX2Supported": false,
+ "MaxEnclaveSizeNot64": 0,
+ "MaxEnclaveSize64": 0,
+ "EPCSections": null
+ },
+ "Features": [
+ "ADX",
+ "AESNI",
+ "AVX",
+ "AVX2",
+ "BMI1",
+ "BMI2",
+ "CLMUL",
+ "CLZERO",
+ "CMOV",
+ "CMPXCHG8",
+ "CPBOOST",
+ "CX16",
+ "F16C",
+ "FMA3",
+ "FXSR",
+ "FXSROPT",
+ "HTT",
+ "HYPERVISOR",
+ "LAHF",
+ "LZCNT",
+ "MCAOVERFLOW",
+ "MMX",
+ "MMXEXT",
+ "MOVBE",
+ "NX",
+ "OSXSAVE",
+ "POPCNT",
+ "RDRAND",
+ "RDSEED",
+ "RDTSCP",
+ "SCE",
+ "SHA",
+ "SSE",
+ "SSE2",
+ "SSE3",
+ "SSE4",
+ "SSE42",
+ "SSE4A",
+ "SSSE3",
+ "SUCCOR",
+ "X87",
+ "XSAVE"
+ ],
+ "X64Level": 3
+}
+```
+
+### Check CPU microarch level
+
+```
+λ cpuid --check-level=3
+2022/03/18 17:04:40 AMD Ryzen 9 3950X 16-Core Processor
+2022/03/18 17:04:40 Microarchitecture level 3 is supported. Max level is 3.
+Exit Code 0
+
+λ cpuid --check-level=4
+2022/03/18 17:06:18 AMD Ryzen 9 3950X 16-Core Processor
+2022/03/18 17:06:18 Microarchitecture level 4 not supported. Max level is 3.
+Exit Code 1
+```
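+
+The same check can also be done from your own program; a small sketch using `CPU.X64Level()`:
+
+```Go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+	// Same idea as `cpuid --check-level=3`, but in-process.
+	if level := cpuid.CPU.X64Level(); level < 3 {
+		fmt.Fprintln(os.Stderr, "this build needs x86-64-v3; detected level", level)
+		os.Exit(1)
+	}
+	fmt.Println("Microarchitecture level:", cpuid.CPU.X64Level())
+}
+```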
+
+
+## Available flags
+
+### x86 & amd64
+
+| Feature Flag | Description |
+|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) |
+| AESNI | Advanced Encryption Standard New Instructions |
+| AMD3DNOW | AMD 3DNOW |
+| AMD3DNOWEXT | AMD 3DNowExt |
+| AMXBF16 | Tile computational operations on BFLOAT16 numbers |
+| AMXINT8 | Tile computational operations on 8-bit integers |
+| AMXFP16 | Tile computational operations on FP16 numbers |
+| AMXTILE | Tile architecture |
+| APX_F | Intel APX |
+| AVX | AVX functions |
+| AVX10 | If set the Intel AVX10 Converged Vector ISA is supported |
+| AVX10_128 | If set indicates that AVX10 128-bit vector support is present |
+| AVX10_256 | If set indicates that AVX10 256-bit vector support is present |
+| AVX10_512 | If set indicates that AVX10 512-bit vector support is present |
+| AVX2 | AVX2 functions |
+| AVX512BF16 | AVX-512 BFLOAT16 Instructions |
+| AVX512BITALG | AVX-512 Bit Algorithms |
+| AVX512BW | AVX-512 Byte and Word Instructions |
+| AVX512CD | AVX-512 Conflict Detection Instructions |
+| AVX512DQ | AVX-512 Doubleword and Quadword Instructions |
+| AVX512ER | AVX-512 Exponential and Reciprocal Instructions |
+| AVX512F | AVX-512 Foundation |
+| AVX512FP16 | AVX-512 FP16 Instructions |
+| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions |
+| AVX512PF | AVX-512 Prefetch Instructions |
+| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions |
+| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 |
+| AVX512VL | AVX-512 Vector Length Extensions |
+| AVX512VNNI | AVX-512 Vector Neural Network Instructions |
+| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q |
+| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword |
+| AVXIFMA | AVX-IFMA instructions |
+| AVXNECONVERT | AVX-NE-CONVERT instructions |
+| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one |
+| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions |
+| AVXVNNIINT8 | AVX-VNNI-INT8 instructions |
+| AVXVNNIINT16 | AVX-VNNI-INT16 instructions |
+| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 |
+| BMI1 | Bit Manipulation Instruction Set 1 |
+| BMI2 | Bit Manipulation Instruction Set 2 |
+| CETIBT | Intel CET Indirect Branch Tracking |
+| CETSS | Intel CET Shadow Stack |
+| CLDEMOTE | Cache Line Demote |
+| CLMUL | Carry-less Multiplication |
+| CLZERO | CLZERO instruction supported |
+| CMOV | i686 CMOV |
+| CMPCCXADD | CMPCCXADD instructions |
+| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB |
+| CMPXCHG8 | CMPXCHG8 instruction |
+| CPBOOST | Core Performance Boost |
+| CPPC | AMD: Collaborative Processor Performance Control |
+| CX16 | CMPXCHG16B Instruction |
+| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ |
+| ENQCMD | Enqueue Command |
+| ERMS | Enhanced REP MOVSB/STOSB |
+| F16C | Half-precision floating-point conversion |
+| FLUSH_L1D | Flush L1D cache |
+| FMA3 | Intel FMA 3. Does not imply AVX. |
+| FMA4 | Bulldozer FMA4 functions |
+| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide |
+| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide |
+| FSRM | Fast Short Rep Mov |
+| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 |
+| FXSROPT | FXSAVE/FXRSTOR optimizations |
+| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. |
+| HLE | Hardware Lock Elision |
+| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR |
+| HTT | Hyperthreading (enabled) |
+| HWA | Hardware assert supported. Indicates support for MSRC001_10 |
+| HYBRID_CPU | This part has CPUs of more than one type. |
+| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors |
+| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) |
+| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR |
+| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) |
+| IBRS | AMD: Indirect Branch Restricted Speculation |
+| IBRS_PREFERRED | AMD: IBRS is preferred over software solution |
+| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection |
+| IBS | Instruction Based Sampling (AMD) |
+| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) |
+| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) |
+| IBSFFV | Instruction Based Sampling Feature (AMD) |
+| IBSOPCNT | Instruction Based Sampling Feature (AMD) |
+| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) |
+| IBSOPSAM | Instruction Based Sampling Feature (AMD) |
+| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) |
+| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) |
+| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported |
+| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported |
+| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse |
+| IBS_PREVENTHOST | Disallowing IBS use by the host supported |
+| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 |
+| IDPRED_CTRL | IPRED_DIS |
+| INT_WBINVD | WBINVD/WBNOINVD are interruptible. |
+| INVLPGB            | INVLPGB and TLBSYNC instructions supported                                                                                                             |
+| KEYLOCKER | Key locker |
+| KEYLOCKERW | Key locker wide |
+| LAHF | LAHF/SAHF in long mode |
+| LAM | If set, CPU supports Linear Address Masking |
+| LBRVIRT | LBR virtualization |
+| LZCNT | LZCNT instruction |
+| MCAOVERFLOW | MCA overflow recovery support. |
+| MCDT_NO            | Processor does not exhibit MXCSR Configuration Dependent Timing behavior and does not need to mitigate it.                                            |
+| MCOMMIT | MCOMMIT instruction supported |
+| MD_CLEAR | VERW clears CPU buffers |
+| MMX | standard MMX |
+| MMXEXT | SSE integer functions or AMD MMX ext |
+| MOVBE | MOVBE instruction (big-endian) |
+| MOVDIR64B | Move 64 Bytes as Direct Store |
+| MOVDIRI | Move Doubleword as Direct Store |
+| MOVSB_ZL | Fast Zero-Length MOVSB |
+| MPX | Intel MPX (Memory Protection Extensions) |
+| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD |
+| MSRIRC | Instruction Retired Counter MSR available |
+| MSRLIST | Read/Write List of Model Specific Registers |
+| MSR_PAGEFLUSH | Page Flush MSR available |
+| NRIPS | Indicates support for NRIP save on VMEXIT |
+| NX | NX (No-Execute) bit |
+| OSXSAVE | XSAVE enabled by OS |
+| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption |
+| POPCNT | POPCNT instruction |
+| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled |
+| PREFETCHI | PREFETCHIT0/1 instructions |
+| PSFD | Predictive Store Forward Disable |
+| RDPRU | RDPRU instruction supported |
+| RDRAND | RDRAND instruction is available |
+| RDSEED | RDSEED instruction is available |
+| RDTSCP | RDTSCP Instruction |
+| RRSBA_CTRL | Restricted RSB Alternate |
+| RTM | Restricted Transactional Memory |
+| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. |
+| SERIALIZE | Serialize Instruction Execution |
+| SEV | AMD Secure Encrypted Virtualization supported |
+| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host |
+| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported |
+| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests |
+| SEV_ES | AMD SEV Encrypted State supported |
+| SEV_RESTRICTED | AMD SEV Restricted Injection supported |
+| SEV_SNP | AMD SEV Secure Nested Paging supported |
+| SGX | Software Guard Extensions |
+| SGXLC | Software Guard Extensions Launch Control |
+| SHA | Intel SHA Extensions |
+| SME | AMD Secure Memory Encryption supported |
+| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced |
+| SPEC_CTRL_SSBD | Speculative Store Bypass Disable |
+| SRBDS_CTRL | SRBDS mitigation MSR available |
+| SSE | SSE functions |
+| SSE2 | P4 SSE functions |
+| SSE3 | Prescott SSE3 functions |
+| SSE4 | Penryn SSE4.1 functions |
+| SSE42 | Nehalem SSE4.2 functions |
+| SSE4A | AMD Barcelona microarchitecture SSE4a instructions |
+| SSSE3 | Conroe SSSE3 functions |
+| STIBP | Single Thread Indirect Branch Predictors |
+| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On |
+| STOSB_SHORT | Fast short STOSB |
+| SUCCOR | Software uncorrectable error containment and recovery capability. |
+| SVM | AMD Secure Virtual Machine |
+| SVMDA | Indicates support for the SVM decode assists. |
+| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control |
+| SVML | AMD SVM lock. Indicates support for SVM-Lock. |
+| SVMNP | AMD SVM nested paging |
+| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter |
+| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold |
+| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
+| SYSEE | SYSENTER and SYSEXIT instructions |
+| TBM | AMD Trailing Bit Manipulation |
+| TDX_GUEST | Intel Trust Domain Extensions Guest |
+| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
+| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
+| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |
+| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 |
+| TSXLDTRK | Intel TSX Suspend Load Address Tracking |
+| VAES | Vector AES. AVX(512) versions requires additional checks. |
+| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. |
+| VMPL | AMD VM Permission Levels supported |
+| VMSA_REGPROT | AMD VMSA Register Protection supported |
+| VMX | Virtual Machine Extensions |
+| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. |
+| VTE | AMD Virtual Transparent Encryption supported |
+| WAITPKG | TPAUSE, UMONITOR, UMWAIT |
+| WBNOINVD | Write Back and Do Not Invalidate Cache |
+| WRMSRNS | Non-Serializing Write to Model Specific Register |
+| X87 | FPU |
+| XGETBV1 | Supports XGETBV with ECX = 1 |
+| XOP | Bulldozer XOP functions |
+| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV |
+| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. |
+| XSAVEOPT | XSAVEOPT available |
+| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS |
+
+### arm64
+
+| Feature Flag | Description |
+|--------------|------------------------------------------------------------------|
+| AESARM | AES instructions |
+| ARMCPUID | Some CPU ID registers readable at user-level |
+| ASIMD | Advanced SIMD |
+| ASIMDDP | SIMD Dot Product |
+| ASIMDHP | Advanced SIMD half-precision floating point |
+| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) |
+| ATOMICS | Large System Extensions (LSE) |
+| CRC32 | CRC32/CRC32C instructions |
+| DCPOP | Data cache clean to Point of Persistence (DC CVAP) |
+| EVTSTRM | Generic timer |
+| FCMA         | Floating point complex number addition and multiplication        |
+| FP | Single-precision and double-precision floating point |
+| FPHP | Half-precision floating point |
+| GPA | Generic Pointer Authentication |
+| JSCVT | Javascript-style double->int convert (FJCVTZS) |
+| LRCPC | Weaker release consistency (LDAPR, etc) |
+| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) |
+| SHA1 | SHA-1 instructions (SHA1C, etc) |
+| SHA2 | SHA-2 instructions (SHA256H, etc) |
+| SHA3         | SHA-3 instructions (EOR3, RAX1, XAR, BCAX)                        |
+| SHA512 | SHA512 instructions |
+| SM3 | SM3 instructions |
+| SM4 | SM4 instructions |
+| SVE | Scalable Vector Extension |
+
+# license
+
+This code is published under an MIT license. See LICENSE file for more information.
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
new file mode 100644
index 000000000..53bc18ca7
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -0,0 +1,1516 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// Package cpuid provides information about the CPU running the current program.
+//
+// CPU features are detected on startup, and kept for fast access through the life of the application.
+// Currently x86 / x64 (AMD64) as well as arm64 is supported.
+//
+// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+//
+// Package home: https://github.com/klauspost/cpuid
+package cpuid
+
+import (
+ "flag"
+ "fmt"
+ "math"
+ "math/bits"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// AMD reference: https://www.amd.com/system/files/TechDocs/25481.pdf
+// and Processor Programming Reference (PPR)
+
+// Vendor is a representation of a CPU vendor.
+type Vendor int
+
+const (
+ VendorUnknown Vendor = iota
+ Intel
+ AMD
+ VIA
+ Transmeta
+ NSC
+ KVM // Kernel-based Virtual Machine
+ MSVM // Microsoft Hyper-V or Windows Virtual PC
+ VMware
+ XenHVM
+ Bhyve
+ Hygon
+ SiS
+ RDC
+
+ Ampere
+ ARM
+ Broadcom
+ Cavium
+ DEC
+ Fujitsu
+ Infineon
+ Motorola
+ NVIDIA
+ AMCC
+ Qualcomm
+ Marvell
+
+ lastVendor
+)
+
+//go:generate stringer -type=FeatureID,Vendor
+
+// FeatureID is the ID of a specific cpu feature.
+type FeatureID int
+
+const (
+ // Keep index -1 as unknown
+ UNKNOWN = -1
+
+ // x86 features
+ ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+ AESNI // Advanced Encryption Standard New Instructions
+ AMD3DNOW // AMD 3DNOW
+ AMD3DNOWEXT // AMD 3DNowExt
+ AMXBF16 // Tile computational operations on BFLOAT16 numbers
+ AMXFP16 // Tile computational operations on FP16 numbers
+ AMXINT8 // Tile computational operations on 8-bit integers
+ AMXTILE // Tile architecture
+ APX_F // Intel APX
+ AVX // AVX functions
+ AVX10 // If set the Intel AVX10 Converged Vector ISA is supported
+ AVX10_128 // If set indicates that AVX10 128-bit vector support is present
+ AVX10_256 // If set indicates that AVX10 256-bit vector support is present
+ AVX10_512 // If set indicates that AVX10 512-bit vector support is present
+ AVX2 // AVX2 functions
+ AVX512BF16 // AVX-512 BFLOAT16 Instructions
+ AVX512BITALG // AVX-512 Bit Algorithms
+ AVX512BW // AVX-512 Byte and Word Instructions
+ AVX512CD // AVX-512 Conflict Detection Instructions
+ AVX512DQ // AVX-512 Doubleword and Quadword Instructions
+ AVX512ER // AVX-512 Exponential and Reciprocal Instructions
+ AVX512F // AVX-512 Foundation
+ AVX512FP16 // AVX-512 FP16 Instructions
+ AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
+ AVX512PF // AVX-512 Prefetch Instructions
+ AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
+ AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2
+ AVX512VL // AVX-512 Vector Length Extensions
+ AVX512VNNI // AVX-512 Vector Neural Network Instructions
+ AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
+ AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
+ AVXIFMA // AVX-IFMA instructions
+ AVXNECONVERT // AVX-NE-CONVERT instructions
+ AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one
+ AVXVNNI // AVX (VEX encoded) VNNI neural network instructions
+ AVXVNNIINT8 // AVX-VNNI-INT8 instructions
+ AVXVNNIINT16 // AVX-VNNI-INT16 instructions
+ BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598
+ BMI1 // Bit Manipulation Instruction Set 1
+ BMI2 // Bit Manipulation Instruction Set 2
+ CETIBT // Intel CET Indirect Branch Tracking
+ CETSS // Intel CET Shadow Stack
+ CLDEMOTE // Cache Line Demote
+ CLMUL // Carry-less Multiplication
+ CLZERO // CLZERO instruction supported
+ CMOV // i686 CMOV
+ CMPCCXADD // CMPCCXADD instructions
+ CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB
+ CMPXCHG8 // CMPXCHG8 instruction
+ CPBOOST // Core Performance Boost
+ CPPC // AMD: Collaborative Processor Performance Control
+ CX16 // CMPXCHG16B Instruction
+ EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ
+ ENQCMD // Enqueue Command
+ ERMS // Enhanced REP MOVSB/STOSB
+ F16C // Half-precision floating-point conversion
+ FLUSH_L1D // Flush L1D cache
+ FMA3 // Intel FMA 3. Does not imply AVX.
+ FMA4 // Bulldozer FMA4 functions
+ FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide
+ FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide
+ FSRM // Fast Short Rep Mov
+ FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9
+ FXSROPT // FXSAVE/FXRSTOR optimizations
+ GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage.
+ HLE // Hardware Lock Elision
+ HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR
+ HTT // Hyperthreading (enabled)
+ HWA // Hardware assert supported. Indicates support for MSRC001_10
+ HYBRID_CPU // This part has CPUs of more than one type.
+ HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
+ IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel)
+ IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR
+ IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
+ IBPB_BRTYPE // Indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor
+ IBRS // AMD: Indirect Branch Restricted Speculation
+ IBRS_PREFERRED // AMD: IBRS is preferred over software solution
+ IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection
+ IBS // Instruction Based Sampling (AMD)
+ IBSBRNTRGT // Instruction Based Sampling Feature (AMD)
+ IBSFETCHSAM // Instruction Based Sampling Feature (AMD)
+ IBSFFV // Instruction Based Sampling Feature (AMD)
+ IBSOPCNT // Instruction Based Sampling Feature (AMD)
+ IBSOPCNTEXT // Instruction Based Sampling Feature (AMD)
+ IBSOPSAM // Instruction Based Sampling Feature (AMD)
+ IBSRDWROPCNT // Instruction Based Sampling Feature (AMD)
+ IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
+ IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported
+ IBS_OPDATA4 // AMD: IBS op data 4 MSR supported
+ IBS_OPFUSE // AMD: Indicates support for IbsOpFuse
+ IBS_PREVENTHOST // Disallowing IBS use by the host supported
+ IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4
+ IDPRED_CTRL // IPRED_DIS
+ INT_WBINVD // WBINVD/WBNOINVD are interruptible.
+	INVLPGB             // INVLPGB and TLBSYNC instructions supported
+ KEYLOCKER // Key locker
+ KEYLOCKERW // Key locker wide
+ LAHF // LAHF/SAHF in long mode
+ LAM // If set, CPU supports Linear Address Masking
+ LBRVIRT // LBR virtualization
+ LZCNT // LZCNT instruction
+ MCAOVERFLOW // MCA overflow recovery support.
+	MCDT_NO             // Processor does not exhibit MXCSR Configuration Dependent Timing behavior and does not need to mitigate it.
+ MCOMMIT // MCOMMIT instruction supported
+ MD_CLEAR // VERW clears CPU buffers
+ MMX // standard MMX
+ MMXEXT // SSE integer functions or AMD MMX ext
+ MOVBE // MOVBE instruction (big-endian)
+ MOVDIR64B // Move 64 Bytes as Direct Store
+ MOVDIRI // Move Doubleword as Direct Store
+ MOVSB_ZL // Fast Zero-Length MOVSB
+ MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD
+ MPX // Intel MPX (Memory Protection Extensions)
+ MSRIRC // Instruction Retired Counter MSR available
+ MSRLIST // Read/Write List of Model Specific Registers
+ MSR_PAGEFLUSH // Page Flush MSR available
+ NRIPS // Indicates support for NRIP save on VMEXIT
+ NX // NX (No-Execute) bit
+ OSXSAVE // XSAVE enabled by OS
+ PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption
+ POPCNT // POPCNT instruction
+ PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled
+ PREFETCHI // PREFETCHIT0/1 instructions
+ PSFD // Predictive Store Forward Disable
+ RDPRU // RDPRU instruction supported
+ RDRAND // RDRAND instruction is available
+ RDSEED // RDSEED instruction is available
+ RDTSCP // RDTSCP Instruction
+ RRSBA_CTRL // Restricted RSB Alternate
+ RTM // Restricted Transactional Memory
+ RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
+ SBPB // Indicates support for the Selective Branch Predictor Barrier
+ SERIALIZE // Serialize Instruction Execution
+ SEV // AMD Secure Encrypted Virtualization supported
+ SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host
+ SEV_ALTERNATIVE // AMD SEV Alternate Injection supported
+ SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests
+ SEV_ES // AMD SEV Encrypted State supported
+ SEV_RESTRICTED // AMD SEV Restricted Injection supported
+ SEV_SNP // AMD SEV Secure Nested Paging supported
+ SGX // Software Guard Extensions
+ SGXLC // Software Guard Extensions Launch Control
+ SHA // Intel SHA Extensions
+ SME // AMD Secure Memory Encryption supported
+ SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced
+ SPEC_CTRL_SSBD // Speculative Store Bypass Disable
+ SRBDS_CTRL // SRBDS mitigation MSR available
+ SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO.
+ SRSO_NO // Indicates the CPU is not subject to the SRSO vulnerability
+ SRSO_USER_KERNEL_NO // Indicates the CPU is not subject to the SRSO vulnerability across user/kernel boundaries
+ SSE // SSE functions
+ SSE2 // P4 SSE functions
+ SSE3 // Prescott SSE3 functions
+ SSE4 // Penryn SSE4.1 functions
+ SSE42 // Nehalem SSE4.2 functions
+ SSE4A // AMD Barcelona microarchitecture SSE4a instructions
+ SSSE3 // Conroe SSSE3 functions
+ STIBP // Single Thread Indirect Branch Predictors
+ STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On
+ STOSB_SHORT // Fast short STOSB
+ SUCCOR // Software uncorrectable error containment and recovery capability.
+ SVM // AMD Secure Virtual Machine
+ SVMDA // Indicates support for the SVM decode assists.
+ SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control
+ SVML // AMD SVM lock. Indicates support for SVM-Lock.
+ SVMNP // AMD SVM nested paging
+ SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter
+ SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold
+ SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
+ SYSEE // SYSENTER and SYSEXIT instructions
+ TBM // AMD Trailing Bit Manipulation
+ TDX_GUEST // Intel Trust Domain Extensions Guest
+ TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
+ TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
+ TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
+ TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104
+ TSXLDTRK // Intel TSX Suspend Load Address Tracking
+ VAES // Vector AES. AVX(512) versions requires additional checks.
+ VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits.
+ VMPL // AMD VM Permission Levels supported
+ VMSA_REGPROT // AMD VMSA Register Protection supported
+ VMX // Virtual Machine Extensions
+ VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions.
+ VTE // AMD Virtual Transparent Encryption supported
+ WAITPKG // TPAUSE, UMONITOR, UMWAIT
+ WBNOINVD // Write Back and Do Not Invalidate Cache
+ WRMSRNS // Non-Serializing Write to Model Specific Register
+ X87 // FPU
+ XGETBV1 // Supports XGETBV with ECX = 1
+ XOP // Bulldozer XOP functions
+ XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV
+ XSAVEC // Supports XSAVEC and the compacted form of XRSTOR.
+ XSAVEOPT // XSAVEOPT available
+ XSAVES // Supports XSAVES/XRSTORS and IA32_XSS
+
+ // ARM features:
+ AESARM // AES instructions
+ ARMCPUID // Some CPU ID registers readable at user-level
+ ASIMD // Advanced SIMD
+ ASIMDDP // SIMD Dot Product
+ ASIMDHP // Advanced SIMD half-precision floating point
+ ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
+ ATOMICS // Large System Extensions (LSE)
+ CRC32 // CRC32/CRC32C instructions
+ DCPOP // Data cache clean to Point of Persistence (DC CVAP)
+ EVTSTRM // Generic timer
+	FCMA     // Floating point complex number addition and multiplication
+ FP // Single-precision and double-precision floating point
+ FPHP // Half-precision floating point
+ GPA // Generic Pointer Authentication
+ JSCVT // Javascript-style double->int convert (FJCVTZS)
+ LRCPC // Weaker release consistency (LDAPR, etc)
+ PMULL // Polynomial Multiply instructions (PMULL/PMULL2)
+ SHA1 // SHA-1 instructions (SHA1C, etc)
+ SHA2 // SHA-2 instructions (SHA256H, etc)
+	SHA3     // SHA-3 instructions (EOR3, RAX1, XAR, BCAX)
+ SHA512 // SHA512 instructions
+ SM3 // SM3 instructions
+ SM4 // SM4 instructions
+ SVE // Scalable Vector Extension
+ // Keep it last. It automatically defines the size of []flagSet
+ lastID
+
+ firstID FeatureID = UNKNOWN + 1
+)
+
+// CPUInfo contains information about the detected system CPU.
+type CPUInfo struct {
+ BrandName string // Brand name reported by the CPU
+ VendorID Vendor // Comparable CPU vendor ID
+ VendorString string // Raw vendor string.
+ featureSet flagSet // Features of the CPU
+ PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
+ ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
+ LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
+ Family int // CPU family number
+ Model int // CPU model number
+ Stepping int // CPU stepping info
+ CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
+ Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
+ BoostFreq int64 // Max clock speed, if known, 0 otherwise
+ Cache struct {
+ L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
+ L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
+ L2 int // L2 Cache (per core or shared). Will be -1 if undetected
+ L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected
+ }
+ SGX SGXSupport
+ AMDMemEncryption AMDMemEncryptionSupport
+ AVX10Level uint8
+ maxFunc uint32
+ maxExFunc uint32
+}
+
+var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
+var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+var xgetbv func(index uint32) (eax, edx uint32)
+var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
+var darwinHasAVX512 = func() bool { return false }
+
+// CPU contains information about the CPU as detected on startup,
+// or when Detect last was called.
+//
+// Use this as the primary entry point to your data.
+var CPU CPUInfo
+
+func init() {
+ initCPU()
+ Detect()
+}
+
+// Detect will re-detect current CPU info.
+// This will replace the content of the exported CPU variable.
+//
+// Unless you expect the CPU to change while you are running your program
+// you should not need to call this function.
+// If you call this, you must ensure that no other goroutine is accessing the
+// exported CPU variable.
+func Detect() {
+ // Set defaults
+ CPU.ThreadsPerCore = 1
+ CPU.Cache.L1I = -1
+ CPU.Cache.L1D = -1
+ CPU.Cache.L2 = -1
+ CPU.Cache.L3 = -1
+ safe := true
+ if detectArmFlag != nil {
+ safe = !*detectArmFlag
+ }
+ addInfo(&CPU, safe)
+ if displayFeats != nil && *displayFeats {
+ fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ","))
+ // Exit with non-zero so tests will print value.
+ os.Exit(1)
+ }
+ if disableFlag != nil {
+ s := strings.Split(*disableFlag, ",")
+ for _, feat := range s {
+ feat := ParseFeature(strings.TrimSpace(feat))
+ if feat != UNKNOWN {
+ CPU.featureSet.unset(feat)
+ }
+ }
+ }
+}
+
+// DetectARM will detect ARM64 features.
+// This is NOT done automatically since it can potentially crash
+// if the OS does not handle the command.
+// If in the future this can be done safely this function may not
+// do anything.
+func DetectARM() {
+ addInfo(&CPU, false)
+}
+
+var detectArmFlag *bool
+var displayFeats *bool
+var disableFlag *string
+
+// Flags will register command line flags that affect CPU detection.
+// This must be called *before* flag.Parse AND
+// Detect must be called after the flags have been parsed.
+// Note that this means that any detection used in init() functions
+// will not contain these flags.
+func Flags() {
+ disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list")
+ displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits")
+ detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash")
+}
+
+// Supports returns whether the CPU supports all of the requested features.
+func (c CPUInfo) Supports(ids ...FeatureID) bool {
+ for _, id := range ids {
+ if !c.featureSet.inSet(id) {
+ return false
+ }
+ }
+ return true
+}
+
+// Has allows for checking a single feature.
+// Should be inlined by the compiler.
+func (c *CPUInfo) Has(id FeatureID) bool {
+ return c.featureSet.inSet(id)
+}
+
+// AnyOf returns whether the CPU supports one or more of the requested features.
+func (c CPUInfo) AnyOf(ids ...FeatureID) bool {
+ for _, id := range ids {
+ if c.featureSet.inSet(id) {
+ return true
+ }
+ }
+ return false
+}
+
+// Features contains several features combined for a fast check using
+// CPUInfo.HasAll
+type Features *flagSet
+
+// CombineFeatures allows combining several features for a close to constant time lookup.
+func CombineFeatures(ids ...FeatureID) Features {
+ var v flagSet
+ for _, id := range ids {
+ v.set(id)
+ }
+ return &v
+}
+
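+// HasAll returns whether the CPU supports all of the features in the combined set.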
+func (c *CPUInfo) HasAll(f Features) bool {
+ return c.featureSet.hasSetP(f)
+}
+
+// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+var oneOfLevel = CombineFeatures(SYSEE, SYSCALL)
+var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2)
+var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
+var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
+var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
+
+// X64Level returns the microarchitecture level detected on the CPU.
+// If features are lacking, or the CPU is not in x64 mode, 0 is returned.
+// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+func (c CPUInfo) X64Level() int {
+ if !c.featureSet.hasOneOf(oneOfLevel) {
+ return 0
+ }
+ if c.featureSet.hasSetP(level4Features) {
+ return 4
+ }
+ if c.featureSet.hasSetP(level3Features) {
+ return 3
+ }
+ if c.featureSet.hasSetP(level2Features) {
+ return 2
+ }
+ if c.featureSet.hasSetP(level1Features) {
+ return 1
+ }
+ return 0
+}
+
+// Disable will disable one or several features.
+func (c *CPUInfo) Disable(ids ...FeatureID) bool {
+ for _, id := range ids {
+ c.featureSet.unset(id)
+ }
+ return true
+}
+
+// Enable will enable one or several features even if they were undetected.
+// This is of course not recommended for obvious reasons.
+func (c *CPUInfo) Enable(ids ...FeatureID) bool {
+ for _, id := range ids {
+ c.featureSet.set(id)
+ }
+ return true
+}
+
+// IsVendor returns true if the CPU vendor matches v.
+func (c CPUInfo) IsVendor(v Vendor) bool {
+ return c.VendorID == v
+}
+
+// FeatureSet returns all available features as strings.
+func (c CPUInfo) FeatureSet() []string {
+ s := make([]string, 0, c.featureSet.nEnabled())
+ s = append(s, c.featureSet.Strings()...)
+ return s
+}
+
+// RTCounter returns the 64-bit time-stamp counter
+// Uses the RDTSCP instruction. The value 0 is returned
+// if the CPU does not support the instruction.
+func (c CPUInfo) RTCounter() uint64 {
+ if !c.Supports(RDTSCP) {
+ return 0
+ }
+ a, _, _, d := rdtscpAsm()
+ return uint64(a) | (uint64(d) << 32)
+}
+
+// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
+// This variable is OS dependent, but on Linux contains information
+// about the current cpu/core the code is running on.
+// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
+func (c CPUInfo) Ia32TscAux() uint32 {
+ if !c.Supports(RDTSCP) {
+ return 0
+ }
+ _, _, ecx, _ := rdtscpAsm()
+ return ecx
+}
+
+// LogicalCPU will return the Logical CPU the code is currently executing on.
+// This is likely to change when the OS re-schedules the running thread
+// to another CPU.
+// If the current core cannot be detected, -1 will be returned.
+func (c CPUInfo) LogicalCPU() int {
+ if c.maxFunc < 1 {
+ return -1
+ }
+ _, ebx, _, _ := cpuid(1)
+ return int(ebx >> 24)
+}
+
+// frequencies tries to compute the clock speed of the CPU. If leaf 15 is
+// supported, use it, otherwise parse the brand string. Yes, really.
+func (c *CPUInfo) frequencies() {
+ c.Hz, c.BoostFreq = 0, 0
+ mfi := maxFunctionID()
+ if mfi >= 0x15 {
+ eax, ebx, ecx, _ := cpuid(0x15)
+ if eax != 0 && ebx != 0 && ecx != 0 {
+ c.Hz = (int64(ecx) * int64(ebx)) / int64(eax)
+ }
+ }
+ if mfi >= 0x16 {
+ a, b, _, _ := cpuid(0x16)
+ // Base...
+ if a&0xffff > 0 {
+ c.Hz = int64(a&0xffff) * 1_000_000
+ }
+ // Boost...
+ if b&0xffff > 0 {
+ c.BoostFreq = int64(b&0xffff) * 1_000_000
+ }
+ }
+ if c.Hz > 0 {
+ return
+ }
+
+ // computeHz determines the official rated speed of a CPU from its brand
+ // string. This insanity is *actually the official documented way to do
+ // this according to Intel*, prior to leaf 0x15 existing. The official
+ // documentation only shows this working for exactly `x.xx` or `xxxx`
+ // cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other
+ // sizes.
+ model := c.BrandName
+ hz := strings.LastIndex(model, "Hz")
+ if hz < 3 {
+ return
+ }
+ var multiplier int64
+ switch model[hz-1] {
+ case 'M':
+ multiplier = 1000 * 1000
+ case 'G':
+ multiplier = 1000 * 1000 * 1000
+ case 'T':
+ multiplier = 1000 * 1000 * 1000 * 1000
+ }
+ if multiplier == 0 {
+ return
+ }
+ freq := int64(0)
+ divisor := int64(0)
+ decimalShift := int64(1)
+ var i int
+ for i = hz - 2; i >= 0 && model[i] != ' '; i-- {
+ if model[i] >= '0' && model[i] <= '9' {
+ freq += int64(model[i]-'0') * decimalShift
+ decimalShift *= 10
+ } else if model[i] == '.' {
+ if divisor != 0 {
+ return
+ }
+ divisor = decimalShift
+ } else {
+ return
+ }
+ }
+ // we didn't find a space
+ if i < 0 {
+ return
+ }
+ if divisor != 0 {
+ c.Hz = (freq * multiplier) / divisor
+ return
+ }
+ c.Hz = freq * multiplier
+}
+
+// VM will return true if the CPU ID indicates we are in
+// a virtual machine.
+func (c CPUInfo) VM() bool {
+	return c.featureSet.inSet(HYPERVISOR)
+}
+
+// flags contains detected cpu features and characteristics
+type flags uint64
+
+// log2(bits_in_uint64)
+const flagBitsLog2 = 6
+const flagBits = 1 << flagBitsLog2
+const flagMask = flagBits - 1
+
+// flagSet contains detected cpu features and characteristics in an array of flags
+type flagSet [(lastID + flagMask) / flagBits]flags
+
+func (s *flagSet) inSet(feat FeatureID) bool {
+ return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
+}
+
+func (s *flagSet) set(feat FeatureID) {
+ s[feat>>flagBitsLog2] |= 1 << (feat & flagMask)
+}
+
+// setIf will set a feature if boolean is true.
+func (s *flagSet) setIf(cond bool, features ...FeatureID) {
+ if cond {
+ for _, offset := range features {
+ s[offset>>flagBitsLog2] |= 1 << (offset & flagMask)
+ }
+ }
+}
+
+func (s *flagSet) unset(offset FeatureID) {
+ bit := flags(1 << (offset & flagMask))
+ s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit
+}
+
+// or with another flagset.
+func (s *flagSet) or(other flagSet) {
+ for i, v := range other[:] {
+ s[i] |= v
+ }
+}
+
+// hasSet returns whether all features are present.
+func (s *flagSet) hasSet(other flagSet) bool {
+ for i, v := range other[:] {
+ if s[i]&v != v {
+ return false
+ }
+ }
+ return true
+}
+
+// hasSetP returns whether all features in the given set are present.
+func (s *flagSet) hasSetP(other *flagSet) bool {
+ for i, v := range other[:] {
+ if s[i]&v != v {
+ return false
+ }
+ }
+ return true
+}
+
+// hasOneOf returns whether one or more features are present.
+func (s *flagSet) hasOneOf(other *flagSet) bool {
+ for i, v := range other[:] {
+ if s[i]&v != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// nEnabled will return the number of enabled flags.
+func (s *flagSet) nEnabled() (n int) {
+ for _, v := range s[:] {
+ n += bits.OnesCount64(uint64(v))
+ }
+ return n
+}
+
+func flagSetWith(feat ...FeatureID) flagSet {
+ var res flagSet
+ for _, f := range feat {
+ res.set(f)
+ }
+ return res
+}
+
+// ParseFeature will parse the string and return the ID of the matching feature.
+// Will return UNKNOWN if not found.
+func ParseFeature(s string) FeatureID {
+ s = strings.ToUpper(s)
+ for i := firstID; i < lastID; i++ {
+ if i.String() == s {
+ return i
+ }
+ }
+ return UNKNOWN
+}
+
+// Strings returns an array of the detected features for the flagSet.
+func (s flagSet) Strings() []string {
+ if len(s) == 0 {
+ return []string{""}
+ }
+ r := make([]string, 0)
+ for i := firstID; i < lastID; i++ {
+ if s.inSet(i) {
+ r = append(r, i.String())
+ }
+ }
+ return r
+}
+
+func maxExtendedFunction() uint32 {
+ eax, _, _, _ := cpuid(0x80000000)
+ return eax
+}
+
+func maxFunctionID() uint32 {
+ a, _, _, _ := cpuid(0)
+ return a
+}
+
+func brandName() string {
+ if maxExtendedFunction() >= 0x80000004 {
+ v := make([]uint32, 0, 48)
+ for i := uint32(0); i < 3; i++ {
+ a, b, c, d := cpuid(0x80000002 + i)
+ v = append(v, a, b, c, d)
+ }
+ return strings.Trim(string(valAsString(v...)), " ")
+ }
+ return "unknown"
+}
+
+func threadsPerCore() int {
+ mfi := maxFunctionID()
+ vend, _ := vendorID()
+
+ if mfi < 0x4 || (vend != Intel && vend != AMD) {
+ return 1
+ }
+
+ if mfi < 0xb {
+ if vend != Intel {
+ return 1
+ }
+ _, b, _, d := cpuid(1)
+ if (d & (1 << 28)) != 0 {
+ // v will contain logical core count
+ v := (b >> 16) & 255
+ if v > 1 {
+ a4, _, _, _ := cpuid(4)
+ // physical cores
+ v2 := (a4 >> 26) + 1
+ if v2 > 0 {
+ return int(v) / int(v2)
+ }
+ }
+ }
+ return 1
+ }
+ _, b, _, _ := cpuidex(0xb, 0)
+ if b&0xffff == 0 {
+ if vend == AMD {
+ // Workaround for AMD returning 0, assume 2 if >= Zen 2
+ // It will be more correct than not.
+ fam, _, _ := familyModel()
+ _, _, _, d := cpuid(1)
+ if (d&(1<<28)) != 0 && fam >= 23 {
+ return 2
+ }
+ }
+ return 1
+ }
+ return int(b & 0xffff)
+}
+
+func logicalCores() int {
+ mfi := maxFunctionID()
+ v, _ := vendorID()
+ switch v {
+ case Intel:
+ // Use this on old Intel processors
+ if mfi < 0xb {
+ if mfi < 1 {
+ return 0
+ }
+ // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
+ // that can be assigned to logical processors in a physical package.
+ // The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
+ _, ebx, _, _ := cpuid(1)
+ logical := (ebx >> 16) & 0xff
+ return int(logical)
+ }
+ _, b, _, _ := cpuidex(0xb, 1)
+ return int(b & 0xffff)
+ case AMD, Hygon:
+ _, b, _, _ := cpuid(1)
+ return int((b >> 16) & 0xff)
+ default:
+ return 0
+ }
+}
+
+func familyModel() (family, model, stepping int) {
+ if maxFunctionID() < 0x1 {
+ return 0, 0, 0
+ }
+ eax, _, _, _ := cpuid(1)
+ // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0].
+ family = int((eax >> 8) & 0xf)
+ extFam := family == 0x6 // Intel is 0x6, needs extended model.
+ if family == 0xf {
+ // Add ExtFamily
+ family += int((eax >> 20) & 0xff)
+ extFam = true
+ }
+ // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0].
+ model = int((eax >> 4) & 0xf)
+ if extFam {
+ // Add ExtModel
+ model += int((eax >> 12) & 0xf0)
+ }
+ stepping = int(eax & 0xf)
+ return family, model, stepping
+}
+
+func physicalCores() int {
+ v, _ := vendorID()
+ switch v {
+ case Intel:
+ return logicalCores() / threadsPerCore()
+ case AMD, Hygon:
+ lc := logicalCores()
+ tpc := threadsPerCore()
+ if lc > 0 && tpc > 0 {
+ return lc / tpc
+ }
+
+ // The following is inaccurate on AMD EPYC 7742 64-Core Processor
+ if maxExtendedFunction() >= 0x80000008 {
+ _, _, c, _ := cpuid(0x80000008)
+ if c&0xff > 0 {
+ return int(c&0xff) + 1
+ }
+ }
+ }
+ return 0
+}
+
+// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+var vendorMapping = map[string]Vendor{
+ "AMDisbetter!": AMD,
+ "AuthenticAMD": AMD,
+ "CentaurHauls": VIA,
+ "GenuineIntel": Intel,
+ "TransmetaCPU": Transmeta,
+ "GenuineTMx86": Transmeta,
+ "Geode by NSC": NSC,
+ "VIA VIA VIA ": VIA,
+ "KVMKVMKVMKVM": KVM,
+ "Microsoft Hv": MSVM,
+ "VMwareVMware": VMware,
+ "XenVMMXenVMM": XenHVM,
+ "bhyve bhyve ": Bhyve,
+ "HygonGenuine": Hygon,
+ "Vortex86 SoC": SiS,
+ "SiS SiS SiS ": SiS,
+ "RiseRiseRise": SiS,
+ "Genuine RDC": RDC,
+}
+
+func vendorID() (Vendor, string) {
+ _, b, c, d := cpuid(0)
+ v := string(valAsString(b, d, c))
+ vend, ok := vendorMapping[v]
+ if !ok {
+ return VendorUnknown, v
+ }
+ return vend, v
+}
+
+func cacheLine() int {
+ if maxFunctionID() < 0x1 {
+ return 0
+ }
+
+ _, ebx, _, _ := cpuid(1)
+	cache := (ebx & 0xff00) >> 5 // clflush line size in bytes
+ if cache == 0 && maxExtendedFunction() >= 0x80000006 {
+ _, _, ecx, _ := cpuid(0x80000006)
+ cache = ecx & 0xff // cacheline size
+ }
+ // TODO: Read from Cache and TLB Information
+ return int(cache)
+}
+
+func (c *CPUInfo) cacheSize() {
+ c.Cache.L1D = -1
+ c.Cache.L1I = -1
+ c.Cache.L2 = -1
+ c.Cache.L3 = -1
+ vendor, _ := vendorID()
+ switch vendor {
+ case Intel:
+ if maxFunctionID() < 4 {
+ return
+ }
+ c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0
+ for i := uint32(0); ; i++ {
+ eax, ebx, ecx, _ := cpuidex(4, i)
+ cacheType := eax & 15
+ if cacheType == 0 {
+ break
+ }
+ cacheLevel := (eax >> 5) & 7
+ coherency := int(ebx&0xfff) + 1
+ partitions := int((ebx>>12)&0x3ff) + 1
+ associativity := int((ebx>>22)&0x3ff) + 1
+ sets := int(ecx) + 1
+ size := associativity * partitions * coherency * sets
+ switch cacheLevel {
+ case 1:
+ if cacheType == 1 {
+ // 1 = Data Cache
+ c.Cache.L1D = size
+ } else if cacheType == 2 {
+ // 2 = Instruction Cache
+ c.Cache.L1I = size
+ } else {
+					// Unified cache: record the data-cache size if it is still unknown.
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+ if c.Cache.L1I < 0 {
+ c.Cache.L1I = size
+ }
+ }
+ case 2:
+ c.Cache.L2 = size
+ case 3:
+ c.Cache.L3 = size
+ }
+ }
+ case AMD, Hygon:
+ // Untested.
+ if maxExtendedFunction() < 0x80000005 {
+ return
+ }
+ _, _, ecx, edx := cpuid(0x80000005)
+ c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
+ c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
+
+ if maxExtendedFunction() < 0x80000006 {
+ return
+ }
+ _, _, ecx, _ = cpuid(0x80000006)
+ c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
+
+ // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
+ if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) {
+ return
+ }
+
+ // Xen Hypervisor is buggy and returns the same entry no matter ECX value.
+ // Hack: When we encounter the same entry 100 times we break.
+ nSame := 0
+ var last uint32
+ for i := uint32(0); i < math.MaxUint32; i++ {
+ eax, ebx, ecx, _ := cpuidex(0x8000001D, i)
+
+ level := (eax >> 5) & 7
+ cacheNumSets := ecx + 1
+ cacheLineSize := 1 + (ebx & 2047)
+ cachePhysPartitions := 1 + ((ebx >> 12) & 511)
+ cacheNumWays := 1 + ((ebx >> 22) & 511)
+
+ typ := eax & 15
+ size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays)
+ if typ == 0 {
+ return
+ }
+
+ // Check for the same value repeated.
+ comb := eax ^ ebx ^ ecx
+ if comb == last {
+ nSame++
+ if nSame == 100 {
+ return
+ }
+ }
+ last = comb
+
+ switch level {
+ case 1:
+ switch typ {
+ case 1:
+ // Data cache
+ c.Cache.L1D = size
+ case 2:
+ // Inst cache
+ c.Cache.L1I = size
+ default:
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+ if c.Cache.L1I < 0 {
+ c.Cache.L1I = size
+ }
+ }
+ case 2:
+ c.Cache.L2 = size
+ case 3:
+ c.Cache.L3 = size
+ }
+ }
+ }
+}
+
+type SGXEPCSection struct {
+ BaseAddress uint64
+ EPCSize uint64
+}
+
+type SGXSupport struct {
+ Available bool
+ LaunchControl bool
+ SGX1Supported bool
+ SGX2Supported bool
+ MaxEnclaveSizeNot64 int64
+ MaxEnclaveSize64 int64
+ EPCSections []SGXEPCSection
+}
+
+func hasSGX(available, lc bool) (rval SGXSupport) {
+ rval.Available = available
+
+ if !available {
+ return
+ }
+
+ rval.LaunchControl = lc
+
+ a, _, _, d := cpuidex(0x12, 0)
+ rval.SGX1Supported = a&0x01 != 0
+ rval.SGX2Supported = a&0x02 != 0
+ rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2
+ rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
+ rval.EPCSections = make([]SGXEPCSection, 0)
+
+ for subleaf := uint32(2); subleaf < 2+8; subleaf++ {
+ eax, ebx, ecx, edx := cpuidex(0x12, subleaf)
+ leafType := eax & 0xf
+
+ if leafType == 0 {
+ // Invalid subleaf, stop iterating
+ break
+ } else if leafType == 1 {
+ // EPC Section subleaf
+ baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32)
+ size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32)
+
+ section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size}
+ rval.EPCSections = append(rval.EPCSections, section)
+ }
+ }
+
+ return
+}
+
+type AMDMemEncryptionSupport struct {
+ Available bool
+ CBitPossition uint32
+ NumVMPL uint32
+ PhysAddrReduction uint32
+ NumEntryptedGuests uint32
+ MinSevNoEsAsid uint32
+}
+
+func hasAMDMemEncryption(available bool) (rval AMDMemEncryptionSupport) {
+ rval.Available = available
+ if !available {
+ return
+ }
+
+ _, b, c, d := cpuidex(0x8000001f, 0)
+
+ rval.CBitPossition = b & 0x3f
+ rval.PhysAddrReduction = (b >> 6) & 0x3F
+ rval.NumVMPL = (b >> 12) & 0xf
+ rval.NumEntryptedGuests = c
+ rval.MinSevNoEsAsid = d
+
+ return
+}
+
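+// support queries CPUID and returns the detected x86 feature flags.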
+func support() flagSet {
+ var fs flagSet
+ mfi := maxFunctionID()
+ vend, _ := vendorID()
+ if mfi < 0x1 {
+ return fs
+ }
+ family, model, _ := familyModel()
+
+ _, _, c, d := cpuid(1)
+ fs.setIf((d&(1<<0)) != 0, X87)
+ fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
+ fs.setIf((d&(1<<11)) != 0, SYSEE)
+ fs.setIf((d&(1<<15)) != 0, CMOV)
+ fs.setIf((d&(1<<23)) != 0, MMX)
+ fs.setIf((d&(1<<24)) != 0, FXSR)
+ fs.setIf((d&(1<<25)) != 0, FXSROPT)
+ fs.setIf((d&(1<<25)) != 0, SSE)
+ fs.setIf((d&(1<<26)) != 0, SSE2)
+ fs.setIf((c&1) != 0, SSE3)
+ fs.setIf((c&(1<<5)) != 0, VMX)
+ fs.setIf((c&(1<<9)) != 0, SSSE3)
+ fs.setIf((c&(1<<19)) != 0, SSE4)
+ fs.setIf((c&(1<<20)) != 0, SSE42)
+ fs.setIf((c&(1<<25)) != 0, AESNI)
+ fs.setIf((c&(1<<1)) != 0, CLMUL)
+ fs.setIf(c&(1<<22) != 0, MOVBE)
+ fs.setIf(c&(1<<23) != 0, POPCNT)
+ fs.setIf(c&(1<<30) != 0, RDRAND)
+
+ // This bit has been reserved by Intel & AMD for use by hypervisors,
+ // and indicates the presence of a hypervisor.
+ fs.setIf(c&(1<<31) != 0, HYPERVISOR)
+ fs.setIf(c&(1<<29) != 0, F16C)
+ fs.setIf(c&(1<<13) != 0, CX16)
+
+ if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 {
+ fs.setIf(threadsPerCore() > 1, HTT)
+ }
+ if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 {
+ fs.setIf(threadsPerCore() > 1, HTT)
+ }
+ fs.setIf(c&1<<26 != 0, XSAVE)
+ fs.setIf(c&1<<27 != 0, OSXSAVE)
+	// Check XGETBV/XSAVE (26), OSXSAVE (27) and AVX (28) bits
+ const avxCheck = 1<<26 | 1<<27 | 1<<28
+ if c&avxCheck == avxCheck {
+ // Check for OS support
+ eax, _ := xgetbv(0)
+ if (eax & 0x6) == 0x6 {
+ fs.set(AVX)
+ switch vend {
+ case Intel:
+ // Older than Haswell.
+ fs.setIf(family == 6 && model < 60, AVXSLOW)
+ case AMD:
+ // Older than Zen 2
+ fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW)
+ }
+ }
+ }
+ // FMA3 can be used with SSE registers, so no OS support is strictly needed.
+ // fma3 and OSXSAVE needed.
+ const fma3Check = 1<<12 | 1<<27
+ fs.setIf(c&fma3Check == fma3Check, FMA3)
+
+ // Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
+ if mfi >= 7 {
+ _, ebx, ecx, edx := cpuidex(7, 0)
+ if fs.inSet(AVX) && (ebx&0x00000020) != 0 {
+ fs.set(AVX2)
+ }
+ // CPUID.(EAX=7, ECX=0).EBX
+ if (ebx & 0x00000008) != 0 {
+ fs.set(BMI1)
+ fs.setIf((ebx&0x00000100) != 0, BMI2)
+ }
+ fs.setIf(ebx&(1<<2) != 0, SGX)
+ fs.setIf(ebx&(1<<4) != 0, HLE)
+ fs.setIf(ebx&(1<<9) != 0, ERMS)
+ fs.setIf(ebx&(1<<11) != 0, RTM)
+ fs.setIf(ebx&(1<<14) != 0, MPX)
+ fs.setIf(ebx&(1<<18) != 0, RDSEED)
+ fs.setIf(ebx&(1<<19) != 0, ADX)
+ fs.setIf(ebx&(1<<29) != 0, SHA)
+
+ // CPUID.(EAX=7, ECX=0).ECX
+ fs.setIf(ecx&(1<<5) != 0, WAITPKG)
+ fs.setIf(ecx&(1<<7) != 0, CETSS)
+ fs.setIf(ecx&(1<<8) != 0, GFNI)
+ fs.setIf(ecx&(1<<9) != 0, VAES)
+ fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ)
+ fs.setIf(ecx&(1<<13) != 0, TME)
+ fs.setIf(ecx&(1<<25) != 0, CLDEMOTE)
+ fs.setIf(ecx&(1<<23) != 0, KEYLOCKER)
+ fs.setIf(ecx&(1<<27) != 0, MOVDIRI)
+ fs.setIf(ecx&(1<<28) != 0, MOVDIR64B)
+ fs.setIf(ecx&(1<<29) != 0, ENQCMD)
+ fs.setIf(ecx&(1<<30) != 0, SGXLC)
+
+ // CPUID.(EAX=7, ECX=0).EDX
+ fs.setIf(edx&(1<<4) != 0, FSRM)
+ fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL)
+ fs.setIf(edx&(1<<10) != 0, MD_CLEAR)
+ fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
+ fs.setIf(edx&(1<<14) != 0, SERIALIZE)
+ fs.setIf(edx&(1<<15) != 0, HYBRID_CPU)
+ fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
+ fs.setIf(edx&(1<<18) != 0, PCONFIG)
+ fs.setIf(edx&(1<<20) != 0, CETIBT)
+ fs.setIf(edx&(1<<26) != 0, IBPB)
+ fs.setIf(edx&(1<<27) != 0, STIBP)
+ fs.setIf(edx&(1<<28) != 0, FLUSH_L1D)
+ fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP)
+ fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
+ fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)
+
+ // CPUID.(EAX=7, ECX=1).EAX
+ eax1, _, _, edx1 := cpuidex(7, 1)
+ fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
+ fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
+ fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
+ fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT)
+ fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT)
+ fs.setIf(eax1&(1<<22) != 0, HRESET)
+ fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
+ fs.setIf(eax1&(1<<26) != 0, LAM)
+
+ // CPUID.(EAX=7, ECX=1).EDX
+ fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
+ fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
+ fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16)
+ fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
+ fs.setIf(edx1&(1<<19) != 0, AVX10)
+ fs.setIf(edx1&(1<<21) != 0, APX_F)
+
+ // Only detect AVX-512 features if XGETBV is supported
+ if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
+ // Check for OS support
+ eax, _ := xgetbv(0)
+
+ // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
+ // ZMM16-ZMM31 state are enabled by OS)
+ /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS).
+ hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3
+ if runtime.GOOS == "darwin" {
+ hasAVX512 = fs.inSet(AVX) && darwinHasAVX512()
+ }
+ if hasAVX512 {
+ fs.setIf(ebx&(1<<16) != 0, AVX512F)
+ fs.setIf(ebx&(1<<17) != 0, AVX512DQ)
+ fs.setIf(ebx&(1<<21) != 0, AVX512IFMA)
+ fs.setIf(ebx&(1<<26) != 0, AVX512PF)
+ fs.setIf(ebx&(1<<27) != 0, AVX512ER)
+ fs.setIf(ebx&(1<<28) != 0, AVX512CD)
+ fs.setIf(ebx&(1<<30) != 0, AVX512BW)
+ fs.setIf(ebx&(1<<31) != 0, AVX512VL)
+ // ecx
+ fs.setIf(ecx&(1<<1) != 0, AVX512VBMI)
+ fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2)
+ fs.setIf(ecx&(1<<11) != 0, AVX512VNNI)
+ fs.setIf(ecx&(1<<12) != 0, AVX512BITALG)
+ fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ)
+ // edx
+ fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT)
+ fs.setIf(edx&(1<<22) != 0, AMXBF16)
+ fs.setIf(edx&(1<<23) != 0, AVX512FP16)
+ fs.setIf(edx&(1<<24) != 0, AMXTILE)
+ fs.setIf(edx&(1<<25) != 0, AMXINT8)
+ // eax1 = CPUID.(EAX=7, ECX=1).EAX
+ fs.setIf(eax1&(1<<5) != 0, AVX512BF16)
+ fs.setIf(eax1&(1<<19) != 0, WRMSRNS)
+ fs.setIf(eax1&(1<<21) != 0, AMXFP16)
+ fs.setIf(eax1&(1<<27) != 0, MSRLIST)
+ }
+ }
+
+ // CPUID.(EAX=7, ECX=2)
+ _, _, _, edx = cpuidex(7, 2)
+ fs.setIf(edx&(1<<0) != 0, PSFD)
+ fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL)
+ fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL)
+ fs.setIf(edx&(1<<4) != 0, BHI_CTRL)
+ fs.setIf(edx&(1<<5) != 0, MCDT_NO)
+
+ // Add keylocker features.
+ if fs.inSet(KEYLOCKER) && mfi >= 0x19 {
+ _, ebx, _, _ := cpuidex(0x19, 0)
+ fs.setIf(ebx&5 == 5, KEYLOCKERW) // Bit 0 and 2 (1+4)
+ }
+
+ // Add AVX10 features.
+ if fs.inSet(AVX10) && mfi >= 0x24 {
+ _, ebx, _, _ := cpuidex(0x24, 0)
+ fs.setIf(ebx&(1<<16) != 0, AVX10_128)
+ fs.setIf(ebx&(1<<17) != 0, AVX10_256)
+ fs.setIf(ebx&(1<<18) != 0, AVX10_512)
+ }
+ }
+
+ // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1)
+ // EAX
+ // Bit 00: XSAVEOPT is available.
+ // Bit 01: Supports XSAVEC and the compacted form of XRSTOR if set.
+ // Bit 02: Supports XGETBV with ECX = 1 if set.
+ // Bit 03: Supports XSAVES/XRSTORS and IA32_XSS if set.
+ // Bits 31 - 04: Reserved.
+ // EBX
+	//   Bits 31 - 00: The size in bytes of the XSAVE area containing all states enabled by XCR0 | IA32_XSS.
+ // ECX
+ // Bits 31 - 00: Reports the supported bits of the lower 32 bits of the IA32_XSS MSR. IA32_XSS[n] can be set to 1 only if ECX[n] is 1.
+	// EDX
+ // Bits 07 - 00: Used for XCR0. Bit 08: PT state. Bit 09: Used for XCR0. Bits 12 - 10: Reserved. Bit 13: HWP state. Bits 31 - 14: Reserved.
+ if mfi >= 0xd {
+ if fs.inSet(XSAVE) {
+ eax, _, _, _ := cpuidex(0xd, 1)
+ fs.setIf(eax&(1<<0) != 0, XSAVEOPT)
+ fs.setIf(eax&(1<<1) != 0, XSAVEC)
+ fs.setIf(eax&(1<<2) != 0, XGETBV1)
+ fs.setIf(eax&(1<<3) != 0, XSAVES)
+ }
+ }
+ if maxExtendedFunction() >= 0x80000001 {
+ _, _, c, d := cpuid(0x80000001)
+ if (c & (1 << 5)) != 0 {
+ fs.set(LZCNT)
+ fs.set(POPCNT)
+ }
+ // ECX
+ fs.setIf((c&(1<<0)) != 0, LAHF)
+ fs.setIf((c&(1<<2)) != 0, SVM)
+ fs.setIf((c&(1<<6)) != 0, SSE4A)
+ fs.setIf((c&(1<<10)) != 0, IBS)
+ fs.setIf((c&(1<<22)) != 0, TOPEXT)
+
+ // EDX
+ fs.setIf(d&(1<<11) != 0, SYSCALL)
+ fs.setIf(d&(1<<20) != 0, NX)
+ fs.setIf(d&(1<<22) != 0, MMXEXT)
+ fs.setIf(d&(1<<23) != 0, MMX)
+ fs.setIf(d&(1<<24) != 0, FXSR)
+ fs.setIf(d&(1<<25) != 0, FXSROPT)
+ fs.setIf(d&(1<<27) != 0, RDTSCP)
+ fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT)
+ fs.setIf(d&(1<<31) != 0, AMD3DNOW)
+
+ /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
+ * used unless the OS has AVX support. */
+ if fs.inSet(AVX) {
+ fs.setIf((c&(1<<11)) != 0, XOP)
+ fs.setIf((c&(1<<16)) != 0, FMA4)
+ }
+
+ }
+ if maxExtendedFunction() >= 0x80000007 {
+ _, b, _, d := cpuid(0x80000007)
+ fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW)
+ fs.setIf((b&(1<<1)) != 0, SUCCOR)
+ fs.setIf((b&(1<<2)) != 0, HWA)
+ fs.setIf((d&(1<<9)) != 0, CPBOOST)
+ }
+
+ if maxExtendedFunction() >= 0x80000008 {
+ _, b, _, _ := cpuid(0x80000008)
+ fs.setIf(b&(1<<28) != 0, PSFD)
+ fs.setIf(b&(1<<27) != 0, CPPC)
+ fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD)
+ fs.setIf(b&(1<<23) != 0, PPIN)
+ fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED)
+ fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS)
+ fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP)
+ fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED)
+ fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON)
+ fs.setIf(b&(1<<15) != 0, STIBP)
+ fs.setIf(b&(1<<14) != 0, IBRS)
+ fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
+ fs.setIf(b&(1<<12) != 0, IBPB)
+ fs.setIf((b&(1<<9)) != 0, WBNOINVD)
+ fs.setIf((b&(1<<8)) != 0, MCOMMIT)
+ fs.setIf((b&(1<<4)) != 0, RDPRU)
+ fs.setIf((b&(1<<3)) != 0, INVLPGB)
+ fs.setIf((b&(1<<1)) != 0, MSRIRC)
+ fs.setIf((b&(1<<0)) != 0, CLZERO)
+ }
+
+ if fs.inSet(SVM) && maxExtendedFunction() >= 0x8000000A {
+ _, _, _, edx := cpuid(0x8000000A)
+ fs.setIf((edx>>0)&1 == 1, SVMNP)
+ fs.setIf((edx>>1)&1 == 1, LBRVIRT)
+ fs.setIf((edx>>2)&1 == 1, SVML)
+ fs.setIf((edx>>3)&1 == 1, NRIPS)
+ fs.setIf((edx>>4)&1 == 1, TSCRATEMSR)
+ fs.setIf((edx>>5)&1 == 1, VMCBCLEAN)
+ fs.setIf((edx>>6)&1 == 1, SVMFBASID)
+ fs.setIf((edx>>7)&1 == 1, SVMDA)
+ fs.setIf((edx>>10)&1 == 1, SVMPF)
+ fs.setIf((edx>>12)&1 == 1, SVMPFT)
+ }
+
+ if maxExtendedFunction() >= 0x8000001a {
+ eax, _, _, _ := cpuid(0x8000001a)
+ fs.setIf((eax>>0)&1 == 1, FP128)
+ fs.setIf((eax>>1)&1 == 1, MOVU)
+ fs.setIf((eax>>2)&1 == 1, FP256)
+ }
+
+ if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) {
+ eax, _, _, _ := cpuid(0x8000001b)
+ fs.setIf((eax>>0)&1 == 1, IBSFFV)
+ fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM)
+ fs.setIf((eax>>2)&1 == 1, IBSOPSAM)
+ fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT)
+ fs.setIf((eax>>4)&1 == 1, IBSOPCNT)
+ fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT)
+ fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT)
+ fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK)
+ fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE)
+ fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX)
+ fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1.
+ fs.setIf((eax>>11)&1 == 1, IBS_ZEN4)
+ }
+
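+	// AMD Encrypted Memory Capabilities (CPUID leaf 0x8000001F): SME/SEV related feature bits reported in EAX.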
+ if maxExtendedFunction() >= 0x8000001f && vend == AMD {
+ a, _, _, _ := cpuid(0x8000001f)
+ fs.setIf((a>>0)&1 == 1, SME)
+ fs.setIf((a>>1)&1 == 1, SEV)
+ fs.setIf((a>>2)&1 == 1, MSR_PAGEFLUSH)
+ fs.setIf((a>>3)&1 == 1, SEV_ES)
+ fs.setIf((a>>4)&1 == 1, SEV_SNP)
+ fs.setIf((a>>5)&1 == 1, VMPL)
+ fs.setIf((a>>10)&1 == 1, SME_COHERENT)
+ fs.setIf((a>>11)&1 == 1, SEV_64BIT)
+ fs.setIf((a>>12)&1 == 1, SEV_RESTRICTED)
+ fs.setIf((a>>13)&1 == 1, SEV_ALTERNATIVE)
+ fs.setIf((a>>14)&1 == 1, SEV_DEBUGSWAP)
+ fs.setIf((a>>15)&1 == 1, IBS_PREVENTHOST)
+ fs.setIf((a>>16)&1 == 1, VTE)
+ fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
+ }
+
+ if maxExtendedFunction() >= 0x80000021 && vend == AMD {
+ a, _, _, _ := cpuid(0x80000021)
+ fs.setIf((a>>31)&1 == 1, SRSO_MSR_FIX)
+ fs.setIf((a>>30)&1 == 1, SRSO_USER_KERNEL_NO)
+ fs.setIf((a>>29)&1 == 1, SRSO_NO)
+ fs.setIf((a>>28)&1 == 1, IBPB_BRTYPE)
+ fs.setIf((a>>27)&1 == 1, SBPB)
+ }
+
+ if mfi >= 0x20 {
+ // Microsoft has decided to purposefully hide the information
+ // of the guest TEE when VMs are being created using Hyper-V.
+ //
+		// This leads us to check the Hyper-V CPUID leaf (0x4000000C),
+		// and then the `ebx` value it sets.
+ //
+		// For Intel TDX, `ebx` is set to `0xbe3`; the trailing `3` is the part
+		// we are mostly interested in, according to:
+ // https://github.com/torvalds/linux/blob/d2f51b3516dade79269ff45eae2a7668ae711b25/arch/x86/include/asm/hyperv-tlfs.h#L169-L174
+ _, ebx, _, _ := cpuid(0x4000000C)
+ fs.setIf(ebx == 0xbe3, TDX_GUEST)
+ }
+
+ if mfi >= 0x21 {
+ // Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21).
+ _, ebx, ecx, edx := cpuid(0x21)
+ identity := string(valAsString(ebx, edx, ecx))
+		fs.setIf(identity == "IntelTDX    ", TDX_GUEST)
+ }
+
+ return fs
+}
+
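+// supportAVX10 returns the AVX10 version reported in the low 8 bits of
+// CPUID.(EAX=0x24, ECX=0).EBX, or 0 when AVX10 is not available.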
+func (c *CPUInfo) supportAVX10() uint8 {
+ if c.maxFunc >= 0x24 && c.featureSet.inSet(AVX10) {
+ _, ebx, _, _ := cpuidex(0x24, 0)
+ return uint8(ebx)
+ }
+ return 0
+}
+
+func valAsString(values ...uint32) []byte {
+ r := make([]byte, 4*len(values))
+ for i, v := range values {
+ dst := r[i*4:]
+ dst[0] = byte(v & 0xff)
+ dst[1] = byte((v >> 8) & 0xff)
+ dst[2] = byte((v >> 16) & 0xff)
+ dst[3] = byte((v >> 24) & 0xff)
+ switch {
+ case dst[0] == 0:
+ return r[:i*4]
+ case dst[1] == 0:
+ return r[:i*4+1]
+ case dst[2] == 0:
+ return r[:i*4+2]
+ case dst[3] == 0:
+ return r[:i*4+3]
+ }
+ }
+ return r
+}
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s
new file mode 100644
index 000000000..8587c3a1f
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s
@@ -0,0 +1,47 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//+build 386,!gccgo,!noasm,!appengine
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+ XORL CX, CX
+ MOVL op+0(FP), AX
+ CPUID
+ MOVL AX, eax+4(FP)
+ MOVL BX, ebx+8(FP)
+ MOVL CX, ecx+12(FP)
+ MOVL DX, edx+16(FP)
+ RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+ MOVL op+0(FP), AX
+ MOVL op2+4(FP), CX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func xgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+ MOVL index+0(FP), CX
+ BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+ MOVL AX, eax+4(FP)
+ MOVL DX, edx+8(FP)
+ RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+ BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+ MOVL AX, eax+0(FP)
+ MOVL BX, ebx+4(FP)
+ MOVL CX, ecx+8(FP)
+ MOVL DX, edx+12(FP)
+ RET
+
+// func asmDarwinHasAVX512() bool
+TEXT ·asmDarwinHasAVX512(SB), 7, $0
+ MOVL $0, eax+0(FP)
+ RET
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s
new file mode 100644
index 000000000..bc11f8942
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s
@@ -0,0 +1,72 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//+build amd64,!gccgo,!noasm,!appengine
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+ XORQ CX, CX
+ MOVL op+0(FP), AX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+ MOVL op+0(FP), AX
+ MOVL op2+4(FP), CX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func asmXgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+ MOVL index+0(FP), CX
+ BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+ MOVL AX, eax+8(FP)
+ MOVL DX, edx+12(FP)
+ RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+ BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+ MOVL AX, eax+0(FP)
+ MOVL BX, ebx+4(FP)
+ MOVL CX, ecx+8(FP)
+ MOVL DX, edx+12(FP)
+ RET
+
+// From https://go-review.googlesource.com/c/sys/+/285572/
+// func asmDarwinHasAVX512() bool
+TEXT ·asmDarwinHasAVX512(SB), 7, $0-1
+ MOVB $0, ret+0(FP) // default to false
+
+#ifdef GOOS_darwin // return if not darwin
+#ifdef GOARCH_amd64 // return if not amd64
+// These values from:
+// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
+#define commpage64_base_address 0x00007fffffe00000
+#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010)
+#define commpage64_version (commpage64_base_address+0x01E)
+#define hasAVX512F 0x0000004000000000
+ MOVQ $commpage64_version, BX
+ MOVW (BX), AX
+ CMPW AX, $13 // versions < 13 do not support AVX512
+ JL no_avx512
+ MOVQ $commpage64_cpu_capabilities64, BX
+ MOVQ (BX), AX
+ MOVQ $hasAVX512F, CX
+ ANDQ CX, AX
+ JZ no_avx512
+ MOVB $1, ret+0(FP)
+
+no_avx512:
+#endif
+#endif
+ RET
+
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
new file mode 100644
index 000000000..b31d6aec4
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
@@ -0,0 +1,26 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//+build arm64,!gccgo,!noasm,!appengine
+
+// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt
+
+// func getMidr
+TEXT ·getMidr(SB), 7, $0
+ WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */
+ MOVD R0, midr+0(FP)
+ RET
+
+// func getProcFeatures
+TEXT ·getProcFeatures(SB), 7, $0
+ WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */
+ MOVD R0, procFeatures+0(FP)
+ RET
+
+// func getInstAttributes
+TEXT ·getInstAttributes(SB), 7, $0
+ WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */
+ WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */
+ MOVD R0, instAttrReg0+0(FP)
+ MOVD R1, instAttrReg1+8(FP)
+ RET
+
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
new file mode 100644
index 000000000..9a53504a0
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
@@ -0,0 +1,247 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build arm64 && !gccgo && !noasm && !appengine
+// +build arm64,!gccgo,!noasm,!appengine
+
+package cpuid
+
+import "runtime"
+
+func getMidr() (midr uint64)
+func getProcFeatures() (procFeatures uint64)
+func getInstAttributes() (instAttrReg0, instAttrReg1 uint64)
+
+func initCPU() {
+ cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+ cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+ xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
+ rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
+}
+
+func addInfo(c *CPUInfo, safe bool) {
+ // Seems to be safe to assume on ARM64
+ c.CacheLine = 64
+ detectOS(c)
+
+	// Direct register reads are disabled on ARM64 by default, since they may crash if the access is not intercepted by the OS.
+ if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" {
+ return
+ }
+ midr := getMidr()
+
+ // MIDR_EL1 - Main ID Register
+ // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1
+ // x--------------------------------------------------x
+ // | Name | bits | visible |
+ // |--------------------------------------------------|
+ // | Implementer | [31-24] | y |
+ // |--------------------------------------------------|
+ // | Variant | [23-20] | y |
+ // |--------------------------------------------------|
+ // | Architecture | [19-16] | y |
+ // |--------------------------------------------------|
+ // | PartNum | [15-4] | y |
+ // |--------------------------------------------------|
+ // | Revision | [3-0] | y |
+ // x--------------------------------------------------x
+
+ switch (midr >> 24) & 0xff {
+ case 0xC0:
+ c.VendorString = "Ampere Computing"
+ c.VendorID = Ampere
+ case 0x41:
+ c.VendorString = "Arm Limited"
+ c.VendorID = ARM
+ case 0x42:
+ c.VendorString = "Broadcom Corporation"
+ c.VendorID = Broadcom
+ case 0x43:
+ c.VendorString = "Cavium Inc"
+ c.VendorID = Cavium
+ case 0x44:
+ c.VendorString = "Digital Equipment Corporation"
+ c.VendorID = DEC
+ case 0x46:
+ c.VendorString = "Fujitsu Ltd"
+ c.VendorID = Fujitsu
+ case 0x49:
+ c.VendorString = "Infineon Technologies AG"
+ c.VendorID = Infineon
+ case 0x4D:
+ c.VendorString = "Motorola or Freescale Semiconductor Inc"
+ c.VendorID = Motorola
+ case 0x4E:
+ c.VendorString = "NVIDIA Corporation"
+ c.VendorID = NVIDIA
+ case 0x50:
+ c.VendorString = "Applied Micro Circuits Corporation"
+ c.VendorID = AMCC
+ case 0x51:
+ c.VendorString = "Qualcomm Inc"
+ c.VendorID = Qualcomm
+ case 0x56:
+ c.VendorString = "Marvell International Ltd"
+ c.VendorID = Marvell
+ case 0x69:
+ c.VendorString = "Intel Corporation"
+ c.VendorID = Intel
+ }
+
+ // Lower 4 bits: Architecture
+ // Architecture Meaning
+ // 0b0001 Armv4.
+ // 0b0010 Armv4T.
+ // 0b0011 Armv5 (obsolete).
+ // 0b0100 Armv5T.
+ // 0b0101 Armv5TE.
+ // 0b0110 Armv5TEJ.
+ // 0b0111 Armv6.
+ // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'.
+ // Upper 4 bit: Variant
+ // An IMPLEMENTATION DEFINED variant number.
+ // Typically, this field is used to distinguish between different product variants, or major revisions of a product.
+ c.Family = int(midr>>16) & 0xff
+
+ // PartNum, bits [15:4]
+ // An IMPLEMENTATION DEFINED primary part number for the device.
+ // On processors implemented by Arm, if the top four bits of the primary
+ // part number are 0x0 or 0x7, the variant and architecture are encoded differently.
+ // Revision, bits [3:0]
+ // An IMPLEMENTATION DEFINED revision number for the device.
+ c.Model = int(midr) & 0xffff
+
+ procFeatures := getProcFeatures()
+
+ // ID_AA64PFR0_EL1 - Processor Feature Register 0
+ // x--------------------------------------------------x
+ // | Name | bits | visible |
+ // |--------------------------------------------------|
+ // | DIT | [51-48] | y |
+ // |--------------------------------------------------|
+ // | SVE | [35-32] | y |
+ // |--------------------------------------------------|
+ // | GIC | [27-24] | n |
+ // |--------------------------------------------------|
+ // | AdvSIMD | [23-20] | y |
+ // |--------------------------------------------------|
+ // | FP | [19-16] | y |
+ // |--------------------------------------------------|
+ // | EL3 | [15-12] | n |
+ // |--------------------------------------------------|
+ // | EL2 | [11-8] | n |
+ // |--------------------------------------------------|
+ // | EL1 | [7-4] | n |
+ // |--------------------------------------------------|
+ // | EL0 | [3-0] | n |
+ // x--------------------------------------------------x
+
+ var f flagSet
+ // if procFeatures&(0xf<<48) != 0 {
+ // fmt.Println("DIT")
+ // }
+ f.setIf(procFeatures&(0xf<<32) != 0, SVE)
+ if procFeatures&(0xf<<20) != 15<<20 {
+ f.set(ASIMD)
+ // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1
+ // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic.
+ f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP)
+ }
+ f.setIf(procFeatures&(0xf<<16) != 0, FP)
+
+ instAttrReg0, instAttrReg1 := getInstAttributes()
+
+ // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
+ //
+ // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
+ // x--------------------------------------------------x
+ // | Name | bits | visible |
+ // |--------------------------------------------------|
+ // | TS | [55-52] | y |
+ // |--------------------------------------------------|
+ // | FHM | [51-48] | y |
+ // |--------------------------------------------------|
+ // | DP | [47-44] | y |
+ // |--------------------------------------------------|
+ // | SM4 | [43-40] | y |
+ // |--------------------------------------------------|
+ // | SM3 | [39-36] | y |
+ // |--------------------------------------------------|
+ // | SHA3 | [35-32] | y |
+ // |--------------------------------------------------|
+ // | RDM | [31-28] | y |
+ // |--------------------------------------------------|
+ // | ATOMICS | [23-20] | y |
+ // |--------------------------------------------------|
+ // | CRC32 | [19-16] | y |
+ // |--------------------------------------------------|
+ // | SHA2 | [15-12] | y |
+ // |--------------------------------------------------|
+ // | SHA1 | [11-8] | y |
+ // |--------------------------------------------------|
+ // | AES | [7-4] | y |
+ // x--------------------------------------------------x
+
+ // if instAttrReg0&(0xf<<52) != 0 {
+ // fmt.Println("TS")
+ // }
+ // if instAttrReg0&(0xf<<48) != 0 {
+ // fmt.Println("FHM")
+ // }
+ f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP)
+ f.setIf(instAttrReg0&(0xf<<40) != 0, SM4)
+ f.setIf(instAttrReg0&(0xf<<36) != 0, SM3)
+ f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3)
+ f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM)
+ f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS)
+ f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32)
+ f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2)
+ // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
+ // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented.
+ f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512)
+ f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1)
+ f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM)
+ // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
+ // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities.
+ f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL)
+
+ // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1
+ //
+ // ID_AA64ISAR1_EL1 - Instruction set attribute register 1
+ // x--------------------------------------------------x
+ // | Name | bits | visible |
+ // |--------------------------------------------------|
+ // | GPI | [31-28] | y |
+ // |--------------------------------------------------|
+ // | GPA | [27-24] | y |
+ // |--------------------------------------------------|
+ // | LRCPC | [23-20] | y |
+ // |--------------------------------------------------|
+ // | FCMA | [19-16] | y |
+ // |--------------------------------------------------|
+ // | JSCVT | [15-12] | y |
+ // |--------------------------------------------------|
+ // | API | [11-8] | y |
+ // |--------------------------------------------------|
+ // | APA | [7-4] | y |
+ // |--------------------------------------------------|
+ // | DPB | [3-0] | y |
+ // x--------------------------------------------------x
+
+ // if instAttrReg1&(0xf<<28) != 0 {
+ // fmt.Println("GPI")
+ // }
+ f.setIf(instAttrReg1&(0xf<<28) != 24, GPA)
+ f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC)
+ f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA)
+ f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT)
+ // if instAttrReg1&(0xf<<8) != 0 {
+ // fmt.Println("API")
+ // }
+ // if instAttrReg1&(0xf<<4) != 0 {
+ // fmt.Println("APA")
+ // }
+ f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP)
+
+ // Store
+ c.featureSet.or(f)
+}
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
new file mode 100644
index 000000000..9636c2bc1
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine
+// +build !amd64,!386,!arm64 gccgo noasm appengine
+
+package cpuid
+
+func initCPU() {
+ cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+ cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+ xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
+ rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
+}
+
+func addInfo(info *CPUInfo, safe bool) {}
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
new file mode 100644
index 000000000..799b400c2
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine)
+// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine
+
+package cpuid
+
+func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+func asmXgetbv(index uint32) (eax, edx uint32)
+func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+func asmDarwinHasAVX512() bool
+
+func initCPU() {
+ cpuid = asmCpuid
+ cpuidex = asmCpuidex
+ xgetbv = asmXgetbv
+ rdtscpAsm = asmRdtscpAsm
+ darwinHasAVX512 = asmDarwinHasAVX512
+}
+
+func addInfo(c *CPUInfo, safe bool) {
+ c.maxFunc = maxFunctionID()
+ c.maxExFunc = maxExtendedFunction()
+ c.BrandName = brandName()
+ c.CacheLine = cacheLine()
+ c.Family, c.Model, c.Stepping = familyModel()
+ c.featureSet = support()
+ c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC))
+ c.AMDMemEncryption = hasAMDMemEncryption(c.featureSet.inSet(SME) || c.featureSet.inSet(SEV))
+ c.ThreadsPerCore = threadsPerCore()
+ c.LogicalCores = logicalCores()
+ c.PhysicalCores = physicalCores()
+ c.VendorID, c.VendorString = vendorID()
+ c.AVX10Level = c.supportAVX10()
+ c.cacheSize()
+ c.frequencies()
+}
diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
new file mode 100644
index 000000000..3a2560310
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
@@ -0,0 +1,285 @@
+// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT.
+
+package cpuid
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ADX-1]
+ _ = x[AESNI-2]
+ _ = x[AMD3DNOW-3]
+ _ = x[AMD3DNOWEXT-4]
+ _ = x[AMXBF16-5]
+ _ = x[AMXFP16-6]
+ _ = x[AMXINT8-7]
+ _ = x[AMXTILE-8]
+ _ = x[APX_F-9]
+ _ = x[AVX-10]
+ _ = x[AVX10-11]
+ _ = x[AVX10_128-12]
+ _ = x[AVX10_256-13]
+ _ = x[AVX10_512-14]
+ _ = x[AVX2-15]
+ _ = x[AVX512BF16-16]
+ _ = x[AVX512BITALG-17]
+ _ = x[AVX512BW-18]
+ _ = x[AVX512CD-19]
+ _ = x[AVX512DQ-20]
+ _ = x[AVX512ER-21]
+ _ = x[AVX512F-22]
+ _ = x[AVX512FP16-23]
+ _ = x[AVX512IFMA-24]
+ _ = x[AVX512PF-25]
+ _ = x[AVX512VBMI-26]
+ _ = x[AVX512VBMI2-27]
+ _ = x[AVX512VL-28]
+ _ = x[AVX512VNNI-29]
+ _ = x[AVX512VP2INTERSECT-30]
+ _ = x[AVX512VPOPCNTDQ-31]
+ _ = x[AVXIFMA-32]
+ _ = x[AVXNECONVERT-33]
+ _ = x[AVXSLOW-34]
+ _ = x[AVXVNNI-35]
+ _ = x[AVXVNNIINT8-36]
+ _ = x[AVXVNNIINT16-37]
+ _ = x[BHI_CTRL-38]
+ _ = x[BMI1-39]
+ _ = x[BMI2-40]
+ _ = x[CETIBT-41]
+ _ = x[CETSS-42]
+ _ = x[CLDEMOTE-43]
+ _ = x[CLMUL-44]
+ _ = x[CLZERO-45]
+ _ = x[CMOV-46]
+ _ = x[CMPCCXADD-47]
+ _ = x[CMPSB_SCADBS_SHORT-48]
+ _ = x[CMPXCHG8-49]
+ _ = x[CPBOOST-50]
+ _ = x[CPPC-51]
+ _ = x[CX16-52]
+ _ = x[EFER_LMSLE_UNS-53]
+ _ = x[ENQCMD-54]
+ _ = x[ERMS-55]
+ _ = x[F16C-56]
+ _ = x[FLUSH_L1D-57]
+ _ = x[FMA3-58]
+ _ = x[FMA4-59]
+ _ = x[FP128-60]
+ _ = x[FP256-61]
+ _ = x[FSRM-62]
+ _ = x[FXSR-63]
+ _ = x[FXSROPT-64]
+ _ = x[GFNI-65]
+ _ = x[HLE-66]
+ _ = x[HRESET-67]
+ _ = x[HTT-68]
+ _ = x[HWA-69]
+ _ = x[HYBRID_CPU-70]
+ _ = x[HYPERVISOR-71]
+ _ = x[IA32_ARCH_CAP-72]
+ _ = x[IA32_CORE_CAP-73]
+ _ = x[IBPB-74]
+ _ = x[IBPB_BRTYPE-75]
+ _ = x[IBRS-76]
+ _ = x[IBRS_PREFERRED-77]
+ _ = x[IBRS_PROVIDES_SMP-78]
+ _ = x[IBS-79]
+ _ = x[IBSBRNTRGT-80]
+ _ = x[IBSFETCHSAM-81]
+ _ = x[IBSFFV-82]
+ _ = x[IBSOPCNT-83]
+ _ = x[IBSOPCNTEXT-84]
+ _ = x[IBSOPSAM-85]
+ _ = x[IBSRDWROPCNT-86]
+ _ = x[IBSRIPINVALIDCHK-87]
+ _ = x[IBS_FETCH_CTLX-88]
+ _ = x[IBS_OPDATA4-89]
+ _ = x[IBS_OPFUSE-90]
+ _ = x[IBS_PREVENTHOST-91]
+ _ = x[IBS_ZEN4-92]
+ _ = x[IDPRED_CTRL-93]
+ _ = x[INT_WBINVD-94]
+ _ = x[INVLPGB-95]
+ _ = x[KEYLOCKER-96]
+ _ = x[KEYLOCKERW-97]
+ _ = x[LAHF-98]
+ _ = x[LAM-99]
+ _ = x[LBRVIRT-100]
+ _ = x[LZCNT-101]
+ _ = x[MCAOVERFLOW-102]
+ _ = x[MCDT_NO-103]
+ _ = x[MCOMMIT-104]
+ _ = x[MD_CLEAR-105]
+ _ = x[MMX-106]
+ _ = x[MMXEXT-107]
+ _ = x[MOVBE-108]
+ _ = x[MOVDIR64B-109]
+ _ = x[MOVDIRI-110]
+ _ = x[MOVSB_ZL-111]
+ _ = x[MOVU-112]
+ _ = x[MPX-113]
+ _ = x[MSRIRC-114]
+ _ = x[MSRLIST-115]
+ _ = x[MSR_PAGEFLUSH-116]
+ _ = x[NRIPS-117]
+ _ = x[NX-118]
+ _ = x[OSXSAVE-119]
+ _ = x[PCONFIG-120]
+ _ = x[POPCNT-121]
+ _ = x[PPIN-122]
+ _ = x[PREFETCHI-123]
+ _ = x[PSFD-124]
+ _ = x[RDPRU-125]
+ _ = x[RDRAND-126]
+ _ = x[RDSEED-127]
+ _ = x[RDTSCP-128]
+ _ = x[RRSBA_CTRL-129]
+ _ = x[RTM-130]
+ _ = x[RTM_ALWAYS_ABORT-131]
+ _ = x[SBPB-132]
+ _ = x[SERIALIZE-133]
+ _ = x[SEV-134]
+ _ = x[SEV_64BIT-135]
+ _ = x[SEV_ALTERNATIVE-136]
+ _ = x[SEV_DEBUGSWAP-137]
+ _ = x[SEV_ES-138]
+ _ = x[SEV_RESTRICTED-139]
+ _ = x[SEV_SNP-140]
+ _ = x[SGX-141]
+ _ = x[SGXLC-142]
+ _ = x[SHA-143]
+ _ = x[SME-144]
+ _ = x[SME_COHERENT-145]
+ _ = x[SPEC_CTRL_SSBD-146]
+ _ = x[SRBDS_CTRL-147]
+ _ = x[SRSO_MSR_FIX-148]
+ _ = x[SRSO_NO-149]
+ _ = x[SRSO_USER_KERNEL_NO-150]
+ _ = x[SSE-151]
+ _ = x[SSE2-152]
+ _ = x[SSE3-153]
+ _ = x[SSE4-154]
+ _ = x[SSE42-155]
+ _ = x[SSE4A-156]
+ _ = x[SSSE3-157]
+ _ = x[STIBP-158]
+ _ = x[STIBP_ALWAYSON-159]
+ _ = x[STOSB_SHORT-160]
+ _ = x[SUCCOR-161]
+ _ = x[SVM-162]
+ _ = x[SVMDA-163]
+ _ = x[SVMFBASID-164]
+ _ = x[SVML-165]
+ _ = x[SVMNP-166]
+ _ = x[SVMPF-167]
+ _ = x[SVMPFT-168]
+ _ = x[SYSCALL-169]
+ _ = x[SYSEE-170]
+ _ = x[TBM-171]
+ _ = x[TDX_GUEST-172]
+ _ = x[TLB_FLUSH_NESTED-173]
+ _ = x[TME-174]
+ _ = x[TOPEXT-175]
+ _ = x[TSCRATEMSR-176]
+ _ = x[TSXLDTRK-177]
+ _ = x[VAES-178]
+ _ = x[VMCBCLEAN-179]
+ _ = x[VMPL-180]
+ _ = x[VMSA_REGPROT-181]
+ _ = x[VMX-182]
+ _ = x[VPCLMULQDQ-183]
+ _ = x[VTE-184]
+ _ = x[WAITPKG-185]
+ _ = x[WBNOINVD-186]
+ _ = x[WRMSRNS-187]
+ _ = x[X87-188]
+ _ = x[XGETBV1-189]
+ _ = x[XOP-190]
+ _ = x[XSAVE-191]
+ _ = x[XSAVEC-192]
+ _ = x[XSAVEOPT-193]
+ _ = x[XSAVES-194]
+ _ = x[AESARM-195]
+ _ = x[ARMCPUID-196]
+ _ = x[ASIMD-197]
+ _ = x[ASIMDDP-198]
+ _ = x[ASIMDHP-199]
+ _ = x[ASIMDRDM-200]
+ _ = x[ATOMICS-201]
+ _ = x[CRC32-202]
+ _ = x[DCPOP-203]
+ _ = x[EVTSTRM-204]
+ _ = x[FCMA-205]
+ _ = x[FP-206]
+ _ = x[FPHP-207]
+ _ = x[GPA-208]
+ _ = x[JSCVT-209]
+ _ = x[LRCPC-210]
+ _ = x[PMULL-211]
+ _ = x[SHA1-212]
+ _ = x[SHA2-213]
+ _ = x[SHA3-214]
+ _ = x[SHA512-215]
+ _ = x[SM3-216]
+ _ = x[SM4-217]
+ _ = x[SVE-218]
+ _ = x[lastID-219]
+ _ = x[firstID-0]
+}
+
+const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
+
+var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586}
+
+func (i FeatureID) String() string {
+ if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
+ return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[VendorUnknown-0]
+ _ = x[Intel-1]
+ _ = x[AMD-2]
+ _ = x[VIA-3]
+ _ = x[Transmeta-4]
+ _ = x[NSC-5]
+ _ = x[KVM-6]
+ _ = x[MSVM-7]
+ _ = x[VMware-8]
+ _ = x[XenHVM-9]
+ _ = x[Bhyve-10]
+ _ = x[Hygon-11]
+ _ = x[SiS-12]
+ _ = x[RDC-13]
+ _ = x[Ampere-14]
+ _ = x[ARM-15]
+ _ = x[Broadcom-16]
+ _ = x[Cavium-17]
+ _ = x[DEC-18]
+ _ = x[Fujitsu-19]
+ _ = x[Infineon-20]
+ _ = x[Motorola-21]
+ _ = x[NVIDIA-22]
+ _ = x[AMCC-23]
+ _ = x[Qualcomm-24]
+ _ = x[Marvell-25]
+ _ = x[lastVendor-26]
+}
+
+const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor"
+
+var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155}
+
+func (i Vendor) String() string {
+ if i < 0 || i >= Vendor(len(_Vendor_index)-1) {
+ return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]]
+}
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
new file mode 100644
index 000000000..84b1acd21
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
@@ -0,0 +1,121 @@
+// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
+
+package cpuid
+
+import (
+ "runtime"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+func detectOS(c *CPUInfo) bool {
+ if runtime.GOOS != "ios" {
+ tryToFillCPUInfoFomSysctl(c)
+ }
+ // There are no hw.optional sysctl values for the below features on Mac OS 11.0
+ // to detect their supported state dynamically. Assume the CPU features that
+ // Apple Silicon M1 supports to be available as a minimal set of features
+ // to all Go programs running on darwin/arm64.
+ // TODO: Add more if we know them.
+ c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2)
+
+ return true
+}
+
+func sysctlGetBool(name string) bool {
+ value, err := unix.SysctlUint32(name)
+ if err != nil {
+ return false
+ }
+ return value != 0
+}
+
+func sysctlGetString(name string) string {
+ value, err := unix.Sysctl(name)
+ if err != nil {
+ return ""
+ }
+ return value
+}
+
+func sysctlGetInt(unknown int, names ...string) int {
+ for _, name := range names {
+ value, err := unix.SysctlUint32(name)
+ if err != nil {
+ continue
+ }
+ if value != 0 {
+ return int(value)
+ }
+ }
+ return unknown
+}
+
+func sysctlGetInt64(unknown int, names ...string) int {
+ for _, name := range names {
+ value64, err := unix.SysctlUint64(name)
+ if err != nil {
+ continue
+ }
+ if int(value64) != unknown {
+ return int(value64)
+ }
+ }
+ return unknown
+}
+
+func setFeature(c *CPUInfo, name string, feature FeatureID) {
+ c.featureSet.setIf(sysctlGetBool(name), feature)
+}
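+
+// tryToFillCPUInfoFomSysctl populates CPU information from macOS sysctl values where they are available.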
+func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
+ c.BrandName = sysctlGetString("machdep.cpu.brand_string")
+
+ if len(c.BrandName) != 0 {
+ c.VendorString = strings.Fields(c.BrandName)[0]
+ }
+
+ c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu")
+ c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") /
+ sysctlGetInt(1, "hw.physicalcpu")
+ c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count")
+ c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily")
+ c.Model = sysctlGetInt(0, "machdep.cpu.model")
+ c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize")
+ c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize")
+ c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize")
+ c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize")
+ c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize")
+
+ // from https://developer.arm.com/downloads/-/exploration-tools/feature-names-for-a-profile
+ setFeature(c, "hw.optional.arm.FEAT_AES", AESARM)
+ setFeature(c, "hw.optional.AdvSIMD", ASIMD)
+ setFeature(c, "hw.optional.arm.FEAT_DotProd", ASIMDDP)
+ setFeature(c, "hw.optional.arm.FEAT_RDM", ASIMDRDM)
+ setFeature(c, "hw.optional.FEAT_CRC32", CRC32)
+ setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP)
+ // setFeature(c, "", EVTSTRM)
+ setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA)
+ setFeature(c, "hw.optional.arm.FEAT_FP", FP)
+ setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP)
+ setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA)
+ setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT)
+ setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC)
+ setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL)
+ setFeature(c, "hw.optional.arm.FEAT_SHA1", SHA1)
+ setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2)
+ setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3)
+ setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512)
+ // setFeature(c, "", SM3)
+ // setFeature(c, "", SM4)
+ setFeature(c, "hw.optional.arm.FEAT_SVE", SVE)
+
+ // from empirical observation
+ setFeature(c, "hw.optional.AdvSIMD_HPFPCvt", ASIMDHP)
+ setFeature(c, "hw.optional.armv8_1_atomics", ATOMICS)
+ setFeature(c, "hw.optional.floatingpoint", FP)
+ setFeature(c, "hw.optional.armv8_2_sha3", SHA3)
+ setFeature(c, "hw.optional.armv8_2_sha512", SHA512)
+ setFeature(c, "hw.optional.armv8_3_compnum", FCMA)
+ setFeature(c, "hw.optional.armv8_crc32", CRC32)
+}
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
new file mode 100644
index 000000000..ee278b9e4
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file located
+// here https://github.com/golang/sys/blob/master/LICENSE
+
+package cpuid
+
+import (
+ "encoding/binary"
+ "io/ioutil"
+ "runtime"
+)
+
+// HWCAP bits.
+const (
+ hwcap_FP = 1 << 0
+ hwcap_ASIMD = 1 << 1
+ hwcap_EVTSTRM = 1 << 2
+ hwcap_AES = 1 << 3
+ hwcap_PMULL = 1 << 4
+ hwcap_SHA1 = 1 << 5
+ hwcap_SHA2 = 1 << 6
+ hwcap_CRC32 = 1 << 7
+ hwcap_ATOMICS = 1 << 8
+ hwcap_FPHP = 1 << 9
+ hwcap_ASIMDHP = 1 << 10
+ hwcap_CPUID = 1 << 11
+ hwcap_ASIMDRDM = 1 << 12
+ hwcap_JSCVT = 1 << 13
+ hwcap_FCMA = 1 << 14
+ hwcap_LRCPC = 1 << 15
+ hwcap_DCPOP = 1 << 16
+ hwcap_SHA3 = 1 << 17
+ hwcap_SM3 = 1 << 18
+ hwcap_SM4 = 1 << 19
+ hwcap_ASIMDDP = 1 << 20
+ hwcap_SHA512 = 1 << 21
+ hwcap_SVE = 1 << 22
+ hwcap_ASIMDFHM = 1 << 23
+)
+
+func detectOS(c *CPUInfo) bool {
+ // For now assuming no hyperthreading is reasonable.
+ c.LogicalCores = runtime.NumCPU()
+ c.PhysicalCores = c.LogicalCores
+ c.ThreadsPerCore = 1
+ if hwcap == 0 {
+ // We did not get values from the runtime.
+ // Try reading /proc/self/auxv
+
+ // From https://github.com/golang/sys
+ const (
+ _AT_HWCAP = 16
+ _AT_HWCAP2 = 26
+
+ uintSize = int(32 << (^uint(0) >> 63))
+ )
+
+ buf, err := ioutil.ReadFile("/proc/self/auxv")
+ if err != nil {
+ // e.g. on android /proc/self/auxv is not accessible, so silently
+ // ignore the error and leave Initialized = false. On some
+ // architectures (e.g. arm64) doinit() implements a fallback
+ // readout and will set Initialized = true again.
+ return false
+ }
+ bo := binary.LittleEndian
+ for len(buf) >= 2*(uintSize/8) {
+ var tag, val uint
+ switch uintSize {
+ case 32:
+ tag = uint(bo.Uint32(buf[0:]))
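+// valAsString converts the given registers to their little-endian byte
+// representation, truncated at the first zero byte.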
+ val = uint(bo.Uint32(buf[4:]))
+ buf = buf[8:]
+ case 64:
+ tag = uint(bo.Uint64(buf[0:]))
+ val = uint(bo.Uint64(buf[8:]))
+ buf = buf[16:]
+ }
+ switch tag {
+ case _AT_HWCAP:
+ hwcap = val
+ case _AT_HWCAP2:
+ // Not used
+ }
+ }
+ if hwcap == 0 {
+ return false
+ }
+ }
+
+ // HWCap was populated by the runtime from the auxiliary vector.
+ // Use HWCap information since reading aarch64 system registers
+ // is not supported in user space on older linux kernels.
+ c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM)
+ c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD)
+ c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM)
+ c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID)
+ c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32)
+ c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM)
+ c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA)
+ c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT)
+ c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC)
+ c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE)
+
+ // The Samsung S9+ kernel reports support for atomics, but not all cores
+ // actually support them, resulting in SIGILL. See issue #28431.
+ // TODO(elias.naur): Only disable the optimization on bad chipsets on android.
+ c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS)
+
+ return true
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
new file mode 100644
index 000000000..8733ba343
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build arm64 && !linux && !darwin
+// +build arm64,!linux,!darwin
+
+package cpuid
+
+import "runtime"
+
+func detectOS(c *CPUInfo) bool {
+ c.PhysicalCores = runtime.NumCPU()
+ // For now assuming 1 thread per core...
+ c.ThreadsPerCore = 1
+ c.LogicalCores = c.PhysicalCores
+ return false
+}
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
new file mode 100644
index 000000000..f8f201b5f
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build nounsafe
+// +build nounsafe
+
+package cpuid
+
+var hwcap uint
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
new file mode 100644
index 000000000..92af622eb
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build !nounsafe
+// +build !nounsafe
+
+package cpuid
+
+import _ "unsafe" // needed for go:linkname
+
+//go:linkname hwcap internal/cpu.HWCap
+var hwcap uint
diff --git a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh
new file mode 100644
index 000000000..471d986d2
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+go tool dist list | while IFS=/ read os arch; do
+ echo "Checking $os/$arch..."
+ echo " normal"
+ GOARCH=$arch GOOS=$os go build -o /dev/null .
+ echo " noasm"
+ GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null .
+ echo " appengine"
+ GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null .
+ echo " noasm,appengine"
+ GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null .
+done
diff --git a/vendor/github.com/libp2p/go-reuseport/LICENSE b/vendor/github.com/libp2p/go-reuseport/LICENSE
new file mode 100644
index 000000000..0d760cbb4
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2013 Conformal Systems LLC.
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-reuseport/README.md b/vendor/github.com/libp2p/go-reuseport/README.md
new file mode 100644
index 000000000..d511adebc
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/README.md
@@ -0,0 +1,48 @@
+# go-reuseport
+
+[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
+[![GoDoc](https://godoc.org/github.com/libp2p/go-reuseport?status.svg)](https://godoc.org/github.com/libp2p/go-reuseport)
+[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/)
+[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23libp2p)
+[![codecov](https://codecov.io/gh/libp2p/go-reuseport/branch/master/graph/badge.svg)](https://codecov.io/gh/libp2p/go-reuseport)
+[![Travis CI](https://travis-ci.org/libp2p/go-reuseport.svg?branch=master)](https://travis-ci.org/libp2p/go-reuseport)
+[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
+
+This package enables listening and dialing from _the same_ TCP or UDP port.
+This means that the following sockopts may be set:
+
+```
+SO_REUSEADDR
+SO_REUSEPORT
+```
+
+This is a simple package to help with address reuse. Address reuse is particularly
+important when attempting TCP NAT hole punching, which requires a process
+to both Listen and Dial on the same TCP port. This package provides some
+utilities for enabling this behaviour on various operating systems.
+
+## Examples
+
+
+```Go
+// listen on the same port. oh yeah.
+l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+l2, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+```
+
+```Go
+// dial from the same port. oh yeah.
+l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+l2, _ := reuse.Listen("tcp", "127.0.0.1:1235")
+c, _ := reuse.Dial("tcp", "127.0.0.1:1234", "127.0.0.1:1235")
+```
+
+**Note: you can't dial yourself, because TCP/IP stacks use 4-tuples to identify connections, and doing so would clash.**
+
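+A timeout-bounded dial is also available through `DialTimeout` (a minimal sketch; the
+addresses and timeout below are placeholders):
+
+```Go
+// dial with a timeout, still binding the local port with SO_REUSEADDR/SO_REUSEPORT.
+l, _ := reuse.Listen("tcp", "127.0.0.1:1235")
+c, err := reuse.DialTimeout("tcp", "127.0.0.1:1235", "127.0.0.1:1234", 5*time.Second)
+if err != nil {
+	// handle the dial error
+}
+defer c.Close()
+defer l.Close()
+```
+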
+## Tested
+
+Tested on `darwin`, `linux`, and `windows`.
+
+---
+
+The last gx published version of this module was: 0.2.2: Qme8kdM7thoCqLqd7GYCRqipoZJS64rhJo5MBcTpyWfsL9
diff --git a/vendor/github.com/libp2p/go-reuseport/addr.go b/vendor/github.com/libp2p/go-reuseport/addr.go
new file mode 100644
index 000000000..cfffc7c8c
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/addr.go
@@ -0,0 +1,20 @@
+package reuseport
+
+import (
+ "net"
+)
+
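+// ResolveAddr resolves the given address for the given network, dispatching
+// to the appropriate net.Resolve* helper.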
+func ResolveAddr(network, address string) (net.Addr, error) {
+ switch network {
+ default:
+ return nil, net.UnknownNetworkError(network)
+ case "ip", "ip4", "ip6":
+ return net.ResolveIPAddr(network, address)
+ case "tcp", "tcp4", "tcp6":
+ return net.ResolveTCPAddr(network, address)
+ case "udp", "udp4", "udp6":
+ return net.ResolveUDPAddr(network, address)
+ case "unix", "unixgram", "unixpacket":
+ return net.ResolveUnixAddr(network, address)
+ }
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/codecov.yml b/vendor/github.com/libp2p/go-reuseport/codecov.yml
new file mode 100644
index 000000000..5f88a9ea2
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/codecov.yml
@@ -0,0 +1,3 @@
+coverage:
+ range: "50...100"
+comment: off
diff --git a/vendor/github.com/libp2p/go-reuseport/control_plan9.go b/vendor/github.com/libp2p/go-reuseport/control_plan9.go
new file mode 100644
index 000000000..a8f7f3456
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_plan9.go
@@ -0,0 +1,9 @@
+package reuseport
+
+import (
+ "syscall"
+)
+
+func Control(network, address string, c syscall.RawConn) error {
+ return nil
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/control_unix.go b/vendor/github.com/libp2p/go-reuseport/control_unix.go
new file mode 100644
index 000000000..4197d1f74
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_unix.go
@@ -0,0 +1,23 @@
+//go:build !plan9 && !windows && !wasm
+
+package reuseport
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
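+// Control is a syscall.RawConn control function that enables SO_REUSEADDR and
+// SO_REUSEPORT on the socket, for use with net.ListenConfig and net.Dialer.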
+func Control(network, address string, c syscall.RawConn) (err error) {
+ controlErr := c.Control(func(fd uintptr) {
+ err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+ if err != nil {
+ return
+ }
+ err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+ })
+ if controlErr != nil {
+ err = controlErr
+ }
+ return
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/control_wasm.go b/vendor/github.com/libp2p/go-reuseport/control_wasm.go
new file mode 100644
index 000000000..8b22fade5
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_wasm.go
@@ -0,0 +1,11 @@
+//go:build wasm
+
+package reuseport
+
+import (
+ "syscall"
+)
+
+func Control(network, address string, c syscall.RawConn) error {
+ return nil
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/control_windows.go b/vendor/github.com/libp2p/go-reuseport/control_windows.go
new file mode 100644
index 000000000..c45e43f4b
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_windows.go
@@ -0,0 +1,17 @@
+package reuseport
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/windows"
+)
+
+func Control(network, address string, c syscall.RawConn) (err error) {
+ controlErr := c.Control(func(fd uintptr) {
+ err = windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_REUSEADDR, 1)
+ })
+ if controlErr != nil {
+ err = controlErr
+ }
+ return
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/interface.go b/vendor/github.com/libp2p/go-reuseport/interface.go
new file mode 100644
index 000000000..b864da8c5
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/interface.go
@@ -0,0 +1,72 @@
+// Package reuseport provides Listen and Dial functions that set socket
+// options in order to be able to reuse ports. You should only use this
+// package if you know what SO_REUSEADDR and SO_REUSEPORT are.
+//
+// For example:
+//
+// // listen on the same port. oh yeah.
+// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+// l2, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+//
+// // dial from the same port. oh yeah.
+// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+// l2, _ := reuse.Listen("tcp", "127.0.0.1:1235")
+// c, _ := reuse.Dial("tcp", "127.0.0.1:1234", "127.0.0.1:1235")
+//
+// Note: you can't dial yourself, because TCP/IP stacks use 4-tuples to identify
+// connections, and doing so would clash.
+package reuseport
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+)
+
+// Available returns whether or not SO_REUSEPORT or equivalent behaviour is
+// available in the OS.
+func Available() bool {
+ return true
+}
+
+var listenConfig = net.ListenConfig{
+ Control: Control,
+}
+
+// Listen listens at the given network and address. See net.Listen.
+// It returns a net.Listener created from a file descriptor for a socket
+// with the SO_REUSEPORT and SO_REUSEADDR options set.
+func Listen(network, address string) (net.Listener, error) {
+ return listenConfig.Listen(context.Background(), network, address)
+}
+
+// ListenPacket listens at the given network and address. See net.ListenPacket.
+// It returns a net.PacketConn created from a file descriptor for a socket
+// with the SO_REUSEPORT and SO_REUSEADDR options set.
+func ListenPacket(network, address string) (net.PacketConn, error) {
+ return listenConfig.ListenPacket(context.Background(), network, address)
+}
+
+// Dial dials the given network and address. See net.Dial.
+// It returns a net.Conn created from a file descriptor for a socket
+// with the SO_REUSEPORT and SO_REUSEADDR options set.
+func Dial(network, laddr, raddr string) (net.Conn, error) {
+ return DialTimeout(network, laddr, raddr, time.Duration(0))
+}
+
+// DialTimeout dials the given network and address, with the given timeout. See
+// net.DialTimeout. It returns a net.Conn created from a file descriptor for
+// a socket with the SO_REUSEPORT and SO_REUSEADDR options set.
+func DialTimeout(network, laddr, raddr string, timeout time.Duration) (net.Conn, error) {
+ nla, err := ResolveAddr(network, laddr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to resolve local addr: %w", err)
+ }
+ d := net.Dialer{
+ Control: Control,
+ LocalAddr: nla,
+ Timeout: timeout,
+ }
+ return d.Dial(network, raddr)
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/version.json b/vendor/github.com/libp2p/go-reuseport/version.json
new file mode 100644
index 000000000..a654d65ab
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/version.json
@@ -0,0 +1,3 @@
+{
+ "version": "v0.3.0"
+}
diff --git a/vendor/github.com/mdlayher/ethernet/.travis.yml b/vendor/github.com/mdlayher/ethernet/.travis.yml
new file mode 100644
index 000000000..cc215992a
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+ - 1.x
+os:
+ - linux
+before_install:
+ - go get golang.org/x/lint/golint
+ - go get honnef.co/go/tools/cmd/staticcheck
+ - go get -d ./...
+script:
+ - go build -tags=gofuzz ./...
+ - go vet ./...
+ - staticcheck ./...
+ - golint -set_exit_status ./...
+ - go test -v -race ./...
diff --git a/vendor/github.com/mdlayher/ethernet/LICENSE.md b/vendor/github.com/mdlayher/ethernet/LICENSE.md
new file mode 100644
index 000000000..75ed9de17
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/LICENSE.md
@@ -0,0 +1,10 @@
+MIT License
+===========
+
+Copyright (C) 2015 Matt Layher
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mdlayher/ethernet/README.md b/vendor/github.com/mdlayher/ethernet/README.md
new file mode 100644
index 000000000..ec6f4fe4b
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/README.md
@@ -0,0 +1,8 @@
+ethernet [![Build Status](https://travis-ci.org/mdlayher/ethernet.svg?branch=master)](https://travis-ci.org/mdlayher/ethernet) [![GoDoc](https://godoc.org/github.com/mdlayher/ethernet?status.svg)](https://godoc.org/github.com/mdlayher/ethernet) [![Go Report Card](https://goreportcard.com/badge/github.com/mdlayher/ethernet)](https://goreportcard.com/report/github.com/mdlayher/ethernet)
+========
+
+Package `ethernet` implements marshaling and unmarshaling of IEEE 802.3
+Ethernet II frames and IEEE 802.1Q VLAN tags. MIT Licensed.
+
+For more information about using Ethernet frames in Go, check out my blog
+post: [Network Protocol Breakdown: Ethernet and Go](https://medium.com/@mdlayher/network-protocol-breakdown-ethernet-and-go-de985d726cc1).
\ No newline at end of file
diff --git a/vendor/github.com/mdlayher/ethernet/ethernet.go b/vendor/github.com/mdlayher/ethernet/ethernet.go
new file mode 100644
index 000000000..a462413f3
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/ethernet.go
@@ -0,0 +1,297 @@
+// Package ethernet implements marshaling and unmarshaling of IEEE 802.3
+// Ethernet II frames and IEEE 802.1Q VLAN tags.
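+//
+// A minimal sketch of building and marshaling a frame (the addresses and
+// payload below are placeholders):
+//
+//	f := &ethernet.Frame{
+//		Destination: ethernet.Broadcast,
+//		Source:      net.HardwareAddr{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01},
+//		EtherType:   ethernet.EtherTypeIPv4,
+//		Payload:     []byte("hello world"),
+//	}
+//	b, err := f.MarshalBinary()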
+package ethernet
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "net"
+)
+
+//go:generate stringer -output=string.go -type=EtherType
+
+const (
+ // minPayload is the minimum payload size for an Ethernet frame, assuming
+ // that no 802.1Q VLAN tags are present.
+ minPayload = 46
+)
+
+// Broadcast is a special hardware address which indicates a Frame should
+// be sent to every device on a given LAN segment.
+var Broadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+// ErrInvalidFCS is returned when Frame.UnmarshalFCS detects an incorrect
+// Ethernet frame check sequence in a byte slice for a Frame.
+var ErrInvalidFCS = errors.New("invalid frame check sequence")
+
+// An EtherType is a value used to identify an upper layer protocol
+// encapsulated in a Frame.
+//
+// A list of IANA-assigned EtherType values may be found here:
+// http://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml.
+type EtherType uint16
+
+// Common EtherType values frequently used in a Frame.
+const (
+ EtherTypeIPv4 EtherType = 0x0800
+ EtherTypeARP EtherType = 0x0806
+ EtherTypeIPv6 EtherType = 0x86DD
+
+ // EtherTypeVLAN and EtherTypeServiceVLAN are used as 802.1Q Tag Protocol
+ // Identifiers (TPIDs).
+ EtherTypeVLAN EtherType = 0x8100
+ EtherTypeServiceVLAN EtherType = 0x88a8
+)
+
+// A Frame is an IEEE 802.3 Ethernet II frame. A Frame contains information
+// such as source and destination hardware addresses, zero or more optional
+// 802.1Q VLAN tags, an EtherType, and payload data.
+type Frame struct {
+ // Destination specifies the destination hardware address for this Frame.
+ //
+ // If this address is set to Broadcast, the Frame will be sent to every
+ // device on a given LAN segment.
+ Destination net.HardwareAddr
+
+ // Source specifies the source hardware address for this Frame.
+ //
+ // Typically, this is the hardware address of the network interface used to
+ // send this Frame.
+ Source net.HardwareAddr
+
+ // ServiceVLAN specifies an optional 802.1Q service VLAN tag, for use with
+ // 802.1ad double tagging, or "Q-in-Q". If ServiceVLAN is not nil, VLAN must
+ // not be nil as well.
+ //
+ // Most users should leave this field set to nil and use VLAN instead.
+ ServiceVLAN *VLAN
+
+ // VLAN specifies an optional 802.1Q customer VLAN tag, which may or may
+ // not be present in a Frame. It is important to note that the operating
+ // system may automatically strip VLAN tags before they can be parsed.
+ VLAN *VLAN
+
+ // EtherType is a value used to identify an upper layer protocol
+ // encapsulated in this Frame.
+ EtherType EtherType
+
+ // Payload is a variable length data payload encapsulated by this Frame.
+ Payload []byte
+}
+
+// MarshalBinary allocates a byte slice and marshals a Frame into binary form.
+func (f *Frame) MarshalBinary() ([]byte, error) {
+ b := make([]byte, f.length())
+ _, err := f.read(b)
+ return b, err
+}
+
+// MarshalFCS allocates a byte slice, marshals a Frame into binary form, and
+// finally calculates and places a 4-byte IEEE CRC32 frame check sequence at
+// the end of the slice.
+//
+// Most users should use MarshalBinary instead. MarshalFCS is provided as a
+// convenience for rare occasions when the operating system cannot
+// automatically generate a frame check sequence for an Ethernet frame.
+func (f *Frame) MarshalFCS() ([]byte, error) {
+ // Frame length with 4 extra bytes for frame check sequence
+ b := make([]byte, f.length()+4)
+ if _, err := f.read(b); err != nil {
+ return nil, err
+ }
+
+ // Compute IEEE CRC32 checksum of frame bytes and place it directly
+ // in the last four bytes of the slice
+ binary.BigEndian.PutUint32(b[len(b)-4:], crc32.ChecksumIEEE(b[0:len(b)-4]))
+ return b, nil
+}
+
+// read reads data from a Frame into b. read is used to marshal a Frame
+// into binary form, but does not allocate on its own.
+func (f *Frame) read(b []byte) (int, error) {
+ // S-VLAN must also have accompanying C-VLAN.
+ if f.ServiceVLAN != nil && f.VLAN == nil {
+ return 0, ErrInvalidVLAN
+ }
+
+ copy(b[0:6], f.Destination)
+ copy(b[6:12], f.Source)
+
+ // Marshal each non-nil VLAN tag into bytes, inserting the appropriate
+ // EtherType/TPID before each, so devices know that one or more VLANs
+ // are present.
+ vlans := []struct {
+ vlan *VLAN
+ tpid EtherType
+ }{
+ {vlan: f.ServiceVLAN, tpid: EtherTypeServiceVLAN},
+ {vlan: f.VLAN, tpid: EtherTypeVLAN},
+ }
+
+ n := 12
+ for _, vt := range vlans {
+ if vt.vlan == nil {
+ continue
+ }
+
+ // Add VLAN EtherType and VLAN bytes.
+ binary.BigEndian.PutUint16(b[n:n+2], uint16(vt.tpid))
+ if _, err := vt.vlan.read(b[n+2 : n+4]); err != nil {
+ return 0, err
+ }
+ n += 4
+ }
+
+ // Marshal actual EtherType after any VLANs, copy payload into
+ // output bytes.
+ binary.BigEndian.PutUint16(b[n:n+2], uint16(f.EtherType))
+ copy(b[n+2:], f.Payload)
+
+ return len(b), nil
+}
+
+// UnmarshalBinary unmarshals a byte slice into a Frame.
+func (f *Frame) UnmarshalBinary(b []byte) error {
+ // Verify that both hardware addresses and a single EtherType are present
+ if len(b) < 14 {
+ return io.ErrUnexpectedEOF
+ }
+
+ // Track offset in packet for reading data
+ n := 14
+
+ // Continue looping and parsing VLAN tags until no more VLAN EtherType
+ // values are detected
+ et := EtherType(binary.BigEndian.Uint16(b[n-2 : n]))
+ switch et {
+ case EtherTypeServiceVLAN, EtherTypeVLAN:
+ // VLAN type is hinted for further parsing. An index is returned which
+ // indicates how many bytes were consumed by VLAN tags.
+ nn, err := f.unmarshalVLANs(et, b[n:])
+ if err != nil {
+ return err
+ }
+
+ n += nn
+ default:
+ // No VLANs detected.
+ f.EtherType = et
+ }
+
+ // Allocate single byte slice to store destination and source hardware
+ // addresses, and payload
+ bb := make([]byte, 6+6+len(b[n:]))
+ copy(bb[0:6], b[0:6])
+ f.Destination = bb[0:6]
+ copy(bb[6:12], b[6:12])
+ f.Source = bb[6:12]
+
+ // There used to be a minimum payload length restriction here, but as
+ // long as two hardware addresses and an EtherType are present, it
+ // doesn't really matter what is contained in the payload. We will
+ // follow the "robustness principle".
+ copy(bb[12:], b[n:])
+ f.Payload = bb[12:]
+
+ return nil
+}
+
+// UnmarshalFCS computes the IEEE CRC32 frame check sequence of a Frame,
+// verifies it against the checksum present in the byte slice, and finally,
+// unmarshals a byte slice into a Frame.
+//
+// Most users should use UnmarshalBinary instead. UnmarshalFCS is provided as
+// a convenience for rare occasions when the operating system cannot
+// automatically verify a frame check sequence for an Ethernet frame.
+func (f *Frame) UnmarshalFCS(b []byte) error {
+ // Must contain enough data for FCS, to avoid panics
+ if len(b) < 4 {
+ return io.ErrUnexpectedEOF
+ }
+
+ // Verify checksum in slice versus newly computed checksum
+ want := binary.BigEndian.Uint32(b[len(b)-4:])
+ got := crc32.ChecksumIEEE(b[0 : len(b)-4])
+ if want != got {
+ return ErrInvalidFCS
+ }
+
+ return f.UnmarshalBinary(b[0 : len(b)-4])
+}
+
+// length calculates the number of bytes required to store a Frame.
+func (f *Frame) length() int {
+ // If payload is less than the required minimum length, we zero-pad up to
+ // the required minimum length
+ pl := len(f.Payload)
+ if pl < minPayload {
+ pl = minPayload
+ }
+
+ // Add additional length if VLAN tags are needed.
+ var vlanLen int
+ switch {
+ case f.ServiceVLAN != nil && f.VLAN != nil:
+ vlanLen = 8
+ case f.VLAN != nil:
+ vlanLen = 4
+ }
+
+ // 6 bytes: destination hardware address
+ // 6 bytes: source hardware address
+ // N bytes: VLAN tags (if present)
+ // 2 bytes: EtherType
+ // N bytes: payload length (may be padded)
+ return 6 + 6 + vlanLen + 2 + pl
+}
+
+// unmarshalVLANs unmarshals S/C-VLAN tags. It is assumed that tpid
+// is a valid S/C-VLAN TPID.
+func (f *Frame) unmarshalVLANs(tpid EtherType, b []byte) (int, error) {
+ // 4 or more bytes must remain for valid S/C-VLAN tag and EtherType.
+ if len(b) < 4 {
+ return 0, io.ErrUnexpectedEOF
+ }
+
+ // Track how many bytes are consumed by VLAN tags.
+ var n int
+
+ switch tpid {
+ case EtherTypeServiceVLAN:
+ vlan := new(VLAN)
+ if err := vlan.UnmarshalBinary(b[n : n+2]); err != nil {
+ return 0, err
+ }
+ f.ServiceVLAN = vlan
+
+ // Assume that a C-VLAN immediately trails an S-VLAN.
+ if EtherType(binary.BigEndian.Uint16(b[n+2:n+4])) != EtherTypeVLAN {
+ return 0, ErrInvalidVLAN
+ }
+
+ // 4 or more bytes must remain for valid C-VLAN tag and EtherType.
+ n += 4
+ if len(b[n:]) < 4 {
+ return 0, io.ErrUnexpectedEOF
+ }
+
+ // Continue to parse the C-VLAN.
+ fallthrough
+ case EtherTypeVLAN:
+ vlan := new(VLAN)
+ if err := vlan.UnmarshalBinary(b[n : n+2]); err != nil {
+ return 0, err
+ }
+
+ f.VLAN = vlan
+ f.EtherType = EtherType(binary.BigEndian.Uint16(b[n+2 : n+4]))
+ n += 4
+ default:
+ panic(fmt.Sprintf("unknown VLAN TPID: %04x", tpid))
+ }
+
+ return n, nil
+}
diff --git a/vendor/github.com/mdlayher/ethernet/fuzz.go b/vendor/github.com/mdlayher/ethernet/fuzz.go
new file mode 100644
index 000000000..2698b3bd9
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/fuzz.go
@@ -0,0 +1,25 @@
+//go:build gofuzz
+// +build gofuzz
+
+package ethernet
+
+func Fuzz(data []byte) int {
+ f := new(Frame)
+ if err := f.UnmarshalBinary(data); err != nil {
+ return 0
+ }
+
+ if _, err := f.MarshalBinary(); err != nil {
+ panic(err)
+ }
+
+ if err := f.UnmarshalFCS(data); err != nil {
+ return 0
+ }
+
+ if _, err := f.MarshalFCS(); err != nil {
+ panic(err)
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/mdlayher/ethernet/string.go b/vendor/github.com/mdlayher/ethernet/string.go
new file mode 100644
index 000000000..89a3e010a
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/string.go
@@ -0,0 +1,38 @@
+// Code generated by "stringer -output=string.go -type=EtherType"; DO NOT EDIT.
+
+package ethernet
+
+import "fmt"
+
+const (
+ _EtherType_name_0 = "EtherTypeIPv4"
+ _EtherType_name_1 = "EtherTypeARP"
+ _EtherType_name_2 = "EtherTypeVLAN"
+ _EtherType_name_3 = "EtherTypeIPv6"
+ _EtherType_name_4 = "EtherTypeServiceVLAN"
+)
+
+var (
+ _EtherType_index_0 = [...]uint8{0, 13}
+ _EtherType_index_1 = [...]uint8{0, 12}
+ _EtherType_index_2 = [...]uint8{0, 13}
+ _EtherType_index_3 = [...]uint8{0, 13}
+ _EtherType_index_4 = [...]uint8{0, 20}
+)
+
+func (i EtherType) String() string {
+ switch {
+ case i == 2048:
+ return _EtherType_name_0
+ case i == 2054:
+ return _EtherType_name_1
+ case i == 33024:
+ return _EtherType_name_2
+ case i == 34525:
+ return _EtherType_name_3
+ case i == 34984:
+ return _EtherType_name_4
+ default:
+ return fmt.Sprintf("EtherType(%d)", i)
+ }
+}
diff --git a/vendor/github.com/mdlayher/ethernet/vlan.go b/vendor/github.com/mdlayher/ethernet/vlan.go
new file mode 100644
index 000000000..87b7ca2da
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/vlan.go
@@ -0,0 +1,127 @@
+package ethernet
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+const (
+ // VLANNone is a special VLAN ID which indicates that no VLAN is being
+ // used in a Frame. In this case, the VLAN's other fields may be used
+ // to indicate a Frame's priority.
+ VLANNone = 0x000
+
+ // VLANMax is a reserved VLAN ID which may indicate a wildcard in some
+ // management systems, but may not be configured or transmitted in a
+ // VLAN tag.
+ VLANMax = 0xfff
+)
+
+// ErrInvalidVLAN is returned when a VLAN tag is invalid due to one of the
+// following reasons:
+// - Priority of greater than 7 is detected
+// - ID of greater than 4094 (0xffe) is detected
+// - A customer VLAN does not follow a service VLAN (when using Q-in-Q)
+var ErrInvalidVLAN = errors.New("invalid VLAN")
+
+// Priority is an IEEE P802.1p priority level. Priority can be any value from
+// 0 to 7.
+//
+// It is important to note that priority 1 (PriorityBackground) actually has
+// a lower priority than 0 (PriorityBestEffort). All other Priority constants
+// indicate higher priority as the integer values increase.
+type Priority uint8
+
+// IEEE P802.1p recommended priority levels. Note that PriorityBackground has
+// a lower priority than PriorityBestEffort.
+const (
+ PriorityBackground Priority = 1
+ PriorityBestEffort Priority = 0
+ PriorityExcellentEffort Priority = 2
+ PriorityCriticalApplications Priority = 3
+ PriorityVideo Priority = 4
+ PriorityVoice Priority = 5
+ PriorityInternetworkControl Priority = 6
+ PriorityNetworkControl Priority = 7
+)
+
+// A VLAN is an IEEE 802.1Q Virtual LAN (VLAN) tag. A VLAN contains
+// information regarding traffic priority and a VLAN identifier for
+// a given Frame.
+type VLAN struct {
+ // Priority specifies a IEEE P802.1p priority level. Priority can be any
+ // value from 0 to 7.
+ Priority Priority
+
+ // DropEligible indicates if a Frame is eligible to be dropped in the
+ // presence of network congestion.
+ DropEligible bool
+
+ // ID specifies the VLAN ID for a Frame. ID can be any value from 0 to
+ // 4094 (0x000 to 0xffe), allowing up to 4094 VLANs.
+ //
+ // If ID is 0 (0x000, VLANNone), no VLAN is specified, and the other fields
+ // simply indicate a Frame's priority.
+ ID uint16
+}
+
+// MarshalBinary allocates a byte slice and marshals a VLAN into binary form.
+func (v *VLAN) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 2)
+ _, err := v.read(b)
+ return b, err
+}
+
+// read reads data from a VLAN into b. read is used to marshal a VLAN into
+// binary form, but does not allocate on its own.
+func (v *VLAN) read(b []byte) (int, error) {
+ // Check for VLAN priority in valid range
+ if v.Priority > PriorityNetworkControl {
+ return 0, ErrInvalidVLAN
+ }
+
+ // Check for VLAN ID in valid range
+ if v.ID >= VLANMax {
+ return 0, ErrInvalidVLAN
+ }
+
+ // 3 bits: priority
+ ub := uint16(v.Priority) << 13
+
+ // 1 bit: drop eligible
+ var drop uint16
+ if v.DropEligible {
+ drop = 1
+ }
+ ub |= drop << 12
+
+ // 12 bits: VLAN ID
+ ub |= v.ID
+
+ binary.BigEndian.PutUint16(b, ub)
+ return 2, nil
+}
+
+// UnmarshalBinary unmarshals a byte slice into a VLAN.
+func (v *VLAN) UnmarshalBinary(b []byte) error {
+ // VLAN tag is always 2 bytes
+ if len(b) != 2 {
+ return io.ErrUnexpectedEOF
+ }
+
+ // 3 bits: priority
+ // 1 bit : drop eligible
+ // 12 bits: VLAN ID
+ ub := binary.BigEndian.Uint16(b[0:2])
+ v.Priority = Priority(uint8(ub >> 13))
+ v.DropEligible = ub&0x1000 != 0
+ v.ID = ub & 0x0fff
+
+ // Check for VLAN ID in valid range
+ if v.ID >= VLANMax {
+ return ErrInvalidVLAN
+ }
+
+ return nil
+}
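
As a complement to the `Frame` and `VLAN` types above, here is a minimal, illustrative sketch (not part of the vendored files) that builds a VLAN-tagged broadcast frame and marshals it; the source address and payload are made up.

```
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/mdlayher/ethernet"
)

func main() {
	f := &ethernet.Frame{
		Destination: ethernet.Broadcast,
		Source:      net.HardwareAddr{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01},
		// 802.1Q tag: priority 3, VLAN ID 10, encoded as a 2-byte TCI
		// placed right after the 0x8100 TPID.
		VLAN: &ethernet.VLAN{
			Priority: ethernet.PriorityCriticalApplications,
			ID:       10,
		},
		EtherType: ethernet.EtherTypeIPv4,
		Payload:   []byte("hello"), // zero-padded up to the 46-byte minimum
	}

	b, err := f.MarshalBinary()
	if err != nil {
		log.Fatalf("failed to marshal frame: %v", err)
	}
	// 6 + 6 (addresses) + 4 (VLAN tag) + 2 (EtherType) + 46 (padded payload) = 64 bytes
	fmt.Printf("marshaled %d bytes\n", len(b))
}
```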
diff --git a/vendor/github.com/minio/md5-simd/LICENSE b/vendor/github.com/minio/md5-simd/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/minio/md5-simd/LICENSE.Golang b/vendor/github.com/minio/md5-simd/LICENSE.Golang
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/LICENSE.Golang
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/minio/md5-simd/README.md b/vendor/github.com/minio/md5-simd/README.md
new file mode 100644
index 000000000..fa6fce1a4
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/README.md
@@ -0,0 +1,198 @@
+
+# md5-simd
+
+This is a SIMD accelerated MD5 package, allowing up to either 8 (AVX2) or 16 (AVX512) independent MD5 sums to be calculated on a single CPU core.
+
+It was originally based on the [md5vec](https://github.com/igneous-systems/md5vec) repository by Igneous Systems, but has been made more flexible by, among other things, supporting different message sizes per lane and adding AVX512.
+
+`md5-simd` integrates a mechanism similar to the one described in [minio/sha256-simd](https://github.com/minio/sha256-simd#support-for-avx512) for making it easy for clients to take advantage of the parallel nature of the MD5 calculation. This will result in reduced overall CPU load.
+
+It is important to understand that `md5-simd` **does not speed up** a single threaded MD5 hash sum.
+Rather it allows multiple __independent__ MD5 sums to be computed in parallel on the same CPU core,
+thereby making more efficient usage of the computing resources.
+
+## Usage
+
+[![Documentation](https://godoc.org/github.com/minio/md5-simd?status.svg)](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc)
+
+
+In order to use `md5-simd`, you must first create a `Server`, which can be
+used to instantiate one or more objects for MD5 hashing.
+
+These objects conform to the regular [`hash.Hash`](https://pkg.go.dev/hash?tab=doc#Hash) interface
+and as such the normal Write/Reset/Sum functionality works as expected.
+
+As an example:
+```
+ // Create server
+ server := md5simd.NewServer()
+ defer server.Close()
+
+ // Create hashing object (conforming to hash.Hash)
+ md5Hash := server.NewHash()
+ defer md5Hash.Close()
+
+ // Write one (or more) blocks
+ md5Hash.Write(block)
+
+ // Return digest
+ digest := md5Hash.Sum([]byte{})
+```
+
+To maintain performance, both a [Server](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Server)
+and individual [Hasher](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Hasher) objects should
+be closed using the `Close()` function when no longer needed.
+
+A Hasher can efficiently be re-used by using [`Reset()`](https://pkg.go.dev/hash?tab=doc#Hash) functionality.
+
+If your system does not support the required instructions, the package falls back to using `crypto/md5` for hashing.
+
+## Limitations
+
+As explained above, `md5-simd` does not speed up an individual MD5 hash sum computation,
+unless some hierarchical tree construct is used, but this will result in different outcomes.
+Running a single hash on a server results in approximately half the throughput.
+
+Instead, it allows running multiple MD5 calculations in parallel on a single CPU core.
+This can be beneficial in e.g. multi-threaded server applications where many go-routines
+are dealing with many requests and multiple MD5 calculations can be packed/scheduled for parallel execution on a single core.
+
+This will result in a lower overall CPU usage as compared to using the standard `crypto/md5`
+functionality where each MD5 hash computation will consume a single thread (core).
+
+It is best to test and measure the overall CPU usage in a representative usage scenario in your application
+to get an overall understanding of the benefits of `md5-simd` as compared to `crypto/md5`, ideally under heavy CPU load.
+
+Also note that `md5-simd` is best meant to work with large objects,
+so if your application only hashes small objects of a few kilobytes
+you may be better off using `crypto/md5`.
+
+## Performance
+
+For the best performance, writes should be a multiple of 64 bytes, ideally a multiple of 32KB.
+To help with that, a [`buffered := bufio.NewWriterSize(hasher, 32<<10)`](https://golang.org/pkg/bufio/#NewWriterSize)
+can be inserted if you are unsure of the sizes of the writes.
+Remember to [flush](https://golang.org/pkg/bufio/#Writer.Flush) `buffered` before reading the hash.
+
+A single 'server' can process 16 streams concurrently with 1 core (AVX-512) or 2 cores (AVX2).
+In situations where it is likely that more than 16 streams are fully loaded it may be beneficial
+to use multiple servers.
+
+The following chart compares the multi-core performance between `crypto/md5` vs the AVX2 vs the AVX512 code:
+
+![md5-performance-overview](chart/Multi-core-MD5-Aggregated-Hashing-Performance.png)
+
+Compared to `crypto/md5`, the AVX2 version is up to 4x faster:
+
+```
+$ benchcmp crypto-md5.txt avx2.txt
+benchmark old MB/s new MB/s speedup
+BenchmarkParallel/32KB-4 2229.22 7370.50 3.31x
+BenchmarkParallel/64KB-4 2233.61 8248.46 3.69x
+BenchmarkParallel/128KB-4 2235.43 8660.74 3.87x
+BenchmarkParallel/256KB-4 2236.39 8863.87 3.96x
+BenchmarkParallel/512KB-4 2238.05 8985.39 4.01x
+BenchmarkParallel/1MB-4 2233.56 9042.62 4.05x
+BenchmarkParallel/2MB-4 2224.11 9014.46 4.05x
+BenchmarkParallel/4MB-4 2199.78 8993.61 4.09x
+BenchmarkParallel/8MB-4 2182.48 8748.22 4.01x
+```
+
+Compared to `crypto/md5`, the AVX512 is up to 8x faster (for larger block sizes):
+
+```
+$ benchcmp crypto-md5.txt avx512.txt
+benchmark old MB/s new MB/s speedup
+BenchmarkParallel/32KB-4 2229.22 11605.78 5.21x
+BenchmarkParallel/64KB-4 2233.61 14329.65 6.42x
+BenchmarkParallel/128KB-4 2235.43 16166.39 7.23x
+BenchmarkParallel/256KB-4 2236.39 15570.09 6.96x
+BenchmarkParallel/512KB-4 2238.05 16705.83 7.46x
+BenchmarkParallel/1MB-4 2233.56 16941.95 7.59x
+BenchmarkParallel/2MB-4 2224.11 17136.01 7.70x
+BenchmarkParallel/4MB-4 2199.78 17218.61 7.83x
+BenchmarkParallel/8MB-4 2182.48 17252.88 7.91x
+```
+
+These measurements were performed on AWS EC2 instance of type `c5.xlarge` equipped with a Xeon Platinum 8124M CPU at 3.0 GHz.
+
+If only one or two inputs are available, the scalar calculation method will be used for
+optimal speed in these cases.
+
+## Operation
+
+To make operation as easy as possible, there is a “Server” coordinating everything. The server keeps track of individual hash states and updates them as new data comes in. This can be visualized as follows:
+
+![server-architecture](chart/server-architecture.png)
+
+The data is sent to the server from each hash input in blocks of up to 32KB per round. In our testing we found this to be the block size that yielded the best results.
+
+Whenever there is data available, the server will collect data for up to 16 hashes and process all 16 lanes in parallel. This means that if 16 hashes have data available, all the lanes will be filled. However, since that may not be the case, the server will fill fewer lanes and do a round anyway. Lanes can also be partially filled if less than 32KB of data is written.
+
+![server-lanes-example](chart/server-lanes-example.png)
+
+In this example 4 lanes are fully filled and 2 lanes are partially filled. In this case the black areas will simply be masked out from the results and ignored. This is also why calculating a single hash on a server will not result in any speedup and hash writes should be a multiple of 32KB for the best performance.
+
+For AVX512, all 16 calculations are done on a single core; for AVX2, 2 cores are used if there is data for more than 8 lanes.
+So for optimal usage there should be data available for all 16 hashes. It may be perfectly reasonable to use more than 16 concurrent hashes.
+
+
+## Design & Tech
+
+md5-simd has both an AVX2 (8-lane parallel) and an AVX512 (16-lane parallel) version of the algorithm to accelerate the computation with the following function definitions:
+```
+//go:noescape
+func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
+
+//go:noescape
+func block16(state *uint32, ptrs *int64, mask uint64, n int)
+```
+
+The AVX2 version is based on the [md5vec](https://github.com/igneous-systems/md5vec) repository and is essentially unchanged except for minor (cosmetic) changes.
+
+The AVX512 version is derived from the AVX2 version but adds some further optimizations and simplifications.
+
+### Caching in upper ZMM registers
+
+The AVX2 version passes in a `cache8` block of memory (about 0.5 KB) for temporary storage of intermediate results during `ROUND1` which are subsequently used during `ROUND2` through to `ROUND4`.
+
+Since AVX512 has double the amount of registers (32 ZMM registers as compared to 16 YMM registers), it is possible to use the upper 16 ZMM registers for keeping the intermediate states on the CPU. As such, there is no need to pass in a corresponding `cache16` into the AVX512 block function.
+
+### Direct loading using 64-bit pointers
+
+The AVX2 version uses the `VPGATHERDD` instruction (for YMM) to do a parallel load of 8 lanes using (8 independent) 32-bit offsets. Since there is no control over how the 8 slices that are passed into the (Golang) `blockMd5` function are laid out into memory, it is not possible to derive a "base" address and corresponding offsets (all within 32-bits) for all 8 slices.
+
+As such, the AVX2 version uses an interim buffer to collect the byte slices to be hashed from all 8 input slices and passes this buffer along with (fixed) 32-bit offsets into the assembly code.
+
+For the AVX512 version this interim buffer is not needed since the AVX512 code uses a pair of `VPGATHERQD` instructions to directly dereference 64-bit pointers (from a base register address that is initialized to zero).
+
+Note that two load (gather) instructions are needed because the AVX512 version processes 16-lanes in parallel, requiring 16 times 64-bit = 1024 bits in total to be loaded. A simple `VALIGND` and `VPORD` are subsequently used to merge the lower and upper halves together into a single ZMM register (that contains 16 lanes of 32-bit DWORDS).
+
+### Masking support
+
+Due to the fact that pointers are passed directly from the Golang slices, we need to protect against NULL pointers.
+For this, a 16-bit mask is passed into the AVX512 assembly code, which is used during the `VPGATHERQD` instructions to mask out lanes that could otherwise result in segment violations.
+
+### Minor optimizations
+
+The `roll` macro (three instructions on AVX2) is no longer needed for AVX512 and is replaced by a single `VPROLD` instruction.
+
+Also, several logical operations from the various ROUNDS of the AVX2 version could be combined into a single instruction using ternary logic (with the `VPTERNLOGD` instruction), resulting in a further simplification and speed-up.
+
+## Low level block function performance
+
+The benchmark below shows the (single thread) maximum performance of the `block()` function for AVX2 (having 8 lanes) and AVX512 (having 16 lanes). Also the baseline single-core performance from the standard `crypto/md5` package is shown for comparison.
+
+```
+BenchmarkCryptoMd5-4 687.66 MB/s 0 B/op 0 allocs/op
+BenchmarkBlock8-4 4144.80 MB/s 0 B/op 0 allocs/op
+BenchmarkBlock16-4 8228.88 MB/s 0 B/op 0 allocs/op
+```
+
+## License
+
+`md5-simd` is released under the Apache License v2.0. You can find the complete text in the file LICENSE.
+
+## Contributing
+
+Contributions are welcome, please send PRs for any enhancements.
\ No newline at end of file
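
As a small aside on the Performance note in the README above (not part of the vendored files): a minimal sketch of buffering writes to 32KB around a hasher, assuming only the `md5simd.NewServer`/`NewHash` API shown in the Usage section; the input file path is hypothetical.

```
package main

import (
	"bufio"
	"fmt"
	"io"
	"log"
	"os"

	md5simd "github.com/minio/md5-simd"
)

func main() {
	server := md5simd.NewServer()
	defer server.Close()

	h := server.NewHash()
	defer h.Close()

	// Buffer writes so the hasher sees 32KB chunks, as recommended above.
	buffered := bufio.NewWriterSize(h, 32<<10)

	f, err := os.Open("/tmp/example.dat") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := io.Copy(buffered, f); err != nil {
		log.Fatal(err)
	}
	// Flush the buffer before reading the digest.
	if err := buffered.Flush(); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("md5: %x\n", h.Sum(nil))
}
```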
diff --git a/vendor/github.com/minio/md5-simd/block16_amd64.s b/vendor/github.com/minio/md5-simd/block16_amd64.s
new file mode 100644
index 000000000..be0a43a3b
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block16_amd64.s
@@ -0,0 +1,228 @@
+// Copyright (c) 2020 MinIO Inc. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+//+build !noasm,!appengine,gc
+
+// This is the AVX512 implementation of the MD5 block function (16-way parallel)
+
+#define prep(index) \
+ KMOVQ kmask, ktmp \
+ VPGATHERDD index*4(base)(ptrs*1), ktmp, mem
+
+#define ROUND1(a, b, c, d, index, const, shift) \
+ VPXORQ c, tmp, tmp \
+ VPADDD 64*const(consts), a, a \
+ VPADDD mem, a, a \
+ VPTERNLOGD $0x6C, b, d, tmp \
+ prep(index) \
+ VPADDD tmp, a, a \
+ VPROLD $shift, a, a \
+ VMOVAPD c, tmp \
+ VPADDD b, a, a
+
+#define ROUND1noload(a, b, c, d, const, shift) \
+ VPXORQ c, tmp, tmp \
+ VPADDD 64*const(consts), a, a \
+ VPADDD mem, a, a \
+ VPTERNLOGD $0x6C, b, d, tmp \
+ VPADDD tmp, a, a \
+ VPROLD $shift, a, a \
+ VMOVAPD c, tmp \
+ VPADDD b, a, a
+
+#define ROUND2(a, b, c, d, zreg, const, shift) \
+ VPADDD 64*const(consts), a, a \
+ VPADDD zreg, a, a \
+ VANDNPD c, tmp, tmp \
+ VPTERNLOGD $0xEC, b, tmp, tmp2 \
+ VMOVAPD c, tmp \
+ VPADDD tmp2, a, a \
+ VMOVAPD c, tmp2 \
+ VPROLD $shift, a, a \
+ VPADDD b, a, a
+
+#define ROUND3(a, b, c, d, zreg, const, shift) \
+ VPADDD 64*const(consts), a, a \
+ VPADDD zreg, a, a \
+ VPTERNLOGD $0x96, b, d, tmp \
+ VPADDD tmp, a, a \
+ VPROLD $shift, a, a \
+ VMOVAPD b, tmp \
+ VPADDD b, a, a
+
+#define ROUND4(a, b, c, d, zreg, const, shift) \
+ VPADDD 64*const(consts), a, a \
+ VPADDD zreg, a, a \
+ VPTERNLOGD $0x36, b, c, tmp \
+ VPADDD tmp, a, a \
+ VPROLD $shift, a, a \
+ VPXORQ c, ones, tmp \
+ VPADDD b, a, a
+
+TEXT ·block16(SB), 4, $0-40
+
+ MOVQ state+0(FP), BX
+ MOVQ base+8(FP), SI
+ MOVQ ptrs+16(FP), AX
+ KMOVQ mask+24(FP), K1
+ MOVQ n+32(FP), DX
+ MOVQ ·avx512md5consts+0(SB), DI
+
+#define a Z0
+#define b Z1
+#define c Z2
+#define d Z3
+
+#define sa Z4
+#define sb Z5
+#define sc Z6
+#define sd Z7
+
+#define tmp Z8
+#define tmp2 Z9
+#define ptrs Z10
+#define ones Z12
+#define mem Z15
+
+#define kmask K1
+#define ktmp K3
+
+// ----------------------------------------------------------
+// Registers Z16 through to Z31 are used for caching purposes
+// ----------------------------------------------------------
+
+#define dig BX
+#define count DX
+#define base SI
+#define consts DI
+
+ // load digest into state registers
+ VMOVUPD (dig), a
+ VMOVUPD 0x40(dig), b
+ VMOVUPD 0x80(dig), c
+ VMOVUPD 0xc0(dig), d
+
+ // load source pointers
+ VMOVUPD 0x00(AX), ptrs
+
+ MOVQ $-1, AX
+ VPBROADCASTQ AX, ones
+
+loop:
+ VMOVAPD a, sa
+ VMOVAPD b, sb
+ VMOVAPD c, sc
+ VMOVAPD d, sd
+
+ prep(0)
+ VMOVAPD d, tmp
+ VMOVAPD mem, Z16
+
+ ROUND1(a,b,c,d, 1,0x00, 7)
+ VMOVAPD mem, Z17
+ ROUND1(d,a,b,c, 2,0x01,12)
+ VMOVAPD mem, Z18
+ ROUND1(c,d,a,b, 3,0x02,17)
+ VMOVAPD mem, Z19
+ ROUND1(b,c,d,a, 4,0x03,22)
+ VMOVAPD mem, Z20
+ ROUND1(a,b,c,d, 5,0x04, 7)
+ VMOVAPD mem, Z21
+ ROUND1(d,a,b,c, 6,0x05,12)
+ VMOVAPD mem, Z22
+ ROUND1(c,d,a,b, 7,0x06,17)
+ VMOVAPD mem, Z23
+ ROUND1(b,c,d,a, 8,0x07,22)
+ VMOVAPD mem, Z24
+ ROUND1(a,b,c,d, 9,0x08, 7)
+ VMOVAPD mem, Z25
+ ROUND1(d,a,b,c,10,0x09,12)
+ VMOVAPD mem, Z26
+ ROUND1(c,d,a,b,11,0x0a,17)
+ VMOVAPD mem, Z27
+ ROUND1(b,c,d,a,12,0x0b,22)
+ VMOVAPD mem, Z28
+ ROUND1(a,b,c,d,13,0x0c, 7)
+ VMOVAPD mem, Z29
+ ROUND1(d,a,b,c,14,0x0d,12)
+ VMOVAPD mem, Z30
+ ROUND1(c,d,a,b,15,0x0e,17)
+ VMOVAPD mem, Z31
+
+ ROUND1noload(b,c,d,a, 0x0f,22)
+
+ VMOVAPD d, tmp
+ VMOVAPD d, tmp2
+
+ ROUND2(a,b,c,d, Z17,0x10, 5)
+ ROUND2(d,a,b,c, Z22,0x11, 9)
+ ROUND2(c,d,a,b, Z27,0x12,14)
+ ROUND2(b,c,d,a, Z16,0x13,20)
+ ROUND2(a,b,c,d, Z21,0x14, 5)
+ ROUND2(d,a,b,c, Z26,0x15, 9)
+ ROUND2(c,d,a,b, Z31,0x16,14)
+ ROUND2(b,c,d,a, Z20,0x17,20)
+ ROUND2(a,b,c,d, Z25,0x18, 5)
+ ROUND2(d,a,b,c, Z30,0x19, 9)
+ ROUND2(c,d,a,b, Z19,0x1a,14)
+ ROUND2(b,c,d,a, Z24,0x1b,20)
+ ROUND2(a,b,c,d, Z29,0x1c, 5)
+ ROUND2(d,a,b,c, Z18,0x1d, 9)
+ ROUND2(c,d,a,b, Z23,0x1e,14)
+ ROUND2(b,c,d,a, Z28,0x1f,20)
+
+ VMOVAPD c, tmp
+
+ ROUND3(a,b,c,d, Z21,0x20, 4)
+ ROUND3(d,a,b,c, Z24,0x21,11)
+ ROUND3(c,d,a,b, Z27,0x22,16)
+ ROUND3(b,c,d,a, Z30,0x23,23)
+ ROUND3(a,b,c,d, Z17,0x24, 4)
+ ROUND3(d,a,b,c, Z20,0x25,11)
+ ROUND3(c,d,a,b, Z23,0x26,16)
+ ROUND3(b,c,d,a, Z26,0x27,23)
+ ROUND3(a,b,c,d, Z29,0x28, 4)
+ ROUND3(d,a,b,c, Z16,0x29,11)
+ ROUND3(c,d,a,b, Z19,0x2a,16)
+ ROUND3(b,c,d,a, Z22,0x2b,23)
+ ROUND3(a,b,c,d, Z25,0x2c, 4)
+ ROUND3(d,a,b,c, Z28,0x2d,11)
+ ROUND3(c,d,a,b, Z31,0x2e,16)
+ ROUND3(b,c,d,a, Z18,0x2f,23)
+
+ VPXORQ d, ones, tmp
+
+ ROUND4(a,b,c,d, Z16,0x30, 6)
+ ROUND4(d,a,b,c, Z23,0x31,10)
+ ROUND4(c,d,a,b, Z30,0x32,15)
+ ROUND4(b,c,d,a, Z21,0x33,21)
+ ROUND4(a,b,c,d, Z28,0x34, 6)
+ ROUND4(d,a,b,c, Z19,0x35,10)
+ ROUND4(c,d,a,b, Z26,0x36,15)
+ ROUND4(b,c,d,a, Z17,0x37,21)
+ ROUND4(a,b,c,d, Z24,0x38, 6)
+ ROUND4(d,a,b,c, Z31,0x39,10)
+ ROUND4(c,d,a,b, Z22,0x3a,15)
+ ROUND4(b,c,d,a, Z29,0x3b,21)
+ ROUND4(a,b,c,d, Z20,0x3c, 6)
+ ROUND4(d,a,b,c, Z27,0x3d,10)
+ ROUND4(c,d,a,b, Z18,0x3e,15)
+ ROUND4(b,c,d,a, Z25,0x3f,21)
+
+ VPADDD sa, a, a
+ VPADDD sb, b, b
+ VPADDD sc, c, c
+ VPADDD sd, d, d
+
+ LEAQ 64(base), base
+ SUBQ $64, count
+ JNE loop
+
+ VMOVUPD a, (dig)
+ VMOVUPD b, 0x40(dig)
+ VMOVUPD c, 0x80(dig)
+ VMOVUPD d, 0xc0(dig)
+
+ VZEROUPPER
+ RET
diff --git a/vendor/github.com/minio/md5-simd/block8_amd64.s b/vendor/github.com/minio/md5-simd/block8_amd64.s
new file mode 100644
index 000000000..f57db17aa
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block8_amd64.s
@@ -0,0 +1,281 @@
+//+build !noasm,!appengine,gc
+
+// Copyright (c) 2018 Igneous Systems
+// MIT License
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+// Copyright (c) 2020 MinIO Inc. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// This is the AVX2 implementation of the MD5 block function (8-way parallel)
+
+// block8(state *uint64, base uintptr, bufs *int32, cache *byte, n int)
+TEXT ·block8(SB), 4, $0-40
+ MOVQ state+0(FP), BX
+ MOVQ base+8(FP), SI
+ MOVQ bufs+16(FP), AX
+ MOVQ cache+24(FP), CX
+ MOVQ n+32(FP), DX
+ MOVQ ·avx256md5consts+0(SB), DI
+
+ // Align cache (which is stack allocated by the compiler)
+ // to a 256 bit boundary (ymm register alignment)
+ // The cache8 type is deliberately oversized to permit this.
+ ADDQ $31, CX
+ ANDB $-32, CL
+
+#define a Y0
+#define b Y1
+#define c Y2
+#define d Y3
+
+#define sa Y4
+#define sb Y5
+#define sc Y6
+#define sd Y7
+
+#define tmp Y8
+#define tmp2 Y9
+
+#define mask Y10
+#define off Y11
+
+#define ones Y12
+
+#define rtmp1 Y13
+#define rtmp2 Y14
+
+#define mem Y15
+
+#define dig BX
+#define cache CX
+#define count DX
+#define base SI
+#define consts DI
+
+#define prepmask \
+ VPXOR mask, mask, mask \
+ VPCMPGTD mask, off, mask
+
+#define prep(index) \
+ VMOVAPD mask, rtmp2 \
+ VPGATHERDD rtmp2, index*4(base)(off*1), mem
+
+#define load(index) \
+ VMOVAPD index*32(cache), mem
+
+#define store(index) \
+ VMOVAPD mem, index*32(cache)
+
+#define roll(shift, a) \
+ VPSLLD $shift, a, rtmp1 \
+ VPSRLD $32-shift, a, a \
+ VPOR rtmp1, a, a
+
+#define ROUND1(a, b, c, d, index, const, shift) \
+ VPXOR c, tmp, tmp \
+ VPADDD 32*const(consts), a, a \
+ VPADDD mem, a, a \
+ VPAND b, tmp, tmp \
+ VPXOR d, tmp, tmp \
+ prep(index) \
+ VPADDD tmp, a, a \
+ roll(shift,a) \
+ VMOVAPD c, tmp \
+ VPADDD b, a, a
+
+#define ROUND1load(a, b, c, d, index, const, shift) \
+ VXORPD c, tmp, tmp \
+ VPADDD 32*const(consts), a, a \
+ VPADDD mem, a, a \
+ VPAND b, tmp, tmp \
+ VPXOR d, tmp, tmp \
+ load(index) \
+ VPADDD tmp, a, a \
+ roll(shift,a) \
+ VMOVAPD c, tmp \
+ VPADDD b, a, a
+
+#define ROUND2(a, b, c, d, index, const, shift) \
+ VPADDD 32*const(consts), a, a \
+ VPADDD mem, a, a \
+ VPAND b, tmp2, tmp2 \
+ VANDNPD c, tmp, tmp \
+ load(index) \
+ VPOR tmp, tmp2, tmp2 \
+ VMOVAPD c, tmp \
+ VPADDD tmp2, a, a \
+ VMOVAPD c, tmp2 \
+ roll(shift,a) \
+ VPADDD b, a, a
+
+#define ROUND3(a, b, c, d, index, const, shift) \
+ VPADDD 32*const(consts), a, a \
+ VPADDD mem, a, a \
+ load(index) \
+ VPXOR d, tmp, tmp \
+ VPXOR b, tmp, tmp \
+ VPADDD tmp, a, a \
+ roll(shift,a) \
+ VMOVAPD b, tmp \
+ VPADDD b, a, a
+
+#define ROUND4(a, b, c, d, index, const, shift) \
+ VPADDD 32*const(consts), a, a \
+ VPADDD mem, a, a \
+ VPOR b, tmp, tmp \
+ VPXOR c, tmp, tmp \
+ VPADDD tmp, a, a \
+ load(index) \
+ roll(shift,a) \
+ VPXOR c, ones, tmp \
+ VPADDD b, a, a
+
+ // load digest into state registers
+ VMOVUPD (dig), a
+ VMOVUPD 32(dig), b
+ VMOVUPD 64(dig), c
+ VMOVUPD 96(dig), d
+
+ // load source buffer offsets
+ VMOVUPD (AX), off
+
+ prepmask
+ VPCMPEQD ones, ones, ones
+
+loop:
+ VMOVAPD a, sa
+ VMOVAPD b, sb
+ VMOVAPD c, sc
+ VMOVAPD d, sd
+
+ prep(0)
+ VMOVAPD d, tmp
+ store(0)
+
+ ROUND1(a,b,c,d, 1,0x00, 7)
+ store(1)
+ ROUND1(d,a,b,c, 2,0x01,12)
+ store(2)
+ ROUND1(c,d,a,b, 3,0x02,17)
+ store(3)
+ ROUND1(b,c,d,a, 4,0x03,22)
+ store(4)
+ ROUND1(a,b,c,d, 5,0x04, 7)
+ store(5)
+ ROUND1(d,a,b,c, 6,0x05,12)
+ store(6)
+ ROUND1(c,d,a,b, 7,0x06,17)
+ store(7)
+ ROUND1(b,c,d,a, 8,0x07,22)
+ store(8)
+ ROUND1(a,b,c,d, 9,0x08, 7)
+ store(9)
+ ROUND1(d,a,b,c,10,0x09,12)
+ store(10)
+ ROUND1(c,d,a,b,11,0x0a,17)
+ store(11)
+ ROUND1(b,c,d,a,12,0x0b,22)
+ store(12)
+ ROUND1(a,b,c,d,13,0x0c, 7)
+ store(13)
+ ROUND1(d,a,b,c,14,0x0d,12)
+ store(14)
+ ROUND1(c,d,a,b,15,0x0e,17)
+ store(15)
+ ROUND1load(b,c,d,a, 1,0x0f,22)
+
+ VMOVAPD d, tmp
+ VMOVAPD d, tmp2
+
+ ROUND2(a,b,c,d, 6,0x10, 5)
+ ROUND2(d,a,b,c,11,0x11, 9)
+ ROUND2(c,d,a,b, 0,0x12,14)
+ ROUND2(b,c,d,a, 5,0x13,20)
+ ROUND2(a,b,c,d,10,0x14, 5)
+ ROUND2(d,a,b,c,15,0x15, 9)
+ ROUND2(c,d,a,b, 4,0x16,14)
+ ROUND2(b,c,d,a, 9,0x17,20)
+ ROUND2(a,b,c,d,14,0x18, 5)
+ ROUND2(d,a,b,c, 3,0x19, 9)
+ ROUND2(c,d,a,b, 8,0x1a,14)
+ ROUND2(b,c,d,a,13,0x1b,20)
+ ROUND2(a,b,c,d, 2,0x1c, 5)
+ ROUND2(d,a,b,c, 7,0x1d, 9)
+ ROUND2(c,d,a,b,12,0x1e,14)
+ ROUND2(b,c,d,a, 0,0x1f,20)
+
+ load(5)
+ VMOVAPD c, tmp
+
+ ROUND3(a,b,c,d, 8,0x20, 4)
+ ROUND3(d,a,b,c,11,0x21,11)
+ ROUND3(c,d,a,b,14,0x22,16)
+ ROUND3(b,c,d,a, 1,0x23,23)
+ ROUND3(a,b,c,d, 4,0x24, 4)
+ ROUND3(d,a,b,c, 7,0x25,11)
+ ROUND3(c,d,a,b,10,0x26,16)
+ ROUND3(b,c,d,a,13,0x27,23)
+ ROUND3(a,b,c,d, 0,0x28, 4)
+ ROUND3(d,a,b,c, 3,0x29,11)
+ ROUND3(c,d,a,b, 6,0x2a,16)
+ ROUND3(b,c,d,a, 9,0x2b,23)
+ ROUND3(a,b,c,d,12,0x2c, 4)
+ ROUND3(d,a,b,c,15,0x2d,11)
+ ROUND3(c,d,a,b, 2,0x2e,16)
+ ROUND3(b,c,d,a, 0,0x2f,23)
+
+ load(0)
+ VPXOR d, ones, tmp
+
+ ROUND4(a,b,c,d, 7,0x30, 6)
+ ROUND4(d,a,b,c,14,0x31,10)
+ ROUND4(c,d,a,b, 5,0x32,15)
+ ROUND4(b,c,d,a,12,0x33,21)
+ ROUND4(a,b,c,d, 3,0x34, 6)
+ ROUND4(d,a,b,c,10,0x35,10)
+ ROUND4(c,d,a,b, 1,0x36,15)
+ ROUND4(b,c,d,a, 8,0x37,21)
+ ROUND4(a,b,c,d,15,0x38, 6)
+ ROUND4(d,a,b,c, 6,0x39,10)
+ ROUND4(c,d,a,b,13,0x3a,15)
+ ROUND4(b,c,d,a, 4,0x3b,21)
+ ROUND4(a,b,c,d,11,0x3c, 6)
+ ROUND4(d,a,b,c, 2,0x3d,10)
+ ROUND4(c,d,a,b, 9,0x3e,15)
+ ROUND4(b,c,d,a, 0,0x3f,21)
+
+ VPADDD sa, a, a
+ VPADDD sb, b, b
+ VPADDD sc, c, c
+ VPADDD sd, d, d
+
+ LEAQ 64(base), base
+ SUBQ $64, count
+ JNE loop
+
+ VMOVUPD a, (dig)
+ VMOVUPD b, 32(dig)
+ VMOVUPD c, 64(dig)
+ VMOVUPD d, 96(dig)
+
+ VZEROUPPER
+ RET
diff --git a/vendor/github.com/minio/md5-simd/block_amd64.go b/vendor/github.com/minio/md5-simd/block_amd64.go
new file mode 100644
index 000000000..16edda268
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block_amd64.go
@@ -0,0 +1,210 @@
+//+build !noasm,!appengine,gc
+
+// Copyright (c) 2020 MinIO Inc. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+package md5simd
+
+import (
+ "fmt"
+ "math"
+ "unsafe"
+
+ "github.com/klauspost/cpuid/v2"
+)
+
+var hasAVX512 bool
+
+func init() {
+ // VANDNPD requires AVX512DQ. Technically it could be VPTERNLOGQ which is AVX512F.
+ hasAVX512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ)
+}
+
+//go:noescape
+func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
+
+//go:noescape
+func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int)
+
+// 8-way 4x uint32 digests in 4 ymm registers
+// (ymm0, ymm1, ymm2, ymm3)
+type digest8 struct {
+ v0, v1, v2, v3 [8]uint32
+}
+
+// Stack cache for 8x64 byte md5.BlockSize bytes.
+// Must be 32-byte aligned, so allocate 512+32 and
+// align upwards at runtime.
+type cache8 [512 + 32]byte
+
+// MD5 magic numbers for one lane of hashing; inflated
+// 8x below at init time.
+var md5consts = [64]uint32{
+ 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
+ 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
+ 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
+ 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
+ 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
+ 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
+ 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
+ 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
+ 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
+ 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
+ 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
+ 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
+ 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
+ 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
+ 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
+ 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
+}
+
+// inflate the consts 8-way for 8x md5 (256 bit ymm registers)
+var avx256md5consts = func(c []uint32) []uint32 {
+ inf := make([]uint32, 8*len(c))
+ for i := range c {
+ for j := 0; j < 8; j++ {
+ inf[(i*8)+j] = c[i]
+ }
+ }
+ return inf
+}(md5consts[:])
+
+// 16-way 4x uint32 digests in 4 zmm registers
+type digest16 struct {
+ v0, v1, v2, v3 [16]uint32
+}
+
+// inflate the consts 16-way for 16x md5 (512 bit zmm registers)
+var avx512md5consts = func(c []uint32) []uint32 {
+ inf := make([]uint32, 16*len(c))
+ for i := range c {
+ for j := 0; j < 16; j++ {
+ inf[(i*16)+j] = c[i]
+ }
+ }
+ return inf
+}(md5consts[:])
+
+// Interface function to assembly code
+func (s *md5Server) blockMd5_x16(d *digest16, input [16][]byte, half bool) {
+ if hasAVX512 {
+ blockMd5_avx512(d, input, s.allBufs, &s.maskRounds16)
+ return
+ }
+
+ // Preparing data using copy is slower since copies aren't inlined.
+
+ // Calculate on this goroutine
+ if half {
+ for i := range s.i8[0][:] {
+ s.i8[0][i] = input[i]
+ }
+ for i := range s.d8a.v0[:] {
+ s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i]
+ }
+ blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a)
+ for i := range s.d8a.v0[:] {
+ d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i]
+ }
+ return
+ }
+
+ for i := range s.i8[0][:] {
+ s.i8[0][i], s.i8[1][i] = input[i], input[8+i]
+ }
+
+ for i := range s.d8a.v0[:] {
+ j := (i + 8) & 15
+ s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i]
+ s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] = d.v0[j], d.v1[j], d.v2[j], d.v3[j]
+ }
+
+ // Benchmarks appear to be slightly faster when spinning up 2 goroutines instead
+ // of using the current goroutine for one of the blocks.
+ s.wg.Add(2)
+ go func() { blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a); s.wg.Done() }()
+ go func() { blockMd5_avx2(&s.d8b, s.i8[1], s.allBufs, &s.maskRounds8b); s.wg.Done() }()
+ s.wg.Wait()
+ for i := range s.d8a.v0[:] {
+ d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i]
+ }
+ for i := range s.d8b.v0[:] {
+ j := (i + 8) & 15
+ d.v0[j], d.v1[j], d.v2[j], d.v3[j] = s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i]
+ }
+}
+
+// Interface function to AVX512 assembly code
+func blockMd5_avx512(s *digest16, input [16][]byte, base []byte, maskRounds *[16]maskRounds) {
+ baseMin := uint64(uintptr(unsafe.Pointer(&(base[0]))))
+ ptrs := [16]int32{}
+
+ for i := range ptrs {
+ if len(input[i]) > 0 {
+ if len(input[i]) > internalBlockSize {
+ panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i))
+ }
+
+ off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin
+ if off > math.MaxUint32 {
+ panic(fmt.Sprintf("invalid buffer sent with offset %x", off))
+ }
+ ptrs[i] = int32(off)
+ }
+ }
+
+ sdup := *s // create copy of initial states to receive intermediate updates
+
+ rounds := generateMaskAndRounds16(input, maskRounds)
+
+ for r := 0; r < rounds; r++ {
+ m := maskRounds[r]
+
+ block16(&sdup.v0[0], uintptr(baseMin), &ptrs[0], m.mask, int(64*m.rounds))
+
+ for j := 0; j < len(ptrs); j++ {
+ ptrs[j] += int32(64 * m.rounds) // update pointers for next round
+ if m.mask&(1<<j) != 0 { // update digest if still masked as active
+ (*s).v0[j], (*s).v1[j], (*s).v2[j], (*s).v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j]
+ }
+ }
+ }
+}
+
+// Interface function to AVX2 assembly code
+func blockMd5_avx2(s *digest8, input [8][]byte, base []byte, maskRounds *[8]maskRounds) {
+ baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) - 4
+
+ ptrs := [8]int32{}
+
+ for i := range ptrs {
+ if len(input[i]) > 0 {
+ if len(input[i]) > internalBlockSize {
+ panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i))
+ }
+
+ off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin
+ if off > math.MaxUint32 {
+ panic(fmt.Sprintf("invalid buffer sent with offset %x", off))
+ }
+ ptrs[i] = int32(off)
+ }
+ }
+
+ sdup := *s // create copy of initial states to receive intermediate updates
+
+ rounds := generateMaskAndRounds8(input, maskRounds)
+
+ for r := 0; r < rounds; r++ {
+ m := maskRounds[r]
+ var cache cache8 // stack storage for block8 tmp state
+ block8(&sdup.v0[0], uintptr(baseMin), &ptrs[0], &cache[0], int(64*m.rounds))
+
+ for j := 0; j < len(ptrs); j++ {
+ ptrs[j] += int32(64 * m.rounds) // update pointers for next round
+ if m.mask&(1<<j) != 0 { // update digest if still masked as active
+ (*s).v0[j], (*s).v1[j], (*s).v2[j], (*s).v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j]
+ }
+ }
+ }
+}
+ if l > internalBlockSize {
+ l = internalBlockSize
+ }
+ nnn, err := d.write(p[:l])
+ if err != nil {
+ return nn, err
+ }
+ nn += nnn
+ p = p[l:]
+
+ if len(p) == 0 {
+ break
+ }
+
+ }
+ return
+}
+
+func (d *md5Digest) write(p []byte) (nn int, err error) {
+
+ nn = len(p)
+ d.len += uint64(nn)
+ if d.nx > 0 {
+ n := copy(d.x[d.nx:], p)
+ d.nx += n
+ if d.nx == BlockSize {
+ // Create a copy of the overflow buffer in order to send it async over the channel
+ // (since we will modify the overflow buffer down below with any access beyond multiples of 64)
+ tmp := <-d.buffers
+ tmp = tmp[:BlockSize]
+ copy(tmp, d.x[:])
+ d.sendBlock(blockInput{uid: d.uid, msg: tmp}, len(p)-n < BlockSize)
+ d.nx = 0
+ }
+ p = p[n:]
+ }
+ if len(p) >= BlockSize {
+ n := len(p) &^ (BlockSize - 1)
+ buf := <-d.buffers
+ buf = buf[:n]
+ copy(buf, p)
+ d.sendBlock(blockInput{uid: d.uid, msg: buf}, len(p)-n < BlockSize)
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ d.nx = copy(d.x[:], p)
+ }
+ return
+}
+
+func (d *md5Digest) Close() {
+ if d.blocksCh != nil {
+ close(d.blocksCh)
+ d.blocksCh = nil
+ }
+}
+
+var sumChPool sync.Pool
+
+func init() {
+ sumChPool.New = func() interface{} {
+ return make(chan sumResult, 1)
+ }
+}
+
+// Sum - Return MD5 sum in bytes
+func (d *md5Digest) Sum(in []byte) (result []byte) {
+ if d.blocksCh == nil {
+ panic("sum after close")
+ }
+
+ trail := <-d.buffers
+ trail = append(trail[:0], d.x[:d.nx]...)
+
+ length := d.len
+ // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+ var tmp [64]byte
+ tmp[0] = 0x80
+ if length%64 < 56 {
+ trail = append(trail, tmp[0:56-length%64]...)
+ } else {
+ trail = append(trail, tmp[0:64+56-length%64]...)
+ }
+
+ // Length in bits.
+ length <<= 3
+ binary.LittleEndian.PutUint64(tmp[:], length) // append length in bits
+
+ trail = append(trail, tmp[0:8]...)
+ if len(trail)%BlockSize != 0 {
+ panic(fmt.Errorf("internal error: sum block was not aligned. len=%d, nx=%d", len(trail), d.nx))
+ }
+ sumCh := sumChPool.Get().(chan sumResult)
+ d.sendBlock(blockInput{uid: d.uid, msg: trail, sumCh: sumCh}, true)
+
+ sum := <-sumCh
+ sumChPool.Put(sumCh)
+
+ return append(in, sum.digest[:]...)
+}
+
+// sendBlock will send a block for processing.
+// If cycle is true we will block on cycle, otherwise we will only block
+// if the block channel is full.
+func (d *md5Digest) sendBlock(bi blockInput, cycle bool) {
+ if cycle {
+ select {
+ case d.blocksCh <- bi:
+ d.cycleServer <- d.uid
+ }
+ return
+ }
+ // Only block on cycle if we filled the buffer
+ select {
+ case d.blocksCh <- bi:
+ return
+ default:
+ d.cycleServer <- d.uid
+ d.blocksCh <- bi
+ }
+}
diff --git a/vendor/github.com/minio/md5-simd/md5-server_amd64.go b/vendor/github.com/minio/md5-simd/md5-server_amd64.go
new file mode 100644
index 000000000..94f741c54
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-server_amd64.go
@@ -0,0 +1,397 @@
+//+build !noasm,!appengine,gc
+
+// Copyright (c) 2020 MinIO Inc. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+package md5simd
+
+import (
+ "encoding/binary"
+ "fmt"
+ "runtime"
+ "sync"
+
+ "github.com/klauspost/cpuid/v2"
+)
+
+// MD5 initialization constants
+const (
+ // Lanes is the number of concurrently calculated hashes.
+ Lanes = 16
+
+ init0 = 0x67452301
+ init1 = 0xefcdab89
+ init2 = 0x98badcfe
+ init3 = 0x10325476
+
+ // Use scalar routine when below this many lanes
+ useScalarBelow = 3
+)
+
+// md5ServerUID - Does not start at 0 but next multiple of 16 so as to be able to
+// differentiate with default initialisation value of 0
+const md5ServerUID = Lanes
+
+const buffersPerLane = 3
+
+// Message to send across input channel
+type blockInput struct {
+ uid uint64
+ msg []byte
+ sumCh chan sumResult
+ reset bool
+}
+
+type sumResult struct {
+ digest [Size]byte
+}
+
+type lanesInfo [Lanes]blockInput
+
+// md5Server - Type to implement parallel handling of MD5 invocations
+type md5Server struct {
+ uidCounter uint64
+ cycle chan uint64 // client with uid has update.
+ newInput chan newClient // Add new client.
+ digests map[uint64][Size]byte // Map of uids to (interim) digest results
+ maskRounds16 [16]maskRounds // Pre-allocated static array for max 16 rounds
+ maskRounds8a [8]maskRounds // Pre-allocated static array for max 8 rounds (1st AVX2 core)
+ maskRounds8b [8]maskRounds // Pre-allocated static array for max 8 rounds (2nd AVX2 core)
+ allBufs []byte // Preallocated buffer.
+ buffers chan []byte // Preallocated buffers, sliced from allBufs.
+
+ i8 [2][8][]byte // avx2 temporary vars
+ d8a, d8b digest8
+ wg sync.WaitGroup
+}
+
+// NewServer - Create new object for parallel processing handling
+func NewServer() Server {
+ if !cpuid.CPU.Supports(cpuid.AVX2) {
+ return &fallbackServer{}
+ }
+ md5srv := &md5Server{}
+ md5srv.digests = make(map[uint64][Size]byte)
+ md5srv.newInput = make(chan newClient, Lanes)
+ md5srv.cycle = make(chan uint64, Lanes*10)
+ md5srv.uidCounter = md5ServerUID - 1
+ md5srv.allBufs = make([]byte, 32+buffersPerLane*Lanes*internalBlockSize)
+ md5srv.buffers = make(chan []byte, buffersPerLane*Lanes)
+ // Fill buffers.
+ for i := 0; i < buffersPerLane*Lanes; i++ {
+ s := 32 + i*internalBlockSize
+ md5srv.buffers <- md5srv.allBufs[s : s+internalBlockSize : s+internalBlockSize]
+ }
+
+ // Start a single thread for reading from the input channel
+ go md5srv.process(md5srv.newInput)
+ return md5srv
+}
+
+type newClient struct {
+ uid uint64
+ input chan blockInput
+}
+
+// process - Sole handler for reading from the input channel.
+func (s *md5Server) process(newClients chan newClient) {
+ // To fill up as many lanes as possible:
+ //
+ // 1. Wait for a cycle id.
+ // 2. If not already in a lane, add, otherwise leave on channel
+ // 3. Start timer
+ // 4. Check if lanes is full, if so, goto 10 (process).
+ // 5. If timeout, goto 10.
+ // 6. Wait for new id (goto 2) or timeout (goto 10).
+ // 10. Process.
+ // 11. Check all input if there is already input, if so add to lanes.
+ // 12. Goto 1
+
+ // lanes contains the lanes.
+ var lanes lanesInfo
+ // lanesFilled contains the number of filled lanes for current cycle.
+ var lanesFilled int
+ // clients contains active clients
+ var clients = make(map[uint64]chan blockInput, Lanes)
+
+ addToLane := func(uid uint64) {
+ cl, ok := clients[uid]
+ if !ok {
+ // Unknown client. Maybe it was already removed.
+ return
+ }
+ // Check if we already have it.
+ for _, lane := range lanes[:lanesFilled] {
+ if lane.uid == uid {
+ return
+ }
+ }
+ // Continue until we get a block or there is nothing on channel
+ for {
+ select {
+ case block, ok := <-cl:
+ if !ok {
+ // Client disconnected
+ delete(clients, block.uid)
+ return
+ }
+ if block.uid != uid {
+ panic(fmt.Errorf("uid mismatch, %d (block) != %d (client)", block.uid, uid))
+ }
+ // If reset message, reset and we're done
+ if block.reset {
+ delete(s.digests, uid)
+ continue
+ }
+
+ // If requesting sum, we will need to maintain state.
+ if block.sumCh != nil {
+ var dig digest
+ d, ok := s.digests[uid]
+ if ok {
+ dig.s[0] = binary.LittleEndian.Uint32(d[0:4])
+ dig.s[1] = binary.LittleEndian.Uint32(d[4:8])
+ dig.s[2] = binary.LittleEndian.Uint32(d[8:12])
+ dig.s[3] = binary.LittleEndian.Uint32(d[12:16])
+ } else {
+ dig.s[0], dig.s[1], dig.s[2], dig.s[3] = init0, init1, init2, init3
+ }
+
+ sum := sumResult{}
+ // Add end block to current digest.
+ blockScalar(&dig.s, block.msg)
+
+ binary.LittleEndian.PutUint32(sum.digest[0:], dig.s[0])
+ binary.LittleEndian.PutUint32(sum.digest[4:], dig.s[1])
+ binary.LittleEndian.PutUint32(sum.digest[8:], dig.s[2])
+ binary.LittleEndian.PutUint32(sum.digest[12:], dig.s[3])
+ block.sumCh <- sum
+ if block.msg != nil {
+ s.buffers <- block.msg
+ }
+ continue
+ }
+ if len(block.msg) == 0 {
+ continue
+ }
+ lanes[lanesFilled] = block
+ lanesFilled++
+ return
+ default:
+ return
+ }
+ }
+ }
+ addNewClient := func(cl newClient) {
+ if _, ok := clients[cl.uid]; ok {
+ panic("internal error: duplicate client registration")
+ }
+ clients[cl.uid] = cl.input
+ }
+
+ allLanesFilled := func() bool {
+ return lanesFilled == Lanes || lanesFilled >= len(clients)
+ }
+
+ for {
+ // Step 1.
+ for lanesFilled == 0 {
+ select {
+ case cl, ok := <-newClients:
+ if !ok {
+ return
+ }
+ addNewClient(cl)
+ // Check if it already sent a payload.
+ addToLane(cl.uid)
+ continue
+ case uid := <-s.cycle:
+ addToLane(uid)
+ }
+ }
+
+ fillLanes:
+ for !allLanesFilled() {
+ select {
+ case cl, ok := <-newClients:
+ if !ok {
+ return
+ }
+ addNewClient(cl)
+
+ case uid := <-s.cycle:
+ addToLane(uid)
+ default:
+ // Nothing more queued...
+ break fillLanes
+ }
+ }
+
+ // If we did not fill all lanes, check if there is more waiting
+ if !allLanesFilled() {
+ runtime.Gosched()
+ for uid := range clients {
+ addToLane(uid)
+ if allLanesFilled() {
+ break
+ }
+ }
+ }
+ if false {
+ if !allLanesFilled() {
+ fmt.Println("Not all lanes filled", lanesFilled, "of", len(clients))
+ //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+ } else if true {
+ fmt.Println("all lanes filled")
+ }
+ }
+ // Process the lanes we could collect
+ s.blocks(lanes[:lanesFilled])
+
+ // Clear lanes...
+ lanesFilled = 0
+ // Add all current queued
+ for uid := range clients {
+ addToLane(uid)
+ if allLanesFilled() {
+ break
+ }
+ }
+ }
+}
+
+func (s *md5Server) Close() {
+ if s.newInput != nil {
+ close(s.newInput)
+ s.newInput = nil
+ }
+}
+
+// Invoke assembly and send results back
+func (s *md5Server) blocks(lanes []blockInput) {
+ if len(lanes) < useScalarBelow {
+ // Use scalar routine when below this many lanes
+ switch len(lanes) {
+ case 0:
+ case 1:
+ lane := lanes[0]
+ var d digest
+ a, ok := s.digests[lane.uid]
+ if ok {
+ d.s[0] = binary.LittleEndian.Uint32(a[0:4])
+ d.s[1] = binary.LittleEndian.Uint32(a[4:8])
+ d.s[2] = binary.LittleEndian.Uint32(a[8:12])
+ d.s[3] = binary.LittleEndian.Uint32(a[12:16])
+ } else {
+ d.s[0] = init0
+ d.s[1] = init1
+ d.s[2] = init2
+ d.s[3] = init3
+ }
+ if len(lane.msg) > 0 {
+ // Update...
+ blockScalar(&d.s, lane.msg)
+ }
+ dig := [Size]byte{}
+ binary.LittleEndian.PutUint32(dig[0:], d.s[0])
+ binary.LittleEndian.PutUint32(dig[4:], d.s[1])
+ binary.LittleEndian.PutUint32(dig[8:], d.s[2])
+ binary.LittleEndian.PutUint32(dig[12:], d.s[3])
+ s.digests[lane.uid] = dig
+
+ if lane.msg != nil {
+ s.buffers <- lane.msg
+ }
+ lanes[0] = blockInput{}
+
+ default:
+ s.wg.Add(len(lanes))
+ var results [useScalarBelow]digest
+ for i := range lanes {
+ lane := lanes[i]
+ go func(i int) {
+ var d digest
+ defer s.wg.Done()
+ a, ok := s.digests[lane.uid]
+ if ok {
+ d.s[0] = binary.LittleEndian.Uint32(a[0:4])
+ d.s[1] = binary.LittleEndian.Uint32(a[4:8])
+ d.s[2] = binary.LittleEndian.Uint32(a[8:12])
+ d.s[3] = binary.LittleEndian.Uint32(a[12:16])
+ } else {
+ d.s[0] = init0
+ d.s[1] = init1
+ d.s[2] = init2
+ d.s[3] = init3
+ }
+ if len(lane.msg) == 0 {
+ results[i] = d
+ return
+ }
+ // Update...
+ blockScalar(&d.s, lane.msg)
+ results[i] = d
+ }(i)
+ }
+ s.wg.Wait()
+ for i, lane := range lanes {
+ dig := [Size]byte{}
+ binary.LittleEndian.PutUint32(dig[0:], results[i].s[0])
+ binary.LittleEndian.PutUint32(dig[4:], results[i].s[1])
+ binary.LittleEndian.PutUint32(dig[8:], results[i].s[2])
+ binary.LittleEndian.PutUint32(dig[12:], results[i].s[3])
+ s.digests[lane.uid] = dig
+
+ if lane.msg != nil {
+ s.buffers <- lane.msg
+ }
+ lanes[i] = blockInput{}
+ }
+ }
+ return
+ }
+
+ inputs := [16][]byte{}
+ for i := range lanes {
+ inputs[i] = lanes[i].msg
+ }
+
+ // Collect active digests...
+ state := s.getDigests(lanes)
+ // Process all lanes...
+ s.blockMd5_x16(&state, inputs, len(lanes) <= 8)
+
+ for i, lane := range lanes {
+ uid := lane.uid
+ dig := [Size]byte{}
+ binary.LittleEndian.PutUint32(dig[0:], state.v0[i])
+ binary.LittleEndian.PutUint32(dig[4:], state.v1[i])
+ binary.LittleEndian.PutUint32(dig[8:], state.v2[i])
+ binary.LittleEndian.PutUint32(dig[12:], state.v3[i])
+
+ s.digests[uid] = dig
+ if lane.msg != nil {
+ s.buffers <- lane.msg
+ }
+ lanes[i] = blockInput{}
+ }
+}
+
+func (s *md5Server) getDigests(lanes []blockInput) (d digest16) {
+ for i, lane := range lanes {
+ a, ok := s.digests[lane.uid]
+ if ok {
+ d.v0[i] = binary.LittleEndian.Uint32(a[0:4])
+ d.v1[i] = binary.LittleEndian.Uint32(a[4:8])
+ d.v2[i] = binary.LittleEndian.Uint32(a[8:12])
+ d.v3[i] = binary.LittleEndian.Uint32(a[12:16])
+ } else {
+ d.v0[i] = init0
+ d.v1[i] = init1
+ d.v2[i] = init2
+ d.v3[i] = init3
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/minio/md5-simd/md5-server_fallback.go b/vendor/github.com/minio/md5-simd/md5-server_fallback.go
new file mode 100644
index 000000000..7814dada3
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-server_fallback.go
@@ -0,0 +1,12 @@
+//+build !amd64 appengine !gc noasm
+
+// Copyright (c) 2020 MinIO Inc. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+package md5simd
+
+// NewServer - Create new object for parallel processing handling
+func NewServer() *fallbackServer {
+ return &fallbackServer{}
+}
diff --git a/vendor/github.com/minio/md5-simd/md5-util_amd64.go b/vendor/github.com/minio/md5-simd/md5-util_amd64.go
new file mode 100644
index 000000000..73981b0eb
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5-util_amd64.go
@@ -0,0 +1,85 @@
+//+build !noasm,!appengine,gc
+
+// Copyright (c) 2020 MinIO Inc. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+package md5simd
+
+// Helper struct for sorting blocks based on length
+type lane struct {
+ len uint
+ pos uint
+}
+
+type digest struct {
+ s [4]uint32
+}
+
+// Helper struct for generating number of rounds in combination with mask for valid lanes
+type maskRounds struct {
+ mask uint64
+ rounds uint64
+}
+
+func generateMaskAndRounds8(input [8][]byte, mr *[8]maskRounds) (rounds int) {
+ // Sort on blocks length small to large
+ var sorted [8]lane
+ for c, inpt := range input[:] {
+ sorted[c] = lane{uint(len(inpt)), uint(c)}
+ for i := c - 1; i >= 0; i-- {
+ // swap so largest is at the end...
+ if sorted[i].len > sorted[i+1].len {
+ sorted[i], sorted[i+1] = sorted[i+1], sorted[i]
+ continue
+ }
+ break
+ }
+ }
+
+ // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks
+ m, round := uint64(0xff), uint64(0)
+
+ for _, s := range sorted[:] {
+ if s.len > 0 {
+ if uint64(s.len)>>6 > round {
+ mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round}
+ rounds++
+ }
+ round = uint64(s.len) >> 6
+ }
+ m = m & ^(1 << uint(s.pos))
+ }
+ return
+}
+
+func generateMaskAndRounds16(input [16][]byte, mr *[16]maskRounds) (rounds int) {
+ // Sort on blocks length small to large
+ var sorted [16]lane
+ for c, inpt := range input[:] {
+ sorted[c] = lane{uint(len(inpt)), uint(c)}
+ for i := c - 1; i >= 0; i-- {
+ // swap so largest is at the end...
+ if sorted[i].len > sorted[i+1].len {
+ sorted[i], sorted[i+1] = sorted[i+1], sorted[i]
+ continue
+ }
+ break
+ }
+ }
+
+ // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks
+ m, round := uint64(0xffff), uint64(0)
+
+ for _, s := range sorted[:] {
+ if s.len > 0 {
+ if uint64(s.len)>>6 > round {
+ mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round}
+ rounds++
+ }
+ round = uint64(s.len) >> 6
+ }
+ m = m & ^(1 << uint(s.pos))
+ }
+ return
+}
diff --git a/vendor/github.com/minio/md5-simd/md5.go b/vendor/github.com/minio/md5-simd/md5.go
new file mode 100644
index 000000000..11b0cb962
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5.go
@@ -0,0 +1,63 @@
+package md5simd
+
+import (
+ "crypto/md5"
+ "hash"
+ "sync"
+)
+
+const (
+ // The blocksize of MD5 in bytes.
+ BlockSize = 64
+
+ // The size of an MD5 checksum in bytes.
+ Size = 16
+
+ // internalBlockSize is the internal block size.
+ internalBlockSize = 32 << 10
+)
+
+type Server interface {
+ NewHash() Hasher
+ Close()
+}
+
+type Hasher interface {
+ hash.Hash
+ Close()
+}
+
+// StdlibHasher returns a Hasher that uses the stdlib for hashing.
+// Used hashers are stored in a pool for fast reuse.
+func StdlibHasher() Hasher {
+ return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)}
+}
+
+// md5Wrapper is a wrapper around the builtin hasher.
+type md5Wrapper struct {
+ hash.Hash
+}
+
+var md5Pool = sync.Pool{New: func() interface{} {
+ return md5.New()
+}}
+
+// fallbackServer - Fallback when no assembly is available.
+type fallbackServer struct {
+}
+
+// NewHash -- return regular Golang md5 hashing from crypto
+func (s *fallbackServer) NewHash() Hasher {
+ return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)}
+}
+
+func (s *fallbackServer) Close() {
+}
+
+func (m *md5Wrapper) Close() {
+ if m.Hash != nil {
+ m.Reset()
+ md5Pool.Put(m.Hash)
+ m.Hash = nil
+ }
+}
diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.go b/vendor/github.com/minio/md5-simd/md5block_amd64.go
new file mode 100644
index 000000000..4c2793662
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5block_amd64.go
@@ -0,0 +1,11 @@
+// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT.
+
+// +build !appengine
+// +build !noasm
+// +build gc
+
+package md5simd
+
+// Encode p to digest
+//go:noescape
+func blockScalar(dig *[4]uint32, p []byte)
diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.s b/vendor/github.com/minio/md5-simd/md5block_amd64.s
new file mode 100644
index 000000000..fbc4a21f2
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/md5block_amd64.s
@@ -0,0 +1,714 @@
+// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT.
+
+// +build !appengine
+// +build !noasm
+// +build gc
+
+// func blockScalar(dig *[4]uint32, p []byte)
+TEXT ·blockScalar(SB), $0-32
+ MOVQ p_len+16(FP), AX
+ MOVQ dig+0(FP), CX
+ MOVQ p_base+8(FP), DX
+ SHRQ $0x06, AX
+ SHLQ $0x06, AX
+ LEAQ (DX)(AX*1), AX
+ CMPQ DX, AX
+ JEQ end
+ MOVL (CX), BX
+ MOVL 4(CX), BP
+ MOVL 8(CX), SI
+ MOVL 12(CX), CX
+ MOVL $0xffffffff, DI
+
+loop:
+ MOVL (DX), R8
+ MOVL CX, R9
+ MOVL BX, R10
+ MOVL BP, R11
+ MOVL SI, R12
+ MOVL CX, R13
+
+ // ROUND1
+ XORL SI, R9
+ ADDL $0xd76aa478, BX
+ ADDL R8, BX
+ ANDL BP, R9
+ XORL CX, R9
+ MOVL 4(DX), R8
+ ADDL R9, BX
+ ROLL $0x07, BX
+ MOVL SI, R9
+ ADDL BP, BX
+ XORL BP, R9
+ ADDL $0xe8c7b756, CX
+ ADDL R8, CX
+ ANDL BX, R9
+ XORL SI, R9
+ MOVL 8(DX), R8
+ ADDL R9, CX
+ ROLL $0x0c, CX
+ MOVL BP, R9
+ ADDL BX, CX
+ XORL BX, R9
+ ADDL $0x242070db, SI
+ ADDL R8, SI
+ ANDL CX, R9
+ XORL BP, R9
+ MOVL 12(DX), R8
+ ADDL R9, SI
+ ROLL $0x11, SI
+ MOVL BX, R9
+ ADDL CX, SI
+ XORL CX, R9
+ ADDL $0xc1bdceee, BP
+ ADDL R8, BP
+ ANDL SI, R9
+ XORL BX, R9
+ MOVL 16(DX), R8
+ ADDL R9, BP
+ ROLL $0x16, BP
+ MOVL CX, R9
+ ADDL SI, BP
+ XORL SI, R9
+ ADDL $0xf57c0faf, BX
+ ADDL R8, BX
+ ANDL BP, R9
+ XORL CX, R9
+ MOVL 20(DX), R8
+ ADDL R9, BX
+ ROLL $0x07, BX
+ MOVL SI, R9
+ ADDL BP, BX
+ XORL BP, R9
+ ADDL $0x4787c62a, CX
+ ADDL R8, CX
+ ANDL BX, R9
+ XORL SI, R9
+ MOVL 24(DX), R8
+ ADDL R9, CX
+ ROLL $0x0c, CX
+ MOVL BP, R9
+ ADDL BX, CX
+ XORL BX, R9
+ ADDL $0xa8304613, SI
+ ADDL R8, SI
+ ANDL CX, R9
+ XORL BP, R9
+ MOVL 28(DX), R8
+ ADDL R9, SI
+ ROLL $0x11, SI
+ MOVL BX, R9
+ ADDL CX, SI
+ XORL CX, R9
+ ADDL $0xfd469501, BP
+ ADDL R8, BP
+ ANDL SI, R9
+ XORL BX, R9
+ MOVL 32(DX), R8
+ ADDL R9, BP
+ ROLL $0x16, BP
+ MOVL CX, R9
+ ADDL SI, BP
+ XORL SI, R9
+ ADDL $0x698098d8, BX
+ ADDL R8, BX
+ ANDL BP, R9
+ XORL CX, R9
+ MOVL 36(DX), R8
+ ADDL R9, BX
+ ROLL $0x07, BX
+ MOVL SI, R9
+ ADDL BP, BX
+ XORL BP, R9
+ ADDL $0x8b44f7af, CX
+ ADDL R8, CX
+ ANDL BX, R9
+ XORL SI, R9
+ MOVL 40(DX), R8
+ ADDL R9, CX
+ ROLL $0x0c, CX
+ MOVL BP, R9
+ ADDL BX, CX
+ XORL BX, R9
+ ADDL $0xffff5bb1, SI
+ ADDL R8, SI
+ ANDL CX, R9
+ XORL BP, R9
+ MOVL 44(DX), R8
+ ADDL R9, SI
+ ROLL $0x11, SI
+ MOVL BX, R9
+ ADDL CX, SI
+ XORL CX, R9
+ ADDL $0x895cd7be, BP
+ ADDL R8, BP
+ ANDL SI, R9
+ XORL BX, R9
+ MOVL 48(DX), R8
+ ADDL R9, BP
+ ROLL $0x16, BP
+ MOVL CX, R9
+ ADDL SI, BP
+ XORL SI, R9
+ ADDL $0x6b901122, BX
+ ADDL R8, BX
+ ANDL BP, R9
+ XORL CX, R9
+ MOVL 52(DX), R8
+ ADDL R9, BX
+ ROLL $0x07, BX
+ MOVL SI, R9
+ ADDL BP, BX
+ XORL BP, R9
+ ADDL $0xfd987193, CX
+ ADDL R8, CX
+ ANDL BX, R9
+ XORL SI, R9
+ MOVL 56(DX), R8
+ ADDL R9, CX
+ ROLL $0x0c, CX
+ MOVL BP, R9
+ ADDL BX, CX
+ XORL BX, R9
+ ADDL $0xa679438e, SI
+ ADDL R8, SI
+ ANDL CX, R9
+ XORL BP, R9
+ MOVL 60(DX), R8
+ ADDL R9, SI
+ ROLL $0x11, SI
+ MOVL BX, R9
+ ADDL CX, SI
+ XORL CX, R9
+ ADDL $0x49b40821, BP
+ ADDL R8, BP
+ ANDL SI, R9
+ XORL BX, R9
+ MOVL 4(DX), R8
+ ADDL R9, BP
+ ROLL $0x16, BP
+ MOVL CX, R9
+ ADDL SI, BP
+
+ // ROUND2
+ MOVL CX, R9
+ MOVL CX, R14
+ XORL DI, R9
+ ADDL $0xf61e2562, BX
+ ADDL R8, BX
+ ANDL BP, R14
+ ANDL SI, R9
+ MOVL 24(DX), R8
+ ORL R9, R14
+ MOVL SI, R9
+ ADDL R14, BX
+ MOVL SI, R14
+ ROLL $0x05, BX
+ ADDL BP, BX
+ XORL DI, R9
+ ADDL $0xc040b340, CX
+ ADDL R8, CX
+ ANDL BX, R14
+ ANDL BP, R9
+ MOVL 44(DX), R8
+ ORL R9, R14
+ MOVL BP, R9
+ ADDL R14, CX
+ MOVL BP, R14
+ ROLL $0x09, CX
+ ADDL BX, CX
+ XORL DI, R9
+ ADDL $0x265e5a51, SI
+ ADDL R8, SI
+ ANDL CX, R14
+ ANDL BX, R9
+ MOVL (DX), R8
+ ORL R9, R14
+ MOVL BX, R9
+ ADDL R14, SI
+ MOVL BX, R14
+ ROLL $0x0e, SI
+ ADDL CX, SI
+ XORL DI, R9
+ ADDL $0xe9b6c7aa, BP
+ ADDL R8, BP
+ ANDL SI, R14
+ ANDL CX, R9
+ MOVL 20(DX), R8
+ ORL R9, R14
+ MOVL CX, R9
+ ADDL R14, BP
+ MOVL CX, R14
+ ROLL $0x14, BP
+ ADDL SI, BP
+ XORL DI, R9
+ ADDL $0xd62f105d, BX
+ ADDL R8, BX
+ ANDL BP, R14
+ ANDL SI, R9
+ MOVL 40(DX), R8
+ ORL R9, R14
+ MOVL SI, R9
+ ADDL R14, BX
+ MOVL SI, R14
+ ROLL $0x05, BX
+ ADDL BP, BX
+ XORL DI, R9
+ ADDL $0x02441453, CX
+ ADDL R8, CX
+ ANDL BX, R14
+ ANDL BP, R9
+ MOVL 60(DX), R8
+ ORL R9, R14
+ MOVL BP, R9
+ ADDL R14, CX
+ MOVL BP, R14
+ ROLL $0x09, CX
+ ADDL BX, CX
+ XORL DI, R9
+ ADDL $0xd8a1e681, SI
+ ADDL R8, SI
+ ANDL CX, R14
+ ANDL BX, R9
+ MOVL 16(DX), R8
+ ORL R9, R14
+ MOVL BX, R9
+ ADDL R14, SI
+ MOVL BX, R14
+ ROLL $0x0e, SI
+ ADDL CX, SI
+ XORL DI, R9
+ ADDL $0xe7d3fbc8, BP
+ ADDL R8, BP
+ ANDL SI, R14
+ ANDL CX, R9
+ MOVL 36(DX), R8
+ ORL R9, R14
+ MOVL CX, R9
+ ADDL R14, BP
+ MOVL CX, R14
+ ROLL $0x14, BP
+ ADDL SI, BP
+ XORL DI, R9
+ ADDL $0x21e1cde6, BX
+ ADDL R8, BX
+ ANDL BP, R14
+ ANDL SI, R9
+ MOVL 56(DX), R8
+ ORL R9, R14
+ MOVL SI, R9
+ ADDL R14, BX
+ MOVL SI, R14
+ ROLL $0x05, BX
+ ADDL BP, BX
+ XORL DI, R9
+ ADDL $0xc33707d6, CX
+ ADDL R8, CX
+ ANDL BX, R14
+ ANDL BP, R9
+ MOVL 12(DX), R8
+ ORL R9, R14
+ MOVL BP, R9
+ ADDL R14, CX
+ MOVL BP, R14
+ ROLL $0x09, CX
+ ADDL BX, CX
+ XORL DI, R9
+ ADDL $0xf4d50d87, SI
+ ADDL R8, SI
+ ANDL CX, R14
+ ANDL BX, R9
+ MOVL 32(DX), R8
+ ORL R9, R14
+ MOVL BX, R9
+ ADDL R14, SI
+ MOVL BX, R14
+ ROLL $0x0e, SI
+ ADDL CX, SI
+ XORL DI, R9
+ ADDL $0x455a14ed, BP
+ ADDL R8, BP
+ ANDL SI, R14
+ ANDL CX, R9
+ MOVL 52(DX), R8
+ ORL R9, R14
+ MOVL CX, R9
+ ADDL R14, BP
+ MOVL CX, R14
+ ROLL $0x14, BP
+ ADDL SI, BP
+ XORL DI, R9
+ ADDL $0xa9e3e905, BX
+ ADDL R8, BX
+ ANDL BP, R14
+ ANDL SI, R9
+ MOVL 8(DX), R8
+ ORL R9, R14
+ MOVL SI, R9
+ ADDL R14, BX
+ MOVL SI, R14
+ ROLL $0x05, BX
+ ADDL BP, BX
+ XORL DI, R9
+ ADDL $0xfcefa3f8, CX
+ ADDL R8, CX
+ ANDL BX, R14
+ ANDL BP, R9
+ MOVL 28(DX), R8
+ ORL R9, R14
+ MOVL BP, R9
+ ADDL R14, CX
+ MOVL BP, R14
+ ROLL $0x09, CX
+ ADDL BX, CX
+ XORL DI, R9
+ ADDL $0x676f02d9, SI
+ ADDL R8, SI
+ ANDL CX, R14
+ ANDL BX, R9
+ MOVL 48(DX), R8
+ ORL R9, R14
+ MOVL BX, R9
+ ADDL R14, SI
+ MOVL BX, R14
+ ROLL $0x0e, SI
+ ADDL CX, SI
+ XORL DI, R9
+ ADDL $0x8d2a4c8a, BP
+ ADDL R8, BP
+ ANDL SI, R14
+ ANDL CX, R9
+ MOVL 20(DX), R8
+ ORL R9, R14
+ MOVL CX, R9
+ ADDL R14, BP
+ MOVL CX, R14
+ ROLL $0x14, BP
+ ADDL SI, BP
+
+ // ROUND3
+ MOVL SI, R9
+ ADDL $0xfffa3942, BX
+ ADDL R8, BX
+ MOVL 32(DX), R8
+ XORL CX, R9
+ XORL BP, R9
+ ADDL R9, BX
+ ROLL $0x04, BX
+ MOVL BP, R9
+ ADDL BP, BX
+ ADDL $0x8771f681, CX
+ ADDL R8, CX
+ MOVL 44(DX), R8
+ XORL SI, R9
+ XORL BX, R9
+ ADDL R9, CX
+ ROLL $0x0b, CX
+ MOVL BX, R9
+ ADDL BX, CX
+ ADDL $0x6d9d6122, SI
+ ADDL R8, SI
+ MOVL 56(DX), R8
+ XORL BP, R9
+ XORL CX, R9
+ ADDL R9, SI
+ ROLL $0x10, SI
+ MOVL CX, R9
+ ADDL CX, SI
+ ADDL $0xfde5380c, BP
+ ADDL R8, BP
+ MOVL 4(DX), R8
+ XORL BX, R9
+ XORL SI, R9
+ ADDL R9, BP
+ ROLL $0x17, BP
+ MOVL SI, R9
+ ADDL SI, BP
+ ADDL $0xa4beea44, BX
+ ADDL R8, BX
+ MOVL 16(DX), R8
+ XORL CX, R9
+ XORL BP, R9
+ ADDL R9, BX
+ ROLL $0x04, BX
+ MOVL BP, R9
+ ADDL BP, BX
+ ADDL $0x4bdecfa9, CX
+ ADDL R8, CX
+ MOVL 28(DX), R8
+ XORL SI, R9
+ XORL BX, R9
+ ADDL R9, CX
+ ROLL $0x0b, CX
+ MOVL BX, R9
+ ADDL BX, CX
+ ADDL $0xf6bb4b60, SI
+ ADDL R8, SI
+ MOVL 40(DX), R8
+ XORL BP, R9
+ XORL CX, R9
+ ADDL R9, SI
+ ROLL $0x10, SI
+ MOVL CX, R9
+ ADDL CX, SI
+ ADDL $0xbebfbc70, BP
+ ADDL R8, BP
+ MOVL 52(DX), R8
+ XORL BX, R9
+ XORL SI, R9
+ ADDL R9, BP
+ ROLL $0x17, BP
+ MOVL SI, R9
+ ADDL SI, BP
+ ADDL $0x289b7ec6, BX
+ ADDL R8, BX
+ MOVL (DX), R8
+ XORL CX, R9
+ XORL BP, R9
+ ADDL R9, BX
+ ROLL $0x04, BX
+ MOVL BP, R9
+ ADDL BP, BX
+ ADDL $0xeaa127fa, CX
+ ADDL R8, CX
+ MOVL 12(DX), R8
+ XORL SI, R9
+ XORL BX, R9
+ ADDL R9, CX
+ ROLL $0x0b, CX
+ MOVL BX, R9
+ ADDL BX, CX
+ ADDL $0xd4ef3085, SI
+ ADDL R8, SI
+ MOVL 24(DX), R8
+ XORL BP, R9
+ XORL CX, R9
+ ADDL R9, SI
+ ROLL $0x10, SI
+ MOVL CX, R9
+ ADDL CX, SI
+ ADDL $0x04881d05, BP
+ ADDL R8, BP
+ MOVL 36(DX), R8
+ XORL BX, R9
+ XORL SI, R9
+ ADDL R9, BP
+ ROLL $0x17, BP
+ MOVL SI, R9
+ ADDL SI, BP
+ ADDL $0xd9d4d039, BX
+ ADDL R8, BX
+ MOVL 48(DX), R8
+ XORL CX, R9
+ XORL BP, R9
+ ADDL R9, BX
+ ROLL $0x04, BX
+ MOVL BP, R9
+ ADDL BP, BX
+ ADDL $0xe6db99e5, CX
+ ADDL R8, CX
+ MOVL 60(DX), R8
+ XORL SI, R9
+ XORL BX, R9
+ ADDL R9, CX
+ ROLL $0x0b, CX
+ MOVL BX, R9
+ ADDL BX, CX
+ ADDL $0x1fa27cf8, SI
+ ADDL R8, SI
+ MOVL 8(DX), R8
+ XORL BP, R9
+ XORL CX, R9
+ ADDL R9, SI
+ ROLL $0x10, SI
+ MOVL CX, R9
+ ADDL CX, SI
+ ADDL $0xc4ac5665, BP
+ ADDL R8, BP
+ MOVL (DX), R8
+ XORL BX, R9
+ XORL SI, R9
+ ADDL R9, BP
+ ROLL $0x17, BP
+ MOVL SI, R9
+ ADDL SI, BP
+
+ // ROUND4
+ MOVL DI, R9
+ XORL CX, R9
+ ADDL $0xf4292244, BX
+ ADDL R8, BX
+ ORL BP, R9
+ XORL SI, R9
+ ADDL R9, BX
+ MOVL 28(DX), R8
+ MOVL DI, R9
+ ROLL $0x06, BX
+ XORL SI, R9
+ ADDL BP, BX
+ ADDL $0x432aff97, CX
+ ADDL R8, CX
+ ORL BX, R9
+ XORL BP, R9
+ ADDL R9, CX
+ MOVL 56(DX), R8
+ MOVL DI, R9
+ ROLL $0x0a, CX
+ XORL BP, R9
+ ADDL BX, CX
+ ADDL $0xab9423a7, SI
+ ADDL R8, SI
+ ORL CX, R9
+ XORL BX, R9
+ ADDL R9, SI
+ MOVL 20(DX), R8
+ MOVL DI, R9
+ ROLL $0x0f, SI
+ XORL BX, R9
+ ADDL CX, SI
+ ADDL $0xfc93a039, BP
+ ADDL R8, BP
+ ORL SI, R9
+ XORL CX, R9
+ ADDL R9, BP
+ MOVL 48(DX), R8
+ MOVL DI, R9
+ ROLL $0x15, BP
+ XORL CX, R9
+ ADDL SI, BP
+ ADDL $0x655b59c3, BX
+ ADDL R8, BX
+ ORL BP, R9
+ XORL SI, R9
+ ADDL R9, BX
+ MOVL 12(DX), R8
+ MOVL DI, R9
+ ROLL $0x06, BX
+ XORL SI, R9
+ ADDL BP, BX
+ ADDL $0x8f0ccc92, CX
+ ADDL R8, CX
+ ORL BX, R9
+ XORL BP, R9
+ ADDL R9, CX
+ MOVL 40(DX), R8
+ MOVL DI, R9
+ ROLL $0x0a, CX
+ XORL BP, R9
+ ADDL BX, CX
+ ADDL $0xffeff47d, SI
+ ADDL R8, SI
+ ORL CX, R9
+ XORL BX, R9
+ ADDL R9, SI
+ MOVL 4(DX), R8
+ MOVL DI, R9
+ ROLL $0x0f, SI
+ XORL BX, R9
+ ADDL CX, SI
+ ADDL $0x85845dd1, BP
+ ADDL R8, BP
+ ORL SI, R9
+ XORL CX, R9
+ ADDL R9, BP
+ MOVL 32(DX), R8
+ MOVL DI, R9
+ ROLL $0x15, BP
+ XORL CX, R9
+ ADDL SI, BP
+ ADDL $0x6fa87e4f, BX
+ ADDL R8, BX
+ ORL BP, R9
+ XORL SI, R9
+ ADDL R9, BX
+ MOVL 60(DX), R8
+ MOVL DI, R9
+ ROLL $0x06, BX
+ XORL SI, R9
+ ADDL BP, BX
+ ADDL $0xfe2ce6e0, CX
+ ADDL R8, CX
+ ORL BX, R9
+ XORL BP, R9
+ ADDL R9, CX
+ MOVL 24(DX), R8
+ MOVL DI, R9
+ ROLL $0x0a, CX
+ XORL BP, R9
+ ADDL BX, CX
+ ADDL $0xa3014314, SI
+ ADDL R8, SI
+ ORL CX, R9
+ XORL BX, R9
+ ADDL R9, SI
+ MOVL 52(DX), R8
+ MOVL DI, R9
+ ROLL $0x0f, SI
+ XORL BX, R9
+ ADDL CX, SI
+ ADDL $0x4e0811a1, BP
+ ADDL R8, BP
+ ORL SI, R9
+ XORL CX, R9
+ ADDL R9, BP
+ MOVL 16(DX), R8
+ MOVL DI, R9
+ ROLL $0x15, BP
+ XORL CX, R9
+ ADDL SI, BP
+ ADDL $0xf7537e82, BX
+ ADDL R8, BX
+ ORL BP, R9
+ XORL SI, R9
+ ADDL R9, BX
+ MOVL 44(DX), R8
+ MOVL DI, R9
+ ROLL $0x06, BX
+ XORL SI, R9
+ ADDL BP, BX
+ ADDL $0xbd3af235, CX
+ ADDL R8, CX
+ ORL BX, R9
+ XORL BP, R9
+ ADDL R9, CX
+ MOVL 8(DX), R8
+ MOVL DI, R9
+ ROLL $0x0a, CX
+ XORL BP, R9
+ ADDL BX, CX
+ ADDL $0x2ad7d2bb, SI
+ ADDL R8, SI
+ ORL CX, R9
+ XORL BX, R9
+ ADDL R9, SI
+ MOVL 36(DX), R8
+ MOVL DI, R9
+ ROLL $0x0f, SI
+ XORL BX, R9
+ ADDL CX, SI
+ ADDL $0xeb86d391, BP
+ ADDL R8, BP
+ ORL SI, R9
+ XORL CX, R9
+ ADDL R9, BP
+ ROLL $0x15, BP
+ ADDL SI, BP
+ ADDL R10, BX
+ ADDL R11, BP
+ ADDL R12, SI
+ ADDL R13, CX
+
+ // Prepare next loop
+ ADDQ $0x40, DX
+ CMPQ DX, AX
+ JB loop
+
+ // Write output
+ MOVQ dig+0(FP), AX
+ MOVL BX, (AX)
+ MOVL BP, 4(AX)
+ MOVL SI, 8(AX)
+ MOVL CX, 12(AX)
+
+end:
+ RET
diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore
new file mode 100644
index 000000000..8ae0384eb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.gitignore
@@ -0,0 +1,6 @@
+*~
+*.test
+validator
+golangci-lint
+functional_tests
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml
new file mode 100644
index 000000000..875b949c6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml
@@ -0,0 +1,27 @@
+linters-settings:
+ misspell:
+ locale: US
+
+linters:
+ disable-all: true
+ enable:
+ - typecheck
+ - goimports
+ - misspell
+ - revive
+ - govet
+ - ineffassign
+ - gosimple
+ - unused
+ - gocritic
+
+issues:
+ exclude-use-default: false
+ exclude:
+ # todo fix these when we get enough time.
+ - "singleCaseSwitch: should rewrite switch statement to if statement"
+ - "unlambda: replace"
+ - "captLocal:"
+ - "ifElseChain:"
+ - "elseif:"
+ - "should have a package comment"
diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME
new file mode 100644
index 000000000..d365a7bb2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CNAME
@@ -0,0 +1 @@
+minio-go.min.io
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
new file mode 100644
index 000000000..24522ef75
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+### Developer Guidelines
+
+``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
+ - Fork it
+ - Create your feature branch (git checkout -b my-new-feature)
+ - Commit your changes (git commit -am 'Add some feature')
+ - Push to the branch (git push origin my-new-feature)
+ - Create new Pull Request
+
+* When you're ready to create a pull request, be sure to:
+ - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
+ - Run `go fmt`
+ - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
+ - Make sure `go test -race ./...` and `go build` completes.
+ NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables
+ ``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...``
+
+* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project
+ - `minio-go` project is strictly conformant with Golang style
+ - if you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/v7/CREDITS b/vendor/github.com/minio/minio-go/v7/CREDITS
new file mode 100644
index 000000000..154c9fd58
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CREDITS
@@ -0,0 +1,1101 @@
+Go (the standard library)
+https://golang.org/
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+github.com/davecgh/go-spew
+https://github.com/davecgh/go-spew
+----------------------------------------------------------------
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+================================================================
+
+github.com/dustin/go-humanize
+https://github.com/dustin/go-humanize
+----------------------------------------------------------------
+Copyright (c) 2005-2008 Dustin Sallings
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+================================================================
+
+github.com/goccy/go-json
+https://github.com/goccy/go-json
+----------------------------------------------------------------
+MIT License
+
+Copyright (c) 2020 Masaaki Goshima
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+================================================================
+
+github.com/google/uuid
+https://github.com/google/uuid
+----------------------------------------------------------------
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+github.com/klauspost/compress
+https://github.com/klauspost/compress
+----------------------------------------------------------------
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+
+Files: gzhttp/*
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-2017 The New York Times Company
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------
+
+Files: s2/cmd/internal/readahead/*
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---------------------
+Files: snappy/*
+Files: internal/snapref/*
+
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+================================================================
+
+github.com/klauspost/cpuid/v2
+https://github.com/klauspost/cpuid/v2
+----------------------------------------------------------------
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+================================================================
+
+github.com/minio/md5-simd
+https://github.com/minio/md5-simd
+----------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+================================================================
+
+github.com/pmezard/go-difflib
+https://github.com/pmezard/go-difflib
+----------------------------------------------------------------
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+ The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+github.com/rs/xid
+https://github.com/rs/xid
+----------------------------------------------------------------
+Copyright (c) 2015 Olivier Poitrey
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+================================================================
+
+github.com/stretchr/testify
+https://github.com/stretchr/testify
+----------------------------------------------------------------
+MIT License
+
+Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+================================================================
+
+golang.org/x/crypto
+https://golang.org/x/crypto
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+golang.org/x/net
+https://golang.org/x/net
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+golang.org/x/sys
+https://golang.org/x/sys
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+golang.org/x/text
+https://golang.org/x/text
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+gopkg.in/ini.v1
+https://gopkg.in/ini.v1
+----------------------------------------------------------------
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright 2014 Unknwon
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+================================================================
+
diff --git a/vendor/github.com/minio/minio-go/v7/LICENSE b/vendor/github.com/minio/minio-go/v7/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
new file mode 100644
index 000000000..f640dfb9f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
@@ -0,0 +1,35 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
+
+### Making new releases
+Tag and sign your release commit. Note that this step requires access to MinIO's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
+```
+
+### Update version
+Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
+
+```sh
+$ grep libraryVersion api.go
+ libraryVersion = "4.0.1"
+```
+
+Commit your changes
+```
+$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>"
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@min.io` account. Release notes require two sections, `highlights` and `changelog`: highlights is a bulleted list of salient features in the release, and changelog lists all commits since the last release.
+
+To generate `changelog`
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
+```
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
new file mode 100644
index 000000000..68444aa68
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -0,0 +1,38 @@
+GOPATH := $(shell go env GOPATH)
+TMPDIR := $(shell mktemp -d)
+
+all: checks
+
+.PHONY: examples docs
+
+checks: lint vet test examples functional-test
+
+lint:
+ @mkdir -p ${GOPATH}/bin
+ @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
+ @echo "Running $@ check"
+ @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
+ @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
+
+vet:
+ @GO111MODULE=on go vet ./...
+ @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
+ ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
+
+test:
+ @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
+
+examples:
+ @echo "Building s3 examples"
+ @cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
+ @echo "Building minio examples"
+ @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
+
+functional-test:
+ @GO111MODULE=on go build -race functional_tests.go
+ @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
+
+clean:
+ @echo "Cleaning up all the generated files"
+ @find . -name '*.test' | xargs rm -fv
+ @find . -name '*~' | xargs rm -fv
diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE
new file mode 100644
index 000000000..1e8fd3b92
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/NOTICE
@@ -0,0 +1,9 @@
+MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
+
+This product includes software developed at MinIO, Inc.
+(https://min.io/).
+
+The MinIO project contains unmodified/modified subcomponents too with
+separate copyright notices and license terms. Your use of the source
+code for these subcomponents is subject to the terms and conditions
+of Apache License Version 2.0
diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md
new file mode 100644
index 000000000..82f70a131
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/README.md
@@ -0,0 +1,312 @@
+# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
+
+The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage.
+
+This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader.
+For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
+
+These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html).
+
+## Download from Github
+
+From your project directory:
+
+```sh
+go get github.com/minio/minio-go/v7
+```
+
+## Initialize a MinIO Client Object
+
+The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage:
+
+| Parameter | Description |
+| ----------------- | ---------------------------------------------------------- |
+| `endpoint` | URL to object storage service. |
+| `_minio.Options_` | All the options, such as credentials, custom transport, etc. |
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go/v7"
+ "github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+ endpoint := "play.min.io"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, &minio.Options{
+ Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+ Secure: useSSL,
+ })
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
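+The options table above also mentions a custom transport. The following is a minimal sketch, not part of the upstream quickstart, showing how a custom `http.Transport` could be passed through `minio.Options`; the timeout values are illustrative assumptions:
+
+```go
+package main
+
+import (
+    "log"
+    "net"
+    "net/http"
+    "time"
+
+    "github.com/minio/minio-go/v7"
+    "github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+    // Illustrative transport settings; tune the timeouts for your environment.
+    transport := &http.Transport{
+        DialContext: (&net.Dialer{
+            Timeout: 5 * time.Second,
+        }).DialContext,
+        TLSHandshakeTimeout:   5 * time.Second,
+        ResponseHeaderTimeout: 30 * time.Second,
+    }
+
+    // Same public play.min.io credentials as the example above, plus the custom transport.
+    minioClient, err := minio.New("play.min.io", &minio.Options{
+        Creds:     credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
+        Secure:    true,
+        Transport: transport,
+    })
+    if err != nil {
+        log.Fatalln(err)
+    }
+
+    log.Printf("%#v\n", minioClient) // minioClient now uses the custom transport
+}
+```
+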
+## Example - File Uploader
+
+This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket.
+It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io).
+
+The `play` server runs the latest stable version of MinIO and may be used for testing and development.
+The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected.
+
+### FileUploader.go
+
+This example does the following:
+
+- Connects to the MinIO `play` server using the provided credentials.
+- Creates a bucket named `testbucket`.
+- Uploads a file named `testdata` from `/tmp`.
+- Verifies the file was created using `mc ls`.
+
+```go
+// FileUploader.go MinIO example
+package main
+
+import (
+ "context"
+ "log"
+
+ "github.com/minio/minio-go/v7"
+ "github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+ ctx := context.Background()
+ endpoint := "play.min.io"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, &minio.Options{
+ Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+ Secure: useSSL,
+ })
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Make a new bucket called testbucket.
+ bucketName := "testbucket"
+ location := "us-east-1"
+
+ err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
+ if err != nil {
+ // Check to see if we already own this bucket (which happens if you run this twice)
+ exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
+ if errBucketExists == nil && exists {
+ log.Printf("We already own %s\n", bucketName)
+ } else {
+ log.Fatalln(err)
+ }
+ } else {
+ log.Printf("Successfully created %s\n", bucketName)
+ }
+
+ // Upload the test file
+ // Change the value of filePath if the file is in another location
+ objectName := "testdata"
+ filePath := "/tmp/testdata"
+ contentType := "application/octet-stream"
+
+ // Upload the test file with FPutObject
+ info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
+}
+```
+
+**1. Create a test file containing data:**
+
+You can do this with `dd` on Linux or macOS systems:
+
+```sh
+dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10
+```
+
+or `fsutil` on Windows:
+
+```sh
+fsutil file createnew "C:\Users\<username>\Desktop\sample.txt" 20480
+```
+
+**2. Run FileUploader with the following commands:**
+
+```sh
+go mod init example/FileUploader
+go get github.com/minio/minio-go/v7
+go get github.com/minio/minio-go/v7/pkg/credentials
+go run FileUploader.go
+```
+
+The output resembles the following:
+
+```sh
+2023/11/01 14:27:55 Successfully created testbucket
+2023/11/01 14:27:55 Successfully uploaded testdata of size 20480
+```
+
+**3. Verify the Uploaded File With `mc ls`:**
+
+```sh
+mc ls play/testbucket
+[2023-11-01 14:27:55 UTC] 20KiB STANDARD testdata
+```
+
+## API Reference
+
+The full API Reference is available here.
+
+* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
+
+### API Reference : Bucket Operations
+
+* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
+* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
+* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
+* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket)
+* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects)
+* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
+
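+To give a feel for the bucket operations listed above, here is a short sketch, not part of the upstream README, that lists every object in the `testbucket` bucket created by the FileUploader example. It reuses the public `play.min.io` credentials from the quickstart:
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+
+    "github.com/minio/minio-go/v7"
+    "github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+    ctx := context.Background()
+
+    minioClient, err := minio.New("play.min.io", &minio.Options{
+        Creds:  credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
+        Secure: true,
+    })
+    if err != nil {
+        log.Fatalln(err)
+    }
+
+    // ListObjects streams results over a channel; Recursive walks the whole bucket.
+    for object := range minioClient.ListObjects(ctx, "testbucket", minio.ListObjectsOptions{Recursive: true}) {
+        if object.Err != nil {
+            log.Fatalln(object.Err)
+        }
+        log.Printf("%s (%d bytes)\n", object.Key, object.Size)
+    }
+}
+```
+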
+### API Reference : Bucket policy Operations
+
+* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
+* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
+
+### API Reference : Bucket notification Operations
+
+* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
+* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension)
+* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)
+
+### API Reference : File Object Operations
+
+* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
+* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)
+
+### API Reference : Object Operations
+
+* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
+* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
+* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
+* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject)
+* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject)
+* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject)
+* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
+* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
+
+### API Reference : Presigned Operations
+
+* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
+* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
+* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)
+
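+As an orientation for the presigned operations listed above, the following sketch, not part of the upstream README, generates a time-limited download URL for the `testdata` object uploaded by the FileUploader example. The 15-minute expiry and the `response-content-disposition` override are illustrative choices:
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+    "net/url"
+    "time"
+
+    "github.com/minio/minio-go/v7"
+    "github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+    ctx := context.Background()
+
+    minioClient, err := minio.New("play.min.io", &minio.Options{
+        Creds:  credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
+        Secure: true,
+    })
+    if err != nil {
+        log.Fatalln(err)
+    }
+
+    // Ask the server to return the object as an attachment named "testdata".
+    reqParams := make(url.Values)
+    reqParams.Set("response-content-disposition", `attachment; filename="testdata"`)
+
+    // The URL stays valid for 15 minutes and requires no credentials to use.
+    presignedURL, err := minioClient.PresignedGetObject(ctx, "testbucket", "testdata", 15*time.Minute, reqParams)
+    if err != nil {
+        log.Fatalln(err)
+    }
+
+    log.Println("Presigned GET URL:", presignedURL)
+}
+```
+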
+### API Reference : Client custom settings
+
+* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
+* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
+* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket policy Operations
+
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket lifecycle Operations
+
+* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
+* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
+
+### Full Examples : Bucket encryption Operations
+
+* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
+* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
+* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
+
+### Full Examples : Bucket replication Operations
+
+* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
+* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
+* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
+
+### Full Examples : Bucket notification Operations
+
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
+* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
+
+### Full Examples : File Object Operations
+
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+
+### Full Examples : Object Operations
+
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+
+* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7)
+* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
+* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
+
+## Contribute
+
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
+
+## License
+
+This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
new file mode 100644
index 000000000..8bf537f73
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
@@ -0,0 +1,136 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2024 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/cors"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// SetBucketCors sets the cors configuration for the bucket
+func (c *Client) SetBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ if corsConfig == nil {
+ return c.removeBucketCors(ctx, bucketName)
+ }
+
+ return c.putBucketCors(ctx, bucketName, corsConfig)
+}
+
+func (c *Client) putBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error {
+ urlValues := make(url.Values)
+ urlValues.Set("cors", "")
+
+ corsStr, err := corsConfig.ToXML()
+ if err != nil {
+ return err
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(corsStr),
+ contentLength: int64(len(corsStr)),
+ contentMD5Base64: sumMD5Base64([]byte(corsStr)),
+ }
+
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+func (c *Client) removeBucketCors(ctx context.Context, bucketName string) error {
+ urlValues := make(url.Values)
+ urlValues.Set("cors", "")
+
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ return nil
+}
+
+// GetBucketCors returns the current CORS configuration for the bucket.
+func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ bucketCors, err := c.getBucketCors(ctx, bucketName)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "NoSuchCORSConfiguration" {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return bucketCors, nil
+}
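+
+// Illustrative sketch (not part of the vendored API surface): passing a nil
+// config to SetBucketCors removes the bucket CORS configuration, and
+// GetBucketCors returns (nil, nil) when no configuration is set, so callers
+// should check for a nil *cors.Config before using it. "my-bucket" is a placeholder.
+//
+//	cfg, err := client.GetBucketCors(ctx, "my-bucket")
+//	if err != nil {
+//		// handle error
+//	}
+//	if cfg == nil {
+//		// no CORS configuration set on the bucket
+//	}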
+
+func (c *Client) getBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) {
+ urlValues := make(url.Values)
+ urlValues.Set("cors", "")
+
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex, // TODO: needed? copied over from other example, but not spec'd in API.
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ corsConfig, err := cors.ParseBucketCorsConfig(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ return corsConfig, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
new file mode 100644
index 000000000..24f94e034
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
@@ -0,0 +1,134 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/sse"
+)
+
+// SetBucketEncryption sets the default encryption configuration on an existing bucket.
+func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ if config == nil {
+ return errInvalidArgument("configuration cannot be empty")
+ }
+
+ buf, err := xml.Marshal(config)
+ if err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("encryption", "")
+
+ // Content-length is mandatory to set a default encryption configuration
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(buf),
+ contentLength: int64(len(buf)),
+ contentMD5Base64: sumMD5Base64(buf),
+ }
+
+ // Execute PUT to upload a new bucket default encryption configuration.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ return nil
+}
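+
+// Illustrative sketch (assumes the sse package exposes a NewConfigurationSSES3
+// helper; verify against pkg/sse before relying on it): enabling SSE-S3 as the
+// bucket default, then clearing it again. "my-bucket" is a placeholder.
+//
+//	if err := client.SetBucketEncryption(ctx, "my-bucket", sse.NewConfigurationSSES3()); err != nil {
+//		// handle error
+//	}
+//	// Later, remove the default encryption configuration.
+//	if err := client.RemoveBucketEncryption(ctx, "my-bucket"); err != nil {
+//		// handle error
+//	}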
+
+// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts.
+func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("encryption", "")
+
+ // DELETE default encryption configuration on a bucket.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ return nil
+}
+
+// GetBucketEncryption gets the default encryption configuration
+// on an existing bucket with a context to control cancellations and timeouts.
+func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("encryption", "")
+
+ // Execute GET on bucket to get the default encryption configuration.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ encryptionConfig := &sse.Configuration{}
+ if err = xmlDecoder(resp.Body, encryptionConfig); err != nil {
+ return nil, err
+ }
+
+ return encryptionConfig, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
new file mode 100644
index 000000000..fec5cece5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
@@ -0,0 +1,169 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "io"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/lifecycle"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// SetBucketLifecycle sets the lifecycle configuration on an existing bucket.
+func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // If lifecycle is empty then delete it.
+ if config.Empty() {
+ return c.removeBucketLifecycle(ctx, bucketName)
+ }
+
+ buf, err := xml.Marshal(config)
+ if err != nil {
+ return err
+ }
+
+ // Save the updated lifecycle.
+ return c.putBucketLifecycle(ctx, bucketName, buf)
+}
+
+// Saves a new bucket lifecycle.
+func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("lifecycle", "")
+
+ // Content-length is mandatory for put lifecycle request
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(buf),
+ contentLength: int64(len(buf)),
+ contentMD5Base64: sumMD5Base64(buf),
+ }
+
+ // Execute PUT to upload a new bucket lifecycle.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// Remove lifecycle from a bucket.
+func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("lifecycle", "")
+
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// GetBucketLifecycle fetches the bucket lifecycle configuration.
+func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
+ lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName)
+ return lc, err
+}
+
+// GetBucketLifecycleWithInfo fetches the bucket lifecycle configuration along with when it was last updated.
+func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, time.Time{}, err
+ }
+
+ bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName)
+ if err != nil {
+ return nil, time.Time{}, err
+ }
+
+ config := lifecycle.NewConfiguration()
+ if err = xml.Unmarshal(bucketLifecycle, config); err != nil {
+ return nil, time.Time{}, err
+ }
+ return config, updatedAt, nil
+}
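+
+// Illustrative sketch (the rule fields are assumptions about pkg/lifecycle;
+// verify them before relying on this): build a configuration with
+// NewConfiguration, add an expiration rule, and save it. Passing a
+// configuration for which Empty() is true deletes the lifecycle instead.
+//
+//	cfg := lifecycle.NewConfiguration()
+//	cfg.Rules = []lifecycle.Rule{{
+//		ID:         "expire-tmp",
+//		Status:     "Enabled",
+//		Expiration: lifecycle.Expiration{Days: 30},
+//	}}
+//	if err := client.SetBucketLifecycle(ctx, "my-bucket", cfg); err != nil {
+//		// handle error
+//	}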
+
+// Request server for current bucket lifecycle.
+func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("lifecycle", "")
+ urlValues.Set("withUpdatedAt", "true")
+
+ // Execute GET on bucket to get lifecycle.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, time.Time{}, err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ lcBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, time.Time{}, err
+ }
+
+ const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt"
+ var updatedAt time.Time
+ if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" {
+ updatedAt, err = time.Parse(iso8601DateFormat, timeStr)
+ if err != nil {
+ return nil, time.Time{}, err
+ }
+ }
+
+ return lcBytes, updatedAt, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
new file mode 100644
index 000000000..ad8eada4a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
@@ -0,0 +1,260 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/xml"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/goccy/go-json"
+ "github.com/minio/minio-go/v7/pkg/notification"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts.
+func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ notifBytes, err := xml.Marshal(&config)
+ if err != nil {
+ return err
+ }
+
+ notifBuffer := bytes.NewReader(notifBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: notifBuffer,
+ contentLength: int64(len(notifBytes)),
+ contentMD5Base64: sumMD5Base64(notifBytes),
+ contentSHA256Hex: sum256Hex(notifBytes),
+ }
+
+ // Execute PUT to upload a new bucket notification.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// RemoveAllBucketNotification removes the bucket notification configuration, clearing all previously specified config.
+func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error {
+ return c.SetBucketNotification(ctx, bucketName, notification.Configuration{})
+}
+
+// GetBucketNotification returns current bucket notification configuration
+func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return notification.Configuration{}, err
+ }
+ return c.getBucketNotification(ctx, bucketName)
+}
+
+// Request server for notification rules.
+func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) {
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ // Execute GET on bucket to fetch the notification configuration.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return notification.Configuration{}, err
+ }
+ return processBucketNotificationResponse(bucketName, resp)
+}
+
+// processes the GetNotification http response from the server.
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) {
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ return notification.Configuration{}, errResponse
+ }
+ var bucketNotification notification.Configuration
+ err := xmlDecoder(resp.Body, &bucketNotification)
+ if err != nil {
+ return notification.Configuration{}, err
+ }
+ return bucketNotification, nil
+}
+
+// ListenNotification listens for all events; this is a MinIO-specific API.
+func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info {
+ return c.ListenBucketNotification(ctx, "", prefix, suffix, events)
+}
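+
+// Illustrative sketch: the returned channel delivers notification.Info values
+// until the context is cancelled, and errors are surfaced on the same channel,
+// so a typical consumer ranges over it ("my-bucket" and the event name are
+// placeholders):
+//
+//	for info := range client.ListenBucketNotification(ctx, "my-bucket", "", "", []string{"s3:ObjectCreated:*"}) {
+//		if info.Err != nil {
+//			// handle error and decide whether to stop
+//			continue
+//		}
+//		for _, record := range info.Records {
+//			_ = record // process the event record
+//		}
+//	}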
+
+// ListenBucketNotification listens for bucket events; this is a MinIO-specific API.
+func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info {
+ notificationInfoCh := make(chan notification.Info, 1)
+ const notificationCapacity = 4 * 1024 * 1024
+ notificationEventBuffer := make([]byte, notificationCapacity)
+ // Start a goroutine to read the notification stream line by line.
+ go func(notificationInfoCh chan<- notification.Info) {
+ defer close(notificationInfoCh)
+
+ // Validate the bucket name.
+ if bucketName != "" {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ select {
+ case notificationInfoCh <- notification.Info{
+ Err: err,
+ }:
+ case <-ctx.Done():
+ }
+ return
+ }
+ }
+
+ // Check ARN partition to verify if listening bucket is supported
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
+ select {
+ case notificationInfoCh <- notification.Info{
+ Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
+ }:
+ case <-ctx.Done():
+ }
+ return
+ }
+
+ // Continuously run and listen on bucket notification.
+ // Create a done channel to control the retry goroutine.
+ retryDoneCh := make(chan struct{}, 1)
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(retryDoneCh)
+
+ // Prepare urlValues to pass into the request on every loop
+ urlValues := make(url.Values)
+ urlValues.Set("ping", "10")
+ urlValues.Set("prefix", prefix)
+ urlValues.Set("suffix", suffix)
+ urlValues["events"] = events
+
+ // Wait on the jitter retry loop.
+ for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
+ // Execute GET on bucket to listen for notifications.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ if err != nil {
+ select {
+ case notificationInfoCh <- notification.Info{
+ Err: err,
+ }:
+ case <-ctx.Done():
+ }
+ return
+ }
+
+ // Validate http response, upon error return quickly.
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ select {
+ case notificationInfoCh <- notification.Info{
+ Err: errResponse,
+ }:
+ case <-ctx.Done():
+ }
+ return
+ }
+
+ // Initialize a new bufio scanner, to read line by line.
+ bio := bufio.NewScanner(resp.Body)
+
+ // Use a higher buffer to support unexpected
+ // caching done by proxies
+ bio.Buffer(notificationEventBuffer, notificationCapacity)
+
+ // Unmarshal each line, returns marshaled values.
+ for bio.Scan() {
+ var notificationInfo notification.Info
+ if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
+ // Unexpected error during json unmarshal, send
+ // the error to caller for actionable as needed.
+ select {
+ case notificationInfoCh <- notification.Info{
+ Err: err,
+ }:
+ case <-ctx.Done():
+ return
+ }
+ closeResponse(resp)
+ continue
+ }
+
+ // Empty events pinged from the server
+ if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil {
+ continue
+ }
+
+ // Send notificationInfo
+ select {
+ case notificationInfoCh <- notificationInfo:
+ case <-ctx.Done():
+ closeResponse(resp)
+ return
+ }
+ }
+
+ if err = bio.Err(); err != nil {
+ select {
+ case notificationInfoCh <- notification.Info{
+ Err: err,
+ }:
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ // Close current connection before looping further.
+ closeResponse(resp)
+
+ }
+ }(notificationInfoCh)
+
+ // Returns the notification info channel, for caller to start reading from.
+ return notificationInfoCh
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
new file mode 100644
index 000000000..dbb5259a8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
@@ -0,0 +1,147 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// SetBucketPolicy sets the access permissions on an existing bucket.
+func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // If policy is empty then delete the bucket policy.
+ if policy == "" {
+ return c.removeBucketPolicy(ctx, bucketName)
+ }
+
+ // Save the updated policies.
+ return c.putBucketPolicy(ctx, bucketName, policy)
+}
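+
+// Illustrative sketch: the policy is passed as a raw JSON document, and an
+// empty string removes any existing policy ("my-bucket" and the policy body
+// are placeholders):
+//
+//	policy := `{
+//	  "Version": "2012-10-17",
+//	  "Statement": [{
+//	    "Effect": "Allow",
+//	    "Principal": {"AWS": ["*"]},
+//	    "Action": ["s3:GetObject"],
+//	    "Resource": ["arn:aws:s3:::my-bucket/*"]
+//	  }]
+//	}`
+//	if err := client.SetBucketPolicy(ctx, "my-bucket", policy); err != nil {
+//		// handle error
+//	}
+//	// Remove the policy again.
+//	if err := client.SetBucketPolicy(ctx, "my-bucket", ""); err != nil {
+//		// handle error
+//	}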
+
+// Saves a new bucket policy.
+func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: strings.NewReader(policy),
+ contentLength: int64(len(policy)),
+ }
+
+ // Execute PUT to upload a new bucket policy.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// Removes all policies on a bucket.
+func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ return nil
+}
+
+// GetBucketPolicy returns the current policy
+func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ bucketPolicy, err := c.getBucketPolicy(ctx, bucketName)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "NoSuchBucketPolicy" {
+ return "", nil
+ }
+ return "", err
+ }
+ return bucketPolicy, nil
+}
+
+// Request server for current bucket policy.
+func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return "", httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ bucketPolicyBuf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ policy := string(bucketPolicyBuf)
+ return policy, err
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
new file mode 100644
index 000000000..b12bb13a6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
@@ -0,0 +1,355 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/minio/minio-go/v7/pkg/replication"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// RemoveBucketReplication removes a replication config on an existing bucket.
+func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error {
+ return c.removeBucketReplication(ctx, bucketName)
+}
+
+// SetBucketReplication sets a replication config on an existing bucket.
+func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // If replication is empty then delete it.
+ if cfg.Empty() {
+ return c.removeBucketReplication(ctx, bucketName)
+ }
+ // Save the updated replication.
+ return c.putBucketReplication(ctx, bucketName, cfg)
+}
+
+// Saves a new bucket replication.
+func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication", "")
+ replication, err := xml.Marshal(cfg)
+ if err != nil {
+ return err
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(replication),
+ contentLength: int64(len(replication)),
+ contentMD5Base64: sumMD5Base64(replication),
+ }
+
+ // Execute PUT to upload a new bucket replication config.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ return nil
+}
+
+// Remove replication from a bucket.
+func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication", "")
+
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ return nil
+}
+
+// GetBucketReplication fetches the bucket replication configuration. If no configuration is
+// found, it returns an empty config with a nil error.
+func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return cfg, err
+ }
+ bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "ReplicationConfigurationNotFoundError" {
+ return cfg, nil
+ }
+ return cfg, err
+ }
+ return bucketReplicationCfg, nil
+}
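+
+// Illustrative sketch: a missing replication configuration is not an error,
+// so callers can distinguish "not configured" by checking Empty() on the
+// returned config ("my-bucket" is a placeholder):
+//
+//	cfg, err := client.GetBucketReplication(ctx, "my-bucket")
+//	if err != nil {
+//		// handle error
+//	}
+//	if cfg.Empty() {
+//		// replication is not configured on this bucket
+//	}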
+
+// Request server for current bucket replication config.
+func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication", "")
+
+ // Execute GET on bucket to get replication config.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return cfg, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return cfg, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ if err = xmlDecoder(resp.Body, &cfg); err != nil {
+ return cfg, err
+ }
+
+ return cfg, nil
+}
+
+// GetBucketReplicationMetrics fetches bucket replication status metrics
+func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return s, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-metrics", "")
+
+ // Execute GET on bucket to get replication config.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return s, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return s, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ respBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return s, err
+ }
+
+ if err := json.Unmarshal(respBytes, &s); err != nil {
+ return s, err
+ }
+ return s, nil
+}
+
+// mustGetUUID - get a random UUID.
+func mustGetUUID() string {
+ u, err := uuid.NewRandom()
+ if err != nil {
+ return ""
+ }
+ return u.String()
+}
+
+// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
+// is enabled in the replication config
+func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
+ rID = mustGetUUID()
+ _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
+ if err != nil {
+ return rID, err
+ }
+ return rID, nil
+}
+
+// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if
+// ExistingObjectReplication is enabled in the replication config
+func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) {
+ return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID())
+}
+
+// resetBucketReplicationOnTarget kicks off replication of previously replicated objects if ExistingObjectReplication
+// is enabled in the replication config
+func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-reset", "")
+ if olderThan > 0 {
+ urlValues.Set("older-than", olderThan.String())
+ }
+ if tgtArn != "" {
+ urlValues.Set("arn", tgtArn)
+ }
+ urlValues.Set("reset-id", resetID)
+ // Execute PUT on bucket to reset replication for matching objects.
+ resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return rinfo, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return rinfo, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
+ return rinfo, err
+ }
+ return rinfo, nil
+}
+
+// GetBucketReplicationResyncStatus gets the status of replication resync
+func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return rinfo, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-reset-status", "")
+ if arn != "" {
+ urlValues.Set("arn", arn)
+ }
+ // Execute GET on bucket to get replication resync status.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return rinfo, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return rinfo, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
+ return rinfo, err
+ }
+ return rinfo, nil
+}
+
+// GetBucketReplicationMetricsV2 fetches bucket replication status metrics
+func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return s, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-metrics", "2")
+
+ // Execute GET on bucket to get replication metrics.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return s, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return s, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ respBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return s, err
+ }
+
+ if err := json.Unmarshal(respBytes, &s); err != nil {
+ return s, err
+ }
+ return s, nil
+}
+
+// CheckBucketReplication validates if replication is set up properly for a bucket
+func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-check", "")
+
+ // Execute GET on bucket to validate the replication setup.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
new file mode 100644
index 000000000..86d74298a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
@@ -0,0 +1,134 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// GetBucketTagging fetch tagging configuration for a bucket with a
+// context to control cancellations and timeouts.
+func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("tagging", "")
+
+ // Execute GET on bucket to get tagging configuration.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ defer io.Copy(io.Discard, resp.Body)
+ return tags.ParseBucketXML(resp.Body)
+}
+
+// SetBucketTagging sets tagging configuration for a bucket
+// with a context to control cancellations and timeouts.
+func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ if tags == nil {
+ return errors.New("nil tags passed")
+ }
+
+ buf, err := xml.Marshal(tags)
+ if err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("tagging", "")
+
+ // Content-length is mandatory to set a default encryption configuration
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(buf),
+ contentLength: int64(len(buf)),
+ contentMD5Base64: sumMD5Base64(buf),
+ }
+
+ // Execute PUT on bucket to put tagging configuration.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ return nil
+}
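+
+// Illustrative sketch (assumes the tags package exposes a NewTags constructor;
+// verify against pkg/tags): build a tag set from a map and apply it to the
+// bucket. "my-bucket" and the tag values are placeholders.
+//
+//	t, err := tags.NewTags(map[string]string{"team": "storage", "env": "dev"}, false)
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := client.SetBucketTagging(ctx, "my-bucket", t); err != nil {
+//		// handle error
+//	}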
+
+// RemoveBucketTagging removes tagging configuration for a
+// bucket with a context to control cancellations and timeouts.
+func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("tagging", "")
+
+ // Execute DELETE on bucket to remove tagging configuration.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
new file mode 100644
index 000000000..8c84e4f27
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
@@ -0,0 +1,146 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// SetBucketVersioning sets a bucket versioning configuration
+func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ buf, err := xml.Marshal(config)
+ if err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("versioning", "")
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(buf),
+ contentLength: int64(len(buf)),
+ contentMD5Base64: sumMD5Base64(buf),
+ contentSHA256Hex: sum256Hex(buf),
+ }
+
+ // Execute PUT to set a bucket versioning.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// EnableVersioning - enable object versioning in given bucket.
+func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error {
+ return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"})
+}
+
+// SuspendVersioning - suspend object versioning in given bucket.
+func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error {
+ return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"})
+}
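+
+// Illustrative sketch: enable versioning, then inspect the current state
+// ("my-bucket" is a placeholder):
+//
+//	if err := client.EnableVersioning(ctx, "my-bucket"); err != nil {
+//		// handle error
+//	}
+//	cfg, err := client.GetBucketVersioning(ctx, "my-bucket")
+//	if err != nil {
+//		// handle error
+//	}
+//	if cfg.Enabled() {
+//		// versioning is active on the bucket
+//	}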
+
+// ExcludedPrefix - holds individual prefixes excluded from being versioned.
+type ExcludedPrefix struct {
+ Prefix string
+}
+
+// BucketVersioningConfiguration is the versioning configuration structure
+type BucketVersioningConfiguration struct {
+ XMLName xml.Name `xml:"VersioningConfiguration"`
+ Status string `xml:"Status"`
+ MFADelete string `xml:"MfaDelete,omitempty"`
+ // MinIO extension - allows selective, prefix-level versioning exclusion.
+ // Requires versioning to be enabled
+ ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"`
+ ExcludeFolders bool `xml:",omitempty"`
+}
+
+// Various supported states
+const (
+ Enabled = "Enabled"
+ // Disabled State = "Disabled" only used by MFA Delete not supported yet.
+ Suspended = "Suspended"
+)
+
+// Enabled returns true if bucket versioning is enabled
+func (b BucketVersioningConfiguration) Enabled() bool {
+ return b.Status == Enabled
+}
+
+// Suspended returns true if bucket versioning is suspended
+func (b BucketVersioningConfiguration) Suspended() bool {
+ return b.Status == Suspended
+}
+
+// GetBucketVersioning gets the versioning configuration on
+// an existing bucket with a context to control cancellations and timeouts.
+func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return BucketVersioningConfiguration{}, err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("versioning", "")
+
+ // Execute GET on bucket to get the versioning configuration.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return BucketVersioningConfiguration{}, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ versioningConfig := BucketVersioningConfiguration{}
+ if err = xmlDecoder(resp.Body, &versioningConfig); err != nil {
+ return versioningConfig, err
+ }
+
+ return versioningConfig, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
new file mode 100644
index 000000000..bb595626e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
@@ -0,0 +1,594 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017, 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
+type CopyDestOptions struct {
+ Bucket string // points to destination bucket
+ Object string // points to destination object
+
+ // `Encryption` is the key info for server-side-encryption with customer
+ // provided key. If it is nil, no encryption is performed.
+ Encryption encrypt.ServerSide
+
+ // `userMeta` is the user-metadata key-value pairs to be set on the
+ // destination. The keys are automatically prefixed with `x-amz-meta-`
+ // if needed. If nil is passed, and if only a single source (of any
+ // size) is provided in the ComposeObject call, then metadata from the
+ // source is copied to the destination.
+ // If no user-metadata is provided, it is copied from the source
+ // (when there is only one source object in the compose
+ // request).
+ UserMetadata map[string]string
+ // UserMetadata is only set on the destination if ReplaceMetadata is true;
+ // otherwise the value of UserMetadata is ignored and src.UserMetadata is preserved.
+ // NOTE: if you set this value to true and no metadata is present
+ // in UserMetadata, your destination object will not have any metadata
+ // set.
+ ReplaceMetadata bool
+
+ // `userTags` is the user defined object tags to be set on destination.
+ // This will be set only if the `replaceTags` field is set to true.
+ // Otherwise this field is ignored
+ UserTags map[string]string
+ ReplaceTags bool
+
+ // Specifies whether you want to apply a Legal Hold to the copied object.
+ LegalHold LegalHoldStatus
+
+ // Object Retention related fields
+ Mode RetentionMode
+ RetainUntilDate time.Time
+
+ Size int64 // Needs to be specified if progress bar is specified.
+ // Progress of the entire copy operation will be sent here.
+ Progress io.Reader
+}
+
+// Process custom-metadata to remove an `x-amz-meta-` prefix if
+// present and drop duplicate keys (after this
+// prefix removal).
+func filterCustomMeta(userMeta map[string]string) map[string]string {
+ m := make(map[string]string)
+ for k, v := range userMeta {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ k = k[len("x-amz-meta-"):]
+ }
+ if _, ok := m[k]; ok {
+ continue
+ }
+ m[k] = v
+ }
+ return m
+}
+
+// Marshal converts all the CopyDestOptions into their
+// equivalent HTTP header representation
+func (opts CopyDestOptions) Marshal(header http.Header) {
+ const replaceDirective = "REPLACE"
+ if opts.ReplaceTags {
+ header.Set(amzTaggingHeaderDirective, replaceDirective)
+ if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
+ header.Set(amzTaggingHeader, tags)
+ }
+ }
+
+ if opts.LegalHold != LegalHoldStatus("") {
+ header.Set(amzLegalHoldHeader, opts.LegalHold.String())
+ }
+
+ if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
+ header.Set(amzLockMode, opts.Mode.String())
+ header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
+ }
+
+ if opts.Encryption != nil {
+ opts.Encryption.Marshal(header)
+ }
+
+ if opts.ReplaceMetadata {
+ header.Set("x-amz-metadata-directive", replaceDirective)
+ for k, v := range filterCustomMeta(opts.UserMetadata) {
+ if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) {
+ header.Set(k, v)
+ } else {
+ header.Set("x-amz-meta-"+k, v)
+ }
+ }
+ }
+}
+
+// validate checks that the CopyDestOptions are well formed.
+func (opts CopyDestOptions) validate() (err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
+ return err
+ }
+ if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
+ return err
+ }
+ if opts.Progress != nil && opts.Size < 0 {
+ return errInvalidArgument("For progress bar effective size needs to be specified")
+ }
+ return nil
+}
+
+// CopySrcOptions represents a source object to be copied, using
+// server-side copying APIs.
+type CopySrcOptions struct {
+ Bucket, Object string
+ VersionID string
+ MatchETag string
+ NoMatchETag string
+ MatchModifiedSince time.Time
+ MatchUnmodifiedSince time.Time
+ MatchRange bool
+ Start, End int64
+ Encryption encrypt.ServerSide
+}
+
+// Marshal converts all the CopySrcOptions into their
+// equivalent HTTP header representation
+func (opts CopySrcOptions) Marshal(header http.Header) {
+ // Set the source header
+ header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
+ if opts.VersionID != "" {
+ header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
+ }
+
+ if opts.MatchETag != "" {
+ header.Set("x-amz-copy-source-if-match", opts.MatchETag)
+ }
+ if opts.NoMatchETag != "" {
+ header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
+ }
+
+ if !opts.MatchModifiedSince.IsZero() {
+ header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
+ }
+ if !opts.MatchUnmodifiedSince.IsZero() {
+ header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
+ }
+
+ if opts.Encryption != nil {
+ encrypt.SSECopy(opts.Encryption).Marshal(header)
+ }
+}
+
+func (opts CopySrcOptions) validate() (err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
+ return err
+ }
+ if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
+ return err
+ }
+ if opts.Start > opts.End || opts.Start < 0 {
+ return errInvalidArgument("start must be non-negative, and start must be at most end.")
+ }
+ return nil
+}
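+
+// Illustrative sketch, assuming the package-level CopyObject(ctx, dst, src)
+// entry point defined elsewhere in this package; bucket and object names are
+// placeholders. Server-side copy is driven by these option structs:
+//
+//	src := minio.CopySrcOptions{Bucket: "src-bucket", Object: "src-object"}
+//	dst := minio.CopyDestOptions{Bucket: "dst-bucket", Object: "dst-object"}
+//	info, err := client.CopyObject(ctx, dst, src)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = info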
+
+// Low-level implementation of the CopyObject API; supports only up to 5GiB per copy.
+func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
+ metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
+) (ObjectInfo, error) {
+ // Build headers.
+ headers := make(http.Header)
+
+ // Set all the metadata headers.
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+ if !dstOpts.Internal.ReplicationStatus.Empty() {
+ headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
+ }
+ if !dstOpts.Internal.SourceMTime.IsZero() {
+ headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
+ }
+ if dstOpts.Internal.SourceETag != "" {
+ headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
+ }
+ if dstOpts.Internal.ReplicationRequest {
+ headers.Set(minIOBucketReplicationRequest, "true")
+ }
+ if dstOpts.Internal.ReplicationValidityCheck {
+ headers.Set(minIOBucketReplicationCheck, "true")
+ }
+ if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
+ }
+ if !dstOpts.Internal.RetentionTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
+ }
+ if !dstOpts.Internal.TaggingTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
+ }
+
+ if len(dstOpts.UserTags) != 0 {
+ headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ }
+ if dstOpts.Internal.SourceVersionID != "" {
+ if dstOpts.Internal.SourceVersionID != nullVersionID {
+ if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
+ return ObjectInfo{}, errInvalidArgument(err.Error())
+ }
+ }
+ urlValues := make(url.Values)
+ urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
+ reqMetadata.queryValues = urlValues
+ }
+
+ // Set the source header
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+ if srcOpts.VersionID != "" {
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
+ }
+ // Send copy-object request
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
+ }
+
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ objInfo := ObjectInfo{
+ Key: destObject,
+ ETag: strings.Trim(cpObjRes.ETag, "\""),
+ LastModified: cpObjRes.LastModified,
+ }
+ return objInfo, nil
+}
+
+func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
+ partID int, startOffset, length int64, metadata map[string]string,
+) (p CompletePart, err error) {
+ headers := make(http.Header)
+
+ // Set source
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+ if startOffset < 0 {
+ return p, errInvalidArgument("startOffset must be non-negative")
+ }
+
+ if length >= 0 {
+ headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
+ }
+
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+
+ queryValues := make(url.Values)
+ queryValues.Set("partNumber", strconv.Itoa(partID))
+ queryValues.Set("uploadId", uploadID)
+
+ resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ queryValues: queryValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return p, httpRespToErrorResponse(resp, destBucket, destObject)
+ }
+
+ // Decode copy-part response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return p, err
+ }
+ p.PartNumber, p.ETag = partID, cpObjRes.ETag
+ return p, nil
+}
+
+// uploadPartCopy - helper function to create a part in a multipart
+// upload via an upload-part-copy request
+// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
+func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
+ headers http.Header,
+) (p CompletePart, err error) {
+ // Build query parameters
+ urlValues := make(url.Values)
+ urlValues.Set("partNumber", strconv.Itoa(partNumber))
+ urlValues.Set("uploadId", uploadID)
+
+ // Send upload-part-copy request
+ resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
+ bucketName: bucket,
+ objectName: object,
+ customHeader: headers,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return p, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return p, httpRespToErrorResponse(resp, bucket, object)
+ }
+
+ // Decode copy-part response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return p, err
+ }
+ p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
+ return p, nil
+}
+
+// ComposeObject - creates an object using server-side copying
+// of existing objects. It takes a list of source objects (with optional offsets)
+// and concatenates them into a new object using only server-side copying
+// operations. Optionally takes progress reader hook for applications to
+// look at current progress.
+func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
+ if len(srcs) < 1 || len(srcs) > maxPartsCount {
+ return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.")
+ }
+
+ for _, src := range srcs {
+ if err := src.validate(); err != nil {
+ return UploadInfo{}, err
+ }
+ }
+
+ if err := dst.validate(); err != nil {
+ return UploadInfo{}, err
+ }
+
+ srcObjectInfos := make([]ObjectInfo, len(srcs))
+ srcObjectSizes := make([]int64, len(srcs))
+ var totalSize, totalParts int64
+ var err error
+ for i, src := range srcs {
+ opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
+ srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ srcCopySize := srcObjectInfos[i].Size
+ // Check if a segment is specified, and if so, is the
+ // segment within object bounds?
+ if src.MatchRange {
+ // Since range is specified,
+ // 0 <= src.start <= src.end
+ // so only invalid case to check is:
+ if src.End >= srcCopySize || src.Start < 0 {
+ return UploadInfo{}, errInvalidArgument(
+ fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
+ i, src.Start, src.End, srcCopySize))
+ }
+ srcCopySize = src.End - src.Start + 1
+ }
+
+ // Only the last source may be less than `absMinPartSize`
+ if srcCopySize < absMinPartSize && i < len(srcs)-1 {
+ return UploadInfo{}, errInvalidArgument(
+ fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
+ }
+
+ // Is data to copy too large?
+ totalSize += srcCopySize
+ if totalSize > maxMultipartPutObjectSize {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
+ }
+
+ // record source size
+ srcObjectSizes[i] = srcCopySize
+
+ // calculate parts needed for current source
+ totalParts += partsRequired(srcCopySize)
+ // Do we need more parts than we are allowed?
+ if totalParts > maxPartsCount {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
+ "Your proposed compose object requires more than %d parts", maxPartsCount))
+ }
+ }
+
+ // Single source object case (i.e. when only one source is
+ // involved, it is being copied wholly and at most 5GiB in
+ // size, empty files are also supported).
+ if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
+ return c.CopyObject(ctx, dst, srcs[0])
+ }
+
+ // Now, handle multipart-copy cases.
+
+ // 1. Ensure that the object has not been changed while
+ // we are copying data.
+ for i, src := range srcs {
+ src.MatchETag = srcObjectInfos[i].ETag
+ }
+
+ // 2. Initiate a new multipart upload.
+
+ // Set user-metadata on the destination object. If no
+ // user-metadata is specified, and there is only one source,
+ // (only) then metadata from source is copied.
+ var userMeta map[string]string
+ if dst.ReplaceMetadata {
+ userMeta = dst.UserMetadata
+ } else {
+ userMeta = srcObjectInfos[0].UserMetadata
+ }
+
+ var userTags map[string]string
+ if dst.ReplaceTags {
+ userTags = dst.UserTags
+ } else {
+ userTags = srcObjectInfos[0].UserTags
+ }
+
+ uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
+ ServerSideEncryption: dst.Encryption,
+ UserMetadata: userMeta,
+ UserTags: userTags,
+ Mode: dst.Mode,
+ RetainUntilDate: dst.RetainUntilDate,
+ LegalHold: dst.LegalHold,
+ })
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // 3. Perform copy part uploads
+ objParts := []CompletePart{}
+ partIndex := 1
+ for i, src := range srcs {
+ h := make(http.Header)
+ src.Marshal(h)
+ if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
+ dst.Encryption.Marshal(h)
+ }
+
+ // calculate start/end indices of parts after
+ // splitting.
+ startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
+ for j, start := range startIdx {
+ end := endIdx[j]
+
+ // Add (or reset) source range header for
+ // upload part copy request.
+ h.Set("x-amz-copy-source-range",
+ fmt.Sprintf("bytes=%d-%d", start, end))
+
+ // make upload-part-copy request
+ complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
+ dst.Object, uploadID, partIndex, h)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ if dst.Progress != nil {
+ io.CopyN(io.Discard, dst.Progress, end-start+1)
+ }
+ objParts = append(objParts, complPart)
+ partIndex++
+ }
+ }
+
+ // 4. Make final complete-multipart request.
+ uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
+ completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalSize
+ return uploadInfo, nil
+}
+
+// partsRequired computes the number of parts needed for the given size, using a
+// part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)).
+func partsRequired(size int64) int64 {
+ maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
+ r := size / int64(maxPartSize)
+ if size%int64(maxPartSize) > 0 {
+ r++
+ }
+ return r
+}
+
+// calculateEvenSplits - computes splits for a source and returns
+// start and end index slices. Splits happen evenly to be sure that no
+// part is less than 5MiB, as that could fail the multipart request if
+// it is not the last part.
+func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
+ if size == 0 {
+ return
+ }
+
+ reqParts := partsRequired(size)
+ startIndex = make([]int64, reqParts)
+ endIndex = make([]int64, reqParts)
+ // Compute number of required parts `k`, as:
+ //
+ // k = ceiling(size / copyPartSize)
+ //
+ // Now, distribute the `size` bytes in the source into
+ // k parts as evenly as possible:
+ //
+ // r parts sized (q+1) bytes, and
+ // (k - r) parts sized q bytes, where
+ //
+ // size = q * k + r (by simple division of size by k,
+ // so that 0 <= r < k)
+ //
+ start := src.Start
+ if start == -1 {
+ start = 0
+ }
+ quot, rem := size/reqParts, size%reqParts
+ nextStart := start
+ for j := int64(0); j < reqParts; j++ {
+ curPartSize := quot
+ if j < rem {
+ curPartSize++
+ }
+
+ cStart := nextStart
+ cEnd := cStart + curPartSize - 1
+ nextStart = cEnd + 1
+
+ startIndex[j], endIndex[j] = cStart, cEnd
+ }
+ return
+}
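A minimal usage sketch of the ComposeObject API defined in this file follows. It assumes a reachable S3-compatible endpoint; the endpoint, credentials, and bucket and object names are placeholders, not values taken from this repository.

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials; replace with real values.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each source is copied server-side; only the last source may be smaller than 5MiB.
	srcs := []minio.CopySrcOptions{
		{Bucket: "my-bucket", Object: "part-1"},
		{Bucket: "my-bucket", Object: "part-2"},
	}
	dst := minio.CopyDestOptions{Bucket: "my-bucket", Object: "composed-object"}

	info, err := client.ComposeObject(context.Background(), dst, srcs...)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("composed %s (%d bytes)", info.Key, info.Size)
}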
diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
new file mode 100644
index 000000000..0c95d91ec
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
@@ -0,0 +1,76 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017, 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "net/http"
+)
+
+// CopyObject - copy a source object into a new object
+func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) {
+ if err := src.validate(); err != nil {
+ return UploadInfo{}, err
+ }
+
+ if err := dst.validate(); err != nil {
+ return UploadInfo{}, err
+ }
+
+ header := make(http.Header)
+ dst.Marshal(header)
+ src.Marshal(header)
+
+ resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
+ bucketName: dst.Bucket,
+ objectName: dst.Object,
+ customHeader: header,
+ })
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ defer closeResponse(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object)
+ }
+
+ // Update the progress properly after successful copy.
+ if dst.Progress != nil {
+ io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
+ }
+
+ cpObjRes := copyObjectResult{}
+ if err = xmlDecoder(resp.Body, &cpObjRes); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // extract lifecycle expiry date and rule ID
+ expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
+
+ return UploadInfo{
+ Bucket: dst.Bucket,
+ Key: dst.Object,
+ LastModified: cpObjRes.LastModified,
+ ETag: trimEtag(resp.Header.Get("ETag")),
+ VersionID: resp.Header.Get(amzVersionID),
+ Expiration: expTime,
+ ExpirationRuleID: ruleID,
+ }, nil
+}
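A minimal sketch of the CopyObject API defined in this file, assuming an already-constructed client; the bucket and object names are placeholders.

package example

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// copyExample server-side copies one object to a new key.
func copyExample(ctx context.Context, client *minio.Client) error {
	src := minio.CopySrcOptions{Bucket: "src-bucket", Object: "src-object"}
	dst := minio.CopyDestOptions{Bucket: "dst-bucket", Object: "dst-object"}

	info, err := client.CopyObject(ctx, dst, src)
	if err != nil {
		return err
	}
	log.Printf("copied to %s, etag %s", info.Key, info.ETag)
	return nil
}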
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
new file mode 100644
index 000000000..97a6f80b2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -0,0 +1,254 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// BucketInfo container for bucket metadata.
+type BucketInfo struct {
+ // The name of the bucket.
+ Name string `json:"name"`
+ // Date the bucket was created.
+ CreationDate time.Time `json:"creationDate"`
+}
+
+// StringMap represents map with custom UnmarshalXML
+type StringMap map[string]string
+
+// UnmarshalXML unmarshals the XML into a map of string to strings,
+// creating a key in the map for each tag and setting its value to the
+// tag's contents.
+//
+// The fact this method is on the pointer of StringMap is important, so that
+// if m is nil it can be initialized, which is often the case if m is
+// nested in another xml structure. This is also why the first thing done
+// on the first line is to initialize it.
+func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error {
+ *m = StringMap{}
+ for {
+ // Format is <key>value</key>
+ var e struct {
+ XMLName xml.Name
+ Value string `xml:",chardata"`
+ }
+ err := d.Decode(&e)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ (*m)[e.XMLName.Local] = e.Value
+ }
+ return nil
+}
+
+// URLMap represents map with custom UnmarshalXML
+type URLMap map[string]string
+
+// UnmarshalXML unmarshals the XML into a map of string to strings,
+// creating a key in the map for each tag and setting its value to the
+// tag's contents.
+//
+// The fact this method is on the pointer of URLMap is important, so that
+// if m is nil it can be initialized, which is often the case if m is
+// nested in another xml structure. This is also why the first thing done
+// on the first line is to initialize it.
+func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error {
+ *m = URLMap{}
+ var tgs string
+ if err := d.DecodeElement(&tgs, &se); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ for tgs != "" {
+ var key string
+ key, tgs, _ = stringsCut(tgs, "&")
+ if key == "" {
+ continue
+ }
+ key, value, _ := stringsCut(key, "=")
+ key, err := url.QueryUnescape(key)
+ if err != nil {
+ return err
+ }
+
+ value, err = url.QueryUnescape(value)
+ if err != nil {
+ return err
+ }
+ (*m)[key] = value
+ }
+ return nil
+}
+
+// stringsCut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, cut returns s, "", false.
+func stringsCut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
+
+// Owner name.
+type Owner struct {
+ XMLName xml.Name `xml:"Owner" json:"owner"`
+ DisplayName string `xml:"ID" json:"name"`
+ ID string `xml:"DisplayName" json:"id"`
+}
+
+// UploadInfo contains information about the
+// newly uploaded or copied object.
+type UploadInfo struct {
+ Bucket string
+ Key string
+ ETag string
+ Size int64
+ LastModified time.Time
+ Location string
+ VersionID string
+
+ // Lifecycle expiry-date and ruleID associated with the expiry
+ // not to be confused with `Expires` HTTP header.
+ Expiration time.Time
+ ExpirationRuleID string
+
+ // Verified checksum values, if any.
+ // Values are base64 (standard) encoded.
+ // For multipart objects this is a checksum of the checksum of each part.
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+}
+
+// RestoreInfo contains information of the restore operation of an archived object
+type RestoreInfo struct {
+ // Indicates whether the restore operation is still ongoing
+ OngoingRestore bool
+ // When the restored copy of the archived object will be removed
+ ExpiryTime time.Time
+}
+
+// ObjectInfo container for object metadata.
+type ObjectInfo struct {
+ // An ETag is optionally set to md5sum of an object. In case of multipart objects,
+ // ETag is of the form MD5SUM-N where MD5SUM is the md5sum of all individual md5sums of
+ // each part concatenated into one string.
+ ETag string `json:"etag"`
+
+ Key string `json:"name"` // Name of the object
+ LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
+ Size int64 `json:"size"` // Size in bytes of the object.
+ ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+ Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached.
+
+ // Collection of additional metadata on the object.
+ // eg: x-amz-meta-*, content-encoding etc.
+ Metadata http.Header `json:"metadata" xml:"-"`
+
+ // x-amz-meta-* headers with the "x-amz-meta-" prefix stripped, containing the first value.
+ // Only returned by MinIO servers.
+ UserMetadata StringMap `json:"userMetadata,omitempty"`
+
+ // x-amz-tagging values as key/value pairs.
+ // Only returned by MinIO servers.
+ UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
+
+ // x-amz-tagging-count value
+ UserTagCount int
+
+ // Owner name.
+ Owner Owner
+
+ // ACL grant.
+ Grant []Grant
+
+ // The class of storage used to store the object.
+ StorageClass string `json:"storageClass"`
+
+ // Versioning related information
+ IsLatest bool
+ IsDeleteMarker bool
+ VersionID string `xml:"VersionId"`
+
+ // x-amz-replication-status value is either in one of the following states
+ // - COMPLETED
+ // - PENDING
+ // - FAILED
+ // - REPLICA (on the destination)
+ ReplicationStatus string `xml:"ReplicationStatus"`
+ // Set to true if the delete marker has a backing object version on the target and is eligible to replicate
+ ReplicationReady bool
+ // Lifecycle expiry-date and ruleID associated with the expiry
+ // not to be confused with `Expires` HTTP header.
+ Expiration time.Time
+ ExpirationRuleID string
+
+ Restore *RestoreInfo
+
+ // Checksum values
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+
+ Internal *struct {
+ K int // Data blocks
+ M int // Parity blocks
+ } `xml:"Internal"`
+
+ // Error
+ Err error `json:"-"`
+}
+
+// ObjectMultipartInfo container for multipart object metadata.
+type ObjectMultipartInfo struct {
+ // Date and time at which the multipart upload was initiated.
+ Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Initiator initiator
+ Owner owner
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass string
+
+ // Key of the object for which the multipart upload was initiated.
+ Key string
+
+ // Size in bytes of the object.
+ Size int64
+
+ // Upload ID that identifies the multipart upload.
+ UploadID string `xml:"UploadId"`
+
+ // Error
+ Err error
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
new file mode 100644
index 000000000..7df211fda
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -0,0 +1,284 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+   <Code>AccessDenied</Code>
+   <Message>Access Denied</Message>
+   <BucketName>bucketName</BucketName>
+   <Key>objectName</Key>
+   <RequestId>F19772218238A85A</RequestId>
+   <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+</Error>
+*/
+
+// ErrorResponse - Is the typed error returned by all API operations.
+// ErrorResponse struct should be comparable since it is compared inside
+// golang http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ Resource string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // Region where the bucket is located. This header is returned
+ // only in HEAD bucket and ListObjects response.
+ Region string
+
+ // Captures the server string returned in response header.
+ Server string
+
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+}
+
+// ToErrorResponse - Returns parsed ErrorResponse struct from body and
+// http headers.
+//
+// For example:
+//
+// import s3 "github.com/minio/minio-go/v7"
+// ...
+// ...
+// reader, stat, err := s3.GetObject(...)
+// if err != nil {
+// resp := s3.ToErrorResponse(err)
+// }
+// ...
+func ToErrorResponse(err error) ErrorResponse {
+ switch err := err.(type) {
+ case ErrorResponse:
+ return err
+ default:
+ return ErrorResponse{}
+ }
+}
+
+// Error - Returns S3 error string.
+func (e ErrorResponse) Error() string {
+ if e.Message == "" {
+ msg, ok := s3ErrorResponseMap[e.Code]
+ if !ok {
+ msg = fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return msg
+ }
+ return e.Message
+}
+
+// Common string for errors to report issue location in unexpected
+// cases.
+const (
+ reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// xmlDecodeAndBody reads the whole body up to 1MB and
+// tries to XML decode it into v.
+// The body that was read and any error from reading or decoding is returned.
+func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
+ // read the whole body (up to 1MB)
+ const maxBodyLength = 1 << 20
+ body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ if err != nil {
+ return nil, err
+ }
+ return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
+}
+
+// httpRespToErrorResponse returns a new encoded ErrorResponse
+// structure as error.
+func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+ if resp == nil {
+ msg := "Empty http response. " + reportIssue
+ return errInvalidArgument(msg)
+ }
+
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Server: resp.Header.Get("Server"),
+ }
+
+ errBody, err := xmlDecodeAndBody(resp.Body, &errResp)
+ // Xml decoding failed with no body, fall back to HTTP headers.
+ if err != nil {
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ if objectName == "" {
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "NoSuchBucket",
+ Message: "The specified bucket does not exist.",
+ BucketName: bucketName,
+ }
+ } else {
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "NoSuchKey",
+ Message: "The specified key does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ }
+ case http.StatusForbidden:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "AccessDenied",
+ Message: "Access Denied.",
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ case http.StatusConflict:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "Conflict",
+ Message: "Bucket not empty.",
+ BucketName: bucketName,
+ }
+ case http.StatusPreconditionFailed:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "PreconditionFailed",
+ Message: s3ErrorResponseMap["PreconditionFailed"],
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ default:
+ msg := resp.Status
+ if len(errBody) > 0 {
+ msg = string(errBody)
+ if len(msg) > 1024 {
+ msg = msg[:1024] + "..."
+ }
+ }
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: resp.Status,
+ Message: msg,
+ BucketName: bucketName,
+ }
+ }
+ }
+
+ code := resp.Header.Get("x-minio-error-code")
+ if code != "" {
+ errResp.Code = code
+ }
+ desc := resp.Header.Get("x-minio-error-desc")
+ if desc != "" {
+ errResp.Message = strings.Trim(desc, `"`)
+ }
+
+ // Save hostID, requestID and region information
+ // from headers if not available through error XML.
+ if errResp.RequestID == "" {
+ errResp.RequestID = resp.Header.Get("x-amz-request-id")
+ }
+ if errResp.HostID == "" {
+ errResp.HostID = resp.Header.Get("x-amz-id-2")
+ }
+ if errResp.Region == "" {
+ errResp.Region = resp.Header.Get("x-amz-bucket-region")
+ }
+ if errResp.Code == "InvalidRegion" && errResp.Region != "" {
+ errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
+ }
+
+ return errResp
+}
+
+// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
+func errTransferAccelerationBucket(bucketName string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
+ BucketName: bucketName,
+ }
+}
+
+// errEntityTooLarge - Input size is larger than supported maximum.
+func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "EntityTooLarge",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// errEntityTooSmall - Input size is smaller than supported minimum.
+func errEntityTooSmall(totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "EntityTooSmall",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// errUnexpectedEOF - Unexpected end of file reached.
+func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "UnexpectedEOF",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// errInvalidArgument - Invalid argument response.
+func errInvalidArgument(message string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// errAPINotSupported - API not supported response
+// The specified API call is not supported
+func errAPINotSupported(message string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusNotImplemented,
+ Code: "APINotSupported",
+ Message: message,
+ RequestID: "minio",
+ }
+}
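A short sketch of how callers typically inspect the typed error defined in this file; the bucket and object names are placeholders, and the client is assumed to be already configured.

package example

import (
	"context"
	"log"
	"net/http"

	"github.com/minio/minio-go/v7"
)

// handleStatError branches on the parsed ErrorResponse code and status.
func handleStatError(ctx context.Context, client *minio.Client) {
	_, err := client.StatObject(ctx, "my-bucket", "missing-object", minio.StatObjectOptions{})
	if err != nil {
		resp := minio.ToErrorResponse(err)
		switch {
		case resp.Code == "NoSuchKey":
			log.Println("object does not exist")
		case resp.StatusCode == http.StatusForbidden:
			log.Println("access denied")
		default:
			log.Printf("unexpected error: %v", resp)
		}
	}
}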
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
new file mode 100644
index 000000000..9041d99e9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
@@ -0,0 +1,152 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "encoding/xml"
+ "net/http"
+ "net/url"
+)
+
+// Grantee represents the person being granted permissions.
+type Grantee struct {
+ XMLName xml.Name `xml:"Grantee"`
+ ID string `xml:"ID"`
+ DisplayName string `xml:"DisplayName"`
+ URI string `xml:"URI"`
+}
+
+// Grant holds grant information
+type Grant struct {
+ XMLName xml.Name `xml:"Grant"`
+ Grantee Grantee
+ Permission string `xml:"Permission"`
+}
+
+// AccessControlList contains the set of grantees and the permissions assigned to each grantee.
+type AccessControlList struct {
+ XMLName xml.Name `xml:"AccessControlList"`
+ Grant []Grant
+ Permission string `xml:"Permission"`
+}
+
+type accessControlPolicy struct {
+ XMLName xml.Name `xml:"AccessControlPolicy"`
+ Owner Owner
+ AccessControlList AccessControlList
+}
+
+// GetObjectACL gets the object ACLs
+func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) {
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: url.Values{
+ "acl": []string{""},
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer closeResponse(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+
+ res := &accessControlPolicy{}
+
+ if err := xmlDecoder(resp.Body, res); err != nil {
+ return nil, err
+ }
+
+ objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ objInfo.Owner.DisplayName = res.Owner.DisplayName
+ objInfo.Owner.ID = res.Owner.ID
+
+ objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...)
+
+ cannedACL := getCannedACL(res)
+ if cannedACL != "" {
+ objInfo.Metadata.Add("X-Amz-Acl", cannedACL)
+ return &objInfo, nil
+ }
+
+ grantACL := getAmzGrantACL(res)
+ for k, v := range grantACL {
+ objInfo.Metadata[k] = v
+ }
+
+ return &objInfo, nil
+}
+
+func getCannedACL(aCPolicy *accessControlPolicy) string {
+ grants := aCPolicy.AccessControlList.Grant
+
+ switch {
+ case len(grants) == 1:
+ if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
+ return "private"
+ }
+ case len(grants) == 2:
+ for _, g := range grants {
+ if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
+ return "authenticated-read"
+ }
+ if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
+ return "public-read"
+ }
+ if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID {
+ return "bucket-owner-read"
+ }
+ }
+ case len(grants) == 3:
+ for _, g := range grants {
+ if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
+ return "public-read-write"
+ }
+ }
+ }
+ return ""
+}
+
+func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string {
+ grants := aCPolicy.AccessControlList.Grant
+ res := map[string][]string{}
+
+ for _, g := range grants {
+ switch {
+ case g.Permission == "READ":
+ res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID)
+ case g.Permission == "WRITE":
+ res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID)
+ case g.Permission == "READ_ACP":
+ res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID)
+ case g.Permission == "WRITE_ACP":
+ res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID)
+ case g.Permission == "FULL_CONTROL":
+ res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID)
+ }
+ }
+ return res
+}
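A minimal sketch of GetObjectACL usage with placeholder names; it prints the owner, the canned ACL exposed through the metadata (if any), and each grant.

package example

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// printObjectACL fetches and logs the ACL-related information of one object.
func printObjectACL(ctx context.Context, client *minio.Client) error {
	info, err := client.GetObjectACL(ctx, "my-bucket", "my-object")
	if err != nil {
		return err
	}
	log.Printf("owner: %s, canned ACL: %q", info.Owner.DisplayName, info.Metadata.Get("X-Amz-Acl"))
	for _, g := range info.Grant {
		log.Printf("grantee %s has %s", g.Grantee.ID, g.Permission)
	}
	return nil
}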
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go
new file mode 100644
index 000000000..e1155c372
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go
@@ -0,0 +1,201 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "encoding/xml"
+ "errors"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// ObjectAttributesOptions are options used for the GetObjectAttributes API
+//
+// - MaxParts
+// How many parts the caller wants to be returned (default: 1000)
+//
+// - VersionID
+// The object version you want attributes for
+//
+// - PartNumberMarker
+// The listing will start AFTER the part matching PartNumberMarker
+//
+// - ServerSideEncryption
+// The server-side encryption algorithm used when storing this object in MinIO
+type ObjectAttributesOptions struct {
+ MaxParts int
+ VersionID string
+ PartNumberMarker int
+ ServerSideEncryption encrypt.ServerSide
+}
+
+// ObjectAttributes is the response object returned by the GetObjectAttributes API
+//
+// - VersionID
+// The object version
+//
+// - LastModified
+// The last time the object was modified
+//
+// - ObjectAttributesResponse
+// Contains more information about the object
+type ObjectAttributes struct {
+ VersionID string
+ LastModified time.Time
+ ObjectAttributesResponse
+}
+
+// ObjectAttributesResponse contains details returned by the GetObjectAttributes API
+//
+// Noteworthy fields:
+//
+// - ObjectParts.PartsCount
+// Contains the total part count for the object (not the current response)
+//
+// - ObjectParts.PartNumberMarker
+// Pagination of parts will begin at (but not include) PartNumberMarker
+//
+// - ObjectParts.NextPartNumberMarker
+// The next PartNumberMarker to be used in order to continue pagination
+//
+// - ObjectParts.IsTruncated
+// Indicates if the last part is included in the request (does not check if parts are missing from the start of the list, ONLY the end)
+//
+// - ObjectParts.MaxParts
+// Reflects the MaxParts used by the caller or the default MaxParts value of the API
+type ObjectAttributesResponse struct {
+ ETag string `xml:",omitempty"`
+ StorageClass string
+ ObjectSize int
+ Checksum struct {
+ ChecksumCRC32 string `xml:",omitempty"`
+ ChecksumCRC32C string `xml:",omitempty"`
+ ChecksumSHA1 string `xml:",omitempty"`
+ ChecksumSHA256 string `xml:",omitempty"`
+ }
+ ObjectParts struct {
+ PartsCount int
+ PartNumberMarker int
+ NextPartNumberMarker int
+ MaxParts int
+ IsTruncated bool
+ Parts []*ObjectAttributePart `xml:"Part"`
+ }
+}
+
+// ObjectAttributePart is used by ObjectAttributesResponse to describe an object part
+type ObjectAttributePart struct {
+ ChecksumCRC32 string `xml:",omitempty"`
+ ChecksumCRC32C string `xml:",omitempty"`
+ ChecksumSHA1 string `xml:",omitempty"`
+ ChecksumSHA256 string `xml:",omitempty"`
+ PartNumber int
+ Size int
+}
+
+func (o *ObjectAttributes) parseResponse(resp *http.Response) (err error) {
+ mod, err := parseRFC7231Time(resp.Header.Get("Last-Modified"))
+ if err != nil {
+ return err
+ }
+ o.LastModified = mod
+ o.VersionID = resp.Header.Get(amzVersionID)
+
+ response := new(ObjectAttributesResponse)
+ if err := xml.NewDecoder(resp.Body).Decode(response); err != nil {
+ return err
+ }
+ o.ObjectAttributesResponse = *response
+
+ return
+}
+
+// GetObjectAttributes API combines HeadObject and ListParts.
+// More details on usage can be found in the documentation for ObjectAttributesOptions{}
+func (c *Client) GetObjectAttributes(ctx context.Context, bucketName, objectName string, opts ObjectAttributesOptions) (*ObjectAttributes, error) {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+
+ urlValues := make(url.Values)
+ urlValues.Add("attributes", "")
+ if opts.VersionID != "" {
+ urlValues.Add("versionId", opts.VersionID)
+ }
+
+ headers := make(http.Header)
+ headers.Set(amzObjectAttributes, GetObjectAttributesTags)
+
+ if opts.PartNumberMarker > 0 {
+ headers.Set(amzPartNumberMarker, strconv.Itoa(opts.PartNumberMarker))
+ }
+
+ if opts.MaxParts > 0 {
+ headers.Set(amzMaxParts, strconv.Itoa(opts.MaxParts))
+ } else {
+ headers.Set(amzMaxParts, strconv.Itoa(GetObjectAttributesMaxParts))
+ }
+
+ if opts.ServerSideEncryption != nil {
+ opts.ServerSideEncryption.Marshal(headers)
+ }
+
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: headers,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ defer closeResponse(resp)
+
+ hasEtag := resp.Header.Get(ETag)
+ if hasEtag != "" {
+ return nil, errors.New("getObjectAttributes is not supported by the current endpoint version")
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ ER := new(ErrorResponse)
+ if err := xml.NewDecoder(resp.Body).Decode(ER); err != nil {
+ return nil, err
+ }
+
+ return nil, *ER
+ }
+
+ OA := new(ObjectAttributes)
+ err = OA.parseResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return OA, nil
+}
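A minimal sketch of GetObjectAttributes usage; the bucket and object names are placeholders and MaxParts is an arbitrary example value.

package example

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// printAttributes requests object attributes, including up to 100 part entries.
func printAttributes(ctx context.Context, client *minio.Client) error {
	attrs, err := client.GetObjectAttributes(ctx, "my-bucket", "my-object", minio.ObjectAttributesOptions{
		MaxParts: 100,
	})
	if err != nil {
		return err
	}
	log.Printf("size=%d etag=%s parts=%d truncated=%v",
		attrs.ObjectSize, attrs.ETag, attrs.ObjectParts.PartsCount, attrs.ObjectParts.IsTruncated)
	return nil
}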
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
new file mode 100644
index 000000000..567a42e45
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
@@ -0,0 +1,127 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// FGetObject - download contents of an object to a local file.
+// The options can be used to specify the GET request further.
+func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Verify if destination already exists.
+ st, err := os.Stat(filePath)
+ if err == nil {
+ // If the destination exists and is a directory.
+ if st.IsDir() {
+ return errInvalidArgument("fileName is a directory.")
+ }
+ }
+
+ // Proceed if the file does not exist; return for all other errors.
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ }
+
+ // Extract top level directory.
+ objectDir, _ := filepath.Split(filePath)
+ if objectDir != "" {
+ // Create any missing top level directories.
+ if err := os.MkdirAll(objectDir, 0o700); err != nil {
+ return err
+ }
+ }
+
+ // Gather md5sum.
+ objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts))
+ if err != nil {
+ return err
+ }
+
+ // Write to a temporary file "fileName.part.minio" before saving.
+ filePartPath := filePath + sum256Hex([]byte(objectStat.ETag)) + ".part.minio"
+
+ // If exists, open in append mode. If not create it as a part file.
+ filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
+ if err != nil {
+ return err
+ }
+
+ // If we return early with an error, be sure to close and delete
+ // filePart. If we have an error along the way there is a chance
+ // that filePart is somehow damaged, and we should discard it.
+ closeAndRemove := true
+ defer func() {
+ if closeAndRemove {
+ _ = filePart.Close()
+ _ = os.Remove(filePartPath)
+ }
+ }()
+
+ // Issue Stat to get the current offset.
+ st, err = filePart.Stat()
+ if err != nil {
+ return err
+ }
+
+ // Initialize get object request headers to set the
+ // appropriate range offsets to read from.
+ if st.Size() > 0 {
+ opts.SetRange(st.Size(), 0)
+ }
+
+ // Seek to current position for incoming reader.
+ objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return err
+ }
+
+ // Write to the part file.
+ if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
+ return err
+ }
+
+ // Close the file before rename, this is specifically needed for Windows users.
+ closeAndRemove = false
+ if err = filePart.Close(); err != nil {
+ return err
+ }
+
+ // Safely completed. Now commit by renaming to actual filename.
+ if err = os.Rename(filePartPath, filePath); err != nil {
+ return err
+ }
+
+ // Return.
+ return nil
+}
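A minimal sketch of FGetObject usage with placeholder names; FGetObject itself handles the temporary ".part.minio" file and the final rename described above.

package example

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// downloadToFile downloads one object to a local path.
func downloadToFile(ctx context.Context, client *minio.Client) error {
	if err := client.FGetObject(ctx, "my-bucket", "my-object", "/tmp/my-object", minio.GetObjectOptions{}); err != nil {
		return err
	}
	log.Println("download complete")
	return nil
}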
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go
new file mode 100644
index 000000000..d7fd27835
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go
@@ -0,0 +1,699 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// GetObject wrapper function that accepts a request context
+func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidBucketName",
+ Message: err.Error(),
+ }
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "XMinioInvalidObjectName",
+ Message: err.Error(),
+ }
+ }
+
+ gctx, cancel := context.WithCancel(ctx)
+
+ // Detect if the server location we are talking to is snowball.
+ var snowball bool
+ if location, ok := c.bucketLocCache.Get(bucketName); ok {
+ snowball = location == "snowball"
+ }
+
+ var (
+ err error
+ httpReader io.ReadCloser
+ objectInfo ObjectInfo
+ totalRead int
+ )
+
+ // Create request channel.
+ reqCh := make(chan getRequest)
+ // Create response channel.
+ resCh := make(chan getResponse)
+
+ // This routine feeds partial object data as and when the caller reads.
+ go func() {
+ defer close(resCh)
+ defer func() {
+ // Close the http response body before returning.
+ // This ends the connection with the server.
+ if httpReader != nil {
+ httpReader.Close()
+ }
+ }()
+ defer cancel()
+
+ // Used to verify if etag of object has changed since last read.
+ var etag string
+
+ for req := range reqCh {
+ // If this is the first request we may not need to do a getObject request yet.
+ if req.isFirstReq {
+ // First request is a Read/ReadAt.
+ if req.isReadOp {
+ // Differentiate between wanting the whole object and just a range.
+ if req.isReadAt {
+ // If this is a ReadAt request only get the specified range.
+ // Range is set with respect to the offset and length of the buffer requested.
+ // Do not set objectInfo from the first readAt request because it will not get
+ // the whole object.
+ opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ } else if req.Offset > 0 {
+ opts.SetRange(req.Offset, 0)
+ }
+ httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
+ if err != nil {
+ resCh <- getResponse{Error: err}
+ return
+ }
+ etag = objectInfo.ETag
+ // Read at least firstReq.Buffer bytes, if not we have
+ // reached our EOF.
+ size, err := readFull(httpReader, req.Buffer)
+ totalRead += size
+ if size > 0 && err == io.ErrUnexpectedEOF {
+ if int64(size) < objectInfo.Size {
+ // In situations when returned size
+ // is less than the expected content
+ // length set by the server, make sure
+ // we return io.ErrUnexpectedEOF
+ err = io.ErrUnexpectedEOF
+ } else {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ } else if size == 0 && err == io.EOF && objectInfo.Size > 0 {
+ // Special cases when server writes more data
+ // than the content-length, net/http response
+ // body returns an error, instead of converting
+ // it to io.EOF - return unexpected EOF.
+ err = io.ErrUnexpectedEOF
+ }
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ Size: size,
+ Error: err,
+ didRead: true,
+ }
+ } else {
+ // First request is a Stat or Seek call.
+ // Only need to run a StatObject until an actual Read or ReadAt request comes through.
+
+ // Remove range header if already set, for stat Operations to get original file size.
+ delete(opts.headers, "Range")
+ objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ // Exit the go-routine.
+ return
+ }
+ etag = objectInfo.ETag
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ }
+ } else if req.settingObjectInfo { // Request is just to get objectInfo.
+ // Remove range header if already set, for stat Operations to get original file size.
+ delete(opts.headers, "Range")
+ // Check whether this is snowball
+ // if yes do not use If-Match feature
+ // it doesn't work.
+ if etag != "" && !snowball {
+ opts.SetMatchETag(etag)
+ }
+ objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+ // Send back the objectInfo.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ } else {
+ // Offset changes fetch the new object at an Offset.
+ // Because the httpReader may not be set by the first
+ // request if it was a stat or seek it must be checked
+ // if the object has been read or not to only initialize
+ // new ones when they haven't been already.
+ // All readAt requests are new requests.
+ if req.DidOffsetChange || !req.beenRead {
+ // Check whether this is snowball
+ // if yes do not use If-Match feature
+ // it doesn't work.
+ if etag != "" && !snowball {
+ opts.SetMatchETag(etag)
+ }
+ if httpReader != nil {
+ // Close previously opened http reader.
+ httpReader.Close()
+ }
+ // If this request is a readAt only get the specified range.
+ if req.isReadAt {
+ // Range is set with respect to the offset and length of the buffer requested.
+ opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ } else if req.Offset > 0 { // Range is set with respect to the offset.
+ opts.SetRange(req.Offset, 0)
+ } else {
+ // Remove range header if already set
+ delete(opts.headers, "Range")
+ }
+ httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ return
+ }
+ totalRead = 0
+ }
+
+ // Read at least req.Buffer bytes, if not we have
+ // reached our EOF.
+ size, err := readFull(httpReader, req.Buffer)
+ totalRead += size
+ if size > 0 && err == io.ErrUnexpectedEOF {
+ if int64(totalRead) < objectInfo.Size {
+ // In situations when returned size
+ // is less than the expected content
+ // length set by the server, make sure
+ // we return io.ErrUnexpectedEOF
+ err = io.ErrUnexpectedEOF
+ } else {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ } else if size == 0 && err == io.EOF && objectInfo.Size > 0 {
+ // Special cases when server writes more data
+ // than the content-length, net/http response
+ // body returns an error, instead of converting
+ // it to io.EOF - return unexpected EOF.
+ err = io.ErrUnexpectedEOF
+ }
+
+ // Reply back how much was read.
+ resCh <- getResponse{
+ Size: size,
+ Error: err,
+ didRead: true,
+ objectInfo: objectInfo,
+ }
+ }
+ }
+ }()
+
+ // Create a newObject through the information sent back by reqCh.
+ return newObject(gctx, cancel, reqCh, resCh), nil
+}
+
+// get request message container to communicate with internal
+// go-routine.
+type getRequest struct {
+ Buffer []byte
+ Offset int64 // readAt offset.
+ DidOffsetChange bool // Tracks the offset changes for Seek requests.
+ beenRead bool // Determines if this is the first time an object is being read.
+ isReadAt bool // Determines if this request is a request to a specific range
+ isReadOp bool // Determines if this request is a Read or Read/At request.
+ isFirstReq bool // Determines if this request is the first time an object is being accessed.
+ settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
+}
+
+// get response message container to reply back for the request.
+type getResponse struct {
+ Size int
+ Error error
+ didRead bool // Lets subsequent calls know whether or not httpReader has been initiated.
+ objectInfo ObjectInfo // Used for the first request.
+}
+
+// Object represents an open object. It implements
+// Reader, ReaderAt, Seeker, Closer for a HTTP stream.
+type Object struct {
+ // Mutex.
+ mutex *sync.Mutex
+
+ // User allocated and defined.
+ reqCh chan<- getRequest
+ resCh <-chan getResponse
+ ctx context.Context
+ cancel context.CancelFunc
+ currOffset int64
+ objectInfo ObjectInfo
+
+ // Ask lower level to initiate data fetching based on currOffset
+ seekData bool
+
+ // Keeps track of closed call.
+ isClosed bool
+
+ // Keeps track of if this is the first call.
+ isStarted bool
+
+ // Previous error saved for future calls.
+ prevErr error
+
+ // Keeps track of if this object has been read yet.
+ beenRead bool
+
+ // Keeps track of if objectInfo has been set yet.
+ objectInfoSet bool
+}
+
+// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object.
+// Returns back the size of the buffer read, if anything was read, as well
+// as any error encountered. For all first requests sent on the object
+// it is also responsible for sending back the objectInfo.
+func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
+ select {
+ case <-o.ctx.Done():
+ return getResponse{}, o.ctx.Err()
+ case o.reqCh <- request:
+ }
+
+ response := <-o.resCh
+
+ // Return any error to the top level.
+ if response.Error != nil {
+ return response, response.Error
+ }
+
+ // This was the first request.
+ if !o.isStarted {
+ // The object has been operated on.
+ o.isStarted = true
+ }
+ // Set the objectInfo if the request was not readAt
+ // and it hasn't been set before.
+ if !o.objectInfoSet && !request.isReadAt {
+ o.objectInfo = response.objectInfo
+ o.objectInfoSet = true
+ }
+ // Set beenRead only if it has not been set before.
+ if !o.beenRead {
+ o.beenRead = response.didRead
+ }
+ // Data are ready on the wire, no need to reinitiate connection in lower level
+ o.seekData = false
+
+ return response, nil
+}
+
+// setOffset - handles the setting of offsets for
+// Read/ReadAt/Seek requests.
+func (o *Object) setOffset(bytesRead int64) error {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+
+ if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
+ return io.EOF
+ }
+ return nil
+}
+
+// Read reads up to len(b) bytes into b. It returns the number of
+// bytes read (0 <= n <= len(b)) and any error encountered. Returns
+// io.EOF upon end of file.
+func (o *Object) Read(b []byte) (n int, err error) {
+ if o == nil {
+ return 0, errInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // prevErr is previous error saved from previous operation.
+ if o.prevErr != nil || o.isClosed {
+ return 0, o.prevErr
+ }
+
+ // Create a new request.
+ readReq := getRequest{
+ isReadOp: true,
+ beenRead: o.beenRead,
+ Buffer: b,
+ }
+
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readReq.isFirstReq = true
+ }
+
+ // Ask to establish a new data fetch routine based on seekData flag
+ readReq.DidOffsetChange = o.seekData
+ readReq.Offset = o.currOffset
+
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readReq)
+ if err != nil && err != io.EOF {
+ // Save the error for future calls.
+ o.prevErr = err
+ return response.Size, err
+ }
+
+ // Bytes read.
+ bytesRead := int64(response.Size)
+
+ // Set the new offset.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ // Save the error for future calls.
+ o.prevErr = oerr
+ return response.Size, oerr
+ }
+
+ // Return the response.
+ return response.Size, err
+}
+
+// Stat returns the ObjectInfo structure describing Object.
+func (o *Object) Stat() (ObjectInfo, error) {
+ if o == nil {
+ return ObjectInfo{}, errInvalidArgument("Object is nil")
+ }
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
+ return ObjectInfo{}, o.prevErr
+ }
+
+ // This is the first request.
+ if !o.isStarted || !o.objectInfoSet {
+ // Send the request and get the response.
+ _, err := o.doGetRequest(getRequest{
+ isFirstReq: !o.isStarted,
+ settingObjectInfo: !o.objectInfoSet,
+ })
+ if err != nil {
+ o.prevErr = err
+ return ObjectInfo{}, err
+ }
+ }
+
+ return o.objectInfo, nil
+}
+
+// ReadAt reads len(b) bytes from the File starting at byte offset
+// off. It returns the number of bytes read and the error, if any.
+// ReadAt always returns a non-nil error when n < len(b). At end of
+// file, that error is io.EOF.
+func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
+ if o == nil {
+ return 0, errInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+	// prevErr is the error saved from a previous operation.
+ if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
+ return 0, o.prevErr
+ }
+
+ // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method.
+ o.currOffset = offset
+
+ // Can only compare offsets to size when size has been set.
+ if o.objectInfoSet {
+		// If the offset is negative, return io.EOF.
+		// If the offset is greater than or equal to the object size, return io.EOF.
+ if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
+ return 0, io.EOF
+ }
+ }
+
+ // Create the new readAt request.
+ readAtReq := getRequest{
+ isReadOp: true,
+ isReadAt: true,
+ DidOffsetChange: true, // Offset always changes.
+ beenRead: o.beenRead, // Set if this is the first request to try and read.
+ Offset: offset, // Set the offset.
+ Buffer: b,
+ }
+
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readAtReq.isFirstReq = true
+ }
+
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readAtReq)
+ if err != nil && err != io.EOF {
+ // Save the error.
+ o.prevErr = err
+ return response.Size, err
+ }
+ // Bytes read.
+ bytesRead := int64(response.Size)
+ // There is no valid objectInfo yet
+ // to compare against for EOF.
+ if !o.objectInfoSet {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+ } else {
+ // If this was not the first request update
+ // the offsets and compare against objectInfo
+ // for EOF.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ o.prevErr = oerr
+ return response.Size, oerr
+ }
+ }
+ return response.Size, err
+}
+
+// Seek sets the offset for the next Read or Write to offset,
+// interpreted according to whence: 0 means relative to the
+// origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end.
+// Seek returns the new offset and an error, if any.
+//
+// Seeking to a negative offset is an error. Seeking to any positive
+// offset is legal; subsequent io operations succeed as long as the
+// underlying object is not closed.
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
+ if o == nil {
+ return 0, errInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+	// Seeking at EOF is legal, so allow io.EOF here; return any other saved error.
+ if o.prevErr != nil && o.prevErr != io.EOF {
+ return 0, o.prevErr
+ }
+
+ // Negative offset is valid for whence of '2'.
+ if offset < 0 && whence != 2 {
+ return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence))
+ }
+
+ // This is the first request. So before anything else
+ // get the ObjectInfo.
+ if !o.isStarted || !o.objectInfoSet {
+ // Create the new Seek request.
+ seekReq := getRequest{
+ isReadOp: false,
+ Offset: offset,
+ isFirstReq: true,
+ }
+ // Send and receive from the seek request.
+ _, err := o.doGetRequest(seekReq)
+ if err != nil {
+ // Save the error.
+ o.prevErr = err
+ return 0, err
+ }
+ }
+
+ newOffset := o.currOffset
+
+ // Switch through whence.
+ switch whence {
+ default:
+ return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
+ case 0:
+ if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
+ return 0, io.EOF
+ }
+ newOffset = offset
+ case 1:
+ if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
+ return 0, io.EOF
+ }
+ newOffset += offset
+ case 2:
+ // If we don't know the object size return an error for io.SeekEnd
+ if o.objectInfo.Size < 0 {
+ return 0, errInvalidArgument("Whence END is not supported when the object size is unknown")
+ }
+ // Seeking to positive offset is valid for whence '2', but
+ // since we are backing a Reader we have reached 'EOF' if
+ // offset is positive.
+ if offset > 0 {
+ return 0, io.EOF
+ }
+ // Seeking to negative position not allowed for whence.
+ if o.objectInfo.Size+offset < 0 {
+ return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
+ }
+ newOffset = o.objectInfo.Size + offset
+ }
+ // Reset the saved error since we successfully seeked, let the Read
+ // and ReadAt decide.
+ if o.prevErr == io.EOF {
+ o.prevErr = nil
+ }
+
+ // Ask lower level to fetch again from source when necessary
+ o.seekData = (newOffset != o.currOffset) || o.seekData
+ o.currOffset = newOffset
+
+ // Return the effective offset.
+ return o.currOffset, nil
+}
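+
+// Illustrative sketch (not part of the upstream file): skipping a fixed-size
+// header and reading the rest of an object, assuming `obj` is an *Object
+// returned by a GetObject-style call and `headerSize` is known to the caller.
+//
+//	if _, err := obj.Seek(headerSize, io.SeekStart); err != nil {
+//	    log.Fatalln(err)
+//	}
+//	payload, err := io.ReadAll(obj)
+//	if err != nil {
+//	    log.Fatalln(err)
+//	}
+//	fmt.Println("payload bytes:", len(payload))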
+
+// Close - closes the underlying stream. After the first call, any
+// subsequent Close() call returns an error.
+func (o *Object) Close() (err error) {
+ if o == nil {
+ return errInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // if already closed return an error.
+ if o.isClosed {
+ return o.prevErr
+ }
+
+ // Close successfully.
+ o.cancel()
+
+	// Close the request channel to signal the internal goroutine to exit.
+ close(o.reqCh)
+
+ // Save for future operations.
+ errMsg := "Object is already closed. Bad file descriptor."
+ o.prevErr = errors.New(errMsg)
+	// Record that the object has been closed.
+ o.isClosed = true
+ return nil
+}
+
+// newObject instantiates a new *minio.Object*.
+// ObjectInfo is populated by the first request served through doGetRequest.
+func newObject(ctx context.Context, cancel context.CancelFunc, reqCh chan<- getRequest, resCh <-chan getResponse) *Object {
+ return &Object{
+ ctx: ctx,
+ cancel: cancel,
+ mutex: &sync.Mutex{},
+ reqCh: reqCh,
+ resCh: resCh,
+ }
+}
+
+// getObject - retrieves an object from object storage.
+//
+// This function also takes range arguments to download only the specified
+// byte range of an object. Setting offset and length to 0 downloads the full object.
+//
+// For more information about the HTTP Range header, see
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
+ // Validate input arguments.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, ObjectInfo{}, nil, ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidBucketName",
+ Message: err.Error(),
+ }
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, ObjectInfo{}, nil, ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "XMinioInvalidObjectName",
+ Message: err.Error(),
+ }
+ }
+
+ // Execute GET on objectName.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: opts.toQueryValues(),
+ customHeader: opts.Header(),
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ if err != nil {
+ return nil, ObjectInfo{}, nil, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+ return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header)
+ if err != nil {
+ closeResponse(resp)
+ return nil, ObjectInfo{}, nil, err
+ }
+
+ // do not close body here, caller will close
+ return resp.Body, objectStat, resp.Header, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go
new file mode 100644
index 000000000..a0216e201
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go
@@ -0,0 +1,203 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+)
+
+// AdvancedGetOptions for internal use by MinIO server - not intended for client use.
+type AdvancedGetOptions struct {
+ ReplicationDeleteMarker bool
+ IsReplicationReadyForDeleteMarker bool
+ ReplicationProxyRequest string
+}
+
+// GetObjectOptions are used to specify additional headers or options
+// during GET requests.
+type GetObjectOptions struct {
+ headers map[string]string
+ reqParams url.Values
+ ServerSideEncryption encrypt.ServerSide
+ VersionID string
+ PartNumber int
+
+ // Include any checksums, if object was uploaded with checksum.
+ // For multipart objects this is a checksum of part checksums.
+ // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ Checksum bool
+
+	// Not to be used by external applications.
+ Internal AdvancedGetOptions
+}
+
+// StatObjectOptions are used to specify additional headers or options
+// during GET info/stat requests.
+type StatObjectOptions = GetObjectOptions
+
+// Header returns the http.Header representation of the GET options.
+func (o GetObjectOptions) Header() http.Header {
+ headers := make(http.Header, len(o.headers))
+ for k, v := range o.headers {
+ headers.Set(k, v)
+ }
+ if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
+ o.ServerSideEncryption.Marshal(headers)
+ }
+	// This header is set for the active-active replication scenario where a GET/HEAD
+	// to site A is proxied to site B if the object/version is missing on site A.
+ if o.Internal.ReplicationProxyRequest != "" {
+ headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest)
+ }
+ if o.Checksum {
+ headers.Set("x-amz-checksum-mode", "ENABLED")
+ }
+ return headers
+}
+
+// Set adds a key value pair to the options. The
+// key-value pair will be part of the HTTP GET request
+// headers.
+func (o *GetObjectOptions) Set(key, value string) {
+ if o.headers == nil {
+ o.headers = make(map[string]string)
+ }
+ o.headers[http.CanonicalHeaderKey(key)] = value
+}
+
+// SetReqParam - set request query string parameter
+// supported key: see supportedQueryValues and allowedCustomQueryPrefix.
+// If an unsupported key is passed in, it will be ignored and nothing will be done.
+func (o *GetObjectOptions) SetReqParam(key, value string) {
+ if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
+ // do nothing
+ return
+ }
+ if o.reqParams == nil {
+ o.reqParams = make(url.Values)
+ }
+ o.reqParams.Set(key, value)
+}
+
+// AddReqParam - add request query string parameter
+// supported key: see supportedQueryValues and allowedCustomQueryPrefix.
+// If an unsupported key is passed in, it will be ignored and nothing will be done.
+func (o *GetObjectOptions) AddReqParam(key, value string) {
+ if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
+ // do nothing
+ return
+ }
+ if o.reqParams == nil {
+ o.reqParams = make(url.Values)
+ }
+ o.reqParams.Add(key, value)
+}
+
+// SetMatchETag - set match etag.
+func (o *GetObjectOptions) SetMatchETag(etag string) error {
+ if etag == "" {
+ return errInvalidArgument("ETag cannot be empty.")
+ }
+ o.Set("If-Match", "\""+etag+"\"")
+ return nil
+}
+
+// SetMatchETagExcept - set match etag except.
+func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
+ if etag == "" {
+ return errInvalidArgument("ETag cannot be empty.")
+ }
+ o.Set("If-None-Match", "\""+etag+"\"")
+ return nil
+}
+
+// SetUnmodified - set unmodified time since.
+func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return errInvalidArgument("Modified since cannot be empty.")
+ }
+ o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// SetModified - set modified time since.
+func (o *GetObjectOptions) SetModified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return errInvalidArgument("Modified since cannot be empty.")
+ }
+ o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
+ return nil
+}
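+
+// Illustrative sketch (assumption, not upstream code): combining conditional
+// request headers on GetObjectOptions; `lastSync` and `cachedETag` are
+// placeholders supplied by the caller.
+//
+//	opts := minio.GetObjectOptions{}
+//	_ = opts.SetModified(lastSync)          // adds If-Modified-Since
+//	_ = opts.SetMatchETagExcept(cachedETag) // adds If-None-Match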
+
+// SetRange - set the start and end offset of the object to be read.
+// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
+func (o *GetObjectOptions) SetRange(start, end int64) error {
+ switch {
+ case start == 0 && end < 0:
+ // Read last '-end' bytes. `bytes=-N`.
+ o.Set("Range", fmt.Sprintf("bytes=%d", end))
+ case 0 < start && end == 0:
+ // Read everything starting from offset
+ // 'start'. `bytes=N-`.
+ o.Set("Range", fmt.Sprintf("bytes=%d-", start))
+ case 0 <= start && start <= end:
+ // Read everything starting at 'start' till the
+ // 'end'. `bytes=N-M`
+ o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
+ default:
+ // All other cases such as
+ // bytes=-3-
+ // bytes=5-3
+ // bytes=-2-4
+ // bytes=-3-0
+ // bytes=-3--2
+ // are invalid.
+ return errInvalidArgument(
+ fmt.Sprintf(
+ "Invalid range specified: start=%d end=%d",
+ start, end))
+ }
+ return nil
+}
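+
+// Illustrative sketch (not part of the upstream file): requesting only the
+// first 512 bytes of an object, or only the trailing 256 bytes.
+//
+//	opts := minio.GetObjectOptions{}
+//	_ = opts.SetRange(0, 511)  // bytes=0-511
+//	_ = opts.SetRange(0, -256) // bytes=-256, i.e. the last 256 bytes
+//	// pass opts to a GetObject-style call to fetch only that range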
+
+// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters.
+func (o *GetObjectOptions) toQueryValues() url.Values {
+ urlValues := make(url.Values)
+ if o.VersionID != "" {
+ urlValues.Set("versionId", o.VersionID)
+ }
+ if o.PartNumber > 0 {
+ urlValues.Set("partNumber", strconv.Itoa(o.PartNumber))
+ }
+
+ if o.reqParams != nil {
+ for key, values := range o.reqParams {
+ for _, value := range values {
+ urlValues.Add(key, value)
+ }
+ }
+ }
+
+ return urlValues
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go
new file mode 100644
index 000000000..31b6edf2e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-list.go
@@ -0,0 +1,1057 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// ListBuckets lists all buckets owned by this authenticated user.
+//
+// This call requires explicit authentication, no anonymous requests are
+// allowed for listing buckets.
+//
+// api := client.New(....)
+// for message := range api.ListBuckets(context.Background()) {
+// fmt.Println(message)
+// }
+func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
+ // Execute GET on service.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex})
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, "", "")
+ }
+ }
+ listAllMyBucketsResult := listAllMyBucketsResult{}
+ err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
+ if err != nil {
+ return nil, err
+ }
+ return listAllMyBucketsResult.Buckets.Bucket, nil
+}
+
+// Bucket List Operations.
+func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
+ // Allocate new list objects channel.
+ objectStatCh := make(chan ObjectInfo, 1)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if opts.Recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+
+ // Return object owner information by default
+ fetchOwner := true
+
+ sendObjectInfo := func(info ObjectInfo) {
+ select {
+ case objectStatCh <- info:
+ case <-ctx.Done():
+ }
+ }
+
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(objectStatCh)
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return objectStatCh
+ }
+
+ // Validate incoming object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
+ defer close(objectStatCh)
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return objectStatCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(objectStatCh chan<- ObjectInfo) {
+ defer func() {
+ if contextCanceled(ctx) {
+ objectStatCh <- ObjectInfo{
+ Err: ctx.Err(),
+ }
+ }
+ close(objectStatCh)
+ }()
+
+ // Save continuationToken for next request.
+ var continuationToken string
+ for {
+ // Get list of objects a maximum of 1000 per request.
+ result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken,
+ fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers)
+ if err != nil {
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, object := range result.Contents {
+ object.ETag = trimEtag(object.ETag)
+ select {
+ // Send object content.
+ case objectStatCh <- object:
+				// If the caller signals done, return here.
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ select {
+ // Send object prefixes.
+ case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
+				// If the caller signals done, return here.
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ // If continuation token present, save it for next request.
+ if result.NextContinuationToken != "" {
+ continuationToken = result.NextContinuationToken
+ }
+
+			// Listing ends when the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+
+ // Add this to catch broken S3 API implementations.
+ if continuationToken == "" {
+ sendObjectInfo(ObjectInfo{
+ Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL),
+ })
+ return
+ }
+ }
+ }(objectStatCh)
+ return objectStatCh
+}
+
+// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?continuation-token - Used to continue iterating over a set of objects
+// ?metadata - Specifies if we want metadata for the objects as part of list operation.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?start-after - Sets a marker to start listing lexically at this key onwards.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ListBucketV2Result{}, err
+ }
+ // Validate object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ return ListBucketV2Result{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+
+ // Always set list-type in ListObjects V2
+ urlValues.Set("list-type", "2")
+
+ if metadata {
+ urlValues.Set("metadata", "true")
+ }
+
+ // Set this conditionally if asked
+ if startAfter != "" {
+ urlValues.Set("start-after", startAfter)
+ }
+
+ // Always set encoding-type in ListObjects V2
+ urlValues.Set("encoding-type", "url")
+
+ // Set object prefix, prefix value to be set to empty is okay.
+ urlValues.Set("prefix", objectPrefix)
+
+ // Set delimiter, delimiter value to be set to empty is okay.
+ urlValues.Set("delimiter", delimiter)
+
+ // Set continuation token
+ if continuationToken != "" {
+ urlValues.Set("continuation-token", continuationToken)
+ }
+
+ // Fetch owner when listing
+ if fetchOwner {
+ urlValues.Set("fetch-owner", "true")
+ }
+
+ // Set max keys.
+ if maxkeys > 0 {
+ urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+ }
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListBucketV2Result{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Decode listBuckets XML.
+ listBucketResult := ListBucketV2Result{}
+ if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
+ return listBucketResult, err
+ }
+
+ // This is an additional verification check to make
+ // sure proper responses are received.
+ if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
+ return listBucketResult, ErrorResponse{
+ Code: "NotImplemented",
+ Message: "Truncated response should have continuation token set",
+ }
+ }
+
+ for i, obj := range listBucketResult.Contents {
+ listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
+ if err != nil {
+ return listBucketResult, err
+ }
+ listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond)
+ }
+
+ for i, obj := range listBucketResult.CommonPrefixes {
+ listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
+ if err != nil {
+ return listBucketResult, err
+ }
+ }
+
+ // Success.
+ return listBucketResult, nil
+}
+
+func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
+ // Allocate new list objects channel.
+ objectStatCh := make(chan ObjectInfo, 1)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if opts.Recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+
+ sendObjectInfo := func(info ObjectInfo) {
+ select {
+ case objectStatCh <- info:
+ case <-ctx.Done():
+ }
+ }
+
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(objectStatCh)
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return objectStatCh
+ }
+ // Validate incoming object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
+ defer close(objectStatCh)
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return objectStatCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(objectStatCh chan<- ObjectInfo) {
+ defer func() {
+ if contextCanceled(ctx) {
+ objectStatCh <- ObjectInfo{
+ Err: ctx.Err(),
+ }
+ }
+ close(objectStatCh)
+ }()
+
+ marker := opts.StartAfter
+ for {
+ // Get list of objects a maximum of 1000 per request.
+ result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers)
+ if err != nil {
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, object := range result.Contents {
+ // Save the marker.
+ marker = object.Key
+ object.ETag = trimEtag(object.ETag)
+ select {
+ // Send object content.
+ case objectStatCh <- object:
+				// If the caller signals done, return here.
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ select {
+ // Send object prefixes.
+ case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
+				// If the caller signals done, return here.
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ // If next marker present, save it for next request.
+ if result.NextMarker != "" {
+ marker = result.NextMarker
+ }
+
+			// Listing ends when the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectStatCh)
+ return objectStatCh
+}
+
+func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
+ // Allocate new list objects channel.
+ resultCh := make(chan ObjectInfo, 1)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if opts.Recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+
+ sendObjectInfo := func(info ObjectInfo) {
+ select {
+ case resultCh <- info:
+ case <-ctx.Done():
+ }
+ }
+
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(resultCh)
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return resultCh
+ }
+
+ // Validate incoming object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
+ defer close(resultCh)
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return resultCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(resultCh chan<- ObjectInfo) {
+ defer func() {
+ if contextCanceled(ctx) {
+ resultCh <- ObjectInfo{
+ Err: ctx.Err(),
+ }
+ }
+ close(resultCh)
+ }()
+
+ var (
+ keyMarker = ""
+ versionIDMarker = ""
+ )
+
+ for {
+ // Get list of objects a maximum of 1000 per request.
+ result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter)
+ if err != nil {
+ sendObjectInfo(ObjectInfo{
+ Err: err,
+ })
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, version := range result.Versions {
+ info := ObjectInfo{
+ ETag: trimEtag(version.ETag),
+ Key: version.Key,
+ LastModified: version.LastModified.Truncate(time.Millisecond),
+ Size: version.Size,
+ Owner: version.Owner,
+ StorageClass: version.StorageClass,
+ IsLatest: version.IsLatest,
+ VersionID: version.VersionID,
+ IsDeleteMarker: version.isDeleteMarker,
+ UserTags: version.UserTags,
+ UserMetadata: version.UserMetadata,
+ Internal: version.Internal,
+ }
+ select {
+ // Send object version info.
+ case resultCh <- info:
+				// If the caller signals done, return here.
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ select {
+ // Send object prefixes.
+ case resultCh <- ObjectInfo{Key: obj.Prefix}:
+				// If the caller signals done, return here.
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ // If next key marker is present, save it for next request.
+ if result.NextKeyMarker != "" {
+ keyMarker = result.NextKeyMarker
+ }
+
+ // If next version id marker is present, save it for next request.
+ if result.NextVersionIDMarker != "" {
+ versionIDMarker = result.NextVersionIDMarker
+ }
+
+			// Listing ends when the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(resultCh)
+ return resultCh
+}
+
+// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects
+// and their versions in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?key-marker - Specifies the key to start with when listing objects in a bucket.
+// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) {
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ListVersionsResult{}, err
+ }
+ // Validate object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
+ return ListVersionsResult{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+
+ // Set versions to trigger versioning API
+ urlValues.Set("versions", "")
+
+ // Set object prefix, prefix value to be set to empty is okay.
+ urlValues.Set("prefix", opts.Prefix)
+
+ // Set delimiter, delimiter value to be set to empty is okay.
+ urlValues.Set("delimiter", delimiter)
+
+ // Set object marker.
+ if keyMarker != "" {
+ urlValues.Set("key-marker", keyMarker)
+ }
+
+ // Set max keys.
+ if opts.MaxKeys > 0 {
+ urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys))
+ }
+
+ // Set version ID marker
+ if versionIDMarker != "" {
+ urlValues.Set("version-id-marker", versionIDMarker)
+ }
+
+ if opts.WithMetadata {
+ urlValues.Set("metadata", "true")
+ }
+
+ // Always set encoding-type
+ urlValues.Set("encoding-type", "url")
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: opts.headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListVersionsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Decode ListVersionsResult XML.
+ listObjectVersionsOutput := ListVersionsResult{}
+ err = xmlDecoder(resp.Body, &listObjectVersionsOutput)
+ if err != nil {
+ return ListVersionsResult{}, err
+ }
+
+ for i, obj := range listObjectVersionsOutput.Versions {
+ listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType)
+ if err != nil {
+ return listObjectVersionsOutput, err
+ }
+ }
+
+ for i, obj := range listObjectVersionsOutput.CommonPrefixes {
+ listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType)
+ if err != nil {
+ return listObjectVersionsOutput, err
+ }
+ }
+
+ if listObjectVersionsOutput.NextKeyMarker != "" {
+ listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType)
+ if err != nil {
+ return listObjectVersionsOutput, err
+ }
+ }
+
+ return listObjectVersionsOutput, nil
+}
+
+// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?marker - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) {
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ListBucketResult{}, err
+ }
+ // Validate object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ return ListBucketResult{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+
+ // Set object prefix, prefix value to be set to empty is okay.
+ urlValues.Set("prefix", objectPrefix)
+
+ // Set delimiter, delimiter value to be set to empty is okay.
+ urlValues.Set("delimiter", delimiter)
+
+ // Set object marker.
+ if objectMarker != "" {
+ urlValues.Set("marker", objectMarker)
+ }
+
+ // Set max keys.
+ if maxkeys > 0 {
+ urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+ }
+
+ // Always set encoding-type
+ urlValues.Set("encoding-type", "url")
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListBucketResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ // Decode listBuckets XML.
+ listBucketResult := ListBucketResult{}
+ err = xmlDecoder(resp.Body, &listBucketResult)
+ if err != nil {
+ return listBucketResult, err
+ }
+
+ for i, obj := range listBucketResult.Contents {
+ listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
+ if err != nil {
+ return listBucketResult, err
+ }
+ listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond)
+ }
+
+ for i, obj := range listBucketResult.CommonPrefixes {
+ listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
+ if err != nil {
+ return listBucketResult, err
+ }
+ }
+
+ if listBucketResult.NextMarker != "" {
+ listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType)
+ if err != nil {
+ return listBucketResult, err
+ }
+ }
+
+ return listBucketResult, nil
+}
+
+// ListObjectsOptions holds all options of a list object request
+type ListObjectsOptions struct {
+ // Include objects versions in the listing
+ WithVersions bool
+ // Include objects metadata in the listing
+ WithMetadata bool
+ // Only list objects with the prefix
+ Prefix string
+ // Ignore '/' delimiter
+ Recursive bool
+	// The maximum number of objects requested per
+	// batch; an advanced use-case not useful for most
+	// applications.
+	MaxKeys int
+	// StartAfter starts listing lexically at this
+	// object onwards; this value is also used as the
+	// Marker when `UseV1` is set to true.
+	StartAfter string
+
+ // Use the deprecated list objects V1 API
+ UseV1 bool
+
+ headers http.Header
+}
+
+// Set adds a key value pair to the options. The
+// key-value pair will be part of the HTTP GET request
+// headers.
+func (o *ListObjectsOptions) Set(key, value string) {
+ if o.headers == nil {
+ o.headers = make(http.Header)
+ }
+ o.headers.Set(key, value)
+}
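+
+// Illustrative sketch (not part of the upstream file): listing object versions
+// under a prefix and draining the channel fully, assuming `api` is an
+// initialized Client and `ctx` a valid context.
+//
+//	opts := minio.ListObjectsOptions{Prefix: "photos/", Recursive: true, WithVersions: true}
+//	for object := range api.ListObjects(ctx, "mybucket", opts) {
+//	    if object.Err != nil {
+//	        log.Fatalln(object.Err)
+//	    }
+//	    fmt.Println(object.Key, object.VersionID)
+//	}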
+
+// ListObjects returns objects list after evaluating the passed options.
+//
+// api := client.New(....)
+// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
+// fmt.Println(object)
+// }
+//
+// If the caller cancels the context, the last entry on the 'chan ObjectInfo' will be the context.Err().
+// The caller must drain the channel entirely and wait until it is closed before proceeding;
+// otherwise, goroutines might be leaked.
+func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
+ if opts.WithVersions {
+ return c.listObjectVersions(ctx, bucketName, opts)
+ }
+
+ // Use legacy list objects v1 API
+ if opts.UseV1 {
+ return c.listObjects(ctx, bucketName, opts)
+ }
+
+	// Check whether this is a snowball region; if so, ListObjectsV2 does not work, so fall back to list objects V1.
+ if location, ok := c.bucketLocCache.Get(bucketName); ok {
+ if location == "snowball" {
+ return c.listObjects(ctx, bucketName, opts)
+ }
+ }
+
+ return c.listObjectsV2(ctx, bucketName, opts)
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete objects matching the
+// objectPrefix from the specified bucket. If recursion is enabled,
+// it lists all subdirectories and their contents.
+//
+// The input parameters are bucketName, objectPrefix, and recursive.
+// If recursive is set to 'true', this function returns all
+// the multipart objects in the given bucket.
+//
+// api := client.New(....)
+//	// Recursively list all objects in 'mytestbucket'
+// recursive := true
+// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
+// fmt.Println(message)
+// }
+func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+ return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
+}
+
+// contextCanceled returns whether a context is canceled.
+func contextCanceled(ctx context.Context) bool {
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ return false
+ }
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+ // Allocate channel for multipart uploads.
+ objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+ // Delimiter is set to "/" by default.
+ delimiter := "/"
+ if recursive {
+ // If recursive do not delimit.
+ delimiter = ""
+ }
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(objectMultipartStatCh)
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return objectMultipartStatCh
+ }
+ // Validate incoming object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ defer close(objectMultipartStatCh)
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return objectMultipartStatCh
+ }
+ go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
+ defer func() {
+ if contextCanceled(ctx) {
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: ctx.Err(),
+ }
+ }
+ close(objectMultipartStatCh)
+ }()
+
+ // object and upload ID marker for future requests.
+ var objectMarker string
+ var uploadIDMarker string
+ for {
+ // list all multipart uploads.
+ result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
+ if err != nil {
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return
+ }
+ objectMarker = result.NextKeyMarker
+ uploadIDMarker = result.NextUploadIDMarker
+
+ // Send all multipart uploads.
+ for _, obj := range result.Uploads {
+ // Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
+ select {
+ // Send individual uploads here.
+ case objectMultipartStatCh <- obj:
+ // If the context is canceled
+ case <-ctx.Done():
+ return
+ }
+ }
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ select {
+ // Send delimited prefixes here.
+ case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}:
+ // If context is canceled.
+ case <-ctx.Done():
+ return
+ }
+ }
+			// Listing ends if the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectMultipartStatCh)
+ // return.
+ return objectMultipartStatCh
+}
+
+// listMultipartUploadsQuery - (List Multipart Uploads).
+// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters. :-
+// ---------
+// ?key-marker - Specifies the multipart upload after which listing should begin.
+// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
+func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set uploads.
+ urlValues.Set("uploads", "")
+ // Set object key marker.
+ if keyMarker != "" {
+ urlValues.Set("key-marker", keyMarker)
+ }
+ // Set upload id marker.
+ if uploadIDMarker != "" {
+ urlValues.Set("upload-id-marker", uploadIDMarker)
+ }
+
+ // Set object prefix, prefix value to be set to empty is okay.
+ urlValues.Set("prefix", prefix)
+
+ // Set delimiter, delimiter value to be set to empty is okay.
+ urlValues.Set("delimiter", delimiter)
+
+ // Always set encoding-type
+ urlValues.Set("encoding-type", "url")
+
+ // maxUploads should be 1000 or less.
+ if maxUploads > 0 {
+ // Set max-uploads.
+ urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
+ }
+
+ // Execute GET on bucketName to list multipart uploads.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListMultipartUploadsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ // Decode response body.
+ listMultipartUploadsResult := ListMultipartUploadsResult{}
+ err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
+ if err != nil {
+ return listMultipartUploadsResult, err
+ }
+
+ listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType)
+ if err != nil {
+ return listMultipartUploadsResult, err
+ }
+
+ listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType)
+ if err != nil {
+ return listMultipartUploadsResult, err
+ }
+
+ for i, obj := range listMultipartUploadsResult.Uploads {
+ listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType)
+ if err != nil {
+ return listMultipartUploadsResult, err
+ }
+ }
+
+ for i, obj := range listMultipartUploadsResult.CommonPrefixes {
+ listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType)
+ if err != nil {
+ return listMultipartUploadsResult, err
+ }
+ }
+
+ return listMultipartUploadsResult, nil
+}
+
+// listObjectParts lists all object parts recursively.
+//
+//lint:ignore U1000 Keep this around
+func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
+ // Part number marker for the next batch of request.
+ var nextPartNumberMarker int
+ partsInfo = make(map[int]ObjectPart)
+ for {
+ // Get list of uploaded parts a maximum of 1000 per request.
+ listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
+ if err != nil {
+ return nil, err
+ }
+ // Append to parts info.
+ for _, part := range listObjPartsResult.ObjectParts {
+			// Trim the surrounding double quotes from the ETag.
+ part.ETag = trimEtag(part.ETag)
+ partsInfo[part.PartNumber] = part
+ }
+ // Keep part number marker, for the next iteration.
+ nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
+		// Listing ends when the result is not truncated; return right here.
+ if !listObjPartsResult.IsTruncated {
+ break
+ }
+ }
+
+ // Return all the parts.
+ return partsInfo, nil
+}
+
+// findUploadIDs lists all incomplete uploads and finds the upload IDs matching the object name.
+func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) {
+ var uploadIDs []string
+ // Make list incomplete uploads recursive.
+ isRecursive := true
+ // List all incomplete uploads.
+ for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) {
+ if mpUpload.Err != nil {
+ return nil, mpUpload.Err
+ }
+ if objectName == mpUpload.Key {
+ uploadIDs = append(uploadIDs, mpUpload.UploadID)
+ }
+ }
+	// Return all matching upload IDs.
+ return uploadIDs, nil
+}
+
+// listObjectPartsQuery (List Parts query)
+// - lists some or all (up to 1000) parts that have been uploaded
+// for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return
+// a subset of the uploads in a bucket, request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should
+// begin.
+// ?max-parts - Maximum parts to be listed per request.
+func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set part number marker.
+ urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
+ // Set upload id.
+ urlValues.Set("uploadId", uploadID)
+
+ // maxParts should be 1000 or less.
+ if maxParts > 0 {
+ // Set max parts.
+ urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
+ }
+
+ // Execute GET on objectName to get list of parts.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListObjectPartsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode list object parts XML.
+ listObjectPartsResult := ListObjectPartsResult{}
+ err = xmlDecoder(resp.Body, &listObjectPartsResult)
+ if err != nil {
+ return listObjectPartsResult, err
+ }
+ return listObjectPartsResult, nil
+}
+
+// Decode an S3 object name according to the encoding type
+func decodeS3Name(name, encodingType string) (string, error) {
+ switch encodingType {
+ case "url":
+ return url.QueryUnescape(name)
+ default:
+ return name, nil
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
new file mode 100644
index 000000000..0c027d550
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
@@ -0,0 +1,176 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// objectLegalHold - object legal hold specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html
+type objectLegalHold struct {
+ XMLNS string `xml:"xmlns,attr,omitempty"`
+ XMLName xml.Name `xml:"LegalHold"`
+ Status LegalHoldStatus `xml:"Status,omitempty"`
+}
+
+// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call
+type PutObjectLegalHoldOptions struct {
+ VersionID string
+ Status *LegalHoldStatus
+}
+
+// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call
+type GetObjectLegalHoldOptions struct {
+ VersionID string
+}
+
+// LegalHoldStatus - object legal hold status.
+type LegalHoldStatus string
+
+const (
+ // LegalHoldEnabled indicates legal hold is enabled
+ LegalHoldEnabled LegalHoldStatus = "ON"
+
+ // LegalHoldDisabled indicates legal hold is disabled
+ LegalHoldDisabled LegalHoldStatus = "OFF"
+)
+
+func (r LegalHoldStatus) String() string {
+ return string(r)
+}
+
+// IsValid - check whether this legal hold status is valid or not.
+func (r LegalHoldStatus) IsValid() bool {
+ return r == LegalHoldEnabled || r == LegalHoldDisabled
+}
+
+func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) {
+ if status == nil {
+ return nil, fmt.Errorf("Status not set")
+ }
+ if !status.IsValid() {
+ return nil, fmt.Errorf("invalid legal hold status `%v`", status)
+ }
+ legalHold := &objectLegalHold{
+ Status: *status,
+ }
+ return legalHold, nil
+}
+
+// PutObjectLegalHold : sets object legal hold for a given object and versionID.
+func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("legal-hold", "")
+
+ if opts.VersionID != "" {
+ urlValues.Set("versionId", opts.VersionID)
+ }
+
+ lh, err := newObjectLegalHold(opts.Status)
+ if err != nil {
+ return err
+ }
+
+ lhData, err := xml.Marshal(lh)
+ if err != nil {
+ return err
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(lhData),
+ contentLength: int64(len(lhData)),
+ contentMD5Base64: sumMD5Base64(lhData),
+ contentSHA256Hex: sum256Hex(lhData),
+ }
+
+ // Execute PUT Object Legal Hold.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ return nil
+}
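+
+// Illustrative sketch (assumption, not upstream code): enabling a legal hold on
+// a specific object version, assuming `api` is an initialized Client and
+// `versionID` is supplied by the caller.
+//
+//	status := minio.LegalHoldEnabled
+//	err := api.PutObjectLegalHold(ctx, "mybucket", "myobject",
+//	    minio.PutObjectLegalHoldOptions{Status: &status, VersionID: versionID})
+//	if err != nil {
+//	    log.Fatalln(err)
+//	}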
+
+// GetObjectLegalHold gets legal-hold status of given object.
+func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ urlValues := make(url.Values)
+ urlValues.Set("legal-hold", "")
+
+ if opts.VersionID != "" {
+ urlValues.Set("versionId", opts.VersionID)
+ }
+
+	// Execute GET to fetch the object legal-hold status.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ lh := &objectLegalHold{}
+ if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil {
+ return nil, err
+ }
+
+ return &lh.Status, nil
+}
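+
+// Illustrative sketch (assumption, not upstream code): reading back the
+// legal-hold status of the same object version.
+//
+//	status, err := api.GetObjectLegalHold(ctx, "mybucket", "myobject",
+//	    minio.GetObjectLegalHoldOptions{VersionID: versionID})
+//	if err != nil {
+//	    log.Fatalln(err)
+//	}
+//	if *status == minio.LegalHoldEnabled {
+//	    fmt.Println("object is under legal hold")
+//	}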
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go
new file mode 100644
index 000000000..f0a439853
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go
@@ -0,0 +1,241 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// RetentionMode - object retention mode.
+type RetentionMode string
+
+const (
+ // Governance - governance mode.
+ Governance RetentionMode = "GOVERNANCE"
+
+ // Compliance - compliance mode.
+ Compliance RetentionMode = "COMPLIANCE"
+)
+
+func (r RetentionMode) String() string {
+ return string(r)
+}
+
+// IsValid - check whether this retention mode is valid or not.
+func (r RetentionMode) IsValid() bool {
+ return r == Governance || r == Compliance
+}
+
+// ValidityUnit - retention validity unit.
+type ValidityUnit string
+
+const (
+ // Days - denotes no. of days.
+ Days ValidityUnit = "DAYS"
+
+ // Years - denotes no. of years.
+ Years ValidityUnit = "YEARS"
+)
+
+func (unit ValidityUnit) String() string {
+ return string(unit)
+}
+
+// IsValid - check whether this validity unit is valid or not.
+func (unit ValidityUnit) isValid() bool {
+ return unit == Days || unit == Years
+}
+
+// Retention - bucket level retention configuration.
+type Retention struct {
+ Mode RetentionMode
+ Validity time.Duration
+}
+
+func (r Retention) String() string {
+ return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity)
+}
+
+// IsEmpty - returns whether retention is empty or not.
+func (r Retention) IsEmpty() bool {
+ return r.Mode == "" || r.Validity == 0
+}
+
+// objectLockConfig - object lock configuration specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
+type objectLockConfig struct {
+ XMLNS string `xml:"xmlns,attr,omitempty"`
+ XMLName xml.Name `xml:"ObjectLockConfiguration"`
+ ObjectLockEnabled string `xml:"ObjectLockEnabled"`
+ Rule *struct {
+ DefaultRetention struct {
+ Mode RetentionMode `xml:"Mode"`
+ Days *uint `xml:"Days"`
+ Years *uint `xml:"Years"`
+ } `xml:"DefaultRetention"`
+ } `xml:"Rule,omitempty"`
+}
+
+func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) {
+ config := &objectLockConfig{
+ ObjectLockEnabled: "Enabled",
+ }
+
+ if mode != nil && validity != nil && unit != nil {
+ if !mode.IsValid() {
+ return nil, fmt.Errorf("invalid retention mode `%v`", mode)
+ }
+
+ if !unit.isValid() {
+ return nil, fmt.Errorf("invalid validity unit `%v`", unit)
+ }
+
+ config.Rule = &struct {
+ DefaultRetention struct {
+ Mode RetentionMode `xml:"Mode"`
+ Days *uint `xml:"Days"`
+ Years *uint `xml:"Years"`
+ } `xml:"DefaultRetention"`
+ }{}
+
+ config.Rule.DefaultRetention.Mode = *mode
+ if *unit == Days {
+ config.Rule.DefaultRetention.Days = validity
+ } else {
+ config.Rule.DefaultRetention.Years = validity
+ }
+
+ return config, nil
+ }
+
+ if mode == nil && validity == nil && unit == nil {
+ return config, nil
+ }
+
+ return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed")
+}
+
+// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
+func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("object-lock", "")
+
+ config, err := newObjectLockConfig(mode, validity, unit)
+ if err != nil {
+ return err
+ }
+
+ configData, err := xml.Marshal(config)
+ if err != nil {
+ return err
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(configData),
+ contentLength: int64(len(configData)),
+ contentMD5Base64: sumMD5Base64(configData),
+ contentSHA256Hex: sum256Hex(configData),
+ }
+
+ // Execute PUT bucket object lock configuration.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// GetObjectLockConfig gets object lock configuration of given bucket.
+func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", nil, nil, nil, err
+ }
+
+ urlValues := make(url.Values)
+ urlValues.Set("object-lock", "")
+
+	// Execute GET on bucket to fetch the object lock configuration.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return "", nil, nil, nil, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ config := &objectLockConfig{}
+ if err = xml.NewDecoder(resp.Body).Decode(config); err != nil {
+ return "", nil, nil, nil, err
+ }
+
+ if config.Rule != nil {
+ mode = &config.Rule.DefaultRetention.Mode
+ if config.Rule.DefaultRetention.Days != nil {
+ validity = config.Rule.DefaultRetention.Days
+ days := Days
+ unit = &days
+ } else {
+ validity = config.Rule.DefaultRetention.Years
+ years := Years
+ unit = &years
+ }
+ return config.ObjectLockEnabled, mode, validity, unit, nil
+ }
+ return config.ObjectLockEnabled, nil, nil, nil, nil
+}
+
+// GetBucketObjectLockConfig gets object lock configuration of given bucket.
+func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
+ _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName)
+ return mode, validity, unit, err
+}
+
+// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
+func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
+ return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit)
+}
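
For reference, a minimal usage sketch of the bucket object-lock API declared above. The endpoint, credentials, and bucket name are hypothetical placeholders, and the bucket is assumed to have been created with object locking enabled:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Connect to an S3-compatible endpoint (placeholder values).
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Set a default 30-day GOVERNANCE retention on the bucket.
	mode := minio.Governance
	validity := uint(30)
	unit := minio.Days
	if err := client.SetBucketObjectLockConfig(ctx, "my-locked-bucket", &mode, &validity, &unit); err != nil {
		log.Fatal(err)
	}

	// Read the configuration back.
	enabled, m, v, u, err := client.GetObjectLockConfig(ctx, "my-locked-bucket")
	if err != nil {
		log.Fatal(err)
	}
	if m != nil && v != nil && u != nil {
		log.Printf("ObjectLockEnabled=%s default retention: %s %d %s", enabled, *m, *v, *u)
	} else {
		log.Printf("ObjectLockEnabled=%s, no default retention rule", enabled)
	}
}
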
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go
new file mode 100644
index 000000000..b29cb1f8d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go
@@ -0,0 +1,165 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// objectRetention - object retention specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
+type objectRetention struct {
+ XMLNS string `xml:"xmlns,attr,omitempty"`
+ XMLName xml.Name `xml:"Retention"`
+ Mode RetentionMode `xml:"Mode,omitempty"`
+ RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"`
+}
+
+func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) {
+ objectRetention := &objectRetention{}
+
+ if date != nil && !date.IsZero() {
+ objectRetention.RetainUntilDate = date
+ }
+ if mode != nil {
+ if !mode.IsValid() {
+ return nil, fmt.Errorf("invalid retention mode `%v`", mode)
+ }
+ objectRetention.Mode = *mode
+ }
+
+ return objectRetention, nil
+}
+
+// PutObjectRetentionOptions represents options specified by user for PutObject call
+type PutObjectRetentionOptions struct {
+ GovernanceBypass bool
+ Mode *RetentionMode
+ RetainUntilDate *time.Time
+ VersionID string
+}
+
+// PutObjectRetention sets object retention for a given object and versionID.
+func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("retention", "")
+
+ if opts.VersionID != "" {
+ urlValues.Set("versionId", opts.VersionID)
+ }
+
+ retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate)
+ if err != nil {
+ return err
+ }
+
+ retentionData, err := xml.Marshal(retention)
+ if err != nil {
+ return err
+ }
+
+ // Build headers.
+ headers := make(http.Header)
+
+ if opts.GovernanceBypass {
+		// Set the bypass governance retention header
+ headers.Set(amzBypassGovernance, "true")
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(retentionData),
+ contentLength: int64(len(retentionData)),
+ contentMD5Base64: sumMD5Base64(retentionData),
+ contentSHA256Hex: sum256Hex(retentionData),
+ customHeader: headers,
+ }
+
+ // Execute PUT Object Retention.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ return nil
+}
+
+// GetObjectRetention gets retention of given object.
+func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, nil, err
+ }
+
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, nil, err
+ }
+ urlValues := make(url.Values)
+ urlValues.Set("retention", "")
+ if versionID != "" {
+ urlValues.Set("versionId", versionID)
+ }
+	// Execute GET on the object to fetch its retention configuration.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ retention := &objectRetention{}
+ if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil {
+ return nil, nil, err
+ }
+
+ return &retention.Mode, retention.RetainUntilDate, nil
+}
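
A minimal sketch of the per-object retention API above; client construction is as in the earlier object-lock sketch, and the bucket and object names are hypothetical:

package main

import (
	"context"
	"log"
	"time"

	"github.com/minio/minio-go/v7"
)

// demoObjectRetention sets a 30-day COMPLIANCE retention on an object and reads it back.
// The *minio.Client is assumed to be constructed as in the earlier sketch.
func demoObjectRetention(ctx context.Context, client *minio.Client) error {
	mode := minio.Compliance
	until := time.Now().UTC().AddDate(0, 0, 30)

	err := client.PutObjectRetention(ctx, "my-locked-bucket", "reports/2024.csv", minio.PutObjectRetentionOptions{
		Mode:            &mode,
		RetainUntilDate: &until,
		// GovernanceBypass: true, // only relevant when relaxing a GOVERNANCE retention
	})
	if err != nil {
		return err
	}

	gotMode, retainUntil, err := client.GetObjectRetention(ctx, "my-locked-bucket", "reports/2024.csv", "")
	if err != nil {
		return err
	}
	log.Printf("retention mode=%s until=%v", *gotMode, retainUntil)
	return nil
}
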
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
new file mode 100644
index 000000000..6623e262a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
@@ -0,0 +1,177 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// PutObjectTaggingOptions holds an object version id
+// to update tag(s) of a specific object version
+type PutObjectTaggingOptions struct {
+ VersionID string
+ Internal AdvancedObjectTaggingOptions
+}
+
+// AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use.
+type AdvancedObjectTaggingOptions struct {
+ ReplicationProxyRequest string
+}
+
+// PutObjectTagging replaces or creates object tag(s) and can target
+// a specific object version in a versioned bucket.
+func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("tagging", "")
+
+ if opts.VersionID != "" {
+ urlValues.Set("versionId", opts.VersionID)
+ }
+ headers := make(http.Header, 0)
+ if opts.Internal.ReplicationProxyRequest != "" {
+ headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
+ }
+ reqBytes, err := xml.Marshal(otags)
+ if err != nil {
+ return err
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(reqBytes),
+ contentLength: int64(len(reqBytes)),
+ contentMD5Base64: sumMD5Base64(reqBytes),
+ customHeader: headers,
+ }
+
+	// Execute PUT to set object tag(s).
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ return nil
+}
+
+// GetObjectTaggingOptions holds the object version ID
+// to fetch the tagging key/value pairs
+type GetObjectTaggingOptions struct {
+ VersionID string
+ Internal AdvancedObjectTaggingOptions
+}
+
+// GetObjectTagging fetches object tag(s) with options to target
+// a specific object version in a versioned bucket.
+func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("tagging", "")
+
+ if opts.VersionID != "" {
+ urlValues.Set("versionId", opts.VersionID)
+ }
+ headers := make(http.Header, 0)
+ if opts.Internal.ReplicationProxyRequest != "" {
+ headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
+ }
+ // Execute GET on object to get object tag(s)
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: headers,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ return tags.ParseObjectXML(resp.Body)
+}
+
+// RemoveObjectTaggingOptions holds the version id of the object to remove
+type RemoveObjectTaggingOptions struct {
+ VersionID string
+ Internal AdvancedObjectTaggingOptions
+}
+
+// RemoveObjectTagging removes object tag(s) with options to control a specific object
+// version in a versioned bucket
+func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("tagging", "")
+
+ if opts.VersionID != "" {
+ urlValues.Set("versionId", opts.VersionID)
+ }
+ headers := make(http.Header, 0)
+ if opts.Internal.ReplicationProxyRequest != "" {
+ headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
+ }
+ // Execute DELETE on object to remove object tag(s)
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: headers,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ // S3 returns "204 No content" after Object tag deletion.
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ return err
+}
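
A short sketch of the object tagging API above. The tag keys and object names are placeholders; client construction is as in the earlier sketch:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/tags"
)

// demoObjectTagging sets, reads and removes tags on an object.
// The *minio.Client is assumed to be constructed as in the earlier sketch.
func demoObjectTagging(ctx context.Context, client *minio.Client) error {
	// Build an object tag set (the second argument marks these as object tags).
	t, err := tags.NewTags(map[string]string{"env": "dev", "owner": "team-a"}, true)
	if err != nil {
		return err
	}

	if err := client.PutObjectTagging(ctx, "my-bucket", "logs/app.log", t, minio.PutObjectTaggingOptions{}); err != nil {
		return err
	}

	got, err := client.GetObjectTagging(ctx, "my-bucket", "logs/app.log", minio.GetObjectTaggingOptions{})
	if err != nil {
		return err
	}
	log.Printf("tags: %v", got.ToMap())

	// Remove all tags from the (unversioned) object.
	return client.RemoveObjectTagging(ctx, "my-bucket", "logs/app.log", minio.RemoveObjectTaggingOptions{})
}
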
diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go
new file mode 100644
index 000000000..9e85f8181
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go
@@ -0,0 +1,228 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/signer"
+)
+
+// presignURL - Returns a presigned URL for an input 'method'.
+// Expires can be at most 7 days (604800 seconds) and at least 1 second.
+func (c *Client) presignURL(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
+ // Input validation.
+ if method == "" {
+ return nil, errInvalidArgument("method cannot be empty.")
+ }
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ if err = isValidExpiry(expires); err != nil {
+ return nil, err
+ }
+
+ // Convert expires into seconds.
+ expireSeconds := int64(expires / time.Second)
+ reqMetadata := requestMetadata{
+ presignURL: true,
+ bucketName: bucketName,
+ objectName: objectName,
+ expires: expireSeconds,
+ queryValues: reqParams,
+ extraPresignHeader: extraHeaders,
+ }
+
+ // Instantiate a new request.
+ // Since expires is set newRequest will presign the request.
+ var req *http.Request
+ if req, err = c.newRequest(ctx, method, reqMetadata); err != nil {
+ return nil, err
+ }
+ return req.URL, nil
+}
+
+// PresignedGetObject - Returns a presigned URL to access object
+// data without credentials. The URL can have an expiry of at most
+// 7 days and at least 1 second. Additionally, you can override a
+// set of response headers using the query parameters.
+func (c *Client) PresignedGetObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil)
+}
+
+// PresignedHeadObject - Returns a presigned URL to access object
+// metadata without credentials. The URL can have an expiry of at
+// most 7 days and at least 1 second. Additionally, you can override
+// a set of response headers using the query parameters.
+func (c *Client) PresignedHeadObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. The URL can have an expiry of at most 7 days
+// and at least 1 second.
+func (c *Client) PresignedPutObject(ctx context.Context, bucketName, objectName string, expires time.Duration) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil)
+}
+
+// PresignHeader - similar to Presign() but allows including HTTP headers that
+// will be used to build the signature. The request using the resulting URL will
+// need to have the exact same headers to be added for signature validation to
+// pass.
+//
+// FIXME: The extra header parameter should be included in Presign() in the next
+// major version bump, and this function should then be deprecated.
+func (c *Client) PresignHeader(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
+ return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)
+}
+
+// Presign - returns a presigned URL for any http method of your choice along
+// with custom request params and extra signed headers. The URL can have an
+// expiry of at most 7 days and at least 1 second.
+func (c *Client) Presign(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil)
+}
+
+// PresignedPostPolicy - Returns a POST URL and form data to upload an object.
+func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+ // Validate input arguments.
+ if p.expiration.IsZero() {
+ return nil, nil, errors.New("Expiration time must be specified")
+ }
+ if _, ok := p.formData["key"]; !ok {
+ return nil, nil, errors.New("object key must be specified")
+ }
+ if _, ok := p.formData["bucket"]; !ok {
+ return nil, nil, errors.New("bucket name must be specified")
+ }
+
+ bucketName := p.formData["bucket"]
+ // Fetch the bucket location.
+ location, err := c.getBucketLocation(ctx, bucketName)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)
+
+ u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get credentials from the configured credentials provider.
+ credValues, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var (
+ signerType = credValues.SignerType
+ sessionToken = credValues.SessionToken
+ accessKeyID = credValues.AccessKeyID
+ secretAccessKey = credValues.SecretAccessKey
+ )
+
+ if signerType.IsAnonymous() {
+ return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials")
+ }
+
+ // Keep time.
+ t := time.Now().UTC()
+ // For signature version '2' handle here.
+ if signerType.IsV2() {
+ policyBase64 := p.base64()
+ p.formData["policy"] = policyBase64
+ // For Google endpoint set this value to be 'GoogleAccessId'.
+ if s3utils.IsGoogleEndpoint(*c.endpointURL) {
+ p.formData["GoogleAccessId"] = accessKeyID
+ } else {
+ // For all other endpoints set this value to be 'AWSAccessKeyId'.
+ p.formData["AWSAccessKeyId"] = accessKeyID
+ }
+ // Sign the policy.
+ p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
+ return u, p.formData, nil
+ }
+
+ // Add date policy.
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-date",
+ value: t.Format(iso8601DateFormat),
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Add algorithm policy.
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-algorithm",
+ value: signV4Algorithm,
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Add a credential policy.
+ credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3)
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-credential",
+ value: credential,
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ if sessionToken != "" {
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-security-token",
+ value: sessionToken,
+ }); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Get base64 encoded policy.
+ policyBase64 := p.base64()
+
+ // Fill in the form data.
+ p.formData["policy"] = policyBase64
+ p.formData["x-amz-algorithm"] = signV4Algorithm
+ p.formData["x-amz-credential"] = credential
+ p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
+ if sessionToken != "" {
+ p.formData["x-amz-security-token"] = sessionToken
+ }
+ p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
+ return u, p.formData, nil
+}
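
A sketch of generating short-lived download and upload URLs with the presign API above; bucket and object names are placeholders:

package main

import (
	"context"
	"log"
	"net/url"
	"time"

	"github.com/minio/minio-go/v7"
)

// demoPresigned generates presigned GET and PUT URLs.
// The *minio.Client is assumed to be constructed as in the earlier sketch.
func demoPresigned(ctx context.Context, client *minio.Client) error {
	// Presigned GET, valid for one hour, forcing a download file name.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.csv"`)
	getURL, err := client.PresignedGetObject(ctx, "my-bucket", "reports/2024.csv", time.Hour, reqParams)
	if err != nil {
		return err
	}
	log.Println("GET:", getURL)

	// Presigned PUT, valid for 15 minutes.
	putURL, err := client.PresignedPutObject(ctx, "my-bucket", "uploads/new-object.bin", 15*time.Minute)
	if err != nil {
		return err
	}
	log.Println("PUT:", putURL)
	return nil
}
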
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
new file mode 100644
index 000000000..737666937
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
@@ -0,0 +1,123 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "net/http"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Bucket operations
+func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
+ // Validate the input arguments.
+ if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
+ return err
+ }
+
+ err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking)
+ if err != nil && (opts.Region == "" || opts.Region == "us-east-1") {
+ if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" {
+ err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking)
+ }
+ }
+ return err
+}
+
+func (c *Client) doMakeBucket(ctx context.Context, bucketName, location string, objectLockEnabled bool) (err error) {
+ defer func() {
+ // Save the location into cache on a successful makeBucket response.
+ if err == nil {
+ c.bucketLocCache.Set(bucketName, location)
+ }
+ }()
+
+	// If location is empty, treat it as the default region 'us-east-1'.
+ if location == "" {
+ location = "us-east-1"
+		// For clients configured with a custom region,
+		// default to that region instead of 'us-east-1'.
+ if c.region != "" {
+ location = c.region
+ }
+ }
+ // PUT bucket request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ bucketLocation: location,
+ }
+
+ if objectLockEnabled {
+ headers := make(http.Header)
+ headers.Add("x-amz-bucket-object-lock-enabled", "true")
+ reqMetadata.customHeader = headers
+ }
+
+ // If location is not 'us-east-1' create bucket location config.
+ if location != "us-east-1" && location != "" {
+ createBucketConfig := createBucketConfiguration{}
+ createBucketConfig.Location = location
+ var createBucketConfigBytes []byte
+ createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
+ if err != nil {
+ return err
+ }
+ reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
+ reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
+ reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
+ reqMetadata.contentLength = int64(len(createBucketConfigBytes))
+ }
+
+ // Execute PUT to create a new bucket.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Success.
+ return nil
+}
+
+// MakeBucketOptions holds all options to tweak bucket creation
+type MakeBucketOptions struct {
+ // Bucket location
+ Region string
+ // Enable object locking
+ ObjectLocking bool
+}
+
+// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts.
+//
+// Region is an optional argument; by default all buckets are
+// created in the US Standard region ('us-east-1').
+//
+// For regions supported by Amazon S3, see http://docs.aws.amazon.com/general/latest/gr/rande.html
+// For regions supported by Google Cloud Storage, see https://cloud.google.com/storage/docs/bucket-locations
+func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
+ return c.makeBucket(ctx, bucketName, opts)
+}
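
A minimal sketch of bucket creation with the options defined above; the bucket name is a placeholder and client construction is as in the earlier sketch:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// demoMakeBucket creates a bucket in a specific region with object locking enabled.
// The *minio.Client is assumed to be constructed as in the earlier sketch.
func demoMakeBucket(ctx context.Context, client *minio.Client) error {
	err := client.MakeBucket(ctx, "my-locked-bucket", minio.MakeBucketOptions{
		Region:        "us-east-1",
		ObjectLocking: true, // adds the x-amz-bucket-object-lock-enabled header
	})
	if err != nil {
		return err
	}
	log.Println("bucket created")
	return nil
}
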
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
new file mode 100644
index 000000000..9ccb97cbb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
@@ -0,0 +1,149 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "math"
+ "os"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+const nullVersionID = "null"
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+ _, ok = reader.(*Object)
+ return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+ var v *os.File
+ v, ok = reader.(*os.File)
+ if ok {
+		// Stdin, Stdout and Stderr all have the *os.File type,
+		// which happens to also be io.ReaderAt compatible, so we
+		// need special conditions for them to be ignored by this
+		// function.
+ for _, f := range []string{
+ "/dev/stdin",
+ "/dev/stdout",
+ "/dev/stderr",
+ } {
+ if f == v.Name() {
+ ok = false
+ break
+ }
+ }
+ } else {
+ _, ok = reader.(io.ReaderAt)
+ }
+ return
+}
+
+// OptimalPartInfo - calculate the optimal part info for a given
+// object size.
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+// maxPartsCount - 10000
+// minPartSize - 16MiB
+// maxMultipartPutObjectSize - 5TiB
+func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) {
+	// If object size is '-1', set it to the 5TiB maximum.
+ var unknownSize bool
+ if objectSize == -1 {
+ unknownSize = true
+ objectSize = maxMultipartPutObjectSize
+ }
+
+ // object size is larger than supported maximum.
+ if objectSize > maxMultipartPutObjectSize {
+ err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+ return
+ }
+
+ var partSizeFlt float64
+ if configuredPartSize > 0 {
+ if int64(configuredPartSize) > objectSize {
+ err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "")
+ return
+ }
+
+ if !unknownSize {
+ if objectSize > (int64(configuredPartSize) * maxPartsCount) {
+				err = errInvalidArgument("Part size * max_parts(10000) is less than input objectSize.")
+ return
+ }
+ }
+
+ if configuredPartSize < absMinPartSize {
+ err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.")
+ return
+ }
+
+ if configuredPartSize > maxPartSize {
+ err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.")
+ return
+ }
+
+ partSizeFlt = float64(configuredPartSize)
+ if unknownSize {
+ // If input has unknown size and part size is configured
+ // keep it to maximum allowed as per 10000 parts.
+ objectSize = int64(configuredPartSize) * maxPartsCount
+ }
+ } else {
+ configuredPartSize = minPartSize
+ // Use floats for part size for all calculations to avoid
+ // overflows during float64 to int64 conversions.
+ partSizeFlt = float64(objectSize / maxPartsCount)
+ partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize)
+ }
+
+ // Total parts count.
+ totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+ // Part size.
+ partSize = int64(partSizeFlt)
+ // Last part size.
+ lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+ return totalPartsCount, partSize, lastPartSize, nil
+}
+
+// newUploadID - initiates a new multipart upload for an object name
+// and returns the newly allocated upload id.
+func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return "", err
+ }
+
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return "", err
+ }
+ return initMultipartUploadResult.UploadID, nil
+}
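
OptimalPartInfo is exported, so its part-size arithmetic can be checked directly. A small sketch, assuming the constants documented in the NOTE above (10000 parts maximum, 16MiB minimum part size, 5TiB maximum object size):

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
)

func main() {
	// 5GiB object with no explicitly configured part size.
	totalParts, partSize, lastPartSize, err := minio.OptimalPartInfo(5*1024*1024*1024, 0)
	if err != nil {
		log.Fatal(err)
	}
	// With a 16MiB minimum part size this resolves to 320 parts of 16MiB each.
	fmt.Println(totalParts, partSize, lastPartSize)

	// Unknown object size (-1): parts are sized so that 10000 of them cover the 5TiB ceiling.
	totalParts, partSize, lastPartSize, err = minio.OptimalPartInfo(-1, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(totalParts, partSize, lastPartSize)
}
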
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
new file mode 100644
index 000000000..0ae9142e1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
@@ -0,0 +1,164 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2023 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "io"
+ "mime/multipart"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+)
+
+// PutObjectFanOutEntry is per object entry fan-out metadata
+type PutObjectFanOutEntry struct {
+ Key string `json:"key"`
+ UserMetadata map[string]string `json:"metadata,omitempty"`
+ UserTags map[string]string `json:"tags,omitempty"`
+ ContentType string `json:"contentType,omitempty"`
+ ContentEncoding string `json:"contentEncoding,omitempty"`
+ ContentDisposition string `json:"contentDisposition,omitempty"`
+ ContentLanguage string `json:"contentLanguage,omitempty"`
+ CacheControl string `json:"cacheControl,omitempty"`
+ Retention RetentionMode `json:"retention,omitempty"`
+ RetainUntilDate *time.Time `json:"retainUntil,omitempty"`
+}
+
+// PutObjectFanOutRequest is the request structure sent to the
+// server to fan out the stream to multiple objects.
+type PutObjectFanOutRequest struct {
+ Entries []PutObjectFanOutEntry
+ Checksum Checksum
+ SSE encrypt.ServerSide
+}
+
+// PutObjectFanOutResponse is the response structure returned by the
+// server for each object fan-out key, reporting success or failure.
+// Additionally, this response carries the ETag, VersionID and
+// LastModified of each fanned-out object.
+type PutObjectFanOutResponse struct {
+ Key string `json:"key"`
+ ETag string `json:"etag,omitempty"`
+ VersionID string `json:"versionId,omitempty"`
+ LastModified *time.Time `json:"lastModified,omitempty"`
+ Error string `json:"error,omitempty"`
+}
+
+// PutObjectFanOut - a variant of PutObject: instead of writing a single object from a single
+// stream, multiple objects are written, defined via a list of PutObjectFanOutEntry values in a
+// PutObjectFanOutRequest. Each entry carries an object key name and its relevant metadata, if any.
+// `Key` is mandatory; the rest of the options in each entry are optional.
+func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) {
+ if len(fanOutReq.Entries) == 0 {
+ return nil, errInvalidArgument("fan out requests cannot be empty")
+ }
+
+ policy := NewPostPolicy()
+ policy.SetBucket(bucket)
+ policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16))
+
+ // Expires in 15 minutes.
+ policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
+
+ // Set encryption headers if any.
+ policy.SetEncryption(fanOutReq.SSE)
+
+ // Set checksum headers if any.
+ policy.SetChecksum(fanOutReq.Checksum)
+
+ url, formData, err := c.PresignedPostPolicy(ctx, policy)
+ if err != nil {
+ return nil, err
+ }
+
+ r, w := io.Pipe()
+
+ req, err := http.NewRequest(http.MethodPost, url.String(), r)
+ if err != nil {
+ w.Close()
+ return nil, err
+ }
+
+ var b strings.Builder
+ enc := json.NewEncoder(&b)
+ for _, req := range fanOutReq.Entries {
+ if req.Key == "" {
+ w.Close()
+ return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty")
+ }
+ if err = enc.Encode(&req); err != nil {
+ w.Close()
+ return nil, err
+ }
+ }
+
+ mwriter := multipart.NewWriter(w)
+ req.Header.Add("Content-Type", mwriter.FormDataContentType())
+
+ go func() {
+ defer w.Close()
+ defer mwriter.Close()
+
+ for k, v := range formData {
+ if err := mwriter.WriteField(k, v); err != nil {
+ return
+ }
+ }
+
+ if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil {
+ return
+ }
+
+ mw, err := mwriter.CreateFormFile("file", "fanout-content")
+ if err != nil {
+ return
+ }
+
+ if _, err = io.Copy(mw, fanOutData); err != nil {
+ return
+ }
+ }()
+
+ resp, err := c.do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer closeResponse(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucket, "fanout-content")
+ }
+
+ dec := json.NewDecoder(resp.Body)
+ fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries))
+ for dec.More() {
+ var m PutObjectFanOutResponse
+ if err = dec.Decode(&m); err != nil {
+ return nil, err
+ }
+ fanOutResp = append(fanOutResp, m)
+ }
+
+ return fanOutResp, nil
+}
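
A sketch of fanning one stream out to several objects. PutObjectFanOut relies on a MinIO server extension (the x-minio-fanout-list form field), so it is not expected to work against plain AWS S3; names below are placeholders:

package main

import (
	"context"
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
)

// demoFanOut writes the same payload to two object keys in one request.
// The *minio.Client is assumed to be constructed as in the earlier sketch.
func demoFanOut(ctx context.Context, client *minio.Client) error {
	payload := strings.NewReader("shared payload")

	responses, err := client.PutObjectFanOut(ctx, "my-bucket", payload, minio.PutObjectFanOutRequest{
		Entries: []minio.PutObjectFanOutEntry{
			{Key: "copies/a.txt", ContentType: "text/plain"},
			{Key: "copies/b.txt", ContentType: "text/plain", UserTags: map[string]string{"copy": "b"}},
		},
	})
	if err != nil {
		return err
	}
	for _, r := range responses {
		log.Printf("key=%s etag=%s err=%q", r.Key, r.ETag, r.Error)
	}
	return nil
}
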
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
new file mode 100644
index 000000000..4d29dfc18
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
@@ -0,0 +1,64 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "mime"
+ "os"
+ "path/filepath"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
+func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Open the referenced file.
+ fileReader, err := os.Open(filePath)
+	// Fail quickly on any error.
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ defer fileReader.Close()
+
+ // Save the file stat.
+ fileStat, err := fileReader.Stat()
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Save the file size.
+ fileSize := fileStat.Size()
+
+ // Set contentType based on filepath extension if not given or default
+ // value of "application/octet-stream" if the extension has no associated type.
+ if opts.ContentType == "" {
+ if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
+ opts.ContentType = "application/octet-stream"
+ }
+ }
+ return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts)
+}
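
A short sketch of FPutObject as added above; the local path and object name are placeholders, and the content type is inferred from the ".json" extension because none is set explicitly:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// demoFPutObject uploads a local file in one call.
// The *minio.Client is assumed to be constructed as in the earlier sketch.
func demoFPutObject(ctx context.Context, client *minio.Client) error {
	info, err := client.FPutObject(ctx, "my-bucket", "configs/settings.json", "/tmp/settings.json", minio.PutObjectOptions{})
	if err != nil {
		return err
	}
	log.Printf("uploaded %s/%s (%d bytes, etag %s)", info.Bucket, info.Key, info.Size, info.ETag)
	return nil
}
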
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
new file mode 100644
index 000000000..a70cbea9e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -0,0 +1,465 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
+ opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
+ if err != nil {
+ errResp := ToErrorResponse(err)
+		// Verify whether multipart functionality is unavailable; if so,
+		// fall back to a single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObject(ctx, bucketName, objectName, reader, size, opts)
+ }
+ }
+ return info, err
+}
+
+func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+	// Total data read and written to the server; should be equal to
+	// 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Choose hash algorithms to be calculated by hashCopyN,
+ // avoid sha256 with non-v4 signature request or
+ // HTTPS connection.
+ hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
+ if len(hashSums) == 0 {
+ if opts.UserMetadata == nil {
+ opts.UserMetadata = make(map[string]string, 1)
+ }
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
+ }
+
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ buf := make([]byte, partSize)
+
+ // Create checksums
+ // CRC32C is ~50% faster on AMD64 @ 30GB/s
+ var crcBytes []byte
+ customHeader := make(http.Header)
+ crc := opts.AutoChecksum.Hasher()
+ for partNumber <= totalPartsCount {
+ length, rErr := readFull(reader, buf)
+ if rErr == io.EOF && partNumber > 1 {
+ break
+ }
+
+ if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
+ return UploadInfo{}, rErr
+ }
+
+		// Calculate hash sums over the bytes read into the buffer for this part.
+ for k, v := range hashAlgos {
+ v.Write(buf[:length])
+ hashSums[k] = v.Sum(nil)
+ v.Close()
+ }
+
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+ // Checksums..
+ var (
+ md5Base64 string
+ sha256Hex string
+ )
+
+ if hashSums["md5"] != nil {
+ md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
+ }
+ if hashSums["sha256"] != nil {
+ sha256Hex = hex.EncodeToString(hashSums["sha256"])
+ }
+ if len(hashSums) == 0 {
+ crc.Reset()
+ crc.Write(buf[:length])
+ cSum := crc.Sum(nil)
+ customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
+ crcBytes = append(crcBytes, cSum...)
+ }
+
+ p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
+ // Proceed to upload the part.
+ objPart, uerr := c.uploadPart(ctx, p)
+ if uerr != nil {
+ return UploadInfo{}, uerr
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+
+ // Increment part number.
+ partNumber++
+
+		// For unknown size, break away on reading EOF;
+		// we do not have to upload up to totalPartsCount.
+ if rErr == io.EOF {
+ break
+ }
+ }
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ ChecksumCRC32: part.ChecksumCRC32,
+ ChecksumCRC32C: part.ChecksumCRC32C,
+ ChecksumSHA1: part.ChecksumSHA1,
+ ChecksumSHA256: part.ChecksumSHA256,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
+ }
+ if len(crcBytes) > 0 {
+ // Add hash of hashes.
+ crc.Reset()
+ crc.Write(crcBytes)
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ }
+ uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalUploadedSize
+ return uploadInfo, nil
+}
+
+// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
+func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploads", "")
+
+ if opts.Internal.SourceVersionID != "" {
+ if opts.Internal.SourceVersionID != nullVersionID {
+ if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
+ return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
+ }
+ }
+ urlValues.Set("versionId", opts.Internal.SourceVersionID)
+ }
+
+ // Set ContentType header.
+ customHeader := opts.Header()
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: customHeader,
+ }
+
+ // Execute POST on an objectName to initiate multipart upload.
+ resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode xml for new multipart upload.
+ initiateMultipartUploadResult := initiateMultipartUploadResult{}
+ err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
+ if err != nil {
+ return initiateMultipartUploadResult, err
+ }
+ return initiateMultipartUploadResult, nil
+}
+
+type uploadPartParams struct {
+ bucketName string
+ objectName string
+ uploadID string
+ reader io.Reader
+ partNumber int
+ md5Base64 string
+ sha256Hex string
+ size int64
+ sse encrypt.ServerSide
+ streamSha256 bool
+ customHeader http.Header
+ trailer http.Header
+}
+
+// uploadPart - Uploads a part in a multipart upload.
+func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(p.bucketName); err != nil {
+ return ObjectPart{}, err
+ }
+ if err := s3utils.CheckValidObjectName(p.objectName); err != nil {
+ return ObjectPart{}, err
+ }
+ if p.size > maxPartSize {
+ return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName)
+ }
+ if p.size <= -1 {
+ return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName)
+ }
+ if p.partNumber <= 0 {
+ return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.")
+ }
+ if p.uploadID == "" {
+ return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.")
+ }
+
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set part number.
+ urlValues.Set("partNumber", strconv.Itoa(p.partNumber))
+ // Set upload id.
+ urlValues.Set("uploadId", p.uploadID)
+
+ // Set encryption headers, if any.
+ if p.customHeader == nil {
+ p.customHeader = make(http.Header)
+ }
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
+ // Server-side encryption is supported by the S3 Multipart Upload actions.
+ // Unless you are using a customer-provided encryption key, you don't need
+ // to specify the encryption parameters in each UploadPart request.
+ if p.sse != nil && p.sse.Type() == encrypt.SSEC {
+ p.sse.Marshal(p.customHeader)
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: p.bucketName,
+ objectName: p.objectName,
+ queryValues: urlValues,
+ customHeader: p.customHeader,
+ contentBody: p.reader,
+ contentLength: p.size,
+ contentMD5Base64: p.md5Base64,
+ contentSHA256Hex: p.sha256Hex,
+ streamSha256: p.streamSha256,
+ trailer: p.trailer,
+ }
+
+ // Execute PUT on each part.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectPart{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName)
+ }
+ }
+ // Once successfully uploaded, return completed part.
+ h := resp.Header
+ objPart := ObjectPart{
+ ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
+ ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
+ ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
+ ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
+ }
+ objPart.Size = p.size
+ objPart.PartNumber = p.partNumber
+ // Trim off the odd double quotes from ETag in the beginning and end.
+ objPart.ETag = trimEtag(h.Get("ETag"))
+ return objPart, nil
+}
+
+// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
+func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
+ complete completeMultipartUpload, opts PutObjectOptions,
+) (UploadInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+ // Marshal complete multipart body.
+ completeMultipartUploadBytes, err := xml.Marshal(complete)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ headers := opts.Header()
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
+ headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
+ headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
+ }
+
+	// Instantiate the complete multipart upload request body buffer.
+ completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: completeMultipartUploadBuffer,
+ contentLength: int64(len(completeMultipartUploadBytes)),
+ contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
+ customHeader: headers,
+ }
+
+ // Execute POST to complete multipart upload for an objectName.
+ resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+	// Read resp.Body into a []byte to check for an error response inside the body.
+ var b []byte
+ b, err = io.ReadAll(resp.Body)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ // Decode completed multipart upload response on success.
+ completeMultipartUploadResult := completeMultipartUploadResult{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
+ if err != nil {
+		// xml parsing failure due to the presence of an ill-formed xml fragment
+ return UploadInfo{}, err
+ } else if completeMultipartUploadResult.Bucket == "" {
+		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
+ // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
+ // of the members.
+
+ // Decode completed multipart upload response on failure
+ completeMultipartUploadErr := ErrorResponse{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
+ if err != nil {
+			// xml parsing failure due to the presence of an ill-formed xml fragment
+ return UploadInfo{}, err
+ }
+ return UploadInfo{}, completeMultipartUploadErr
+ }
+
+ // extract lifecycle expiry date and rule ID
+ expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
+
+ return UploadInfo{
+ Bucket: completeMultipartUploadResult.Bucket,
+ Key: completeMultipartUploadResult.Key,
+ ETag: trimEtag(completeMultipartUploadResult.ETag),
+ VersionID: resp.Header.Get(amzVersionID),
+ Location: completeMultipartUploadResult.Location,
+ Expiration: expTime,
+ ExpirationRuleID: ruleID,
+
+ ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
+ ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1,
+ ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
+ ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
+ }, nil
+}
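
The multipart helpers above are unexported and are normally driven through PutObject, which switches to multipart uploads for large or unknown-size payloads. A sketch with a tuned part size; file path, bucket, object names and sizes are placeholders:

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
)

// demoLargeUpload streams a large local file through the multipart path.
// The *minio.Client is assumed to be constructed as in the earlier sketch.
func demoLargeUpload(ctx context.Context, client *minio.Client) error {
	f, err := os.Open("/tmp/big.bin")
	if err != nil {
		return err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return err
	}

	info, err := client.PutObject(ctx, "my-bucket", "backups/big.bin", f, st.Size(), minio.PutObjectOptions{
		ContentType: "application/octet-stream",
		PartSize:    64 * 1024 * 1024, // 64MiB parts instead of the computed optimum
		NumThreads:  4,                // parallel part uploads where the reader supports ReadAt
	})
	if err != nil {
		return err
	}
	log.Printf("uploaded %d bytes as %s (etag %s)", info.Size, info.Key, info.ETag)
	return nil
}
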
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
new file mode 100644
index 000000000..eef976c8c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -0,0 +1,831 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/google/uuid"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// putObjectMultipartStream - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// Comprehensive put object operation involving multipart uploads.
+//
+// Following code handles these types of readers.
+//
+// - *minio.Object
+// - Any reader which has a method 'ReadAt()'
+func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+ info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+ } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
+ // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
+ info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
+ } else {
+ info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
+ }
+ if err != nil {
+ errResp := ToErrorResponse(err)
+		// Verify whether multipart functionality is unavailable; if so,
+		// fall back to a single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObject(ctx, bucketName, objectName, reader, size, opts)
+ }
+ }
+ return info, err
+}
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+ Error error // Any error encountered while uploading the part.
+ PartNum int // Number of the part uploaded.
+ Size int64 // Size of the part uploaded.
+ Part ObjectPart
+}
+
+type uploadPartReq struct {
+ PartNum int // Number of the part uploaded.
+ Part ObjectPart // Part object to be uploaded.
+}
+
+// putObjectMultipartStreamFromReadAt - Uploads files bigger than 128MiB.
+// Supports all readers which implement the io.ReaderAt interface
+// (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows us to resume multipart
+// uploads by reading at an offset, avoiding re-reading data that
+// was already uploaded. Internally this function uses temporary
+// files for staging all the data; these temporary files are cleaned
+// up automatically when the caller, i.e. the http client, closes the
+// stream after uploading all the contents successfully.
+func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
+ reader io.ReaderAt, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ if opts.Checksum.IsSet() {
+ opts.AutoChecksum = opts.Checksum
+ }
+ withChecksum := c.trailingHeaderSupport
+ if withChecksum {
+ if opts.UserMetadata == nil {
+ opts.UserMetadata = make(map[string]string, 1)
+ }
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
+ }
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+ // Abort the multipart upload in progress if the
+ // function returns any error. Since we do not resume,
+ // we should purge the parts which have been uploaded
+ // to relinquish storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Total data read and written to the server. This should equal 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Declare a channel that sends the next part number to be uploaded.
+ uploadPartsCh := make(chan uploadPartReq)
+
+ // Declare a channel that sends back the response of a part upload.
+ uploadedPartsCh := make(chan uploadedPartRes)
+
+ // Used for readability, lastPartNumber is always totalPartsCount.
+ lastPartNumber := totalPartsCount
+
+ partitionCtx, partitionCancel := context.WithCancel(ctx)
+ defer partitionCancel()
+ // Send each part number to the channel to be processed.
+ go func() {
+ defer close(uploadPartsCh)
+
+ for p := 1; p <= totalPartsCount; p++ {
+ select {
+ case <-partitionCtx.Done():
+ return
+ case uploadPartsCh <- uploadPartReq{PartNum: p}:
+ }
+ }
+ }()
+
+ // Receive each part number from the channel, allowing up to opts.getNumThreads() parallel uploads.
+ for w := 1; w <= opts.getNumThreads(); w++ {
+ go func(partSize int64) {
+ for {
+ var uploadReq uploadPartReq
+ var ok bool
+ select {
+ case <-ctx.Done():
+ return
+ case uploadReq, ok = <-uploadPartsCh:
+ if !ok {
+ return
+ }
+ // Each worker will draw from the part channel and upload in parallel.
+ }
+
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(uploadReq.PartNum-1) * partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if uploadReq.PartNum == lastPartNumber {
+ readOffset = size - lastPartSize
+ partSize = lastPartSize
+ }
+
+ sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
+ trailer := make(http.Header, 1)
+ if withChecksum {
+ crc := opts.AutoChecksum.Hasher()
+ trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
+ sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) {
+ trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(hash))
+ })
+ }
+
+ // Proceed to upload the part.
+ p := uploadPartParams{
+ bucketName: bucketName,
+ objectName: objectName,
+ uploadID: uploadID,
+ reader: sectionReader,
+ partNumber: uploadReq.PartNum,
+ size: partSize,
+ sse: opts.ServerSideEncryption,
+ streamSha256: !opts.DisableContentSha256,
+ sha256Hex: "",
+ trailer: trailer,
+ }
+ objPart, err := c.uploadPart(ctx, p)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ uploadReq.Part = objPart
+
+ // Send successful part info through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: objPart.Size,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
+ }
+ }
+ }(partSize)
+ }
+
+ // Gather the responses as they occur and update any
+ // progress bar.
+ for u := 1; u <= totalPartsCount; u++ {
+ select {
+ case <-ctx.Done():
+ return UploadInfo{}, ctx.Err()
+ case uploadRes := <-uploadedPartsCh:
+ if uploadRes.Error != nil {
+ return UploadInfo{}, uploadRes.Error
+ }
+
+ // Update the totalUploadedSize.
+ totalUploadedSize += uploadRes.Size
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: uploadRes.Part.ETag,
+ PartNumber: uploadRes.Part.PartNumber,
+ ChecksumCRC32: uploadRes.Part.ChecksumCRC32,
+ ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
+ ChecksumSHA1: uploadRes.Part.ChecksumSHA1,
+ ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
+ })
+ }
+ }
+
+ // Verify if we uploaded all the data.
+ if totalUploadedSize != size {
+ return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
+ }
+ if withChecksum {
+ // Add hash of hashes.
+ crc := opts.AutoChecksum.Hasher()
+ for _, part := range complMultipartUpload.Parts {
+ cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum))
+ if err == nil {
+ crc.Write(cs)
+ }
+ }
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ }
+
+ uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalUploadedSize
+ return uploadInfo, nil
+}
+
+func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ if opts.Checksum.IsSet() {
+ opts.AutoChecksum = opts.Checksum
+ opts.SendContentMd5 = false
+ }
+
+ if !opts.SendContentMd5 {
+ if opts.UserMetadata == nil {
+ opts.UserMetadata = make(map[string]string, 1)
+ }
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ // Initiates a new multipart request
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+ // Abort the multipart upload if the function returns
+ // any error. Since we do not resume, we should purge
+ // the parts which have been uploaded to relinquish
+ // storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Create checksums
+ // CRC32C is ~50% faster on AMD64 @ 30GB/s
+ var crcBytes []byte
+ customHeader := make(http.Header)
+ crc := opts.AutoChecksum.Hasher()
+ md5Hash := c.md5Hasher()
+ defer md5Hash.Close()
+
+ // Total data read and written to the server. This should equal 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ buf := make([]byte, partSize)
+
+ // Avoid declaring variables in the for loop
+ var md5Base64 string
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+
+ // Proceed to upload the part.
+ if partNumber == totalPartsCount {
+ partSize = lastPartSize
+ }
+
+ length, rerr := readFull(reader, buf)
+ if rerr == io.EOF && partNumber > 1 {
+ break
+ }
+
+ if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+ return UploadInfo{}, rerr
+ }
+
+ // Calculate md5sum.
+ if opts.SendContentMd5 {
+ md5Hash.Reset()
+ md5Hash.Write(buf[:length])
+ md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
+ } else {
+ // Add the configured checksum instead.
+ crc.Reset()
+ crc.Write(buf[:length])
+ cSum := crc.Sum(nil)
+ customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum))
+ crcBytes = append(crcBytes, cSum...)
+ }
+
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+ p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
+ objPart, uerr := c.uploadPart(ctx, p)
+ if uerr != nil {
+ return UploadInfo{}, uerr
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += partSize
+ }
+
+ // Verify if we uploaded all the data.
+ if size > 0 {
+ if totalUploadedSize != size {
+ return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+ }
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ ChecksumCRC32: part.ChecksumCRC32,
+ ChecksumCRC32C: part.ChecksumCRC32C,
+ ChecksumSHA1: part.ChecksumSHA1,
+ ChecksumSHA256: part.ChecksumSHA256,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
+ }
+ if len(crcBytes) > 0 {
+ // Add hash of hashes.
+ crc.Reset()
+ crc.Write(crcBytes)
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ }
+ uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalUploadedSize
+ return uploadInfo, nil
+}
+
+// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
+// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
+func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+ if opts.Checksum.IsSet() {
+ opts.SendContentMd5 = false
+ opts.AutoChecksum = opts.Checksum
+ }
+ if !opts.SendContentMd5 {
+ if opts.UserMetadata == nil {
+ opts.UserMetadata = make(map[string]string, 1)
+ }
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
+ }
+
+ // Cancel all when an error occurs.
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Initiates a new multipart request
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+ // Abort the multipart upload if the function returns
+ // any error. Since we do not resume, we should purge
+ // the parts which have been uploaded to relinquish
+ // storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Create checksums
+ // CRC32C is ~50% faster on AMD64 @ 30GB/s
+ var crcBytes []byte
+ crc := opts.AutoChecksum.Hasher()
+
+ // Total data read and written to the server. This should equal 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ nBuffers := int64(opts.NumThreads)
+ bufs := make(chan []byte, nBuffers)
+ all := make([]byte, nBuffers*partSize)
+ for i := int64(0); i < nBuffers; i++ {
+ bufs <- all[i*partSize : i*partSize+partSize]
+ }
+
+ var wg sync.WaitGroup
+ var mu sync.Mutex
+ errCh := make(chan error, opts.NumThreads)
+
+ reader = newHook(reader, opts.Progress)
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Proceed to upload the part.
+ var buf []byte
+ select {
+ case buf = <-bufs:
+ case err = <-errCh:
+ cancel()
+ wg.Wait()
+ return UploadInfo{}, err
+ }
+
+ if int64(len(buf)) != partSize {
+ return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize)
+ }
+
+ length, rerr := readFull(reader, buf)
+ if rerr == io.EOF && partNumber > 1 {
+ // Done
+ break
+ }
+
+ if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+ cancel()
+ wg.Wait()
+ return UploadInfo{}, rerr
+ }
+
+ // Calculate md5sum.
+ customHeader := make(http.Header)
+ if !opts.SendContentMd5 {
+ // Add Checksum instead.
+ crc.Reset()
+ crc.Write(buf[:length])
+ cSum := crc.Sum(nil)
+ customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
+ crcBytes = append(crcBytes, cSum...)
+ }
+
+ wg.Add(1)
+ go func(partNumber int) {
+ // Avoid declaring variables in the for loop
+ var md5Base64 string
+
+ if opts.SendContentMd5 {
+ md5Hash := c.md5Hasher()
+ md5Hash.Write(buf[:length])
+ md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
+ md5Hash.Close()
+ }
+
+ defer wg.Done()
+ p := uploadPartParams{
+ bucketName: bucketName,
+ objectName: objectName,
+ uploadID: uploadID,
+ reader: bytes.NewReader(buf[:length]),
+ partNumber: partNumber,
+ md5Base64: md5Base64,
+ size: int64(length),
+ sse: opts.ServerSideEncryption,
+ streamSha256: !opts.DisableContentSha256,
+ customHeader: customHeader,
+ }
+ objPart, uerr := c.uploadPart(ctx, p)
+ if uerr != nil {
+ errCh <- uerr
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ mu.Lock()
+ partsInfo[partNumber] = objPart
+ mu.Unlock()
+
+ // Send buffer back so it can be reused.
+ bufs <- buf
+ }(partNumber)
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+ }
+ wg.Wait()
+
+ // Collect any error
+ select {
+ case err = <-errCh:
+ return UploadInfo{}, err
+ default:
+ }
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ ChecksumCRC32: part.ChecksumCRC32,
+ ChecksumCRC32C: part.ChecksumCRC32C,
+ ChecksumSHA1: part.ChecksumSHA1,
+ ChecksumSHA256: part.ChecksumSHA256,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
+ }
+ if len(crcBytes) > 0 {
+ // Add hash of hashes.
+ crc.Reset()
+ crc.Write(crcBytes)
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ }
+ uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalUploadedSize
+ return uploadInfo, nil
+}
+
+// putObject - special function used for Google Cloud Storage, since Google's
+// multipart API is not S3 compatible.
+func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Size -1 is only supported on Google Cloud Storage; we error
+ // out in all other situations.
+ if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
+ return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName)
+ }
+
+ if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
+ return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
+ }
+ if opts.Checksum.IsSet() {
+ opts.SendContentMd5 = false
+ }
+
+ var readSeeker io.Seeker
+ if size > 0 {
+ if isReadAt(reader) && !isObject(reader) {
+ seeker, ok := reader.(io.Seeker)
+ if ok {
+ offset, err := seeker.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return UploadInfo{}, errInvalidArgument(err.Error())
+ }
+ reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
+ readSeeker = reader.(io.Seeker)
+ }
+ }
+ }
+
+ var md5Base64 string
+ if opts.SendContentMd5 {
+ // Calculate md5sum.
+ hash := c.md5Hasher()
+
+ if readSeeker != nil {
+ if _, err := io.Copy(hash, reader); err != nil {
+ return UploadInfo{}, err
+ }
+ // Seek back to beginning of io.NewSectionReader's offset.
+ _, err = readSeeker.Seek(0, io.SeekStart)
+ if err != nil {
+ return UploadInfo{}, errInvalidArgument(err.Error())
+ }
+ } else {
+ // Create a buffer.
+ buf := make([]byte, size)
+
+ length, err := readFull(reader, buf)
+ if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
+ return UploadInfo{}, err
+ }
+
+ hash.Write(buf[:length])
+ reader = bytes.NewReader(buf[:length])
+ }
+
+ md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
+ hash.Close()
+ }
+
+ // Update progress reader appropriately to the latest offset as we
+ // read from the source.
+ progressReader := newHook(reader, opts.Progress)
+
+ // This function does not calculate sha256 and md5sum for payload.
+ // Execute put object.
+ return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts)
+}
+
+// putObjectDo - executes the put object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+ // Set headers.
+ customHeader := opts.Header()
+
+ // Populate request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Base64: md5Base64,
+ contentSHA256Hex: sha256Hex,
+ streamSha256: !opts.DisableContentSha256,
+ }
+ // Add CRC when the client supports it, MD5 is not set, the endpoint is not Google, and we do not add SHA256 to chunks.
+ addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
+ if opts.Checksum.IsSet() {
+ reqMetadata.addCrc = &opts.Checksum
+ } else if addCrc {
+ // If user has added checksums, don't add them ourselves.
+ for k := range opts.UserMetadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
+ addCrc = false
+ }
+ }
+ if addCrc {
+ opts.AutoChecksum.SetDefault(ChecksumCRC32C)
+ reqMetadata.addCrc = &opts.AutoChecksum
+ }
+ }
+
+ if opts.Internal.SourceVersionID != "" {
+ if opts.Internal.SourceVersionID != nullVersionID {
+ if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
+ return UploadInfo{}, errInvalidArgument(err.Error())
+ }
+ }
+ urlValues := make(url.Values)
+ urlValues.Set("versionId", opts.Internal.SourceVersionID)
+ reqMetadata.queryValues = urlValues
+ }
+
+ // Execute PUT an objectName.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // extract lifecycle expiry date and rule ID
+ expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
+ h := resp.Header
+ return UploadInfo{
+ Bucket: bucketName,
+ Key: objectName,
+ ETag: trimEtag(h.Get("ETag")),
+ VersionID: h.Get(amzVersionID),
+ Size: size,
+ Expiration: expTime,
+ ExpirationRuleID: ruleID,
+
+ // Checksum values
+ ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
+ ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
+ ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
+ ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
new file mode 100644
index 000000000..d769648a7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -0,0 +1,511 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "golang.org/x/net/http/httpguts"
+)
+
+// ReplicationStatus represents replication status of object
+type ReplicationStatus string
+
+const (
+ // ReplicationStatusPending indicates replication is pending
+ ReplicationStatusPending ReplicationStatus = "PENDING"
+ // ReplicationStatusComplete indicates replication completed ok
+ ReplicationStatusComplete ReplicationStatus = "COMPLETED"
+ // ReplicationStatusFailed indicates replication failed
+ ReplicationStatusFailed ReplicationStatus = "FAILED"
+ // ReplicationStatusReplica indicates object is a replica of a source
+ ReplicationStatusReplica ReplicationStatus = "REPLICA"
+)
+
+// Empty returns true if no replication status set.
+func (r ReplicationStatus) Empty() bool {
+ return r == ""
+}
+
+// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition
+// implementation on MinIO server
+type AdvancedPutOptions struct {
+ SourceVersionID string
+ SourceETag string
+ ReplicationStatus ReplicationStatus
+ SourceMTime time.Time
+ ReplicationRequest bool
+ RetentionTimestamp time.Time
+ TaggingTimestamp time.Time
+ LegalholdTimestamp time.Time
+ ReplicationValidityCheck bool
+}
+
+// PutObjectOptions represents options specified by user for PutObject call
+type PutObjectOptions struct {
+ UserMetadata map[string]string
+ UserTags map[string]string
+ Progress io.Reader
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ ContentLanguage string
+ CacheControl string
+ Expires time.Time
+ Mode RetentionMode
+ RetainUntilDate time.Time
+ ServerSideEncryption encrypt.ServerSide
+ NumThreads uint
+ StorageClass string
+ WebsiteRedirectLocation string
+ PartSize uint64
+ LegalHold LegalHoldStatus
+ SendContentMd5 bool
+ DisableContentSha256 bool
+ DisableMultipart bool
+
+ // AutoChecksum is the type of checksum that will be added if no other checksum is added,
+ // like MD5 or SHA256 streaming checksum, and it is feasible for the upload type.
+ // If none is specified CRC32C is used, since it is generally the fastest.
+ AutoChecksum ChecksumType
+
+ // Checksum will force a checksum of the specific type.
+ // This requires that the client was created with "TrailingHeaders:true" option,
+ // and that the destination server supports it.
+ // Unavailable with V2 signatures & Google endpoints.
+ // This will disable content MD5 checksums if set.
+ Checksum ChecksumType
+
+ // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
+ // fill them serially and upload them in parallel.
+ // This can be used for faster uploads on non-seekable or slow-to-seek input.
+ ConcurrentStreamParts bool
+ Internal AdvancedPutOptions
+
+ customHeaders http.Header
+}
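+
+// A minimal caller-side sketch of wiring these options together, assuming a
+// configured *minio.Client named "client" and a context "ctx"; the bucket,
+// object name, and reader are placeholders:
+//
+//	opts := minio.PutObjectOptions{
+//		ContentType:           "application/octet-stream",
+//		Checksum:              minio.ChecksumSHA256, // needs a client created with TrailingHeaders: true
+//		ConcurrentStreamParts: true,                 // buffer NumThreads x PartSize bytes, upload in parallel
+//		NumThreads:            4,
+//		PartSize:              16 << 20,
+//	}
+//	_, err := client.PutObject(ctx, "example-bucket", "example-object", reader, -1, opts)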
+
+// SetMatchETag sets an If-Match precondition on the PUT: MinIO rejects the
+// upload with an error unless the existing object's ETag matches the given etag.
+// This is a MinIO-specific extension to support optimistic locking semantics.
+func (opts *PutObjectOptions) SetMatchETag(etag string) {
+ if opts.customHeaders == nil {
+ opts.customHeaders = http.Header{}
+ }
+ if etag == "*" {
+ opts.customHeaders.Set("If-Match", "*")
+ } else {
+ opts.customHeaders.Set("If-Match", "\""+etag+"\"")
+ }
+}
+
+// SetMatchETagExcept sets an If-None-Match precondition on the PUT: MinIO
+// rejects the upload with an error if the existing object's ETag matches the
+// given etag. This is a MinIO-specific extension to support optimistic locking
+// semantics.
+func (opts *PutObjectOptions) SetMatchETagExcept(etag string) {
+ if opts.customHeaders == nil {
+ opts.customHeaders = http.Header{}
+ }
+ if etag == "*" {
+ opts.customHeaders.Set("If-None-Match", "*")
+ } else {
+ opts.customHeaders.Set("If-None-Match", "\""+etag+"\"")
+ }
+}
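+
+// A minimal sketch of conditional uploads using the two setters above, assuming
+// standard If-Match / If-None-Match semantics and a configured *minio.Client
+// named "client"; the etag, bucket, and key values are placeholders:
+//
+//	opts := minio.PutObjectOptions{}
+//	opts.SetMatchETag(existingETag) // replace only while the stored object still has this ETag
+//	_, err := client.PutObject(ctx, "example-bucket", "config.json", reader, size, opts)
+//
+//	createOnly := minio.PutObjectOptions{}
+//	createOnly.SetMatchETagExcept("*") // fail if any object already exists at the key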
+
+// getNumThreads - gets the number of threads to be used in the multipart
+// put object operation
+func (opts PutObjectOptions) getNumThreads() (numThreads int) {
+ if opts.NumThreads > 0 {
+ numThreads = int(opts.NumThreads)
+ } else {
+ numThreads = totalWorkers
+ }
+ return
+}
+
+// Header - constructs the headers from metadata entered by user in
+// PutObjectOptions struct
+func (opts PutObjectOptions) Header() (header http.Header) {
+ header = make(http.Header)
+
+ contentType := opts.ContentType
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ header.Set("Content-Type", contentType)
+
+ if opts.ContentEncoding != "" {
+ header.Set("Content-Encoding", opts.ContentEncoding)
+ }
+ if opts.ContentDisposition != "" {
+ header.Set("Content-Disposition", opts.ContentDisposition)
+ }
+ if opts.ContentLanguage != "" {
+ header.Set("Content-Language", opts.ContentLanguage)
+ }
+ if opts.CacheControl != "" {
+ header.Set("Cache-Control", opts.CacheControl)
+ }
+
+ if !opts.Expires.IsZero() {
+ header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat))
+ }
+
+ if opts.Mode != "" {
+ header.Set(amzLockMode, opts.Mode.String())
+ }
+
+ if !opts.RetainUntilDate.IsZero() {
+ header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339))
+ }
+
+ if opts.LegalHold != "" {
+ header.Set(amzLegalHoldHeader, opts.LegalHold.String())
+ }
+
+ if opts.ServerSideEncryption != nil {
+ opts.ServerSideEncryption.Marshal(header)
+ }
+
+ if opts.StorageClass != "" {
+ header.Set(amzStorageClass, opts.StorageClass)
+ }
+
+ if opts.WebsiteRedirectLocation != "" {
+ header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation)
+ }
+
+ if !opts.Internal.ReplicationStatus.Empty() {
+ header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
+ }
+ if !opts.Internal.SourceMTime.IsZero() {
+ header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano))
+ }
+ if opts.Internal.SourceETag != "" {
+ header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
+ }
+ if opts.Internal.ReplicationRequest {
+ header.Set(minIOBucketReplicationRequest, "true")
+ }
+ if opts.Internal.ReplicationValidityCheck {
+ header.Set(minIOBucketReplicationCheck, "true")
+ }
+ if !opts.Internal.LegalholdTimestamp.IsZero() {
+ header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
+ }
+ if !opts.Internal.RetentionTimestamp.IsZero() {
+ header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
+ }
+ if !opts.Internal.TaggingTimestamp.IsZero() {
+ header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
+ }
+
+ if len(opts.UserTags) != 0 {
+ header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
+ }
+
+ for k, v := range opts.UserMetadata {
+ if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) {
+ header.Set(k, v)
+ } else {
+ header.Set("x-amz-meta-"+k, v)
+ }
+ }
+
+ // set any other additional custom headers.
+ for k, v := range opts.customHeaders {
+ header[k] = v
+ }
+
+ return
+}
+
+// validate() checks if the UserMetadata map has standard headers and raises an error if so.
+func (opts PutObjectOptions) validate(c *Client) (err error) {
+ for k, v := range opts.UserMetadata {
+ if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) {
+ return errInvalidArgument(k + " unsupported user defined metadata name")
+ }
+ if !httpguts.ValidHeaderFieldValue(v) {
+ return errInvalidArgument(v + " unsupported user defined metadata value")
+ }
+ }
+ if opts.Mode != "" && !opts.Mode.IsValid() {
+ return errInvalidArgument(opts.Mode.String() + " unsupported retention mode")
+ }
+ if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
+ return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
+ }
+ if opts.Checksum.IsSet() {
+ switch {
+ case !c.trailingHeaderSupport:
+ return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled")
+ case c.overrideSignerType.IsV2():
+ return errInvalidArgument("Checksum cannot be used with v2 signatures")
+ case s3utils.IsGoogleEndpoint(*c.endpointURL):
+ return errInvalidArgument("Checksum cannot be used with GCS endpoints")
+ }
+ }
+
+ return nil
+}
+
+// completedParts is a collection of parts sortable by their part numbers.
+// It is used for sorting the uploaded parts before completing the multipart request.
+type completedParts []CompletePart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// PutObject creates an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+// - For size smaller than 16MiB PutObject automatically does a
+// single atomic PUT operation.
+//
+// - For size larger than 16MiB PutObject automatically does a
+// multipart upload operation.
+//
+// - For size input as -1 PutObject does a multipart Put operation
+// until input stream reaches EOF. Maximum object size that can
+// be uploaded through this operation will be 5TiB.
+//
+// WARNING: Passing '-1' allocates buffers that cannot be reused;
+// for best results with PutObject(), always pass the size.
+//
+// NOTE: Upon errors during upload, the multipart operation is entirely aborted.
+func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ if objectSize < 0 && opts.DisableMultipart {
+ return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
+ }
+
+ err = opts.validate(c)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
+}
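+
+// A minimal usage sketch, assuming a configured *minio.Client named "client"
+// and a context "ctx"; the bucket, key, and file name are placeholders:
+//
+//	f, err := os.Open("backup.tar")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer f.Close()
+//	st, _ := f.Stat()
+//	info, err := client.PutObject(ctx, "example-bucket", "backups/backup.tar", f, st.Size(),
+//		minio.PutObjectOptions{ContentType: "application/x-tar"})
+//	if err == nil {
+//		fmt.Println("uploaded", info.Key, "size:", info.Size, "etag:", info.ETag)
+//	}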
+
+func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+ // Check for largest object size allowed.
+ if size > int64(maxMultipartPutObjectSize) {
+ return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
+ }
+ opts.AutoChecksum.SetDefault(ChecksumCRC32C)
+
+ // NOTE: Streaming signature is not supported by GCS.
+ if s3utils.IsGoogleEndpoint(*c.endpointURL) {
+ return c.putObject(ctx, bucketName, objectName, reader, size, opts)
+ }
+
+ partSize := opts.PartSize
+ if opts.PartSize == 0 {
+ partSize = minPartSize
+ }
+
+ if c.overrideSignerType.IsV2() {
+ if size >= 0 && size < int64(partSize) || opts.DisableMultipart {
+ return c.putObject(ctx, bucketName, objectName, reader, size, opts)
+ }
+ return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
+ }
+
+ if size < 0 {
+ if opts.DisableMultipart {
+ return UploadInfo{}, errors.New("no length provided and multipart disabled")
+ }
+ if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+ return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+ }
+ return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
+ }
+
+ if size <= int64(partSize) || opts.DisableMultipart {
+ return c.putObject(ctx, bucketName, objectName, reader, size, opts)
+ }
+
+ return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
+}
+
+func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Total data read and written to the server. This should equal
+ // 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ if opts.Checksum.IsSet() {
+ opts.SendContentMd5 = false
+ opts.AutoChecksum = opts.Checksum
+ }
+ if !opts.SendContentMd5 {
+ if opts.UserMetadata == nil {
+ opts.UserMetadata = make(map[string]string, 1)
+ }
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
+ }
+
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ buf := make([]byte, partSize)
+
+ // Create checksums
+ // CRC32C is ~50% faster on AMD64 @ 30GB/s
+ var crcBytes []byte
+ customHeader := make(http.Header)
+ crc := opts.AutoChecksum.Hasher()
+
+ for partNumber <= totalPartsCount {
+ length, rerr := readFull(reader, buf)
+ if rerr == io.EOF && partNumber > 1 {
+ break
+ }
+
+ if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+ return UploadInfo{}, rerr
+ }
+
+ var md5Base64 string
+ if opts.SendContentMd5 {
+ // Calculate md5sum.
+ hash := c.md5Hasher()
+ hash.Write(buf[:length])
+ md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
+ hash.Close()
+ } else {
+ crc.Reset()
+ crc.Write(buf[:length])
+ cSum := crc.Sum(nil)
+ customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
+ crcBytes = append(crcBytes, cSum...)
+ }
+
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+ // Proceed to upload the part.
+ p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
+ objPart, uerr := c.uploadPart(ctx, p)
+ if uerr != nil {
+ return UploadInfo{}, uerr
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+
+ // Increment part number.
+ partNumber++
+
+ // For an unknown size, break away on read EOF;
+ // we do not have to upload up to totalPartsCount.
+ if rerr == io.EOF {
+ break
+ }
+ }
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ ChecksumCRC32: part.ChecksumCRC32,
+ ChecksumCRC32C: part.ChecksumCRC32C,
+ ChecksumSHA1: part.ChecksumSHA1,
+ ChecksumSHA256: part.ChecksumSHA256,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
+ }
+ if len(crcBytes) > 0 {
+ // Add hash of hashes.
+ crc.Reset()
+ crc.Write(crcBytes)
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ }
+ uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalUploadedSize
+ return uploadInfo, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
new file mode 100644
index 000000000..6b6559bf7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -0,0 +1,246 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/klauspost/compress/s2"
+)
+
+// SnowballOptions contains options for PutObjectsSnowball calls.
+type SnowballOptions struct {
+ // Opts is options applied to all objects.
+ Opts PutObjectOptions
+
+ // Processing options:
+
+ // InMemory specifies that all objects should be collected in memory
+ // before they are uploaded.
+ // If false a temporary file will be created.
+ InMemory bool
+
+ // Compress enables content compression before upload.
+ // Compression will typically reduce memory and network usage.
+ // Compression can safely be enabled with MinIO hosts.
+ Compress bool
+
+ // SkipErrs, if enabled, will skip any errors encountered while reading
+ // the object content when creating the snowball archive.
+ SkipErrs bool
+}
+
+// SnowballObject contains information about a single object to be added to the snowball.
+type SnowballObject struct {
+ // Key is the destination key, including prefix.
+ Key string
+
+ // Size is the content size of this object.
+ Size int64
+
+ // Modtime to apply to the object.
+ // If ModTime is the zero value, the current time will be used.
+ ModTime time.Time
+
+ // Content of the object.
+ // Exactly 'Size' number of bytes must be provided.
+ Content io.Reader
+
+ // VersionID of the object; if empty, a new versionID will be generated
+ VersionID string
+
+ // Headers contains more options for this object upload, the same as you
+ // would include in a regular PutObject operation, such as user metadata,
+ // content-disposition, expires, and so on.
+ Headers http.Header
+
+ // Close will be called when an object has finished processing.
+ // Note that if PutObjectsSnowball returns because of an error,
+ // objects not consumed from the input will NOT have been closed.
+ // Leave as nil for no callback.
+ Close func()
+}
+
+type nopReadSeekCloser struct {
+ io.ReadSeeker
+}
+
+func (n nopReadSeekCloser) Close() error {
+ return nil
+}
+
+// This is available as io.ReadSeekCloser from go1.16
+type readSeekCloser interface {
+ io.Reader
+ io.Closer
+ io.Seeker
+}
+
+// PutObjectsSnowball will put multiple objects with a single put call.
+// A (compressed) TAR file will be created which will contain multiple objects.
+// The key for each object will be used for the destination in the specified bucket.
+// Total size should be < 5TB.
+// This function blocks until 'objs' is closed and the content has been uploaded.
+func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) {
+ err = opts.Opts.validate(&c)
+ if err != nil {
+ return err
+ }
+ var tmpWriter io.Writer
+ var getTmpReader func() (rc readSeekCloser, sz int64, err error)
+ if opts.InMemory {
+ b := bytes.NewBuffer(nil)
+ tmpWriter = b
+ getTmpReader = func() (readSeekCloser, int64, error) {
+ return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
+ }
+ } else {
+ f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
+ if err != nil {
+ return err
+ }
+ name := f.Name()
+ tmpWriter = f
+ var once sync.Once
+ defer once.Do(func() {
+ f.Close()
+ })
+ defer os.Remove(name)
+ getTmpReader = func() (readSeekCloser, int64, error) {
+ once.Do(func() {
+ f.Close()
+ })
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, 0, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ return f, st.Size(), nil
+ }
+ }
+ flush := func() error { return nil }
+ if !opts.Compress {
+ if !opts.InMemory {
+ // Insert buffer for writes.
+ buf := bufio.NewWriterSize(tmpWriter, 1<<20)
+ flush = buf.Flush
+ tmpWriter = buf
+ }
+ } else {
+ s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression())
+ flush = s2c.Close
+ defer s2c.Close()
+ tmpWriter = s2c
+ }
+ t := tar.NewWriter(tmpWriter)
+
+objectLoop:
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case obj, ok := <-objs:
+ if !ok {
+ break objectLoop
+ }
+
+ closeObj := func() {}
+ if obj.Close != nil {
+ closeObj = obj.Close
+ }
+
+ // Trim accidental slash prefix.
+ obj.Key = strings.TrimPrefix(obj.Key, "/")
+ header := tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: obj.Key,
+ Size: obj.Size,
+ ModTime: obj.ModTime,
+ Format: tar.FormatPAX,
+ }
+ if header.ModTime.IsZero() {
+ header.ModTime = time.Now().UTC()
+ }
+
+ header.PAXRecords = make(map[string]string)
+ if obj.VersionID != "" {
+ header.PAXRecords["minio.versionId"] = obj.VersionID
+ }
+ for k, vals := range obj.Headers {
+ header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",")
+ }
+
+ if err := t.WriteHeader(&header); err != nil {
+ closeObj()
+ return err
+ }
+ n, err := io.Copy(t, obj.Content)
+ if err != nil {
+ closeObj()
+ if opts.SkipErrs {
+ continue
+ }
+ return err
+ }
+ if n != obj.Size {
+ closeObj()
+ if opts.SkipErrs {
+ continue
+ }
+ return io.ErrUnexpectedEOF
+ }
+ closeObj()
+ }
+ }
+ // Flush tar
+ err = t.Flush()
+ if err != nil {
+ return err
+ }
+ // Flush compression
+ err = flush()
+ if err != nil {
+ return err
+ }
+ if opts.Opts.UserMetadata == nil {
+ opts.Opts.UserMetadata = map[string]string{}
+ }
+ opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true"
+ opts.Opts.DisableMultipart = true
+ rc, sz, err := getTmpReader()
+ if err != nil {
+ return err
+ }
+ defer rc.Close()
+ rand := c.random.Uint64()
+ _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts)
+ return err
+}
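+
+// A minimal usage sketch, assuming a configured *minio.Client named "client";
+// the keys and payloads are placeholders. Content must supply exactly Size bytes:
+//
+//	objs := make(chan minio.SnowballObject, 2)
+//	go func() {
+//		defer close(objs)
+//		for _, name := range []string{"a.txt", "b.txt"} {
+//			data := []byte("payload for " + name)
+//			objs <- minio.SnowballObject{
+//				Key:     "batch/" + name,
+//				Size:    int64(len(data)),
+//				Content: bytes.NewReader(data),
+//			}
+//		}
+//	}()
+//	err := client.PutObjectsSnowball(ctx, "example-bucket", minio.SnowballOptions{Compress: true}, objs)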
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
new file mode 100644
index 000000000..d2e932923
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -0,0 +1,548 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "io"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+//revive:disable
+
+// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions.
+type BucketOptions = RemoveBucketOptions
+
+//revive:enable
+
+// RemoveBucketOptions contains special headers to purge buckets; only
+// useful when the endpoint is MinIO.
+type RemoveBucketOptions struct {
+ ForceDelete bool
+}
+
+// RemoveBucketWithOptions deletes the bucket name.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket will be deleted forcibly if bucket options set
+// ForceDelete to 'true'.
+func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Build headers.
+ headers := make(http.Header)
+ if opts.ForceDelete {
+ headers.Set(minIOForceDelete, "true")
+ }
+
+ // Execute DELETE on bucket.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Remove the location from cache on a successful delete.
+ c.bucketLocCache.Delete(bucketName)
+ return nil
+}
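+
+// A minimal sketch of a forced bucket removal against a MinIO endpoint, assuming
+// a configured *minio.Client named "client"; the bucket name is a placeholder:
+//
+//	err := client.RemoveBucketWithOptions(ctx, "example-bucket",
+//		minio.RemoveBucketOptions{ForceDelete: true})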
+
+// RemoveBucket deletes the bucket name.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before successfully attempting this request.
+func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Execute DELETE on bucket.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Remove the location from cache on a successful delete.
+ c.bucketLocCache.Delete(bucketName)
+
+ return nil
+}
+
+// AdvancedRemoveOptions intended for internal use by replication
+type AdvancedRemoveOptions struct {
+ ReplicationDeleteMarker bool
+ ReplicationStatus ReplicationStatus
+ ReplicationMTime time.Time
+ ReplicationRequest bool
+ ReplicationValidityCheck bool // check permissions
+}
+
+// RemoveObjectOptions represents options specified by user for RemoveObject call
+type RemoveObjectOptions struct {
+ ForceDelete bool
+ GovernanceBypass bool
+ VersionID string
+ Internal AdvancedRemoveOptions
+}
+
+// RemoveObject removes an object from a bucket.
+func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ res := c.removeObject(ctx, bucketName, objectName, opts)
+ return res.Err
+}
+
+func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+
+ if opts.VersionID != "" {
+ urlValues.Set("versionId", opts.VersionID)
+ }
+
+ // Build headers.
+ headers := make(http.Header)
+
+ if opts.GovernanceBypass {
+ // Set the bypass governance retention header
+ headers.Set(amzBypassGovernance, "true")
+ }
+ if opts.Internal.ReplicationDeleteMarker {
+ headers.Set(minIOBucketReplicationDeleteMarker, "true")
+ }
+ if !opts.Internal.ReplicationMTime.IsZero() {
+ headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano))
+ }
+ if !opts.Internal.ReplicationStatus.Empty() {
+ headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
+ }
+ if opts.Internal.ReplicationRequest {
+ headers.Set(minIOBucketReplicationRequest, "true")
+ }
+ if opts.Internal.ReplicationValidityCheck {
+ headers.Set(minIOBucketReplicationCheck, "true")
+ }
+ if opts.ForceDelete {
+ headers.Set(minIOForceDelete, "true")
+ }
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Hex: emptySHA256Hex,
+ queryValues: urlValues,
+ customHeader: headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return RemoveObjectResult{Err: err}
+ }
+ if resp != nil {
+ // If some unexpected error happened and the max retry count was reached, let the client know.
+ if resp.StatusCode != http.StatusNoContent {
+ err := httpRespToErrorResponse(resp, bucketName, objectName)
+ return RemoveObjectResult{Err: err}
+ }
+ }
+
+ // DeleteObject always responds with http '204' even for
+ // objects which do not exist. So no need to handle them
+ // specifically.
+ return RemoveObjectResult{
+ ObjectName: objectName,
+ ObjectVersionID: opts.VersionID,
+ DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true",
+ DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"),
+ }
+}
+
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+ ObjectName string
+ VersionID string
+ Err error
+}
+
+// RemoveObjectResult - container of Multi Delete S3 API result
+type RemoveObjectResult struct {
+ ObjectName string
+ ObjectVersionID string
+
+ DeleteMarker bool
+ DeleteMarkerVersionID string
+
+ Err error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML request for the multi-object delete request
+func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
+ delObjects := []deleteObject{}
+ for _, obj := range objects {
+ delObjects = append(delObjects, deleteObject{
+ Key: obj.Key,
+ VersionID: obj.VersionID,
+ })
+ }
+ xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false})
+ return xmlBytes
+}
+
+// processRemoveMultiObjectsResponse - parse the multi-object delete response
+// and return the success/failure result status for each object
+func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) {
+ // Parse multi delete XML response
+ rmResult := &deleteMultiObjectsResult{}
+ err := xmlDecoder(body, rmResult)
+ if err != nil {
+ resultCh <- RemoveObjectResult{ObjectName: "", Err: err}
+ return
+ }
+
+ // Fill deletions that returned success
+ for _, obj := range rmResult.DeletedObjects {
+ resultCh <- RemoveObjectResult{
+ ObjectName: obj.Key,
+ // Only filled with versioned buckets
+ ObjectVersionID: obj.VersionID,
+ DeleteMarker: obj.DeleteMarker,
+ DeleteMarkerVersionID: obj.DeleteMarkerVersionID,
+ }
+ }
+
+ // Fill deletions that returned an error.
+ for _, obj := range rmResult.UnDeletedObjects {
+ // 'Version does not exist' is not an error; ignore and continue.
+ switch obj.Code {
+ case "InvalidArgument", "NoSuchVersion":
+ continue
+ }
+ resultCh <- RemoveObjectResult{
+ ObjectName: obj.Key,
+ ObjectVersionID: obj.VersionID,
+ Err: ErrorResponse{
+ Code: obj.Code,
+ Message: obj.Message,
+ },
+ }
+ }
+}
+
+// RemoveObjectsOptions represents options specified by user for RemoveObjects call
+type RemoveObjectsOptions struct {
+ GovernanceBypass bool
+}
+
+// RemoveObjects removes multiple objects from a bucket. Specific object versions
+// can be targeted by setting VersionID on the ObjectInfo values received from
+// objectsCh. Removal failures are sent back via the error channel.
+func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError {
+ errorCh := make(chan RemoveObjectError, 1)
+
+ // Validate if bucket name is valid.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: err,
+ }
+ return errorCh
+ }
+ // Validate objects channel to be properly allocated.
+ if objectsCh == nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: errInvalidArgument("Objects channel cannot be nil"),
+ }
+ return errorCh
+ }
+
+ resultCh := make(chan RemoveObjectResult, 1)
+ go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts)
+ go func() {
+ defer close(errorCh)
+ for res := range resultCh {
+ // Send only errors to the error channel
+ if res.Err == nil {
+ continue
+ }
+ errorCh <- RemoveObjectError{
+ ObjectName: res.ObjectName,
+ VersionID: res.ObjectVersionID,
+ Err: res.Err,
+ }
+ }
+ }()
+
+ return errorCh
+}
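+
+// A minimal usage sketch, assuming a configured *minio.Client named "client";
+// the bucket and keys are placeholders. The returned error channel must be drained:
+//
+//	objectsCh := make(chan minio.ObjectInfo)
+//	go func() {
+//		defer close(objectsCh)
+//		for _, key := range []string{"logs/1.log", "logs/2.log"} {
+//			objectsCh <- minio.ObjectInfo{Key: key}
+//		}
+//	}()
+//	for rmErr := range client.RemoveObjects(ctx, "example-bucket", objectsCh, minio.RemoveObjectsOptions{}) {
+//		fmt.Println("failed to remove", rmErr.ObjectName, rmErr.Err)
+//	}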
+
+// RemoveObjectsWithResult removes multiple objects from a bucket. Specific object
+// versions can be targeted by setting VersionID on the ObjectInfo values received
+// from objectsCh. Removal results, both successes and failures, are sent back via
+// the RemoveObjectResult channel.
+func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult {
+ resultCh := make(chan RemoveObjectResult, 1)
+
+ // Validate if bucket name is valid.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(resultCh)
+ resultCh <- RemoveObjectResult{
+ Err: err,
+ }
+ return resultCh
+ }
+ // Validate objects channel to be properly allocated.
+ if objectsCh == nil {
+ defer close(resultCh)
+ resultCh <- RemoveObjectResult{
+ Err: errInvalidArgument("Objects channel cannot be nil"),
+ }
+ return resultCh
+ }
+
+ go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts)
+ return resultCh
+}
+
+// Return true if the character is within the allowed characters in an XML 1.0 document
+// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets
+func validXMLChar(r rune) (ok bool) {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
+
+func hasInvalidXMLChar(str string) bool {
+ for _, s := range str {
+ if !validXMLChar(s) {
+ return true
+ }
+ }
+ return false
+}
+
+// Generate and call MultiDelete S3 requests based on entries received from objectsCh
+func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
+ maxEntries := 1000
+ finish := false
+ urlValues := make(url.Values)
+ urlValues.Set("delete", "")
+
+ // Close result channel when Multi delete finishes.
+ defer close(resultCh)
+
+ // Loop over entries by 1000 and call MultiDelete requests
+ for {
+ if finish {
+ break
+ }
+ count := 0
+ var batch []ObjectInfo
+
+ // Try to gather 1000 entries
+ for object := range objectsCh {
+ if hasInvalidXMLChar(object.Key) {
+ // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document.
+ removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
+ VersionID: object.VersionID,
+ GovernanceBypass: opts.GovernanceBypass,
+ })
+				if err := removeResult.Err; err != nil {
+					// A missing version is not an error; ignore and continue.
+					switch ToErrorResponse(err).Code {
+					case "InvalidArgument", "NoSuchVersion":
+						continue
+					}
+				}
+
+ resultCh <- removeResult
+ continue
+ }
+
+ batch = append(batch, object)
+ if count++; count >= maxEntries {
+ break
+ }
+ }
+ if count == 0 {
+ // Multi Objects Delete API doesn't accept empty object list, quit immediately
+ break
+ }
+ if count < maxEntries {
+ // We didn't have 1000 entries, so this is the last batch
+ finish = true
+ }
+
+ // Build headers.
+ headers := make(http.Header)
+ if opts.GovernanceBypass {
+			// Set the bypass governance retention header
+ headers.Set(amzBypassGovernance, "true")
+ }
+
+ // Generate remove multi objects XML request
+ removeBytes := generateRemoveMultiObjectsRequest(batch)
+ // Execute POST on bucket to remove objects.
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Base64: sumMD5Base64(removeBytes),
+ contentSHA256Hex: sum256Hex(removeBytes),
+ customHeader: headers,
+ })
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ e := httpRespToErrorResponse(resp, bucketName, "")
+ resultCh <- RemoveObjectResult{ObjectName: "", Err: e}
+ }
+ }
+ if err != nil {
+ for _, b := range batch {
+ resultCh <- RemoveObjectResult{
+ ObjectName: b.Key,
+ ObjectVersionID: b.VersionID,
+ Err: err,
+ }
+ }
+ continue
+ }
+
+ // Process multiobjects remove xml response
+ processRemoveMultiObjectsResponse(resp.Body, resultCh)
+
+ closeResponse(resp)
+ }
+}
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+ // Find multipart upload ids of the object to be aborted.
+ uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName)
+ if err != nil {
+ return err
+ }
+
+ for _, uploadID := range uploadIDs {
+ // abort incomplete multipart upload, based on the upload id passed.
+ err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// abortMultipartUpload aborts a multipart upload for the given
+// uploadID; all previously uploaded parts are deleted.
+func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+
+ // Execute DELETE on multipart upload.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ // Abort has no response body, handle it for any errors.
+ var errorResponse ErrorResponse
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ // This is needed specifically for abort and it cannot
+ // be converged into default case.
+ errorResponse = ErrorResponse{
+ Code: "NoSuchUpload",
+ Message: "The specified multipart upload does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ default:
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ return errorResponse
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go
new file mode 100644
index 000000000..9ec8f4f24
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-restore.go
@@ -0,0 +1,182 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2018-2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// RestoreType represents the restore request type
+type RestoreType string
+
+const (
+ // RestoreSelect represents the restore SELECT operation
+ RestoreSelect = RestoreType("SELECT")
+)
+
+// TierType represents a retrieval tier
+type TierType string
+
+const (
+ // TierStandard is the standard retrieval tier
+ TierStandard = TierType("Standard")
+ // TierBulk is the bulk retrieval tier
+ TierBulk = TierType("Bulk")
+ // TierExpedited is the expedited retrieval tier
+ TierExpedited = TierType("Expedited")
+)
+
+// GlacierJobParameters represents the retrieval tier parameter
+type GlacierJobParameters struct {
+ Tier TierType
+}
+
+// Encryption contains the type of server-side encryption used during object retrieval
+type Encryption struct {
+ EncryptionType string
+ KMSContext string
+ KMSKeyID string `xml:"KMSKeyId"`
+}
+
+// MetadataEntry represents a metadata entry of the restored object.
+type MetadataEntry struct {
+ Name string
+ Value string
+}
+
+// S3 holds properties of the copy of the archived object
+type S3 struct {
+ AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"`
+ BucketName string
+ Prefix string
+ CannedACL *string `xml:"CannedACL,omitempty"`
+ Encryption *Encryption `xml:"Encryption,omitempty"`
+ StorageClass *string `xml:"StorageClass,omitempty"`
+ Tagging *tags.Tags `xml:"Tagging,omitempty"`
+ UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"`
+}
+
+// SelectParameters holds the select request parameters
+type SelectParameters struct {
+ XMLName xml.Name `xml:"SelectParameters"`
+ ExpressionType QueryExpressionType
+ Expression string
+ InputSerialization SelectObjectInputSerialization
+ OutputSerialization SelectObjectOutputSerialization
+}
+
+// OutputLocation holds properties of the copy of the archived object
+type OutputLocation struct {
+ XMLName xml.Name `xml:"OutputLocation"`
+ S3 S3 `xml:"S3"`
+}
+
+// RestoreRequest holds properties of the restore object request
+type RestoreRequest struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"`
+ Type *RestoreType `xml:"Type,omitempty"`
+ Tier *TierType `xml:"Tier,omitempty"`
+ Days *int `xml:"Days,omitempty"`
+ GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"`
+ Description *string `xml:"Description,omitempty"`
+ SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"`
+ OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"`
+}
+
+// SetDays sets the days parameter of the restore request
+func (r *RestoreRequest) SetDays(v int) {
+ r.Days = &v
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters of the restore request
+func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) {
+ r.GlacierJobParameters = &v
+}
+
+// SetType sets the type of the restore request
+func (r *RestoreRequest) SetType(v RestoreType) {
+ r.Type = &v
+}
+
+// SetTier sets the retrieval tier of the restore request
+func (r *RestoreRequest) SetTier(v TierType) {
+ r.Tier = &v
+}
+
+// SetDescription sets the description of the restore request
+func (r *RestoreRequest) SetDescription(v string) {
+ r.Description = &v
+}
+
+// SetSelectParameters sets SelectParameters of the restore select request
+func (r *RestoreRequest) SetSelectParameters(v SelectParameters) {
+ r.SelectParameters = &v
+}
+
+// SetOutputLocation sets the properties of the copy of the archived object
+func (r *RestoreRequest) SetOutputLocation(v OutputLocation) {
+ r.OutputLocation = &v
+}
+
+// RestoreObject is an implementation of the AWS S3 RestoreObject API:
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
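+//
+// A minimal usage sketch (illustrative only; assumes an initialized *minio.Client
+// named client and placeholder bucket/object names; the empty versionID targets
+// the latest version):
+//
+//	req := minio.RestoreRequest{}
+//	req.SetDays(1)
+//	req.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})
+//	err := client.RestoreObject(context.Background(), "my-bucket", "archived-object", "", req)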
+func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ restoreRequestBytes, err := xml.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ urlValues := make(url.Values)
+ urlValues.Set("restore", "")
+ if versionID != "" {
+ urlValues.Set("versionId", versionID)
+ }
+
+ // Execute POST on bucket/object.
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentMD5Base64: sumMD5Base64(restoreRequestBytes),
+ contentSHA256Hex: sum256Hex(restoreRequestBytes),
+ contentBody: bytes.NewReader(restoreRequestBytes),
+ contentLength: int64(len(restoreRequestBytes)),
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
new file mode 100644
index 000000000..790606c50
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -0,0 +1,406 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "errors"
+ "io"
+ "reflect"
+ "time"
+)
+
+// listAllMyBucketsResult container for listBuckets response.
+type listAllMyBucketsResult struct {
+ // Container for one or more buckets.
+ Buckets struct {
+ Bucket []BucketInfo
+ }
+ Owner owner
+}
+
+// owner container for bucket owner information.
+type owner struct {
+ DisplayName string
+ ID string
+}
+
+// CommonPrefix container for prefix response.
+type CommonPrefix struct {
+ Prefix string
+}
+
+// ListBucketV2Result container for listObjects response version 2.
+type ListBucketV2Result struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []CommonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ MaxKeys int64
+ Name string
+
+ // Hold the token that will be sent in the next request to fetch the next group of keys
+ NextContinuationToken string
+
+ ContinuationToken string
+ Prefix string
+
+ // FetchOwner and StartAfter are currently not used
+ FetchOwner string
+ StartAfter string
+}
+
+// Version is an element in the list object versions response
+type Version struct {
+ ETag string
+ IsLatest bool
+ Key string
+ LastModified time.Time
+ Owner Owner
+ Size int64
+ StorageClass string
+ VersionID string `xml:"VersionId"`
+
+	// x-amz-meta-* headers with the "x-amz-meta-" prefix stripped, containing the first value.
+ // Only returned by MinIO servers.
+ UserMetadata StringMap `json:"userMetadata,omitempty"`
+
+	// x-amz-tagging values as key/value pairs.
+ // Only returned by MinIO servers.
+ UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
+
+ Internal *struct {
+ K int // Data blocks
+ M int // Parity blocks
+ } `xml:"Internal"`
+
+ isDeleteMarker bool
+}
+
+// ListVersionsResult is an element in the list object versions response
+// and has a special Unmarshaler because we need to preserve the order
+// of <Version> and <DeleteMarker> in the ListVersionsResult.Versions slice
+type ListVersionsResult struct {
+ Versions []Version
+
+ CommonPrefixes []CommonPrefix
+ Name string
+ Prefix string
+ Delimiter string
+ MaxKeys int64
+ EncodingType string
+ IsTruncated bool
+ KeyMarker string
+ VersionIDMarker string
+ NextKeyMarker string
+ NextVersionIDMarker string
+}
+
+// UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom
+// code will unmarshal <Version> and <DeleteMarker> tags and save them in the Versions field to
+// preserve the lexical order of the listing.
+func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) {
+ for {
+ // Read tokens from the XML document in a stream.
+ t, err := d.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ se, ok := t.(xml.StartElement)
+ if ok {
+ tagName := se.Name.Local
+ switch tagName {
+ case "Name", "Prefix",
+ "Delimiter", "EncodingType",
+ "KeyMarker", "NextKeyMarker":
+ var s string
+ if err = d.DecodeElement(&s, &se); err != nil {
+ return err
+ }
+ v := reflect.ValueOf(l).Elem().FieldByName(tagName)
+ if v.IsValid() {
+ v.SetString(s)
+ }
+ case "VersionIdMarker":
+ // VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
+ var s string
+ if err = d.DecodeElement(&s, &se); err != nil {
+ return err
+ }
+ l.VersionIDMarker = s
+ case "NextVersionIdMarker":
+ // NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
+ var s string
+ if err = d.DecodeElement(&s, &se); err != nil {
+ return err
+ }
+ l.NextVersionIDMarker = s
+ case "IsTruncated": // bool
+ var b bool
+ if err = d.DecodeElement(&b, &se); err != nil {
+ return err
+ }
+ l.IsTruncated = b
+ case "MaxKeys": // int64
+ var i int64
+ if err = d.DecodeElement(&i, &se); err != nil {
+ return err
+ }
+ l.MaxKeys = i
+ case "CommonPrefixes":
+ var cp CommonPrefix
+ if err = d.DecodeElement(&cp, &se); err != nil {
+ return err
+ }
+ l.CommonPrefixes = append(l.CommonPrefixes, cp)
+ case "DeleteMarker", "Version":
+ var v Version
+ if err = d.DecodeElement(&v, &se); err != nil {
+ return err
+ }
+ if tagName == "DeleteMarker" {
+ v.isDeleteMarker = true
+ }
+ l.Versions = append(l.Versions, v)
+ default:
+ return errors.New("unrecognized option:" + tagName)
+ }
+
+ }
+ }
+ return nil
+}
+
+// ListBucketResult container for listObjects response.
+type ListBucketResult struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []CommonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ Marker string
+ MaxKeys int64
+ Name string
+
+ // When response is truncated (the IsTruncated element value in
+ // the response is true), you can use the key name in this field
+ // as marker in the subsequent request to get next set of objects.
+	// Object storage lists objects in alphabetical order. Note: This
+	// element is returned only if you have the delimiter request
+	// parameter specified. If the response does not include NextMarker
+ // and it is truncated, you can use the value of the last Key in
+ // the response as the marker in the subsequent request to get the
+ // next set of object keys.
+ NextMarker string
+ Prefix string
+}
+
+// ListMultipartUploadsResult container for ListMultipartUploads response
+type ListMultipartUploadsResult struct {
+ Bucket string
+ KeyMarker string
+ UploadIDMarker string `xml:"UploadIdMarker"`
+ NextKeyMarker string
+ NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+ EncodingType string
+ MaxUploads int64
+ IsTruncated bool
+ Uploads []ObjectMultipartInfo `xml:"Upload"`
+ Prefix string
+ Delimiter string
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ CommonPrefixes []CommonPrefix
+}
+
+// initiator container for who initiated multipart upload.
+type initiator struct {
+ ID string
+ DisplayName string
+}
+
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+ ETag string
+ LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
+}
+
+// ObjectPart container for particular part of an object.
+type ObjectPart struct {
+ // Part number identifies the part.
+ PartNumber int
+
+ // Date and time the part was uploaded.
+ LastModified time.Time
+
+ // Entity tag returned when the part was uploaded, usually md5sum
+ // of the part.
+ ETag string
+
+ // Size of the uploaded part data.
+ Size int64
+
+ // Checksum values of each part.
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+}
+
+// ListObjectPartsResult container for ListObjectParts response.
+type ListObjectPartsResult struct {
+ Bucket string
+ Key string
+ UploadID string `xml:"UploadId"`
+
+ Initiator initiator
+ Owner owner
+
+ StorageClass string
+ PartNumberMarker int
+ NextPartNumberMarker int
+ MaxParts int
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated bool
+ ObjectParts []ObjectPart `xml:"Part"`
+
+ EncodingType string
+}
+
+// initiateMultipartUploadResult container for InitiateMultiPartUpload
+// response.
+type initiateMultipartUploadResult struct {
+ Bucket string
+ Key string
+ UploadID string `xml:"UploadId"`
+}
+
+// completeMultipartUploadResult container for completed multipart
+// upload response.
+type completeMultipartUploadResult struct {
+ Location string
+ Bucket string
+ Key string
+ ETag string
+
+ // Checksum values, hash of hashes of parts.
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+}
+
+// CompletePart sub container lists individual part numbers and their
+// md5sum, part of completeMultipartUpload.
+type CompletePart struct {
+ // Part number identifies the part.
+ PartNumber int
+ ETag string
+
+ // Checksum values
+ ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
+ ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
+ ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
+ ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
+}
+
+// Checksum will return the checksum for the given type.
+// Will return the empty string if not set.
+func (c CompletePart) Checksum(t ChecksumType) string {
+ switch {
+ case t.Is(ChecksumCRC32C):
+ return c.ChecksumCRC32C
+ case t.Is(ChecksumCRC32):
+ return c.ChecksumCRC32
+ case t.Is(ChecksumSHA1):
+ return c.ChecksumSHA1
+ case t.Is(ChecksumSHA256):
+ return c.ChecksumSHA256
+ }
+ return ""
+}
+
+// completeMultipartUpload container for completing multipart upload.
+type completeMultipartUpload struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
+ Parts []CompletePart `xml:"Part"`
+}
+
+// createBucketConfiguration container for bucket configuration.
+type createBucketConfiguration struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
+ Location string `xml:"LocationConstraint"`
+}
+
+// deleteObject container for Delete element in MultiObjects Delete XML request
+type deleteObject struct {
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for Deleted element in MultiObjects Delete XML response
+type deletedObject struct {
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+	// These fields are only filled when a delete marker is created or removed (versioned buckets).
+ DeleteMarker bool
+ DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
+}
+
+// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
+type nonDeletedObject struct {
+ Key string
+ Code string
+ Message string
+ VersionID string `xml:"VersionId"`
+}
+
+// deleteMultiObjects container for MultiObjects Delete XML request
+type deleteMultiObjects struct {
+ XMLName xml.Name `xml:"Delete"`
+ Quiet bool
+ Objects []deleteObject `xml:"Object"`
+}
+
+// deleteMultiObjectsResult container for MultiObjects Delete XML response
+type deleteMultiObjectsResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ DeletedObjects []deletedObject `xml:"Deleted"`
+ UnDeletedObjects []nonDeletedObject `xml:"Error"`
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go
new file mode 100644
index 000000000..628d967ff
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-select.go
@@ -0,0 +1,757 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2018-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// CSVFileHeaderInfo - is the parameter for whether to utilize headers.
+type CSVFileHeaderInfo string
+
+// Constants for file header info.
+const (
+ CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
+ CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
+ CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE"
+)
+
+// SelectCompressionType - is the parameter for what type of compression is
+// present
+type SelectCompressionType string
+
+// Constants for compression types under select API.
+const (
+ SelectCompressionNONE SelectCompressionType = "NONE"
+ SelectCompressionGZIP SelectCompressionType = "GZIP"
+ SelectCompressionBZIP SelectCompressionType = "BZIP2"
+
+ // Non-standard compression schemes, supported by MinIO hosts:
+
+ SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression.
+ SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream
+ SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream
+ SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
+)
+
+// CSVQuoteFields - is the parameter for how CSV fields are quoted.
+type CSVQuoteFields string
+
+// Constants for csv quote styles.
+const (
+ CSVQuoteFieldsAlways CSVQuoteFields = "Always"
+ CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
+)
+
+// QueryExpressionType - indicates the syntax of the expression; this should only
+// be SQL
+type QueryExpressionType string
+
+// Constants for expression type.
+const (
+ QueryExpressionTypeSQL QueryExpressionType = "SQL"
+)
+
+// JSONType determines json input serialization type.
+type JSONType string
+
+// Constants for JSONTypes.
+const (
+ JSONDocumentType JSONType = "DOCUMENT"
+ JSONLinesType JSONType = "LINES"
+)
+
+// ParquetInputOptions parquet input specific options
+type ParquetInputOptions struct{}
+
+// CSVInputOptions csv input specific options
+type CSVInputOptions struct {
+ FileHeaderInfo CSVFileHeaderInfo
+ fileHeaderInfoSet bool
+
+ RecordDelimiter string
+ recordDelimiterSet bool
+
+ FieldDelimiter string
+ fieldDelimiterSet bool
+
+ QuoteCharacter string
+ quoteCharacterSet bool
+
+ QuoteEscapeCharacter string
+ quoteEscapeCharacterSet bool
+
+ Comments string
+ commentsSet bool
+}
+
+// SetFileHeaderInfo sets the file header info in the CSV input options
+func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) {
+ c.FileHeaderInfo = val
+ c.fileHeaderInfoSet = true
+}
+
+// SetRecordDelimiter sets the record delimiter in the CSV input options
+func (c *CSVInputOptions) SetRecordDelimiter(val string) {
+ c.RecordDelimiter = val
+ c.recordDelimiterSet = true
+}
+
+// SetFieldDelimiter sets the field delimiter in the CSV input options
+func (c *CSVInputOptions) SetFieldDelimiter(val string) {
+ c.FieldDelimiter = val
+ c.fieldDelimiterSet = true
+}
+
+// SetQuoteCharacter sets the quote character in the CSV input options
+func (c *CSVInputOptions) SetQuoteCharacter(val string) {
+ c.QuoteCharacter = val
+ c.quoteCharacterSet = true
+}
+
+// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options
+func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) {
+ c.QuoteEscapeCharacter = val
+ c.quoteEscapeCharacterSet = true
+}
+
+// SetComments sets the comments character in the CSV input options
+func (c *CSVInputOptions) SetComments(val string) {
+ c.Comments = val
+ c.commentsSet = true
+}
+
+// MarshalXML - produces the xml representation of the CSV input options struct
+func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+ if c.FileHeaderInfo != "" || c.fileHeaderInfoSet {
+ if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.RecordDelimiter != "" || c.recordDelimiterSet {
+ if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.FieldDelimiter != "" || c.fieldDelimiterSet {
+ if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.QuoteCharacter != "" || c.quoteCharacterSet {
+ if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet {
+ if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.Comments != "" || c.commentsSet {
+ if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil {
+ return err
+ }
+ }
+
+ return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// CSVOutputOptions csv output specific options
+type CSVOutputOptions struct {
+ QuoteFields CSVQuoteFields
+ quoteFieldsSet bool
+
+ RecordDelimiter string
+ recordDelimiterSet bool
+
+ FieldDelimiter string
+ fieldDelimiterSet bool
+
+ QuoteCharacter string
+ quoteCharacterSet bool
+
+ QuoteEscapeCharacter string
+ quoteEscapeCharacterSet bool
+}
+
+// SetQuoteFields sets the quote field parameter in the CSV output options
+func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) {
+ c.QuoteFields = val
+ c.quoteFieldsSet = true
+}
+
+// SetRecordDelimiter sets the record delimiter character in the CSV output options
+func (c *CSVOutputOptions) SetRecordDelimiter(val string) {
+ c.RecordDelimiter = val
+ c.recordDelimiterSet = true
+}
+
+// SetFieldDelimiter sets the field delimiter character in the CSV output options
+func (c *CSVOutputOptions) SetFieldDelimiter(val string) {
+ c.FieldDelimiter = val
+ c.fieldDelimiterSet = true
+}
+
+// SetQuoteCharacter sets the quote character in the CSV output options
+func (c *CSVOutputOptions) SetQuoteCharacter(val string) {
+ c.QuoteCharacter = val
+ c.quoteCharacterSet = true
+}
+
+// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options
+func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) {
+ c.QuoteEscapeCharacter = val
+ c.quoteEscapeCharacterSet = true
+}
+
+// MarshalXML - produces the xml representation of the CSVOutputOptions struct
+func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+
+ if c.QuoteFields != "" || c.quoteFieldsSet {
+ if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.RecordDelimiter != "" || c.recordDelimiterSet {
+ if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.FieldDelimiter != "" || c.fieldDelimiterSet {
+ if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.QuoteCharacter != "" || c.quoteCharacterSet {
+ if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil {
+ return err
+ }
+ }
+
+ if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet {
+ if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil {
+ return err
+ }
+ }
+
+ return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// JSONInputOptions json input specific options
+type JSONInputOptions struct {
+ Type JSONType
+ typeSet bool
+}
+
+// SetType sets the JSON type in the JSON input options
+func (j *JSONInputOptions) SetType(typ JSONType) {
+ j.Type = typ
+ j.typeSet = true
+}
+
+// MarshalXML - produces the xml representation of the JSONInputOptions struct
+func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+
+ if j.Type != "" || j.typeSet {
+ if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil {
+ return err
+ }
+ }
+
+ return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// JSONOutputOptions - json output specific options
+type JSONOutputOptions struct {
+ RecordDelimiter string
+ recordDelimiterSet bool
+}
+
+// SetRecordDelimiter sets the record delimiter in the JSON output options
+func (j *JSONOutputOptions) SetRecordDelimiter(val string) {
+ j.RecordDelimiter = val
+ j.recordDelimiterSet = true
+}
+
+// MarshalXML - produces the xml representation of the JSONOutputOptions struct
+func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+
+ if j.RecordDelimiter != "" || j.recordDelimiterSet {
+ if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
+ return err
+ }
+ }
+
+ return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// SelectObjectInputSerialization - input serialization parameters
+type SelectObjectInputSerialization struct {
+ CompressionType SelectCompressionType `xml:"CompressionType,omitempty"`
+ Parquet *ParquetInputOptions `xml:"Parquet,omitempty"`
+ CSV *CSVInputOptions `xml:"CSV,omitempty"`
+ JSON *JSONInputOptions `xml:"JSON,omitempty"`
+}
+
+// SelectObjectOutputSerialization - output serialization parameters.
+type SelectObjectOutputSerialization struct {
+ CSV *CSVOutputOptions `xml:"CSV,omitempty"`
+ JSON *JSONOutputOptions `xml:"JSON,omitempty"`
+}
+
+// SelectObjectOptions - represents the input select body
+type SelectObjectOptions struct {
+ XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"`
+ ServerSideEncryption encrypt.ServerSide `xml:"-"`
+ Expression string
+ ExpressionType QueryExpressionType
+ InputSerialization SelectObjectInputSerialization
+ OutputSerialization SelectObjectOutputSerialization
+ RequestProgress struct {
+ Enabled bool
+ }
+}
+
+// Header returns the http.Header representation of the SelectObject options.
+func (o SelectObjectOptions) Header() http.Header {
+ headers := make(http.Header)
+ if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
+ o.ServerSideEncryption.Marshal(headers)
+ }
+ return headers
+}
+
+// SelectObjectType - is the parameter which defines what type of object the
+// operation is being performed on.
+type SelectObjectType string
+
+// Constants for input data types.
+const (
+ SelectObjectTypeCSV SelectObjectType = "CSV"
+ SelectObjectTypeJSON SelectObjectType = "JSON"
+ SelectObjectTypeParquet SelectObjectType = "Parquet"
+)
+
+// preludeInfo is used for keeping track of necessary information from the
+// prelude.
+type preludeInfo struct {
+ totalLen uint32
+ headerLen uint32
+}
+
+// SelectResults is used for the streaming responses from the server.
+type SelectResults struct {
+ pipeReader *io.PipeReader
+ resp *http.Response
+ stats *StatsMessage
+ progress *ProgressMessage
+}
+
+// ProgressMessage is a struct for progress xml message.
+type ProgressMessage struct {
+ XMLName xml.Name `xml:"Progress" json:"-"`
+ StatsMessage
+}
+
+// StatsMessage is a struct for stat xml message.
+type StatsMessage struct {
+ XMLName xml.Name `xml:"Stats" json:"-"`
+ BytesScanned int64
+ BytesProcessed int64
+ BytesReturned int64
+}
+
+// messageType represents the type of message.
+type messageType string
+
+const (
+ errorMsg messageType = "error"
+ commonMsg messageType = "event"
+)
+
+// eventType represents the type of event.
+type eventType string
+
+// list of event-types returned by Select API.
+const (
+ endEvent eventType = "End"
+ recordsEvent eventType = "Records"
+ progressEvent eventType = "Progress"
+ statsEvent eventType = "Stats"
+)
+
+// contentType represents content type of event.
+type contentType string
+
+const (
+ xmlContent contentType = "text/xml"
+)
+
+// SelectObjectContent is an implementation of the http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API.
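+//
+// A minimal usage sketch (illustrative only; assumes an initialized *minio.Client
+// named client and a CSV object with a header row):
+//
+//	opts := minio.SelectObjectOptions{
+//		Expression:     "SELECT * FROM s3object",
+//		ExpressionType: minio.QueryExpressionTypeSQL,
+//		InputSerialization: minio.SelectObjectInputSerialization{
+//			CompressionType: minio.SelectCompressionNONE,
+//			CSV:             &minio.CSVInputOptions{FileHeaderInfo: minio.CSVFileHeaderInfoUse},
+//		},
+//		OutputSerialization: minio.SelectObjectOutputSerialization{
+//			CSV: &minio.CSVOutputOptions{RecordDelimiter: "\n"},
+//		},
+//	}
+//	res, err := client.SelectObjectContent(context.Background(), "my-bucket", "data.csv", opts)
+//	if err == nil {
+//		defer res.Close()
+//		io.Copy(os.Stdout, res)
+//	}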
+func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+
+ selectReqBytes, err := xml.Marshal(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ urlValues := make(url.Values)
+ urlValues.Set("select", "")
+ urlValues.Set("select-type", "2")
+
+ // Execute POST on bucket/object.
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: opts.Header(),
+ contentMD5Base64: sumMD5Base64(selectReqBytes),
+ contentSHA256Hex: sum256Hex(selectReqBytes),
+ contentBody: bytes.NewReader(selectReqBytes),
+ contentLength: int64(len(selectReqBytes)),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return NewSelectResults(resp, bucketName)
+}
+
+// NewSelectResults creates a Select Result parser that parses the response
+// and returns a Reader that will return parsed and assembled select output.
+func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+ streamer := &SelectResults{
+ resp: resp,
+ stats: &StatsMessage{},
+ progress: &ProgressMessage{},
+ pipeReader: pipeReader,
+ }
+ streamer.start(pipeWriter)
+ return streamer, nil
+}
+
+// Close - closes the underlying response body and the stream reader.
+func (s *SelectResults) Close() error {
+ defer closeResponse(s.resp)
+ return s.pipeReader.Close()
+}
+
+// Read - is a reader compatible implementation for SelectObjectContent records.
+func (s *SelectResults) Read(b []byte) (n int, err error) {
+ return s.pipeReader.Read(b)
+}
+
+// Stats - information about a request's stats when processing is complete.
+func (s *SelectResults) Stats() *StatsMessage {
+ return s.stats
+}
+
+// Progress - information about the progress of a request.
+func (s *SelectResults) Progress() *ProgressMessage {
+ return s.progress
+}
+
+// start is the main function that decodes the large byte array into
+// several events that are sent through the eventstream.
+func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
+ go func() {
+ for {
+ var prelude preludeInfo
+ headers := make(http.Header)
+ var err error
+
+ // Create CRC code
+ crc := crc32.New(crc32.IEEETable)
+ crcReader := io.TeeReader(s.resp.Body, crc)
+
+ // Extract the prelude(12 bytes) into a struct to extract relevant information.
+ prelude, err = processPrelude(crcReader, crc)
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ closeResponse(s.resp)
+ return
+ }
+
+ // Extract the headers(variable bytes) into a struct to extract relevant information
+ if prelude.headerLen > 0 {
+ if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil {
+ pipeWriter.CloseWithError(err)
+ closeResponse(s.resp)
+ return
+ }
+ }
+
+ // Get the actual payload length so that the appropriate amount of
+ // bytes can be read or parsed.
+ payloadLen := prelude.PayloadLen()
+
+ m := messageType(headers.Get("message-type"))
+
+ switch m {
+ case errorMsg:
+ pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\""))
+ closeResponse(s.resp)
+ return
+ case commonMsg:
+ // Get content-type of the payload.
+ c := contentType(headers.Get("content-type"))
+
+ // Get event type of the payload.
+ e := eventType(headers.Get("event-type"))
+
+ // Handle all supported events.
+ switch e {
+ case endEvent:
+ pipeWriter.Close()
+ closeResponse(s.resp)
+ return
+ case recordsEvent:
+ if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil {
+ pipeWriter.CloseWithError(err)
+ closeResponse(s.resp)
+ return
+ }
+ case progressEvent:
+ switch c {
+ case xmlContent:
+ if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil {
+ pipeWriter.CloseWithError(err)
+ closeResponse(s.resp)
+ return
+ }
+ default:
+ pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent))
+ closeResponse(s.resp)
+ return
+ }
+ case statsEvent:
+ switch c {
+ case xmlContent:
+ if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil {
+ pipeWriter.CloseWithError(err)
+ closeResponse(s.resp)
+ return
+ }
+ default:
+ pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent))
+ closeResponse(s.resp)
+ return
+ }
+ }
+ }
+
+ // Ensures that the full message's CRC is correct and
+ // that the message is not corrupted
+ if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil {
+ pipeWriter.CloseWithError(err)
+ closeResponse(s.resp)
+ return
+ }
+
+ }
+ }()
+}
+
+// PayloadLen is a function that calculates the length of the payload.
+func (p preludeInfo) PayloadLen() int64 {
+ return int64(p.totalLen - p.headerLen - 16)
+}
+
+// processPrelude is the function that reads the 12 bytes of the prelude and
+// ensures the CRC is correct while also extracting relevant information into
+// the struct.
+func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
+ var err error
+ pInfo := preludeInfo{}
+
+ // reads total length of the message (first 4 bytes)
+ pInfo.totalLen, err = extractUint32(prelude)
+ if err != nil {
+ return pInfo, err
+ }
+
+ // reads total header length of the message (2nd 4 bytes)
+ pInfo.headerLen, err = extractUint32(prelude)
+ if err != nil {
+ return pInfo, err
+ }
+
+ // checks that the CRC is correct (3rd 4 bytes)
+ preCRC := crc.Sum32()
+ if err := checkCRC(prelude, preCRC); err != nil {
+ return pInfo, err
+ }
+
+ return pInfo, nil
+}
+
+// extracts the relevant information from the Headers.
+func extractHeader(body io.Reader, myHeaders http.Header) error {
+ for {
+ // extracts the first part of the header,
+ headerTypeName, err := extractHeaderType(body)
+ if err != nil {
+ // Since end of file, we have read all of our headers
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+		// reads the header value type byte (always 7, i.e. a string value) and ignores it.
+ extractUint8(body)
+
+ headerValueName, err := extractHeaderValue(body)
+ if err != nil {
+ return err
+ }
+
+ myHeaders.Set(headerTypeName, headerValueName)
+
+ }
+ return nil
+}
+
+// extractHeaderType extracts the first half of the header message, the header type.
+func extractHeaderType(body io.Reader) (string, error) {
+	// extracts the 1-byte length of the header name
+ headerNameLen, err := extractUint8(body)
+ if err != nil {
+ return "", err
+ }
+ // extracts the string with the appropriate number of bytes
+ headerName, err := extractString(body, int(headerNameLen))
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimPrefix(headerName, ":"), nil
+}
+
+// extractHeaderValue extracts the second half of the header message, the
+// header value
+func extractHeaderValue(body io.Reader) (string, error) {
+ bodyLen, err := extractUint16(body)
+ if err != nil {
+ return "", err
+ }
+ bodyName, err := extractString(body, int(bodyLen))
+ if err != nil {
+ return "", err
+ }
+ return bodyName, nil
+}
+
+// extractString reads exactly lenBytes bytes from the reader and returns them as a string.
+func extractString(source io.Reader, lenBytes int) (string, error) {
+	myVal := make([]byte, lenBytes)
+	_, err := readFull(source, myVal)
+ if err != nil {
+ return "", err
+ }
+ return string(myVal), nil
+}
+
+// extractUint32 extracts a 4 byte integer from the byte array.
+func extractUint32(r io.Reader) (uint32, error) {
+ buf := make([]byte, 4)
+ _, err := readFull(r, buf)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint32(buf), nil
+}
+
+// extractUint16 extracts a 2 byte integer from the byte array.
+func extractUint16(r io.Reader) (uint16, error) {
+ buf := make([]byte, 2)
+ _, err := readFull(r, buf)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint16(buf), nil
+}
+
+// extractUint8 extracts a 1 byte integer from the byte array.
+func extractUint8(r io.Reader) (uint8, error) {
+ buf := make([]byte, 1)
+ _, err := readFull(r, buf)
+ if err != nil {
+ return 0, err
+ }
+ return buf[0], nil
+}
+
+// checkCRC ensures that the CRC matches with the one from the reader.
+func checkCRC(r io.Reader, expect uint32) error {
+ msgCRC, err := extractUint32(r)
+ if err != nil {
+ return err
+ }
+
+ if msgCRC != expect {
+ return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go
new file mode 100644
index 000000000..11455beb3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-stat.go
@@ -0,0 +1,124 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to
+// control cancellations and timeouts.
+func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return false, err
+ }
+
+ // Execute HEAD on bucketName.
+ resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ if ToErrorResponse(err).Code == "NoSuchBucket" {
+ return false, nil
+ }
+ return false, err
+ }
+ if resp != nil {
+ resperr := httpRespToErrorResponse(resp, bucketName, "")
+ if ToErrorResponse(resperr).Code == "NoSuchBucket" {
+ return false, nil
+ }
+ if resp.StatusCode != http.StatusOK {
+ return false, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return true, nil
+}
+
+// StatObject verifies if an object exists, whether you have permission to access
+// it, and returns information about the object.
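+//
+// A minimal usage sketch (illustrative only; assumes an initialized *minio.Client
+// named client):
+//
+//	info, err := client.StatObject(context.Background(), "my-bucket", "my-object", minio.StatObjectOptions{})
+//	if err == nil {
+//		fmt.Println(info.Key, info.Size, info.ETag, info.LastModified)
+//	}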
+func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidBucketName",
+ Message: err.Error(),
+ }
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "XMinioInvalidObjectName",
+ Message: err.Error(),
+ }
+ }
+ headers := opts.Header()
+ if opts.Internal.ReplicationDeleteMarker {
+ headers.Set(minIOBucketReplicationDeleteMarker, "true")
+ }
+ if opts.Internal.IsReplicationReadyForDeleteMarker {
+ headers.Set(isMinioTgtReplicationReady, "true")
+ }
+
+ // Execute HEAD on objectName.
+ resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: opts.toQueryValues(),
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ if resp != nil {
+ deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
+ replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true"
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+ if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "MethodNotAllowed",
+ Message: "The specified method is not allowed against this resource.",
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ return ObjectInfo{
+ VersionID: resp.Header.Get(amzVersionID),
+ IsDeleteMarker: deleteMarker,
+ }, errResp
+ }
+ return ObjectInfo{
+ VersionID: resp.Header.Get(amzVersionID),
+ IsDeleteMarker: deleteMarker,
+ ReplicationReady: replicationReady, // whether delete marker can be replicated
+ }, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ return ToObjectInfo(bucketName, objectName, resp.Header)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
new file mode 100644
index 000000000..1d6b66502
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -0,0 +1,1010 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2024 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/http/cookiejar"
+ "net/http/httptrace"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ md5simd "github.com/minio/md5-simd"
+ "github.com/minio/minio-go/v7/pkg/credentials"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/signer"
+ "golang.org/x/net/publicsuffix"
+)
+
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+ // Standard options.
+
+ // Parsed endpoint url provided by the user.
+ endpointURL *url.URL
+
+ // Holds various credential providers.
+ credsProvider *credentials.Credentials
+
+ // Custom signerType value overrides all credentials.
+ overrideSignerType credentials.SignatureType
+
+ // User supplied.
+ appInfo struct {
+ appName string
+ appVersion string
+ }
+
+ // Indicate whether we are using https or not
+ secure bool
+
+ // Needs allocation.
+ httpClient *http.Client
+ httpTrace *httptrace.ClientTrace
+ bucketLocCache *bucketLocationCache
+
+ // Advanced functionality.
+ isTraceEnabled bool
+ traceErrorsOnly bool
+ traceOutput io.Writer
+
+ // S3 specific accelerated endpoint.
+ s3AccelerateEndpoint string
+ // S3 dual-stack endpoints are enabled by default.
+ s3DualstackEnabled bool
+
+ // Region endpoint
+ region string
+
+ // Random seed.
+ random *rand.Rand
+
+ // lookup indicates type of url lookup supported by server. If not specified,
+ // default to Auto.
+ lookup BucketLookupType
+
+ // Factory for MD5 hash functions.
+ md5Hasher func() md5simd.Hasher
+ sha256Hasher func() md5simd.Hasher
+
+ healthStatus int32
+
+ trailingHeaderSupport bool
+}
+
+// Options for New method
+type Options struct {
+ Creds *credentials.Credentials
+ Secure bool
+ Transport http.RoundTripper
+ Trace *httptrace.ClientTrace
+ Region string
+ BucketLookup BucketLookupType
+
+	// Allows setting a custom region lookup based on URL pattern. Not all
+	// URL patterns are covered by this library, so if you have custom
+	// endpoints with many regions you can use this function to perform
+	// region lookups appropriately.
+ CustomRegionViaURL func(u url.URL) string
+
+ // TrailingHeaders indicates server support of trailing headers.
+ // Only supported for v4 signatures.
+ TrailingHeaders bool
+
+ // Custom hash routines. Leave nil to use standard.
+ CustomMD5 func() md5simd.Hasher
+ CustomSHA256 func() md5simd.Hasher
+}
+
+// Global constants.
+const (
+ libraryName = "minio-go"
+ libraryVersion = "v7.0.77"
+)
+
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+// MinIO (OS; ARCH) LIB/VER APP/VER
+const (
+ libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+ libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// BucketLookupType is type of url lookup supported by server.
+type BucketLookupType int
+
+// Different types of URL lookup supported by the server. Initialized to BucketLookupAuto.
+const (
+ BucketLookupAuto BucketLookupType = iota
+ BucketLookupDNS
+ BucketLookupPath
+)
+
+// New - instantiate minio client with options
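+//
+// A minimal usage sketch (illustrative only; the endpoint and credentials are
+// placeholders):
+//
+//	client, err := minio.New("play.min.io", &minio.Options{
+//		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
+//		Secure: true,
+//	})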
+func New(endpoint string, opts *Options) (*Client, error) {
+ if opts == nil {
+ return nil, errors.New("no options provided")
+ }
+ clnt, err := privateNew(endpoint, opts)
+ if err != nil {
+ return nil, err
+ }
+ if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
+ // If Amazon S3 set to signature v4.
+ clnt.overrideSignerType = credentials.SignatureV4
+ // Amazon S3 endpoints are resolved into dual-stack endpoints by default
+ // for backwards compatibility.
+ clnt.s3DualstackEnabled = true
+ }
+
+ return clnt, nil
+}
+
+// EndpointURL returns the URL of the S3 endpoint.
+func (c *Client) EndpointURL() *url.URL {
+ endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
+ return &endpoint
+}
+
+// lockedRandSource provides protected rand source, implements rand.Source interface.
+type lockedRandSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *lockedRandSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+func privateNew(endpoint string, opts *Options) (*Client, error) {
+ // construct endpoint.
+ endpointURL, err := getEndpointURL(endpoint, opts.Secure)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize cookies to preserve server sent cookies if any and replay
+ // them upon each request.
+ jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ if err != nil {
+ return nil, err
+ }
+
+ // instantiate new Client.
+ clnt := new(Client)
+
+ // Save the credentials.
+ clnt.credsProvider = opts.Creds
+
+ // Remember whether we are using https or not
+ clnt.secure = opts.Secure
+
+ // Save endpoint URL, user agent for future uses.
+ clnt.endpointURL = endpointURL
+
+ transport := opts.Transport
+ if transport == nil {
+ transport, err = DefaultTransport(opts.Secure)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ clnt.httpTrace = opts.Trace
+
+ // Instantiate http client and bucket location cache.
+ clnt.httpClient = &http.Client{
+ Jar: jar,
+ Transport: transport,
+ CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+
+	// Sets custom region. If region is empty, the bucket location cache is used automatically.
+ if opts.Region == "" {
+ if opts.CustomRegionViaURL != nil {
+ opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL)
+ } else {
+ opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
+ }
+ }
+ clnt.region = opts.Region
+
+ // Instantiate bucket location cache.
+ clnt.bucketLocCache = newBucketLocationCache()
+
+ // Introduce a new locked random seed.
+ clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
+
+ // Add default md5 hasher.
+ clnt.md5Hasher = opts.CustomMD5
+ clnt.sha256Hasher = opts.CustomSHA256
+ if clnt.md5Hasher == nil {
+ clnt.md5Hasher = newMd5Hasher
+ }
+ if clnt.sha256Hasher == nil {
+ clnt.sha256Hasher = newSHA256Hasher
+ }
+
+ clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4()
+
+ // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
+ // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
+ clnt.lookup = opts.BucketLookup
+
+ // healthcheck is not initialized
+ clnt.healthStatus = unknown
+
+ // Return.
+ return clnt, nil
+}
+
+// SetAppInfo - add application details to user agent.
+func (c *Client) SetAppInfo(appName, appVersion string) {
+	// If the app name and version are not set, we do not set a new user agent.
+ if appName != "" && appVersion != "" {
+ c.appInfo.appName = appName
+ c.appInfo.appVersion = appVersion
+ }
+}
+
+// TraceOn - enable HTTP tracing.
+func (c *Client) TraceOn(outputStream io.Writer) {
+ // if outputStream is nil then default to os.Stdout.
+ if outputStream == nil {
+ outputStream = os.Stdout
+ }
+ // Sets a new output stream.
+ c.traceOutput = outputStream
+
+ // Enable tracing.
+ c.isTraceEnabled = true
+}
+
+// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced.
+func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) {
+ c.TraceOn(outputStream)
+ c.traceErrorsOnly = true
+}
+
+// TraceErrorsOnlyOff - Turns off errors-only tracing; everything will be traced after this call.
+// If all tracing needs to be turned off, call TraceOff().
+func (c *Client) TraceErrorsOnlyOff() {
+ c.traceErrorsOnly = false
+}
+
+// TraceOff - disable HTTP tracing.
+func (c *Client) TraceOff() {
+ // Disable tracing.
+ c.isTraceEnabled = false
+ c.traceErrorsOnly = false
+}
+
+// SetS3TransferAccelerate - turns the S3 accelerated endpoint on or off for all your
+// requests. This feature is specific to S3; for all other endpoints this
+// function does nothing. To read further details on S3 transfer acceleration
+// please visit -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ c.s3AccelerateEndpoint = accelerateEndpoint
+ }
+}
+
+// SetS3EnableDualstack turns s3 dual-stack endpoints on or off for all requests.
+// The feature is only specific to S3 and is on by default. To read more about
+// Amazon S3 dual-stack endpoints visit -
+// https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html
+func (c *Client) SetS3EnableDualstack(enabled bool) {
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ c.s3DualstackEnabled = enabled
+ }
+}
+
+// Hash materials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For signature v4 request if the connection is insecure compute only sha256.
+// - For signature v4 request if the connection is secure compute only md5.
+// - For anonymous request compute md5.
+func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
+ hashSums = make(map[string][]byte)
+ hashAlgos = make(map[string]md5simd.Hasher)
+ if c.overrideSignerType.IsV4() {
+ if c.secure {
+ hashAlgos["md5"] = c.md5Hasher()
+ } else {
+ if isSha256Requested {
+ hashAlgos["sha256"] = c.sha256Hasher()
+ }
+ }
+ } else {
+ if c.overrideSignerType.IsAnonymous() {
+ hashAlgos["md5"] = c.md5Hasher()
+ }
+ }
+ if isMd5Requested {
+ hashAlgos["md5"] = c.md5Hasher()
+ }
+ return hashAlgos, hashSums
+}
+
+const (
+ unknown = -1
+ offline = 0
+ online = 1
+)
+
+// IsOnline returns true if healthcheck is enabled and the client is online.
+// If the HealthCheck function has not been called, this always returns true.
+func (c *Client) IsOnline() bool {
+ return !c.IsOffline()
+}
+
+// markOffline sets healthStatus to offline if it is currently online.
+func (c *Client) markOffline() {
+ atomic.CompareAndSwapInt32(&c.healthStatus, online, offline)
+}
+
+// IsOffline returns true if healthcheck is enabled and the client is offline.
+// If the HealthCheck function has not been called, this always returns false.
+func (c *Client) IsOffline() bool {
+ return atomic.LoadInt32(&c.healthStatus) == offline
+}
+
+// HealthCheck starts a healthcheck to see if endpoint is up.
+// Returns a context cancellation function, to stop the health check,
+// and an error if health check is already started.
+func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
+ if atomic.LoadInt32(&c.healthStatus) != unknown {
+ return nil, fmt.Errorf("health check is running")
+ }
+ if hcDuration < 1*time.Second {
+ return nil, fmt.Errorf("health check duration should be at least 1 second")
+ }
+ probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
+ ctx, cancelFn := context.WithCancel(context.Background())
+ atomic.StoreInt32(&c.healthStatus, offline)
+ {
+ // Change to online, if we can connect.
+ gctx, gcancel := context.WithTimeout(ctx, 3*time.Second)
+ _, err := c.getBucketLocation(gctx, probeBucketName)
+ gcancel()
+ if !IsNetworkOrHostDown(err, false) {
+ switch ToErrorResponse(err).Code {
+ case "NoSuchBucket", "AccessDenied", "":
+ atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
+ }
+ }
+ }
+
+ go func(duration time.Duration) {
+ timer := time.NewTimer(duration)
+ defer timer.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ atomic.StoreInt32(&c.healthStatus, unknown)
+ return
+ case <-timer.C:
+ // Do health check the first time and ONLY if the connection is marked offline
+ if c.IsOffline() {
+ gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second)
+ _, err := c.getBucketLocation(gctx, probeBucketName)
+ gcancel()
+ if !IsNetworkOrHostDown(err, false) {
+ switch ToErrorResponse(err).Code {
+ case "NoSuchBucket", "AccessDenied", "":
+ atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
+ }
+ }
+ }
+
+ timer.Reset(duration)
+ }
+ }
+ }(hcDuration)
+ return cancelFn, nil
+}
+
+// requestMetadata - is container for all the values to make a request.
+type requestMetadata struct {
+ // If set newRequest presigns the URL.
+ presignURL bool
+
+ // User supplied.
+ bucketName string
+ objectName string
+ queryValues url.Values
+ customHeader http.Header
+ extraPresignHeader http.Header
+ expires int64
+
+ // Generated by our internal code.
+ bucketLocation string
+ contentBody io.Reader
+ contentLength int64
+ contentMD5Base64 string // carries base64 encoded md5sum
+ contentSHA256Hex string // carries hex encoded sha256sum
+ streamSha256 bool
+ addCrc *ChecksumType
+ trailer http.Header // (http.Request).Trailer. Requires v4 signature.
+}
+
+// dumpHTTP - dump HTTP request and response.
+func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error {
+ // Starts http dump.
+ _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
+ if err != nil {
+ return err
+ }
+
+ // Filter out Signature field from Authorization header.
+ origAuth := req.Header.Get("Authorization")
+ if origAuth != "" {
+ req.Header.Set("Authorization", redactSignature(origAuth))
+ }
+
+ // Only display request header.
+ reqTrace, err := httputil.DumpRequestOut(req, false)
+ if err != nil {
+ return err
+ }
+
+ // Write request to trace output.
+ _, err = fmt.Fprint(c.traceOutput, string(reqTrace))
+ if err != nil {
+ return err
+ }
+
+ // Only display response header.
+ var respTrace []byte
+
+ // For errors we make sure to dump response body as well.
+ if resp.StatusCode != http.StatusOK &&
+ resp.StatusCode != http.StatusPartialContent &&
+ resp.StatusCode != http.StatusNoContent {
+ respTrace, err = httputil.DumpResponse(resp, true)
+ if err != nil {
+ return err
+ }
+ } else {
+ respTrace, err = httputil.DumpResponse(resp, false)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Write response to trace output.
+ _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
+ if err != nil {
+ return err
+ }
+
+ // Ends the http dump.
+ _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
+ if err != nil {
+ return err
+ }
+
+ // Returns success.
+ return nil
+}
+
+// do - execute http request.
+func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
+ defer func() {
+ if IsNetworkOrHostDown(err, false) {
+ c.markOffline()
+ }
+ }()
+
+ resp, err = c.httpClient.Do(req)
+ if err != nil {
+ // Handle this specifically for now until future Golang versions fix this issue properly.
+ if urlErr, ok := err.(*url.Error); ok {
+ if strings.Contains(urlErr.Err.Error(), "EOF") {
+ return nil, &url.Error{
+ Op: urlErr.Op,
+ URL: urlErr.URL,
+ Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
+ }
+ }
+ }
+ return nil, err
+ }
+
+	// Response should never be nil; report an error if that is the case.
+ if resp == nil {
+ msg := "Response is empty. " + reportIssue
+ return nil, errInvalidArgument(msg)
+ }
+
+ // If trace is enabled, dump http request and response,
+	// except when traceErrorsOnly is enabled and the response's status code is OK.
+ if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
+ err = c.dumpHTTP(req, resp)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return resp, nil
+}
+
+// List of success status.
+var successStatus = []int{
+ http.StatusOK,
+ http.StatusNoContent,
+ http.StatusPartialContent,
+}
+
+// executeMethod - instantiates a given method, and retries the
+// request upon any error, up to maxRetries attempts, waiting between
+// attempts according to a standard backoff algorithm with jitter.
+func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
+ if c.IsOffline() {
+ return nil, errors.New(c.endpointURL.String() + " is offline.")
+ }
+
+ var retryable bool // Indicates if request can be retried.
+ var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+ reqRetry := MaxRetry // Indicates how many times we can retry the request
+
+ if metadata.contentBody != nil {
+ // Check if body is seekable then it is retryable.
+ bodySeeker, retryable = metadata.contentBody.(io.Seeker)
+ switch bodySeeker {
+ case os.Stdin, os.Stdout, os.Stderr:
+ retryable = false
+ }
+ // Retry only when reader is seekable
+ if !retryable {
+ reqRetry = 1
+ }
+
+ // Figure out if the body can be closed - if yes
+ // we will definitely close it upon the function
+ // return.
+ bodyCloser, ok := metadata.contentBody.(io.Closer)
+ if ok {
+ defer bodyCloser.Close()
+ }
+ }
+
+ if metadata.addCrc != nil && metadata.contentLength > 0 {
+ if metadata.trailer == nil {
+ metadata.trailer = make(http.Header, 1)
+ }
+ crc := metadata.addCrc.Hasher()
+ metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) {
+ // Update trailer when done.
+ metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(hash))
+ })
+ metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
+ }
+
+ // Create cancel context to control 'newRetryTimer' go routine.
+ retryCtx, cancel := context.WithCancel(ctx)
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer cancel()
+
+ for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
+ // Retry executes the following function body if request has an
+ // error until maxRetries have been exhausted, retry attempts are
+ // performed after waiting for a given period of time in a
+ // binomial fashion.
+ if retryable {
+ // Seek back to beginning for each attempt.
+ if _, err = bodySeeker.Seek(0, 0); err != nil {
+ // If seek failed, no need to retry.
+ return nil, err
+ }
+ }
+
+ // Instantiate a new request.
+ var req *http.Request
+ req, err = c.newRequest(ctx, method, metadata)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if isS3CodeRetryable(errResponse.Code) {
+ continue // Retry.
+ }
+
+ return nil, err
+ }
+
+ // Initiate the request.
+ res, err = c.do(req)
+ if err != nil {
+ if isRequestErrorRetryable(ctx, err) {
+ // Retry the request
+ continue
+ }
+ return nil, err
+ }
+
+ // For any known successful http status, return quickly.
+ for _, httpStatus := range successStatus {
+ if httpStatus == res.StatusCode {
+ return res, nil
+ }
+ }
+
+ // Read the body to be saved later.
+ errBodyBytes, err := io.ReadAll(res.Body)
+ // res.Body should be closed
+ closeResponse(res)
+ if err != nil {
+ return nil, err
+ }
+
+ // Save the body.
+ errBodySeeker := bytes.NewReader(errBodyBytes)
+ res.Body = io.NopCloser(errBodySeeker)
+
+		// For errors, verify if it is retryable, otherwise fail quickly.
+ errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+
+ // Save the body back again.
+ errBodySeeker.Seek(0, 0) // Seek back to starting point.
+ res.Body = io.NopCloser(errBodySeeker)
+
+		// If the bucket region is set in the error response and the
+		// error code indicates an invalid region, we can retry the
+		// request with the new region.
+		//
+		// Additionally, we should only retry if bucketLocation and the
+		// custom region are empty.
+ if c.region == "" {
+ switch errResponse.Code {
+ case "AuthorizationHeaderMalformed":
+ fallthrough
+ case "InvalidRegion":
+ fallthrough
+ case "AccessDenied":
+ if errResponse.Region == "" {
+ // Region is empty we simply return the error.
+ return res, err
+ }
+ // Region is not empty figure out a way to
+ // handle this appropriately.
+ if metadata.bucketName != "" {
+ // Gather Cached location only if bucketName is present.
+ if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
+ } else {
+ // This is for ListBuckets() fallback.
+ if errResponse.Region != metadata.bucketLocation {
+ // Retry if the error response has a different region
+ // than the request we just made.
+ metadata.bucketLocation = errResponse.Region
+ continue // Retry
+ }
+ }
+ }
+ }
+
+ // Verify if error response code is retryable.
+ if isS3CodeRetryable(errResponse.Code) {
+ continue // Retry.
+ }
+
+ // Verify if http status code is retryable.
+ if isHTTPStatusRetryable(res.StatusCode) {
+ continue // Retry.
+ }
+
+ // For all other cases break out of the retry loop.
+ break
+ }
+
+ // Return an error when retry is canceled or deadlined
+ if e := retryCtx.Err(); e != nil {
+ return nil, e
+ }
+
+ return res, err
+}
+
+// newRequest - instantiate a new HTTP request for a given method.
+func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) {
+ // If no method is supplied default to 'POST'.
+ if method == "" {
+ method = http.MethodPost
+ }
+
+ location := metadata.bucketLocation
+ if location == "" {
+ if metadata.bucketName != "" {
+ // Gather location only if bucketName is present.
+ location, err = c.getBucketLocation(ctx, metadata.bucketName)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if location == "" {
+ location = getDefaultLocation(*c.endpointURL, c.region)
+ }
+ }
+
+	// Check if the target url supports virtual-host style.
+	// We explicitly disallow MakeBucket calls from using virtual DNS style,
+	// since the resolution may fail.
+ isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0)
+ isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket
+
+ // Construct a new target URL.
+ targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location,
+ isVirtualHost, metadata.queryValues)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.httpTrace != nil {
+ ctx = httptrace.WithClientTrace(ctx, c.httpTrace)
+ }
+
+ // Initialize a new HTTP request for the method.
+ req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+ // Custom signer set then override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
+ }
+
+ // Generate presign url if needed, return right here.
+ if metadata.expires != 0 && metadata.presignURL {
+ if signerType.IsAnonymous() {
+ return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
+ }
+ if metadata.extraPresignHeader != nil {
+ if signerType.IsV2() {
+ return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.")
+ }
+ for k, v := range metadata.extraPresignHeader {
+ req.Header.Set(k, v[0])
+ }
+ }
+ if signerType.IsV2() {
+ // Presign URL with signature v2.
+ req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
+ } else if signerType.IsV4() {
+ // Presign URL with signature v4.
+ req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
+ }
+ return req, nil
+ }
+
+ // Set 'User-Agent' header for the request.
+ c.setUserAgent(req)
+
+ // Set all headers.
+ for k, v := range metadata.customHeader {
+ req.Header.Set(k, v[0])
+ }
+
+ // Go net/http notoriously closes the request body.
+ // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
+ // This can cause underlying *os.File seekers to fail, avoid that
+ // by making sure to wrap the closer as a nop.
+ if metadata.contentLength == 0 {
+ req.Body = nil
+ } else {
+ req.Body = io.NopCloser(metadata.contentBody)
+ }
+
+ // Set incoming content-length.
+ req.ContentLength = metadata.contentLength
+ if req.ContentLength <= -1 {
+ // For unknown content length, we upload using transfer-encoding: chunked.
+ req.TransferEncoding = []string{"chunked"}
+ }
+
+ // set md5Sum for content protection.
+ if len(metadata.contentMD5Base64) > 0 {
+ req.Header.Set("Content-Md5", metadata.contentMD5Base64)
+ }
+
+ // For anonymous requests just return.
+ if signerType.IsAnonymous() {
+ return req, nil
+ }
+
+ switch {
+ case signerType.IsV2():
+ // Add signature version '2' authorization header.
+ req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
+ case metadata.streamSha256 && !c.secure:
+ if len(metadata.trailer) > 0 {
+ req.Trailer = metadata.trailer
+ }
+ // Streaming signature is used by default for a PUT object request.
+ // Additionally, we also look if the initialized client is secure,
+ // if yes then we don't need to perform streaming signature.
+ req = signer.StreamingSignV4(req, accessKeyID,
+ secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
+ default:
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ shaHeader := unsignedPayload
+ if metadata.contentSHA256Hex != "" {
+ shaHeader = metadata.contentSHA256Hex
+ if len(metadata.trailer) > 0 {
+ // Sanity check, we should not end up here if upstream is sane.
+ return nil, errors.New("internal error: contentSHA256Hex with trailer not supported")
+ }
+ } else if len(metadata.trailer) > 0 {
+ shaHeader = unsignedPayloadTrailer
+ }
+ req.Header.Set("X-Amz-Content-Sha256", shaHeader)
+
+ // Add signature version '4' authorization header.
+ req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer)
+ }
+
+ // Return request.
+ return req, nil
+}
+
+// set User agent.
+func (c *Client) setUserAgent(req *http.Request) {
+ req.Header.Set("User-Agent", libraryUserAgent)
+ if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
+ req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
+ }
+}
+
+// makeTargetURL make a new target url.
+func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
+ host := c.endpointURL.Host
+ // For Amazon S3 endpoint, try to fetch location based endpoint.
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ if c.s3AccelerateEndpoint != "" && bucketName != "" {
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+ // Disable transfer acceleration for non-compliant bucket names.
+ if strings.Contains(bucketName, ".") {
+ return nil, errTransferAccelerationBucket(bucketName)
+ }
+ // If transfer acceleration is requested set new host.
+ // For more details about enabling transfer acceleration read here.
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+ host = c.s3AccelerateEndpoint
+ } else {
+			// Do not change the host if the endpoint URL is a FIPS S3 endpoint or an S3 PrivateLink interface endpoint
+ if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) {
+ // Fetch new host based on the bucket location.
+ host = getS3Endpoint(bucketLocation, c.s3DualstackEnabled)
+ }
+ }
+ }
+
+ // Save scheme.
+ scheme := c.endpointURL.Scheme
+
+	// Strip ports 80 and 443 so we won't send these ports in the Host header.
+	// Browsers and curl automatically remove :80 and :443 from generated
+	// presigned URLs, which would otherwise cause a signature mismatch error.
+ if h, p, err := net.SplitHostPort(host); err == nil {
+ if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
+ host = h
+ if ip := net.ParseIP(h); ip != nil && ip.To4() == nil {
+ host = "[" + h + "]"
+ }
+ }
+ }
+
+ urlStr := scheme + "://" + host + "/"
+
+ // Make URL only if bucketName is available, otherwise use the
+ // endpoint URL.
+ if bucketName != "" {
+ // If endpoint supports virtual host style use that always.
+ // Currently only S3 and Google Cloud Storage would support
+ // virtual host style.
+ if isVirtualHostStyle {
+ urlStr = scheme + "://" + bucketName + "." + host + "/"
+ if objectName != "" {
+ urlStr += s3utils.EncodePath(objectName)
+ }
+ } else {
+ // If not fall back to using path style.
+ urlStr = urlStr + bucketName + "/"
+ if objectName != "" {
+ urlStr += s3utils.EncodePath(objectName)
+ }
+ }
+ }
+
+ // If there are any query values, add them to the end.
+ if len(queryValues) > 0 {
+ urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
+ }
+
+ return url.Parse(urlStr)
+}
+
+// returns true if virtual hosted style requests are to be used.
+func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
+ if bucketName == "" {
+ return false
+ }
+
+ if c.lookup == BucketLookupDNS {
+ return true
+ }
+ if c.lookup == BucketLookupPath {
+ return false
+ }
+
+ // default to virtual only for Amazon/Google storage. In all other cases use
+ // path style requests
+ return s3utils.IsVirtualHostSupported(url, bucketName)
+}
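For reviewers unfamiliar with the vendored client, here is a minimal sketch of how the entry points above fit together: construction through New, optional HTTP tracing, and the background health check. The endpoint and credentials below are placeholders, not values used by this change.

package main

import (
	"log"
	"os"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// New applies the Amazon-specific defaults (signature v4, dual-stack)
	// automatically when the endpoint is recognized as an Amazon S3 endpoint.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	// Dump request and response headers to stderr while debugging.
	client.TraceOn(os.Stderr)

	// Start the background health check; while the endpoint is unreachable,
	// executeMethod fails fast with an "is offline" error.
	stop, err := client.HealthCheck(5 * time.Second)
	if err != nil {
		log.Fatalln(err)
	}
	defer stop()

	log.Println("endpoint:", client.EndpointURL(), "online:", client.IsOnline())
}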
diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
new file mode 100644
index 000000000..b1d3b3852
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
@@ -0,0 +1,256 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "sync"
+
+ "github.com/minio/minio-go/v7/pkg/credentials"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/signer"
+)
+
+// bucketLocationCache - Provides simple mechanism to hold bucket
+// locations in memory.
+type bucketLocationCache struct {
+ // mutex is used for handling the concurrent
+ // read/write requests for cache.
+ sync.RWMutex
+
+ // items holds the cached bucket locations.
+ items map[string]string
+}
+
+// newBucketLocationCache - Provides a new bucket location cache to be
+// used internally with the client object.
+func newBucketLocationCache() *bucketLocationCache {
+ return &bucketLocationCache{
+ items: make(map[string]string),
+ }
+}
+
+// Get - Returns a value of a given key if it exists.
+func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
+ r.RLock()
+ defer r.RUnlock()
+ location, ok = r.items[bucketName]
+ return
+}
+
+// Set - Will persist a value into cache.
+func (r *bucketLocationCache) Set(bucketName, location string) {
+ r.Lock()
+ defer r.Unlock()
+ r.items[bucketName] = location
+}
+
+// Delete - Deletes a bucket name from cache.
+func (r *bucketLocationCache) Delete(bucketName string) {
+ r.Lock()
+ defer r.Unlock()
+ delete(r.items, bucketName)
+}
+
+// GetBucketLocation - get the location for the bucket name from the location cache;
+// if it is not cached, fetch it freshly by making a new request.
+func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ return c.getBucketLocation(ctx, bucketName)
+}
+
+// getBucketLocation - Get location for the bucketName from location map cache, if not
+// fetch freshly by making a new request.
+func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+
+ // Region set then no need to fetch bucket location.
+ if c.region != "" {
+ return c.region, nil
+ }
+
+ if location, ok := c.bucketLocCache.Get(bucketName); ok {
+ return location, nil
+ }
+
+ // Initialize a new request.
+ req, err := c.getBucketLocationRequest(ctx, bucketName)
+ if err != nil {
+ return "", err
+ }
+
+ // Initiate the request.
+ resp, err := c.do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+ location, err := processBucketLocationResponse(resp, bucketName)
+ if err != nil {
+ return "", err
+ }
+ c.bucketLocCache.Set(bucketName, location)
+ return location, nil
+}
+
+// processes the getBucketLocation http response from the server.
+func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ err = httpRespToErrorResponse(resp, bucketName, "")
+ errResp := ToErrorResponse(err)
+ // For access denied error, it could be an anonymous
+ // request. Move forward and let the top level callers
+ // succeed if possible based on their policy.
+ switch errResp.Code {
+ case "NotImplemented":
+ switch errResp.Server {
+ case "AmazonSnowball":
+ return "snowball", nil
+ case "cloudflare":
+ return "us-east-1", nil
+ }
+ case "AuthorizationHeaderMalformed":
+ fallthrough
+ case "InvalidRegion":
+ fallthrough
+ case "AccessDenied":
+ if errResp.Region == "" {
+ return "us-east-1", nil
+ }
+ return errResp.Region, nil
+ }
+ return "", err
+ }
+ }
+
+ // Extract location.
+ var locationConstraint string
+ err = xmlDecoder(resp.Body, &locationConstraint)
+ if err != nil {
+ return "", err
+ }
+
+ location := locationConstraint
+	// If location is empty, it will be 'us-east-1'.
+ if location == "" {
+ location = "us-east-1"
+ }
+
+ // Location can be 'EU' convert it to meaningful 'eu-west-1'.
+ if location == "EU" {
+ location = "eu-west-1"
+ }
+
+
+	// Return the location.
+ return location, nil
+}
+
+// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
+func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) {
+ // Set location query.
+ urlValues := make(url.Values)
+ urlValues.Set("location", "")
+
+ // Set get bucket location always as path style.
+ targetURL := *c.endpointURL
+
+	// Strip standard ports from the host, as done in the makeTargetURL method in api.go.
+ if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
+ if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
+ targetURL.Host = h
+ if ip := net.ParseIP(h); ip != nil && ip.To16() != nil {
+ targetURL.Host = "[" + h + "]"
+ }
+ }
+ }
+
+ isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName)
+
+ var urlStr string
+
+ if isVirtualStyle {
+ urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
+ } else {
+ targetURL.Path = path.Join(bucketName, "") + "/"
+ targetURL.RawQuery = urlValues.Encode()
+ urlStr = targetURL.String()
+ }
+
+ // Get a new HTTP request for the method.
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+ // Custom signer set then override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
+ }
+
+ if signerType.IsAnonymous() {
+ return req, nil
+ }
+
+ if signerType.IsV2() {
+ req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle)
+ return req, nil
+ }
+
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ contentSha256 := emptySHA256Hex
+ if c.secure {
+ contentSha256 = unsignedPayload
+ }
+
+ req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+ req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
+ return req, nil
+}
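A minimal sketch of the exported lookup defined above, assuming a client constructed as in the earlier example; the bucket name is a placeholder. The first call issues a GetBucketLocation request; later calls for the same bucket are served from bucketLocCache until a region-mismatch error in executeMethod refreshes the entry.

// printBucketRegion requires "context", "log" and "github.com/minio/minio-go/v7".
func printBucketRegion(ctx context.Context, client *minio.Client, bucket string) {
	region, err := client.GetBucketLocation(ctx, bucket)
	if err != nil {
		log.Println("location lookup failed:", err)
		return
	}
	// An empty LocationConstraint is normalized to "us-east-1" and "EU" to
	// "eu-west-1" by processBucketLocationResponse before it is cached.
	log.Println(bucket, "is in region", region)
}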
diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go
new file mode 100644
index 000000000..7eb1bf25a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/checksum.go
@@ -0,0 +1,223 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2023 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/base64"
+ "hash"
+ "hash/crc32"
+ "io"
+ "math/bits"
+ "net/http"
+)
+
+// ChecksumType contains information about the checksum type.
+type ChecksumType uint32
+
+const (
+
+ // ChecksumSHA256 indicates a SHA256 checksum.
+ ChecksumSHA256 ChecksumType = 1 << iota
+ // ChecksumSHA1 indicates a SHA-1 checksum.
+ ChecksumSHA1
+ // ChecksumCRC32 indicates a CRC32 checksum with IEEE table.
+ ChecksumCRC32
+ // ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
+ ChecksumCRC32C
+
+ // Keep after all valid checksums
+ checksumLast
+
+ // checksumMask is a mask for valid checksum types.
+ checksumMask = checksumLast - 1
+
+ // ChecksumNone indicates no checksum.
+ ChecksumNone ChecksumType = 0
+
+ amzChecksumAlgo = "x-amz-checksum-algorithm"
+ amzChecksumCRC32 = "x-amz-checksum-crc32"
+ amzChecksumCRC32C = "x-amz-checksum-crc32c"
+ amzChecksumSHA1 = "x-amz-checksum-sha1"
+ amzChecksumSHA256 = "x-amz-checksum-sha256"
+)
+
+// Is returns if c is all of t.
+func (c ChecksumType) Is(t ChecksumType) bool {
+ return c&t == t
+}
+
+// Key returns the header key.
+// returns empty string if invalid or none.
+func (c ChecksumType) Key() string {
+ switch c & checksumMask {
+ case ChecksumCRC32:
+ return amzChecksumCRC32
+ case ChecksumCRC32C:
+ return amzChecksumCRC32C
+ case ChecksumSHA1:
+ return amzChecksumSHA1
+ case ChecksumSHA256:
+ return amzChecksumSHA256
+ }
+ return ""
+}
+
+// KeyCapitalized returns the capitalized key as used in HTTP headers.
+func (c ChecksumType) KeyCapitalized() string {
+ return http.CanonicalHeaderKey(c.Key())
+}
+
+// RawByteLen returns the size of the un-encoded checksum.
+func (c ChecksumType) RawByteLen() int {
+ switch c & checksumMask {
+ case ChecksumCRC32, ChecksumCRC32C:
+ return 4
+ case ChecksumSHA1:
+ return sha1.Size
+ case ChecksumSHA256:
+ return sha256.Size
+ }
+ return 0
+}
+
+// Hasher returns a hasher corresponding to the checksum type.
+// Returns nil if no checksum.
+func (c ChecksumType) Hasher() hash.Hash {
+ switch c & checksumMask {
+ case ChecksumCRC32:
+ return crc32.NewIEEE()
+ case ChecksumCRC32C:
+ return crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ case ChecksumSHA1:
+ return sha1.New()
+ case ChecksumSHA256:
+ return sha256.New()
+ }
+ return nil
+}
+
+// IsSet returns whether the type is valid and known.
+func (c ChecksumType) IsSet() bool {
+ return bits.OnesCount32(uint32(c)) == 1
+}
+
+// SetDefault will set the checksum if not already set.
+func (c *ChecksumType) SetDefault(t ChecksumType) {
+ if !c.IsSet() {
+ *c = t
+ }
+}
+
+// String returns the type as a string.
+// CRC32, CRC32C, SHA1, and SHA256 for valid values.
+// Returns an empty string if unset or not a valid type.
+func (c ChecksumType) String() string {
+ switch c & checksumMask {
+ case ChecksumCRC32:
+ return "CRC32"
+ case ChecksumCRC32C:
+ return "CRC32C"
+ case ChecksumSHA1:
+ return "SHA1"
+ case ChecksumSHA256:
+ return "SHA256"
+ case ChecksumNone:
+ return ""
+ }
+ return ""
+}
+
+// ChecksumReader reads all of r and returns a checksum of type c.
+// Returns any error that may have occurred while reading.
+func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) {
+ h := c.Hasher()
+ if h == nil {
+ return Checksum{}, nil
+ }
+ _, err := io.Copy(h, r)
+ if err != nil {
+ return Checksum{}, err
+ }
+ return NewChecksum(c, h.Sum(nil)), nil
+}
+
+// ChecksumBytes returns a checksum of the content b with type c.
+func (c ChecksumType) ChecksumBytes(b []byte) Checksum {
+ h := c.Hasher()
+ if h == nil {
+ return Checksum{}
+ }
+ n, err := h.Write(b)
+ if err != nil || n != len(b) {
+ // Shouldn't happen with these checksummers.
+ return Checksum{}
+ }
+ return NewChecksum(c, h.Sum(nil))
+}
+
+// Checksum is a type and encoded value.
+type Checksum struct {
+ Type ChecksumType
+ r []byte
+}
+
+// NewChecksum sets the checksum to the value of b,
+// which is the raw hash output.
+// If the length of b does not match t.RawByteLen,
+// a checksum with ChecksumNone is returned.
+func NewChecksum(t ChecksumType, b []byte) Checksum {
+ if t.IsSet() && len(b) == t.RawByteLen() {
+ return Checksum{Type: t, r: b}
+ }
+ return Checksum{}
+}
+
+// NewChecksumString sets the checksum to the value of s,
+// which is the base 64 encoded raw hash output.
+// If the decoded length of s does not match t.RawByteLen, a checksum with ChecksumNone is returned.
+func NewChecksumString(t ChecksumType, s string) Checksum {
+ b, _ := base64.StdEncoding.DecodeString(s)
+ if t.IsSet() && len(b) == t.RawByteLen() {
+ return Checksum{Type: t, r: b}
+ }
+ return Checksum{}
+}
+
+// IsSet returns whether the checksum is valid and known.
+func (c Checksum) IsSet() bool {
+ return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen()
+}
+
+// Encoded returns the encoded value.
+// Returns the empty string if not set or valid.
+func (c Checksum) Encoded() string {
+ if !c.IsSet() {
+ return ""
+ }
+ return base64.StdEncoding.EncodeToString(c.r)
+}
+
+// Raw returns the raw checksum value if set.
+func (c Checksum) Raw() []byte {
+ if !c.IsSet() {
+ return nil
+ }
+ return c.r
+}
diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md
new file mode 100644
index 000000000..cb232c3c6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md
@@ -0,0 +1,80 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior, in compliance with the
+licensing terms applying to the Project developments.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful. However, these actions shall respect the
+licensing terms of the Project Developments that will always supersede such
+Code of Conduct.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at dev@min.io. The project team
+will review and investigate all complaints, and will respond in a way that it deems
+appropriate to the circumstances. The project team is obligated to maintain
+confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+This version includes a clarification to ensure that the code of conduct is in
+compliance with the free software licensing terms of the project.
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go
new file mode 100644
index 000000000..4099a37f9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/constants.go
@@ -0,0 +1,130 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// Multipart upload defaults.
+
+// absMinPartSize - absolute minimum part size (5 MiB) below which
+// a part in a multipart upload may not be uploaded.
+const absMinPartSize = 1024 * 1024 * 5
+
+// minPartSize - minimum part size 16MiB per object after which
+// putObject behaves internally as multipart.
+const minPartSize = 1024 * 1024 * 16
+
+// maxPartsCount - maximum number of parts for a single multipart session.
+const maxPartsCount = 10000
+
+// maxPartSize - maximum part size 5GiB for a single multipart upload
+// operation.
+const maxPartSize = 1024 * 1024 * 1024 * 5
+
+// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
+// operation.
+const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
+
+// maxMultipartPutObjectSize - maximum size 5TiB of object for
+// Multipart operation.
+const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload, but have a trailer.
+const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
+
+// Total number of parallel workers used for multipart operation.
+const totalWorkers = 4
+
+// Signature related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+)
+
+const (
+	// GetObjectAttributesTags are tags used to define the
+	// return values for the GetObjectAttributes API
+ GetObjectAttributesTags = "ETag,Checksum,StorageClass,ObjectSize,ObjectParts"
+	// GetObjectAttributesMaxParts defines the default maximum
+ // number of parts returned by GetObjectAttributes
+ GetObjectAttributesMaxParts = 1000
+)
+
+const (
+ // Response Headers
+
+ // ETag is a common response header
+ ETag = "ETag"
+
+ // Storage class header.
+ amzStorageClass = "X-Amz-Storage-Class"
+
+ // Website redirect location header
+ amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
+
+ // GetObjectAttributes headers
+ amzPartNumberMarker = "X-Amz-Part-Number-Marker"
+ amzExpectedBucketOnwer = "X-Amz-Expected-Bucket-Owner"
+ amzMaxParts = "X-Amz-Max-Parts"
+ amzObjectAttributes = "X-Amz-Object-Attributes"
+
+ // Object Tagging headers
+ amzTaggingHeader = "X-Amz-Tagging"
+ amzTaggingHeaderDirective = "X-Amz-Tagging-Directive"
+
+ amzVersionID = "X-Amz-Version-Id"
+ amzTaggingCount = "X-Amz-Tagging-Count"
+ amzExpiration = "X-Amz-Expiration"
+ amzRestore = "X-Amz-Restore"
+ amzReplicationStatus = "X-Amz-Replication-Status"
+ amzDeleteMarker = "X-Amz-Delete-Marker"
+
+ // Object legal hold header
+ amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold"
+
+ // Object retention header
+ amzLockMode = "X-Amz-Object-Lock-Mode"
+ amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date"
+ amzBypassGovernance = "X-Amz-Bypass-Governance-Retention"
+
+ // Replication status
+ amzBucketReplicationStatus = "X-Amz-Replication-Status"
+ // Minio specific Replication/lifecycle transition extension
+ minIOBucketSourceMTime = "X-Minio-Source-Mtime"
+
+ minIOBucketSourceETag = "X-Minio-Source-Etag"
+ minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
+ minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
+ minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request"
+ minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check"
+
+ // Header indicates last tag update time on source
+ minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
+ // Header indicates last retention update time on source
+ minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp"
+ // Header indicates last legalhold update time on source
+ minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp"
+ minIOForceDelete = "x-minio-force-delete"
+ // Header indicates delete marker replication request can be sent by source now.
+ minioTgtReplicationReady = "X-Minio-Replication-Ready"
+ // Header asks if delete marker replication request can be sent by source now.
+ isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready"
+)
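The multipart constants above interact: an upload can have at most 10000 parts, each between 5 MiB and 5 GiB, and putObject switches to multipart above the 16 MiB minPartSize. The helper below only illustrates that arithmetic; it is not code used by this change, and the client computes part sizes internally with its own logic.

// examplePartSize picks a part size that keeps the part count within
// maxPartsCount while never going below minPartSize. Illustration only.
func examplePartSize(objectSize int64) int64 {
	const (
		minPartSize   = 16 * 1024 * 1024 // 16 MiB
		maxPartsCount = 10000
	)
	partSize := int64(minPartSize)
	if objectSize/maxPartsCount > partSize {
		// Round up so that maxPartsCount parts always cover the object.
		partSize = (objectSize + maxPartsCount - 1) / maxPartsCount
	}
	return partSize
}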
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go
new file mode 100644
index 000000000..99b99db9b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/core.go
@@ -0,0 +1,151 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+)
+
+// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
+type Core struct {
+ *Client
+}
+
+// NewCore - Returns a new initialized Core client. This Core client should
+// only be used under special conditions, such as needing access to lower-level
+// primitives in order to write your own wrappers.
+func NewCore(endpoint string, opts *Options) (*Core, error) {
+ var s3Client Core
+ client, err := New(endpoint, opts)
+ if err != nil {
+ return nil, err
+ }
+ s3Client.Client = client
+ return &s3Client, nil
+}
+
+// ListObjects - List all the objects at a prefix; optionally, use marker and
+// delimiter to further filter the results.
+func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
+ return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil)
+}
+
+// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
+// continuationToken instead of marker to support iteration over the results.
+func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) {
+ return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil)
+}
+
+// CopyObject - copies an object from source object to destination object on server side.
+func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {
+ return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts)
+}
+
+// CopyObjectPart - creates a part in a multipart upload by copying (a
+// part of) an existing object.
+func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
+ partID int, startOffset, length int64, metadata map[string]string,
+) (p CompletePart, err error) {
+ return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
+ partID, startOffset, length, metadata)
+}
+
+// PutObject - Upload object. Uploads using single PUT call.
+func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) {
+ hookReader := newHook(data, opts.Progress)
+ return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts)
+}
+
+// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
+ result, err := c.initiateMultipartUpload(ctx, bucket, object, opts)
+ return result.UploadID, err
+}
+
+// ListMultipartUploads - List incomplete uploads.
+func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
+ return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
+}
+
+// PutObjectPartOptions contains options for PutObjectPart API
+type PutObjectPartOptions struct {
+ Md5Base64, Sha256Hex string
+ SSE encrypt.ServerSide
+ CustomHeader, Trailer http.Header
+ DisableContentSha256 bool
+}
+
+// PutObjectPart - Upload an object part.
+func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int,
+ data io.Reader, size int64, opts PutObjectPartOptions,
+) (ObjectPart, error) {
+ p := uploadPartParams{
+ bucketName: bucket,
+ objectName: object,
+ uploadID: uploadID,
+ reader: data,
+ partNumber: partID,
+ md5Base64: opts.Md5Base64,
+ sha256Hex: opts.Sha256Hex,
+ size: size,
+ sse: opts.SSE,
+ streamSha256: !opts.DisableContentSha256,
+ customHeader: opts.CustomHeader,
+ trailer: opts.Trailer,
+ }
+ return c.uploadPart(ctx, p)
+}
+
+// ListObjectParts - List uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListObjectPartsResult, err error) {
+ return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
+func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) {
+ res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
+ Parts: parts,
+ }, opts)
+ return res, err
+}
+
+// AbortMultipartUpload - Abort an incomplete upload.
+func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
+ return c.abortMultipartUpload(ctx, bucket, object, uploadID)
+}
+
+// GetBucketPolicy - fetches bucket access policy for a given bucket.
+func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) {
+ return c.getBucketPolicy(ctx, bucket)
+}
+
+// PutBucketPolicy - applies a new bucket access policy for a given bucket.
+func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error {
+ return c.putBucketPolicy(ctx, bucket, bucketPolicy)
+}
+
+// GetObject is a lower level API implemented to support reading
+// partial objects and also downloading objects with special conditions
+// matching etag, modtime etc.
+func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
+ return c.getObject(ctx, bucketName, objectName, opts)
+}
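A sketch of driving the low-level multipart calls exposed by Core; the bucket, object, and data are placeholders and error handling is reduced to the essentials.

// exampleMultipart requires "bytes", "context" and "github.com/minio/minio-go/v7".
func exampleMultipart(ctx context.Context, core *minio.Core, data []byte) error {
	bucket, object := "example-bucket", "example-object"

	uploadID, err := core.NewMultipartUpload(ctx, bucket, object, minio.PutObjectOptions{})
	if err != nil {
		return err
	}

	part, err := core.PutObjectPart(ctx, bucket, object, uploadID, 1,
		bytes.NewReader(data), int64(len(data)), minio.PutObjectPartOptions{})
	if err != nil {
		// Abort so the incomplete upload does not linger on the server.
		_ = core.AbortMultipartUpload(ctx, bucket, object, uploadID)
		return err
	}

	_, err = core.CompleteMultipartUpload(ctx, bucket, object, uploadID,
		[]minio.CompletePart{{PartNumber: 1, ETag: part.ETag}}, minio.PutObjectOptions{})
	return err
}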
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
new file mode 100644
index 000000000..780dc8997
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -0,0 +1,15046 @@
+//go:build mint
+// +build mint
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "archive/zip"
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ "log/slog"
+ "math/rand"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "github.com/google/uuid"
+
+ "github.com/minio/minio-go/v7"
+ "github.com/minio/minio-go/v7/pkg/cors"
+ "github.com/minio/minio-go/v7/pkg/credentials"
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/notification"
+ "github.com/minio/minio-go/v7/pkg/tags"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+ letterIdxBits = 6 // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+func baseLogger(testName, function string, args map[string]interface{}, startTime time.Time) *slog.Logger {
+ // calculate the test case duration
+ duration := time.Since(startTime)
+ // log with the fields as per mint
+ l := slog.With(
+ "name", "minio-go: "+testName,
+ "duration", duration.Nanoseconds()/1000000,
+ )
+ if function != "" {
+ l = l.With("function", function)
+ }
+ if len(args) > 0 {
+ l = l.With("args", args)
+ }
+ return l
+}
+
+// log successful test runs
+func logSuccess(testName, function string, args map[string]interface{}, startTime time.Time) {
+ baseLogger(testName, function, args, startTime).
+ With("status", "PASS").
+ Info("")
+}
+
+// A few features are not available in Gateway(s) currently. Check if the err value is NotImplemented,
+// and log as NA in that case and continue execution. Otherwise log as a failure and return.
+func logError(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) {
+	// If the server returns NotImplemented, we assume it is gateway mode and hence log it as info and move on to the next tests.
+	// Special case for the ComposeObject API, as it is implemented on the client side and adds specific error details like `Error in upload-part-copy` in
+	// addition to the NotImplemented error returned from the server.
+ if isErrNotImplemented(err) {
+ logIgnored(testName, function, args, startTime, message)
+ } else {
+ logFailure(testName, function, args, startTime, alert, message, err)
+ if !isRunOnFail() {
+ panic(err)
+ }
+ }
+}
+
+// Log failed test runs, do not call this directly, use logError instead, as that correctly stops the test run
+func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) {
+ l := baseLogger(testName, function, args, startTime).With(
+ "status", "FAIL",
+ "alert", alert,
+ "message", message,
+ )
+
+ if err != nil {
+ l = l.With("error", err)
+ }
+
+ l.Error("")
+}
+
+// log not applicable test runs
+func logIgnored(testName, function string, args map[string]interface{}, startTime time.Time, alert string) {
+ baseLogger(testName, function, args, startTime).
+ With(
+ "status", "NA",
+ "alert", strings.Split(alert, " ")[0]+" is NotImplemented",
+ ).Info("")
+}
+
+// Delete all objects in the given bucket recursively, then remove the bucket itself.
+func cleanupBucket(bucketName string, c *minio.Client) error {
+ // Iterate over all objects in the bucket via ListObjects and delete them one by one.
+ for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) {
+ if objCh.Err != nil {
+ return objCh.Err
+ }
+ if objCh.Key != "" {
+ err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{})
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) {
+ if objPartInfo.Err != nil {
+ return objPartInfo.Err
+ }
+ if objPartInfo.Key != "" {
+ err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ // All objects are deleted; now remove the bucket itself.
+ return c.RemoveBucket(context.Background(), bucketName)
+}
+
+// Delete all object versions and incomplete uploads in the given bucket, then remove the bucket itself.
+func cleanupVersionedBucket(bucketName string, c *minio.Client) error {
+ for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
+ if obj.Err != nil {
+ return obj.Err
+ }
+ if obj.Key != "" {
+ err := c.RemoveObject(context.Background(), bucketName, obj.Key,
+ minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true})
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) {
+ if objPartInfo.Err != nil {
+ return objPartInfo.Err
+ }
+ if objPartInfo.Key != "" {
+ err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ // All objects are deleted; now remove the bucket itself.
+ err := c.RemoveBucket(context.Background(), bucketName)
+ if err != nil {
+ for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
+ slog.Info("found object", "key", obj.Key, "version", obj.VersionID)
+ }
+ }
+ return err
+}
+
+func isErrNotImplemented(err error) bool {
+ return minio.ToErrorResponse(err).Code == "NotImplemented"
+}
+
+func isRunOnFail() bool {
+ return os.Getenv("RUN_ON_FAIL") == "1"
+}
+
+func init() {
+ // If server endpoint is not set, all tests default to
+ // using https://play.min.io
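+ // Note: these are the publicly documented demo credentials for the play.min.io sandbox.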
+ if os.Getenv(serverEndpoint) == "" {
+ os.Setenv(serverEndpoint, "play.min.io")
+ os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F")
+ os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG")
+ os.Setenv(enableHTTPS, "1")
+ }
+}
+
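+// mintDataDir optionally points to a directory of pre-generated test data files; when it is unset,
+// getDataReader generates random data of the required size on the fly.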
+var mintDataDir = os.Getenv("MINT_DATA_DIR")
+
+func getMintDataDirFilePath(filename string) (fp string) {
+ if mintDataDir == "" {
+ return
+ }
+ return filepath.Join(mintDataDir, filename)
+}
+
+func newRandomReader(seed, size int64) io.Reader {
+ return io.LimitReader(rand.New(rand.NewSource(seed)), size)
+}
+
+func mustCrcReader(r io.Reader) uint32 {
+ crc := crc32.NewIEEE()
+ _, err := io.Copy(crc, r)
+ if err != nil {
+ panic(err)
+ }
+ return crc.Sum32()
+}
+
+func crcMatches(r io.Reader, want uint32) error {
+ crc := crc32.NewIEEE()
+ _, err := io.Copy(crc, r)
+ if err != nil {
+ panic(err)
+ }
+ got := crc.Sum32()
+ if got != want {
+ return fmt.Errorf("crc mismatch, want %x, got %x", want, got)
+ }
+ return nil
+}
+
+func crcMatchesName(r io.Reader, name string) error {
+ want := dataFileCRC32[name]
+ crc := crc32.NewIEEE()
+ _, err := io.Copy(crc, r)
+ if err != nil {
+ panic(err)
+ }
+ got := crc.Sum32()
+ if got != want {
+ return fmt.Errorf("crc mismatch, want %x, got %x", want, got)
+ }
+ return nil
+}
+
+// getDataReader returns the contents of the named data file from MINT_DATA_DIR if it is set; otherwise it generates random data of the expected size.
+func getDataReader(fileName string) io.ReadCloser {
+ if mintDataDir == "" {
+ size := int64(dataFileMap[fileName])
+ if _, ok := dataFileCRC32[fileName]; !ok {
+ dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size))
+ }
+ return io.NopCloser(newRandomReader(size, size))
+ }
+ reader, _ := os.Open(getMintDataDirFilePath(fileName))
+ if _, ok := dataFileCRC32[fileName]; !ok {
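+ // Compute and cache the file's CRC once, then reopen the file so the caller receives an unread reader.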
+ dataFileCRC32[fileName] = mustCrcReader(reader)
+ reader.Close()
+ reader, _ = os.Open(getMintDataDirFilePath(fileName))
+ }
+ return reader
+}
+
+// randString generates random names and prepends them with a known prefix.
+func randString(n int, src rand.Source, prefix string) string {
+ b := make([]byte, n)
+ // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
+ for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+ if remain == 0 {
+ cache, remain = src.Int63(), letterIdxMax
+ }
+ if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+ b[i] = letterBytes[idx]
+ i--
+ }
+ cache >>= letterIdxBits
+ remain--
+ }
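+ // Trim the generated suffix so the full name (prefix plus random characters) is exactly 30 characters long.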
+ return prefix + string(b[0:30-len(prefix)])
+}
+
+var dataFileMap = map[string]int{
+ "datafile-0-b": 0,
+ "datafile-1-b": 1,
+ "datafile-1-kB": 1 * humanize.KiByte,
+ "datafile-10-kB": 10 * humanize.KiByte,
+ "datafile-33-kB": 33 * humanize.KiByte,
+ "datafile-100-kB": 100 * humanize.KiByte,
+ "datafile-1.03-MB": 1056 * humanize.KiByte,
+ "datafile-1-MB": 1 * humanize.MiByte,
+ "datafile-5-MB": 5 * humanize.MiByte,
+ "datafile-6-MB": 6 * humanize.MiByte,
+ "datafile-11-MB": 11 * humanize.MiByte,
+ "datafile-65-MB": 65 * humanize.MiByte,
+ "datafile-129-MB": 129 * humanize.MiByte,
+}
+
+var dataFileCRC32 = map[string]uint32{}
+
+func isFullMode() bool {
+ return os.Getenv("MINT_MODE") == "full"
+}
+
+func getFuncName() string {
+ return getFuncNameLoc(2)
+}
+
+func getFuncNameLoc(caller int) string {
+ pc, _, _, _ := runtime.Caller(caller)
+ return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.")
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketError() {
+ region := "eu-central-1"
+
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": region,
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ Transport: createHTTPTransport(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket in 'eu-central-1'.
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket Failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, c)
+
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil {
+ logError(testName, function, args, startTime, "", "MakeBucket did not fail for an existing bucket name", err)
+ return
+ }
+ // Verify valid error response from server.
+ if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+ minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+ logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testMetadataSizeLimit() {
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts.UserMetadata": "",
+ }
+ rand.Seed(startTime.Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ Transport: createHTTPTransport(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
+ return
+ }
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ const HeaderSizeLimit = 8 * 1024
+ const UserMetadataLimit = 2 * 1024
+
+ // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail
+ metadata := make(map[string]string)
+ metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test")))
+ args["metadata"] = fmt.Sprint(metadata)
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil)
+ return
+ }
+
+ // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail
+ metadata = make(map[string]string)
+ metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test")))
+ args["metadata"] = fmt.Sprint(metadata)
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests various bucket supported formats.
+func testMakeBucketRegions() {
+ region := "eu-central-1"
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": region,
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket in 'eu-central-1'.
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ // Make a new bucket with '.' in its name, in 'us-west-2'. This
+ // request is internally staged into a path style instead of
+ // virtual host style.
+ region = "us-west-2"
+ args["region"] = region
+ if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with a large payload to trigger the multipart ReadAt upload path.
+func testPutObjectReadAt() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "objectContentType",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Object content type
+ objectContentType := "binary/octet-stream"
+ args["objectContentType"] = objectContentType
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Get Object failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat Object failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+ if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "Content types don't match", err)
+ return
+ }
+ if err := crcMatchesName(r, "datafile-129-MB"); err != nil {
+ logError(testName, function, args, startTime, "", "data CRC check failed", err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testListObjectVersions() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ListObjectVersions(bucketName, prefix, recursive)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "prefix": "",
+ "recursive": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ bufSize := dataFileMap["datafile-10-kB"]
+ reader := getDataReader("datafile-10-kB")
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ reader.Close()
+
+ bufSize = dataFileMap["datafile-1-b"]
+ reader = getDataReader("datafile-1-b")
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ reader.Close()
+
+ err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ var deleteMarkers, versions int
+
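+ // Walk all versions of the object: expect two regular versions plus one delete marker, with the delete marker as the latest entry.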
+ objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ if info.Key != objectName {
+ logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil)
+ return
+ }
+ if info.VersionID == "" {
+ logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil)
+ return
+ }
+ if info.IsDeleteMarker {
+ deleteMarkers++
+ if !info.IsLatest {
+ logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil)
+ return
+ }
+ } else {
+ versions++
+ }
+ }
+
+ if deleteMarkers != 1 {
+ logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil)
+ return
+ }
+
+ if versions != 2 {
+ logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
+ return
+ }
+
+ // Delete all objects and their versions, as well as the bucket itself
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testStatObjectWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "StatObject"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ bufSize := dataFileMap["datafile-10-kB"]
+ reader := getDataReader("datafile-10-kB")
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ reader.Close()
+
+ bufSize = dataFileMap["datafile-1-b"]
+ reader = getDataReader("datafile-1-b")
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ reader.Close()
+
+ objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+
+ var results []minio.ObjectInfo
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ results = append(results, info)
+ }
+
+ if len(results) != 2 {
+ logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
+ return
+ }
+
+ for i := 0; i < len(results); i++ {
+ opts := minio.StatObjectOptions{VersionID: results[i].VersionID}
+ statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "error during HEAD object", err)
+ return
+ }
+ if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err)
+ return
+ }
+ if statInfo.ETag != results[i].ETag {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
+ return
+ }
+ if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
+ return
+ }
+ if statInfo.Size != results[i].Size {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
+ return
+ }
+ }
+
+ // Delete all objects and their versions, as well as the bucket itself
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testGetObjectWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Save the contents of datafiles to check with GetObject() reader output later
+ var buffers [][]byte
+ testFiles := []string{"datafile-1-b", "datafile-10-kB"}
+
+ for _, testFile := range testFiles {
+ r := getDataReader(testFile)
+ buf, err := io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "unexpected failure", err)
+ return
+ }
+ r.Close()
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ buffers = append(buffers, buf)
+ }
+
+ objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+
+ var results []minio.ObjectInfo
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ results = append(results, info)
+ }
+
+ if len(results) != 2 {
+ logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
+ return
+ }
+
+ sort.SliceStable(results, func(i, j int) bool {
+ return results[i].Size < results[j].Size
+ })
+
+ sort.SliceStable(buffers, func(i, j int) bool {
+ return len(buffers[i]) < len(buffers[j])
+ })
+
+ for i := 0; i < len(results); i++ {
+ opts := minio.GetObjectOptions{VersionID: results[i].VersionID}
+ reader, err := c.GetObject(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "error during GET object", err)
+ return
+ }
+ statInfo, err := reader.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err)
+ return
+ }
+ if statInfo.ETag != results[i].ETag {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
+ return
+ }
+ if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
+ return
+ }
+ if statInfo.Size != results[i].Size {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
+ return
+ }
+
+ tmpBuffer := bytes.NewBuffer([]byte{})
+ _, err = io.Copy(tmpBuffer, reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
+ return
+ }
+
+ if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
+ logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
+ return
+ }
+ }
+
+ // Delete all objects and their versions, as well as the bucket itself
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testPutObjectWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ const n = 10
+ // Generate n small random buffers of increasing size to upload as separate versions of the same object.
+
+ // Save the data concurrently.
+ var wg sync.WaitGroup
+ wg.Add(n)
+ buffers := make([][]byte, n)
+ var errs [n]error
+ for i := 0; i < n; i++ {
+ r := newRandomReader(int64((1<<20)*i+i), int64(i))
+ buf, err := io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "unexpected failure", err)
+ return
+ }
+ buffers[i] = buf
+
+ go func(i int) {
+ defer wg.Done()
+ _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20})
+ }(i)
+ }
+ wg.Wait()
+ for _, err := range errs {
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ }
+
+ objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ var results []minio.ObjectInfo
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ results = append(results, info)
+ }
+
+ if len(results) != n {
+ logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
+ return
+ }
+
+ sort.Slice(results, func(i, j int) bool {
+ return results[i].Size < results[j].Size
+ })
+
+ sort.Slice(buffers, func(i, j int) bool {
+ return len(buffers[i]) < len(buffers[j])
+ })
+
+ for i := 0; i < len(results); i++ {
+ opts := minio.GetObjectOptions{VersionID: results[i].VersionID}
+ reader, err := c.GetObject(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "error during GET object", err)
+ return
+ }
+ statInfo, err := reader.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err)
+ return
+ }
+ if statInfo.ETag != results[i].ETag {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
+ return
+ }
+ if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
+ return
+ }
+ if statInfo.Size != results[i].Size {
+ logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
+ return
+ }
+
+ tmpBuffer := bytes.NewBuffer([]byte{})
+ _, err = io.Copy(tmpBuffer, reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
+ return
+ }
+
+ if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
+ logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
+ return
+ }
+ }
+
+ // Delete all objects and their versions, as well as the bucket itself
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testListMultipartUpload() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject()"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object.
+ opts := &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ }
+ c, err := minio.New(os.Getenv(serverEndpoint), opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ core, err := minio.NewCore(os.Getenv(serverEndpoint), opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO core client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ ctx := context.Background()
+ err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+ defer func() {
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ }
+ }()
+ objName := "prefix/objectName"
+
+ want := minio.ListMultipartUploadsResult{
+ Bucket: bucketName,
+ KeyMarker: "",
+ UploadIDMarker: "",
+ NextKeyMarker: "",
+ NextUploadIDMarker: "",
+ EncodingType: "url",
+ MaxUploads: 1000,
+ IsTruncated: false,
+ Prefix: "prefix/objectName",
+ Delimiter: "/",
+ CommonPrefixes: nil,
+ }
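+ // Create multipart uploads one at a time and, after every change, verify that the listing matches the expected result.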
+ for i := 0; i < 5; i++ {
+ uid, err := core.NewMultipartUpload(ctx, bucketName, objName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload failed", err)
+ return
+ }
+ want.Uploads = append(want.Uploads, minio.ObjectMultipartInfo{
+ Initiated: time.Time{},
+ StorageClass: "",
+ Key: objName,
+ Size: 0,
+ UploadID: uid,
+ Err: nil,
+ })
+
+ for j := 0; j < 5; j++ {
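+ // cmpGot zeroes the server-assigned Initiated timestamps before comparing the listing against the expected value.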
+ cmpGot := func(call string, got minio.ListMultipartUploadsResult) bool {
+ for i := range got.Uploads {
+ got.Uploads[i].Initiated = time.Time{}
+ }
+ if !reflect.DeepEqual(want, got) {
+ err := fmt.Errorf("want: %#v\ngot : %#v", want, got)
+ logError(testName, function, args, startTime, "", call+" failed", err)
+ }
+ return true
+ }
+ got, err := core.ListMultipartUploads(ctx, bucketName, objName, "", "", "/", 1000)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err)
+ return
+ }
+ if !cmpGot("ListMultipartUploads-prefix", got) {
+ return
+ }
+ got, err = core.ListMultipartUploads(ctx, bucketName, objName, objName, "", "/", 1000)
+ got.KeyMarker = ""
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err)
+ return
+ }
+ if !cmpGot("ListMultipartUploads-marker", got) {
+ return
+ }
+ }
+ if i > 2 {
+ err = core.AbortMultipartUpload(ctx, bucketName, objName, uid)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err)
+ return
+ }
+ want.Uploads = want.Uploads[:len(want.Uploads)-1]
+ }
+ }
+ for _, up := range want.Uploads {
+ err = core.AbortMultipartUpload(ctx, bucketName, objName, up.UploadID)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err)
+ return
+ }
+ }
+ logSuccess(testName, function, args, startTime)
+}
+
+func testCopyObjectWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ testFiles := []string{"datafile-1-b", "datafile-10-kB"}
+ for _, testFile := range testFiles {
+ r := getDataReader(testFile)
+ buf, err := io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "unexpected failure", err)
+ return
+ }
+ r.Close()
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ }
+
+ objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ var infos []minio.ObjectInfo
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ infos = append(infos, info)
+ }
+
+ sort.Slice(infos, func(i, j int) bool {
+ return infos[i].Size < infos[j].Size
+ })
+
+ reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err)
+ return
+ }
+
+ oldestContent, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
+ return
+ }
+
+ // Copy Source
+ srcOpts := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ VersionID: infos[0].VersionID,
+ }
+ args["src"] = srcOpts
+
+ dstOpts := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: objectName + "-copy",
+ }
+ args["dst"] = dstOpts
+
+ // Perform the Copy
+ if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer readerCopy.Close()
+
+ newestContent, err := io.ReadAll(readerCopy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
+ return
+ }
+
+ if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) {
+ logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
+ return
+ }
+
+ // Delete all objects and their versions, as well as the bucket itself
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testConcurrentCopyObjectWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ testFiles := []string{"datafile-10-kB"}
+ for _, testFile := range testFiles {
+ r := getDataReader(testFile)
+ buf, err := io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "unexpected failure", err)
+ return
+ }
+ r.Close()
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ }
+
+ objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ var infos []minio.ObjectInfo
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ infos = append(infos, info)
+ }
+
+ sort.Slice(infos, func(i, j int) bool {
+ return infos[i].Size < infos[j].Size
+ })
+
+ reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err)
+ return
+ }
+
+ oldestContent, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
+ return
+ }
+
+ // Copy Source
+ srcOpts := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ VersionID: infos[0].VersionID,
+ }
+ args["src"] = srcOpts
+
+ dstOpts := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: objectName + "-copy",
+ }
+ args["dst"] = dstOpts
+
+ // Perform the Copy concurrently
+ const n = 10
+ var wg sync.WaitGroup
+ wg.Add(n)
+ var errs [n]error
+ for i := 0; i < n; i++ {
+ go func(i int) {
+ defer wg.Done()
+ _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts)
+ }(i)
+ }
+ wg.Wait()
+ for _, err := range errs {
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+ }
+
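+ // Each successful CopyObject call creates a distinct version of the destination object; verify both the count and the content of every version.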
+ objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object})
+ infos = []minio.ObjectInfo{}
+ for info := range objectsInfo {
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer readerCopy.Close()
+
+ newestContent, err := io.ReadAll(readerCopy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
+ return
+ }
+
+ if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) {
+ logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
+ return
+ }
+ infos = append(infos, info)
+ }
+
+ if len(infos) != n {
+ logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
+ return
+ }
+
+ // Delete all objects and their versions, as well as the bucket itself
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testComposeObjectWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ testFiles := []string{"datafile-5-MB", "datafile-10-kB"}
+ var testFilesBytes [][]byte
+
+ for _, testFile := range testFiles {
+ r := getDataReader(testFile)
+ buf, err := io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "unexpected failure", err)
+ return
+ }
+ r.Close()
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ testFilesBytes = append(testFilesBytes, buf)
+ }
+
+ objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+
+ var results []minio.ObjectInfo
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ results = append(results, info)
+ }
+
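+ // Order the versions from largest to smallest so they line up with testFilesBytes for the expected concatenation.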
+ sort.SliceStable(results, func(i, j int) bool {
+ return results[i].Size > results[j].Size
+ })
+
+ // Source objects to concatenate: the two uploaded versions of the object, largest first.
+ src1 := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ VersionID: results[0].VersionID,
+ }
+
+ src2 := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ VersionID: results[1].VersionID,
+ }
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: objectName + "-copy",
+ }
+
+ _, err = c.ComposeObject(context.Background(), dst, src1, src2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err)
+ return
+ }
+ defer readerCopy.Close()
+
+ copyContentBytes, err := io.ReadAll(readerCopy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
+ return
+ }
+
+ var expectedContent []byte
+ for _, fileBytes := range testFilesBytes {
+ expectedContent = append(expectedContent, fileBytes...)
+ }
+
+ if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) {
+ logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
+ return
+ }
+
+ // Delete all objects and their versions, as well as the bucket itself
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testRemoveObjectWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "DeleteObject()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ var version minio.ObjectInfo
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ version = info
+ break
+ }
+
+ err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "DeleteObject failed", err)
+ return
+ }
+
+ objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ for range objectsInfo {
+ logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any", err)
+ return
+ }
+ // test delete marker version id is non-null
+ _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ // create delete marker
+ err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "DeleteObject failed", err)
+ return
+ }
+ objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ idx := 0
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ if idx == 0 {
+ if !info.IsDeleteMarker {
+ logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err)
+ return
+ }
+ if info.VersionID == "" {
+ logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err)
+ return
+ }
+ }
+ idx++
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testRemoveObjectsWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "DeleteObjects()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
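+ // Stream every listed object version into a channel that RemoveObjects consumes for bulk deletion.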
+ objectsVersions := make(chan minio.ObjectInfo)
+ go func() {
+ objectsVersionsInfo := c.ListObjects(context.Background(), bucketName,
+ minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ for info := range objectsVersionsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ return
+ }
+ objectsVersions <- info
+ }
+ close(objectsVersions)
+ }()
+
+ removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{})
+
+ for e := range removeErrors {
+ if e.Err != nil {
+ logError(testName, function, args, startTime, "", "Single delete operation failed", e.Err)
+ return
+ }
+ }
+
+ objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ for range objectsVersionsInfo {
+ logError(testName, function, args, startTime, "", "Unexpected version listed, bucket is expected to be empty", nil)
+ return
+ }
+
+ err = c.RemoveBucket(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testObjectTaggingWithVersioning() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "{Get,Set,Remove}ObjectTagging()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ err = c.EnableVersioning(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ for _, file := range []string{"datafile-1-b", "datafile-10-kB"} {
+ _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ }
+
+ versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+
+ var versions []minio.ObjectInfo
+ for info := range versionsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+ return
+ }
+ versions = append(versions, info)
+ }
+
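+ // Sort by size so versions[0] is the 1 B upload and versions[1] the 10 kB upload.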
+ sort.SliceStable(versions, func(i, j int) bool {
+ return versions[i].Size < versions[j].Size
+ })
+
+ tagsV1 := map[string]string{"key1": "val1"}
+ t1, err := tags.MapToObjectTags(tagsV1)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MapToObjectTags (1) failed", err)
+ return
+ }
+
+ err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err)
+ return
+ }
+
+ tagsV2 := map[string]string{"key2": "val2"}
+ t2, err := tags.MapToObjectTags(tagsV2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MapToObjectTags (2) failed", err)
+ return
+ }
+
+ err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err)
+ return
+ }
+
+ tagsEqual := func(tags1, tags2 map[string]string) bool {
+ for k1, v1 := range tags1 {
+ v2, found := tags2[k1]
+ if !found || v1 != v2 {
+ return false
+ }
+ }
+ return true
+ }
+
+ gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+ return
+ }
+
+ if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) {
+ logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err)
+ return
+ }
+
+ gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+ return
+ }
+
+ if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) {
+ logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err)
+ return
+ }
+
+ err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObjectTagging failed", err)
+ return
+ }
+
+ emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName,
+ minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+ return
+ }
+
+ if len(emptyTags.ToMap()) != 0 {
+ logError(testName, function, args, startTime, "", "Expected no tags after RemoveObjectTagging", nil)
+ return
+ }
+
+ // Delete all objects and their versions, as well as the bucket itself.
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with custom checksums.
+func testPutObjectWithChecksums() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+ tests := []struct {
+ cs minio.ChecksumType
+ }{
+ {cs: minio.ChecksumCRC32C},
+ {cs: minio.ChecksumCRC32},
+ {cs: minio.ChecksumSHA1},
+ {cs: minio.ChecksumSHA256},
+ }
+
+ for _, test := range tests {
+ bufSize := dataFileMap["datafile-10-kB"]
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ cmpChecksum := func(got, want string) {
+ if want != got {
+ logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+ return
+ }
+ }
+
+ meta := map[string]string{}
+ reader := getDataReader("datafile-10-kB")
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ h := test.cs.Hasher()
+ h.Reset()
+
+ // Test with Wrong CRC.
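+ // The checksum below is computed over zero bytes, so it cannot match the uploaded content and the server is expected to reject the upload.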
+ meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+ args["metadata"] = meta
+ args["range"] = "false"
+ args["checksum"] = test.cs.String()
+
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+ DisableMultipart: true,
+ UserMetadata: meta,
+ })
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObject did not fail on wrong CRC", err)
+ return
+ }
+
+ // Set correct CRC.
+ h.Write(b)
+ meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+ reader.Close()
+
+ resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+ DisableMultipart: true,
+ DisableContentSha256: true,
+ UserMetadata: meta,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ // Read the data back
+ gopts := minio.GetObjectOptions{Checksum: true}
+
+ r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+ return
+ }
+
+ args["range"] = "true"
+ err = gopts.SetRange(100, 1000)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetRange failed", err)
+ return
+ }
+ r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ b, err = io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ st, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ // Range requests should return empty checksums...
+ cmpChecksum(st.ChecksumSHA256, "")
+ cmpChecksum(st.ChecksumSHA1, "")
+ cmpChecksum(st.ChecksumCRC32, "")
+ cmpChecksum(st.ChecksumCRC32C, "")
+
+ delete(args, "range")
+ delete(args, "metadata")
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with custom checksums.
+func testPutObjectWithTrailingChecksums() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress, TrailChecksum: xxx}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ TrailingHeaders: true,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+ tests := []struct {
+ cs minio.ChecksumType
+ }{
+ {cs: minio.ChecksumCRC32C},
+ {cs: minio.ChecksumCRC32},
+ {cs: minio.ChecksumSHA1},
+ {cs: minio.ChecksumSHA256},
+ }
+
+ for _, test := range tests {
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ bufSize := dataFileMap["datafile-10-kB"]
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ cmpChecksum := func(got, want string) {
+ if want != got {
+ logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+ return
+ }
+ }
+
+ meta := map[string]string{}
+ reader := getDataReader("datafile-10-kB")
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ h := test.cs.Hasher()
+ h.Reset()
+
+ // Upload with a trailing checksum computed by the client during the request.
+ args["metadata"] = meta
+ args["range"] = "false"
+ args["checksum"] = test.cs.String()
+
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+ DisableMultipart: true,
+ DisableContentSha256: true,
+ UserMetadata: meta,
+ Checksum: test.cs,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ h.Write(b)
+ meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+ cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ // Read the data back
+ gopts := minio.GetObjectOptions{Checksum: true}
+
+ function = "GetObject(...)"
+ r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+ return
+ }
+
+ function = "GetObject( Range...)"
+ args["range"] = "true"
+ err = gopts.SetRange(100, 1000)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetRange failed", err)
+ return
+ }
+ r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ b, err = io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ st, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ // Range requests should return empty checksums...
+ cmpChecksum(st.ChecksumSHA256, "")
+ cmpChecksum(st.ChecksumSHA1, "")
+ cmpChecksum(st.ChecksumCRC32, "")
+ cmpChecksum(st.ChecksumCRC32C, "")
+
+ function = "GetObjectAttributes(...)"
+ s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+ return
+ }
+ cmpChecksum(s.Checksum.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(s.Checksum.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(s.Checksum.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(s.Checksum.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ delete(args, "range")
+ delete(args, "metadata")
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with custom checksums.
+func testPutMultipartObjectWithChecksums(trailing bool) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Progress: progress Checksum: %v}", trailing),
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ TrailingHeaders: trailing,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
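+ // hashMultiPart computes the expected composite multipart checksum: the checksum of all concatenated part checksums, suffixed with the number of parts.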
+ hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
+ r := bytes.NewReader(b)
+ tmp := make([]byte, partSize)
+ parts := 0
+ var all []byte
+ for {
+ n, err := io.ReadFull(r, tmp)
+ if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
+ logError(testName, function, args, startTime, "", "Calc crc failed", err)
+ }
+ if n == 0 {
+ break
+ }
+ parts++
+ hasher.Reset()
+ hasher.Write(tmp[:n])
+ all = append(all, hasher.Sum(nil)...)
+ if err != nil {
+ break
+ }
+ }
+ hasher.Reset()
+ hasher.Write(all)
+ return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
+ }
+ defer cleanupBucket(bucketName, c)
+ tests := []struct {
+ cs minio.ChecksumType
+ }{
+ {cs: minio.ChecksumCRC32C},
+ {cs: minio.ChecksumCRC32},
+ {cs: minio.ChecksumSHA1},
+ {cs: minio.ChecksumSHA256},
+ }
+
+ for _, test := range tests {
+ bufSize := dataFileMap["datafile-129-MB"]
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+ args["checksum"] = test.cs.String()
+
+ cmpChecksum := func(got, want string) {
+ if want != got {
+ logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+ //fmt.Printf("want %s, got %s\n", want, got)
+ return
+ }
+ }
+
+ const partSize = 10 << 20
+ reader := getDataReader("datafile-129-MB")
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ reader.Close()
+ h := test.cs.Hasher()
+ h.Reset()
+ want := hashMultiPart(b, partSize, test.cs.Hasher())
+
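+ // Without trailing headers, the reader is wrapped in io.NopCloser (hiding Seek) and AutoChecksum selects the checksum type; with trailing headers, the checksum is requested via Checksum and sent as a trailing header.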
+ var cs minio.ChecksumType
+ rd := io.Reader(io.NopCloser(bytes.NewReader(b)))
+ if trailing {
+ cs = test.cs
+ rd = bytes.NewReader(b)
+ }
+ // Set correct CRC.
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, rd, int64(bufSize), minio.PutObjectOptions{
+ DisableContentSha256: true,
+ DisableMultipart: false,
+ UserMetadata: nil,
+ PartSize: partSize,
+ AutoChecksum: test.cs,
+ Checksum: cs,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ switch test.cs {
+ case minio.ChecksumCRC32C:
+ cmpChecksum(resp.ChecksumCRC32C, want)
+ case minio.ChecksumCRC32:
+ cmpChecksum(resp.ChecksumCRC32, want)
+ case minio.ChecksumSHA1:
+ cmpChecksum(resp.ChecksumSHA1, want)
+ case minio.ChecksumSHA256:
+ cmpChecksum(resp.ChecksumSHA256, want)
+ }
+
+ s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+ return
+ }
+ want = want[:strings.IndexByte(want, '-')]
+ switch test.cs {
+ case minio.ChecksumCRC32C:
+ cmpChecksum(s.Checksum.ChecksumCRC32C, want)
+ case minio.ChecksumCRC32:
+ cmpChecksum(s.Checksum.ChecksumCRC32, want)
+ case minio.ChecksumSHA1:
+ cmpChecksum(s.Checksum.ChecksumSHA1, want)
+ case minio.ChecksumSHA256:
+ cmpChecksum(s.Checksum.ChecksumSHA256, want)
+ }
+
+ // Read the data back
+ gopts := minio.GetObjectOptions{Checksum: true}
+ gopts.PartNumber = 2
+
+ // We cannot use StatObject, since it ignores partnumber.
+ r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ io.Copy(io.Discard, r)
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ // Test part 2 checksum...
+ h.Reset()
+ h.Write(b[partSize : 2*partSize])
+ want = base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+ switch test.cs {
+ case minio.ChecksumCRC32C:
+ cmpChecksum(st.ChecksumCRC32C, want)
+ case minio.ChecksumCRC32:
+ cmpChecksum(st.ChecksumCRC32, want)
+ case minio.ChecksumSHA1:
+ cmpChecksum(st.ChecksumSHA1, want)
+ case minio.ChecksumSHA256:
+ cmpChecksum(st.ChecksumSHA256, want)
+ }
+
+ delete(args, "metadata")
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with trailing checksums.
+func testTrailingChecksums() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ TrailingHeaders: true,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
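+ // hashMultiPart computes the expected composite multipart checksum: the checksum of all concatenated part checksums, suffixed with the number of parts.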
+ hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
+ r := bytes.NewReader(b)
+ tmp := make([]byte, partSize)
+ parts := 0
+ var all []byte
+ for {
+ n, err := io.ReadFull(r, tmp)
+ if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
+ logError(testName, function, args, startTime, "", "Calc crc failed", err)
+ }
+ if n == 0 {
+ break
+ }
+ parts++
+ hasher.Reset()
+ hasher.Write(tmp[:n])
+ all = append(all, hasher.Sum(nil)...)
+ if err != nil {
+ break
+ }
+ }
+ hasher.Reset()
+ hasher.Write(all)
+ return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
+ }
+ defer cleanupBucket(bucketName, c)
+ tests := []struct {
+ header string
+ hasher hash.Hash
+
+ // Checksum values
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+ PO minio.PutObjectOptions
+ }{
+ // Currently there is no way to override the checksum type.
+ {
+ header: "x-amz-checksum-crc32c",
+ hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
+ ChecksumCRC32C: "set",
+ PO: minio.PutObjectOptions{
+ DisableContentSha256: true,
+ DisableMultipart: false,
+ UserMetadata: nil,
+ PartSize: 5 << 20,
+ },
+ },
+ {
+ header: "x-amz-checksum-crc32c",
+ hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
+ ChecksumCRC32C: "set",
+ PO: minio.PutObjectOptions{
+ DisableContentSha256: true,
+ DisableMultipart: false,
+ UserMetadata: nil,
+ PartSize: 6_645_654, // Rather arbitrary size
+ },
+ },
+ {
+ header: "x-amz-checksum-crc32c",
+ hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
+ ChecksumCRC32C: "set",
+ PO: minio.PutObjectOptions{
+ DisableContentSha256: false,
+ DisableMultipart: false,
+ UserMetadata: nil,
+ PartSize: 5 << 20,
+ },
+ },
+ {
+ header: "x-amz-checksum-crc32c",
+ hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
+ ChecksumCRC32C: "set",
+ PO: minio.PutObjectOptions{
+ DisableContentSha256: false,
+ DisableMultipart: false,
+ UserMetadata: nil,
+ PartSize: 6_645_654, // Rather arbitrary size
+ },
+ },
+ }
+
+ for _, test := range tests {
+ bufSize := dataFileMap["datafile-11-MB"]
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ cmpChecksum := func(got, want string) {
+ if want != got {
+ logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %q, got %q", want, got))
+ return
+ }
+ }
+
+ reader := getDataReader("datafile-11-MB")
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ reader.Close()
+ h := test.hasher
+ h.Reset()
+ test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher)
+
+ // Set correct CRC.
+ // c.TraceOn(os.Stderr)
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ // c.TraceOff()
+ cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
+ cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
+ cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
+ cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
+
+ // Read the data back
+ gopts := minio.GetObjectOptions{Checksum: true}
+ gopts.PartNumber = 2
+
+ // We cannot use StatObject, since it ignores partnumber.
+ r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ io.Copy(io.Discard, r)
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ // Test part 2 checksum...
+ h.Reset()
+ p2 := b[test.PO.PartSize:]
+ if len(p2) > int(test.PO.PartSize) {
+ p2 = p2[:test.PO.PartSize]
+ }
+ h.Write(p2)
+ got := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ if test.ChecksumSHA256 != "" {
+ cmpChecksum(st.ChecksumSHA256, got)
+ }
+ if test.ChecksumSHA1 != "" {
+ cmpChecksum(st.ChecksumSHA1, got)
+ }
+ if test.ChecksumCRC32 != "" {
+ cmpChecksum(st.ChecksumCRC32, got)
+ }
+ if test.ChecksumCRC32C != "" {
+ cmpChecksum(st.ChecksumCRC32C, got)
+ }
+
+ delete(args, "metadata")
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with custom checksums.
+func testPutObjectWithAutomaticChecksums() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ TrailingHeaders: true,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+ tests := []struct {
+ header string
+ hasher hash.Hash
+
+ // Checksum values
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+ }{
+ // The built-in default only adds CRC32C when neither MD5 nor SHA256 is sent.
+ {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+ // defer c.TraceOff()
+
+ for i, test := range tests {
+ bufSize := dataFileMap["datafile-10-kB"]
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ cmpChecksum := func(got, want string) {
+ if want != got {
+ logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+ return
+ }
+ }
+
+ meta := map[string]string{}
+ reader := getDataReader("datafile-10-kB")
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+
+ h := test.hasher
+ h.Reset()
+ h.Write(b)
+ meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+ args["metadata"] = meta
+
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+ DisableMultipart: true,
+ UserMetadata: nil,
+ DisableContentSha256: true,
+ SendContentMd5: false,
+ })
+ if err == nil {
+ if i == 0 && resp.ChecksumCRC32C == "" {
+ logIgnored(testName, function, args, startTime, "Checksums do not appear to be supported by the backend")
+ return
+ }
+ } else {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ // Usually this will be the same as above, since we skip automatic checksum when SHA256 content is sent.
+ // When/if we add a checksum control to PutObjectOptions this will make more sense.
+ resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+ DisableMultipart: true,
+ UserMetadata: nil,
+ DisableContentSha256: false,
+ SendContentMd5: false,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ // The checksum will not be enabled on HTTP, since it uses SHA256 blocks.
+ if mustParseBool(os.Getenv(enableHTTPS)) {
+ cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+ }
+
+ // Set SHA256 header manually
+ sh256 := sha256.Sum256(b)
+ meta = map[string]string{"x-amz-checksum-sha256": base64.StdEncoding.EncodeToString(sh256[:])}
+ args["metadata"] = meta
+ resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+ DisableMultipart: true,
+ UserMetadata: meta,
+ DisableContentSha256: true,
+ SendContentMd5: false,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+ delete(args, "metadata")
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testGetObjectAttributes() {
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.ObjectAttributesOptions{}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ TrailingHeaders: true,
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ err = c.MakeBucket(
+ context.Background(),
+ bucketName,
+ minio.MakeBucketOptions{Region: "us-east-1"},
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-")
+ args["bucketName"] = bucketNameV
+ err = c.MakeBucket(
+ context.Background(),
+ bucketNameV,
+ minio.MakeBucketOptions{Region: "us-east-1"},
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+ err = c.EnableVersioning(context.Background(), bucketNameV)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Unable to enable versioning", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+ defer cleanupVersionedBucket(bucketNameV, c)
+
+ testFiles := make(map[string]*objectAttributesNewObject)
+ testFiles["file1"] = &objectAttributesNewObject{
+ Object: "file1",
+ ObjectReaderType: "datafile-1.03-MB",
+ Bucket: bucketNameV,
+ ContentType: "custom/contenttype",
+ SendContentMd5: false,
+ }
+
+ testFiles["file2"] = &objectAttributesNewObject{
+ Object: "file2",
+ ObjectReaderType: "datafile-129-MB",
+ Bucket: bucketName,
+ ContentType: "custom/contenttype",
+ SendContentMd5: false,
+ }
+
+ for i, v := range testFiles {
+ bufSize := dataFileMap[v.ObjectReaderType]
+
+ reader := getDataReader(v.ObjectReaderType)
+
+ args["objectName"] = v.Object
+ testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{
+ ContentType: v.ContentType,
+ SendContentMd5: v.SendContentMd5,
+ Checksum: minio.ChecksumCRC32C,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ }
+
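+ // Each table entry exercises a different MaxParts / PartNumberMarker combination; file2 is uploaded in multiple parts while file1 is a small single-part object in the versioned bucket.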
+ testTable := make(map[string]objectAttributesTableTest)
+
+ testTable["none-versioned"] = objectAttributesTableTest{
+ opts: minio.ObjectAttributesOptions{},
+ test: objectAttributesTestOptions{
+ TestFileName: "file2",
+ StorageClass: "STANDARD",
+ HasFullChecksum: true,
+ HasPartChecksums: true,
+ HasParts: true,
+ },
+ }
+
+ testTable["0-to-0-marker"] = objectAttributesTableTest{
+ opts: minio.ObjectAttributesOptions{
+ PartNumberMarker: 0,
+ MaxParts: 0,
+ },
+ test: objectAttributesTestOptions{
+ TestFileName: "file2",
+ StorageClass: "STANDARD",
+ HasFullChecksum: true,
+ HasPartChecksums: true,
+ HasParts: true,
+ },
+ }
+
+ testTable["0-marker-to-max"] = objectAttributesTableTest{
+ opts: minio.ObjectAttributesOptions{
+ PartNumberMarker: 0,
+ MaxParts: 10000,
+ },
+ test: objectAttributesTestOptions{
+ TestFileName: "file2",
+ StorageClass: "STANDARD",
+ HasFullChecksum: true,
+ HasPartChecksums: true,
+ HasParts: true,
+ },
+ }
+
+ testTable["0-to-1-marker"] = objectAttributesTableTest{
+ opts: minio.ObjectAttributesOptions{
+ PartNumberMarker: 0,
+ MaxParts: 1,
+ },
+ test: objectAttributesTestOptions{
+ TestFileName: "file2",
+ StorageClass: "STANDARD",
+ HasFullChecksum: true,
+ HasPartChecksums: true,
+ HasParts: true,
+ },
+ }
+
+ testTable["7-to-6-marker"] = objectAttributesTableTest{
+ opts: minio.ObjectAttributesOptions{
+ PartNumberMarker: 7,
+ MaxParts: 6,
+ },
+ test: objectAttributesTestOptions{
+ TestFileName: "file2",
+ StorageClass: "STANDARD",
+ HasFullChecksum: true,
+ HasPartChecksums: true,
+ HasParts: true,
+ },
+ }
+
+ testTable["versioned"] = objectAttributesTableTest{
+ opts: minio.ObjectAttributesOptions{},
+ test: objectAttributesTestOptions{
+ TestFileName: "file1",
+ StorageClass: "STANDARD",
+ HasFullChecksum: true,
+ },
+ }
+
+ for i, v := range testTable {
+
+ tf, ok := testFiles[v.test.TestFileName]
+ if !ok {
+ continue
+ }
+
+ args["objectName"] = tf.Object
+ args["bucketName"] = tf.Bucket
+ if tf.UploadInfo.VersionID != "" {
+ v.opts.VersionID = tf.UploadInfo.VersionID
+ }
+
+ s, err := c.GetObjectAttributes(context.Background(), tf.Bucket, tf.Object, v.opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+ return
+ }
+
+ v.test.NumberOfParts = s.ObjectParts.PartsCount
+ v.test.ETag = tf.UploadInfo.ETag
+ v.test.ObjectSize = int(tf.UploadInfo.Size)
+
+ err = validateObjectAttributeRequest(s, &v.opts, &v.test)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed, table test: "+i, err)
+ return
+ }
+
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testGetObjectAttributesSSECEncryption() {
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.ObjectAttributesOptions{}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ TrailingHeaders: true,
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ Transport: createHTTPTransport(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ err = c.MakeBucket(
+ context.Background(),
+ bucketName,
+ minio.MakeBucketOptions{Region: "us-east-1"},
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ objectName := "encrypted-object"
+ args["objectName"] = objectName
+ bufSize := dataFileMap["datafile-11-MB"]
+ reader := getDataReader("datafile-11-MB")
+
+ sse := encrypt.DefaultPBKDF([]byte("word1 word2 word3 word4"), []byte(bucketName+objectName))
+
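+ // A PartSize of half the object size forces a two-part multipart upload, so part-level checksums are available to verify.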
+ info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
+ ContentType: "content/custom",
+ SendContentMd5: false,
+ ServerSideEncryption: sse,
+ PartSize: uint64(bufSize) / 2,
+ Checksum: minio.ChecksumCRC32C,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ opts := minio.ObjectAttributesOptions{
+ ServerSideEncryption: sse,
+ }
+ attr, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+ return
+ }
+ err = validateObjectAttributeRequest(attr, &opts, &objectAttributesTestOptions{
+ TestFileName: info.Key,
+ ETag: info.ETag,
+ NumberOfParts: 2,
+ ObjectSize: int(info.Size),
+ HasFullChecksum: true,
+ HasParts: true,
+ HasPartChecksums: true,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testGetObjectAttributesErrorCases() {
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.ObjectAttributesOptions{}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ TrailingHeaders: true,
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+ unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-")
+ unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-")
+
+ _, err = c.GetObjectAttributes(context.Background(), unknownBucket, unknownObject, minio.ObjectAttributesOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes should have failed for a non-existent bucket", nil)
+ return
+ }
+
+ errorResponse := err.(minio.ErrorResponse)
+ if errorResponse.Code != "NoSuchBucket" {
+ logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchBucket but got "+errorResponse.Code, nil)
+ return
+ }
+
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ err = c.MakeBucket(
+ context.Background(),
+ bucketName,
+ minio.MakeBucketOptions{Region: "us-east-1"},
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-")
+ args["bucketName"] = bucketNameV
+ err = c.MakeBucket(
+ context.Background(),
+ bucketNameV,
+ minio.MakeBucketOptions{Region: "us-east-1"},
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+ err = c.EnableVersioning(context.Background(), bucketNameV)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, c)
+ defer cleanupVersionedBucket(bucketNameV, c)
+
+ _, err = c.GetObjectAttributes(context.Background(), bucketName, unknownObject, minio.ObjectAttributesOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes should have failed for a non-existent object", nil)
+ return
+ }
+
+ errorResponse = err.(minio.ErrorResponse)
+ if errorResponse.Code != "NoSuchKey" {
+ logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchKey but got "+errorResponse.Code, nil)
+ return
+ }
+
+ _, err = c.GetObjectAttributes(context.Background(), bucketName, "", minio.ObjectAttributesOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes with empty object name should have failed", nil)
+ return
+ }
+
+ _, err = c.GetObjectAttributes(context.Background(), "", unknownObject, minio.ObjectAttributesOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil)
+ return
+ }
+
+ _, err = c.GetObjectAttributes(context.Background(), bucketNameV, unknownObject, minio.ObjectAttributesOptions{
+ VersionID: uuid.NewString(),
+ })
+ if err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes with a non-existent version ID should have failed", nil)
+ return
+ }
+ errorResponse = err.(minio.ErrorResponse)
+ if errorResponse.Code != "NoSuchVersion" {
+ logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchVersion but got "+errorResponse.Code, nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+type objectAttributesNewObject struct {
+ Object string
+ ObjectReaderType string
+ Bucket string
+ ContentType string
+ SendContentMd5 bool
+ UploadInfo minio.UploadInfo
+}
+
+type objectAttributesTableTest struct {
+ opts minio.ObjectAttributesOptions
+ test objectAttributesTestOptions
+}
+
+type objectAttributesTestOptions struct {
+ TestFileName string
+ ETag string
+ NumberOfParts int
+ StorageClass string
+ ObjectSize int
+ HasPartChecksums bool
+ HasFullChecksum bool
+ HasParts bool
+}
+
+func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.ObjectAttributesOptions, test *objectAttributesTestOptions) (err error) {
+ if opts.VersionID != "" {
+ if OA.VersionID != opts.VersionID {
+ err = fmt.Errorf("Expected versionId %s but got versionId %s", opts.VersionID, OA.VersionID)
+ return
+ }
+ }
+
+ partsMissingChecksum := false
+ foundPartChecksum := false
+ for _, v := range OA.ObjectParts.Parts {
+ checksumFound := false
+ if v.ChecksumSHA256 != "" {
+ checksumFound = true
+ } else if v.ChecksumSHA1 != "" {
+ checksumFound = true
+ } else if v.ChecksumCRC32 != "" {
+ checksumFound = true
+ } else if v.ChecksumCRC32C != "" {
+ checksumFound = true
+ }
+ if !checksumFound {
+ partsMissingChecksum = true
+ } else {
+ foundPartChecksum = true
+ }
+ }
+
+ if test.HasPartChecksums {
+ if partsMissingChecksum {
+ err = fmt.Errorf("One or more parts were missing a checksum")
+ return
+ }
+ } else {
+ if foundPartChecksum {
+ err = fmt.Errorf("Did not expect ObjectParts to have checksums but found one")
+ return
+ }
+ }
+
+ hasFullObjectChecksum := OA.Checksum.ChecksumCRC32 != "" ||
+ OA.Checksum.ChecksumCRC32C != "" ||
+ OA.Checksum.ChecksumSHA1 != "" ||
+ OA.Checksum.ChecksumSHA256 != ""
+
+ if test.HasFullChecksum {
+ if !hasFullObjectChecksum {
+ err = fmt.Errorf("Full object checksum not found")
+ return
+ }
+ } else {
+ if hasFullObjectChecksum {
+ err = fmt.Errorf("Did not expect a full object checksum but we got one")
+ return
+ }
+ }
+
+ if OA.ETag != test.ETag {
+ err = fmt.Errorf("Etags do not match, got %s but expected %s", OA.ETag, test.ETag)
+ return
+ }
+
+ if test.HasParts {
+ if len(OA.ObjectParts.Parts) < 1 {
+ err = fmt.Errorf("Was expecting ObjectParts but none were present")
+ return
+ }
+ }
+
+ if OA.StorageClass == "" {
+ err = fmt.Errorf("Was expecting a StorageClass but got none")
+ return
+ }
+
+ if OA.ObjectSize != test.ObjectSize {
+ err = fmt.Errorf("Expected ObjectSize %d but got %d", test.ObjectSize, OA.ObjectSize)
+ return
+ }
+
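+ // Verify that the number of parts returned respects the MaxParts / PartNumberMarker window.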
+ if test.HasParts {
+ if opts.MaxParts == 0 {
+ if len(OA.ObjectParts.Parts) != OA.ObjectParts.PartsCount {
+ err = fmt.Errorf("expected %d parts but got %d", OA.ObjectParts.PartsCount, len(OA.ObjectParts.Parts))
+ return
+ }
+ } else if (opts.MaxParts + opts.PartNumberMarker) > OA.ObjectParts.PartsCount {
+ if len(OA.ObjectParts.Parts) != (OA.ObjectParts.PartsCount - opts.PartNumberMarker) {
+ err = fmt.Errorf("expected %d parts but got %d", (OA.ObjectParts.PartsCount - opts.PartNumberMarker), len(OA.ObjectParts.Parts))
+ return
+ }
+ } else if opts.MaxParts != 0 {
+ if opts.MaxParts != len(OA.ObjectParts.Parts) {
+ err = fmt.Errorf("expected %d parts but got %d", opts.MaxParts, len(OA.ObjectParts.Parts))
+ return
+ }
+ }
+ }
+
+ if OA.ObjectParts.NextPartNumberMarker == OA.ObjectParts.PartsCount {
+ if OA.ObjectParts.IsTruncated {
+ err = fmt.Errorf("Expected ObjectParts to NOT be truncated, but it was")
+ return
+ }
+ }
+
+ if OA.ObjectParts.NextPartNumberMarker != OA.ObjectParts.PartsCount {
+ if !OA.ObjectParts.IsTruncated {
+ err = fmt.Errorf("Expected ObjectParts to be truncated, but it was NOT")
+ return
+ }
+ }
+
+ return
+}
+
+// Test PutObject using a large data to trigger multipart readat
+func testPutObjectWithMetadata() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+
+ args["metadata"] = map[string][]string{
+ "Content-Type": {customContentType},
+ "X-Amz-Meta-CustomKey": {"extra spaces in value"},
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
+ ContentType: customContentType,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+ if st.ContentType != customContentType && st.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
+ return
+ }
+ if err := crcMatchesName(r, "datafile-129-MB"); err != nil {
+ logError(testName, function, args, startTime, "", "data CRC check failed", err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testPutObjectWithContentLanguage() {
+ // initialize logging params
+ objectName := "test-object"
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": objectName,
+ "size": -1,
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ data := []byte{}
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{
+ ContentLanguage: "en",
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if objInfo.Metadata.Get("Content-Language") != "en" {
+ logError(testName, function, args, startTime, "", "Content-Language mismatch, expected 'en' but got "+objInfo.Metadata.Get("Content-Language"), nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test put object with streaming signature.
+func testPutObjectStreaming() {
+ // initialize logging params
+ objectName := "test-object"
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size,opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": objectName,
+ "size": -1,
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload an object.
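+	// The sizes straddle the 64 KiB boundary, presumably to exercise streaming-signature chunking on both sides of a chunk edge.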
+ sizes := []int64{0, 64*1024 - 1, 64 * 1024}
+
+ for _, size := range sizes {
+ data := newRandomReader(size, size)
+ ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+ return
+ }
+
+ if ui.Size != size {
+ logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil)
+ return
+ }
+
+ objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if objInfo.Size != size {
+ logError(testName, function, args, startTime, "", "Unexpected size", err)
+ return
+ }
+
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test get object seeker from the end, using whence set to '2'.
+func testGetObjectSeekEnd() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes read does not match, expected %d got %d", int64(bufSize), st.Size), err)
+ return
+ }
+
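+	// Seek to 100 bytes before the end of the object (whence 2 is io.SeekEnd).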
+ pos, err := r.Seek(-100, 2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Object Seek failed", err)
+ return
+ }
+ if pos != st.Size-100 {
+ logError(testName, function, args, startTime, "", "Incorrect position", err)
+ return
+ }
+ buf2 := make([]byte, 100)
+ m, err := readFull(r, buf2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error reading through readFull", err)
+ return
+ }
+ if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes don't match, expected %d got %d", len(buf2), m), err)
+ return
+ }
+ hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
+ hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
+ if hexBuf1 != hexBuf2 {
+ logError(testName, function, args, startTime, "", "Values at same index dont match", err)
+ return
+ }
+ pos, err = r.Seek(-100, 2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Object Seek failed", err)
+ return
+ }
+ if pos != st.Size-100 {
+ logError(testName, function, args, startTime, "", "Incorrect position", err)
+ return
+ }
+ if err = r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "ObjectClose failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test that the get object reader does not return an error when closed twice.
+func testGetObjectClosedTwice() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", int64(bufSize), st.Size), err)
+ return
+ }
+ if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
+ logError(testName, function, args, startTime, "", "data CRC check failed", err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test RemoveObjects request where context cancels after timeout
+func testRemoveObjectsContext() {
+ // Initialize logging params.
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(ctx, bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+	// Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate put data.
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ // Multi remove of 20 objects.
+ nrObjects := 20
+ objectsCh := make(chan minio.ObjectInfo)
+ go func() {
+ defer close(objectsCh)
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+ minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ continue
+ }
+ objectsCh <- minio.ObjectInfo{
+ Key: info.Key,
+ VersionID: info.VersionID,
+ }
+ }
+ }()
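+	// The 1 ns deadline below expires immediately, so the first RemoveObjects call is expected to report a context error.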
+ // Set context to cancel in 1 nanosecond.
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Call RemoveObjects API with short timeout.
+ errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{})
+ // Check for error.
+ select {
+ case r := <-errorCh:
+ if r.Err == nil {
+ logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err)
+ return
+ }
+ }
+ // Set context with longer timeout.
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+ defer cancel()
+ // Perform RemoveObjects with the longer timeout. Expect the removals to succeed.
+ errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{})
+ select {
+ case r, more := <-errorCh:
+ if more || r.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test removing multiple objects with Remove API
+func testRemoveMultipleObjects() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+	// Multi remove of 200 objects
+ nrObjects := 200
+
+ objectsCh := make(chan minio.ObjectInfo)
+
+ go func() {
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+ minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ continue
+ }
+ objectsCh <- minio.ObjectInfo{
+ Key: info.Key,
+ VersionID: info.VersionID,
+ }
+ }
+ }()
+
+ // Call RemoveObjects API
+ errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
+
+ // Check if errorCh doesn't receive any error
+ select {
+ case r, more := <-errorCh:
+ if more {
+ logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test removing multiple objects and check for results
+func testRemoveMultipleObjectsWithResult() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupVersionedBucket(bucketName, c)
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ nrObjects := 10
+ nrLockedObjects := 5
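+	// The first nrLockedObjects objects are put under Governance-mode retention below, so their removal is expected to fail and be reported on the result channel.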
+
+ objectsCh := make(chan minio.ObjectInfo)
+
+ go func() {
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+ minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if i < nrLockedObjects {
+ // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC)
+ t := time.Now().Add(5 * time.Minute)
+ m := minio.RetentionMode(minio.Governance)
+ opts := minio.PutObjectRetentionOptions{
+ GovernanceBypass: false,
+ RetainUntilDate: &t,
+ Mode: &m,
+ VersionID: info.VersionID,
+ }
+ err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error setting retention", err)
+ return
+ }
+ }
+
+ objectsCh <- minio.ObjectInfo{
+ Key: info.Key,
+ VersionID: info.VersionID,
+ }
+ }
+ }()
+
+ // Call RemoveObjects API
+ resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
+
+ var foundNil, foundErr int
+
+ for {
+ // Check if errorCh doesn't receive any error
+ select {
+ case deleteRes, ok := <-resultCh:
+ if !ok {
+ goto out
+ }
+ if deleteRes.ObjectName == "" {
+ logError(testName, function, args, startTime, "", "Unexpected object name", nil)
+ return
+ }
+ if deleteRes.ObjectVersionID == "" {
+ logError(testName, function, args, startTime, "", "Unexpected object version ID", nil)
+ return
+ }
+
+ if deleteRes.Err == nil {
+ foundNil++
+ } else {
+ foundErr++
+ }
+ }
+ }
+out:
+ if foundNil+foundErr != nrObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of results", nil)
+ return
+ }
+
+ if foundNil != nrObjects-nrLockedObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil)
+ return
+ }
+
+ if foundErr != nrLockedObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of errors", nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject of a big file to trigger multipart
+func testFPutObjectMultipart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ fileName := getMintDataDirFilePath("datafile-129-MB")
+ if fileName == "" {
+ // Make a temp file with minPartSize bytes of data.
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+		// Fill the temp file with the 129 MB data set used for the multipart upload.
+ if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File Close failed", err)
+ return
+ }
+ fileName = file.Name()
+ args["fileName"] = fileName
+ }
+ totalSize := dataFileMap["datafile-129-MB"]
+ // Set base object name
+ objectName := bucketName + "FPutObject" + "-standard"
+ args["objectName"] = objectName
+
+ objectContentType := "testapplication/octet-stream"
+ args["objectContentType"] = objectContentType
+
+	// Perform standard FPutObject with a custom contentType provided.
+ _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ objInfo, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error", err)
+ return
+ }
+ if objInfo.Size != int64(totalSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", int64(totalSize), objInfo.Size), err)
+ return
+ }
+ if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject with null contentType (default = application/octet-stream)
+func testFPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ location := "us-east-1"
+
+ // Make a new bucket.
+ args["bucketName"] = bucketName
+ args["location"] = location
+ function = "MakeBucket(bucketName, location)"
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+	// Upload 3 parts' worth of data to use all 3 of multipart's 'workers' and have an extra part.
+ // Use different data in part for multipart tests to check parts are uploaded in correct order.
+ fName := getMintDataDirFilePath("datafile-129-MB")
+ if fName == "" {
+ // Make a temp file with minPartSize bytes of data.
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+
+ // Set base object name
+ function = "FPutObject(bucketName, objectName, fileName, opts)"
+ objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName + "-standard"
+ args["fileName"] = fName
+ args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ if ui.Size != int64(dataFileMap["datafile-129-MB"]) {
+ logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ args["objectName"] = objectName + "-Octet"
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
+ if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ srcFile, err := os.Open(fName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ defer srcFile.Close()
+ // Add extension to temp file name
+ tmpFile, err := os.Create(fName + ".gtar")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File create failed", err)
+ return
+ }
+ _, err = io.Copy(tmpFile, srcFile)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ tmpFile.Close()
+
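+	// With no contentType given, FPutObject infers it from the ".gtar" file extension.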
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ args["objectName"] = objectName + "-GTar"
+ args["opts"] = minio.PutObjectOptions{}
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Check headers
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-standard"
+ rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
+ return
+ }
+
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-Octet"
+ rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
+ return
+ }
+
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-GTar"
+ rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err)
+ return
+ }
+
+ os.Remove(fName + ".gtar")
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject request when context cancels after timeout
+func testFPutObjectContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+	// Upload 1 part's worth of data to use multipart upload.
+	// Use different data in each part for multipart tests to check parts are uploaded in the correct order.
+ fName := getMintDataDirFilePath("datafile-1-MB")
+ if fName == "" {
+		// Make a temp file with 1 MiB of data.
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+		// Upload 1 part to trigger multipart upload
+ if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObjectContext"
+ args["objectName"] = objectName
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Perform FPutObject with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+ // Perform FPutObject with a long timeout. Expect the put object to succeed
+ _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err)
+ return
+ }
+
+ _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject request when context cancels after timeout
+func testFPutObjectContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{ContentType:objectContentType}",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+	// Upload 1 part's worth of data to use multipart upload.
+	// Use different data in each part for multipart tests to check parts are uploaded in the correct order.
+ fName := getMintDataDirFilePath("datafile-1-MB")
+ if fName == "" {
+		// Make a temp file with 1 MiB of data.
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Temp file creation failed", err)
+ return
+ }
+
+		// Upload 1 part to trigger multipart upload
+ if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObjectContext"
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Perform FPutObject with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+ // Perform FPutObject with a long timeout. Expect the put object to succeed
+ _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err)
+ return
+ }
+
+ _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test validates putObject with context to see if request cancellation is honored.
+func testPutObjectContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(ctx, bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "opts": "",
+ }
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
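+	// Create a context that is canceled immediately, so the first PutObject is expected to fail.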
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ cancel()
+ args["ctx"] = ctx
+ args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"}
+
+ _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+
+ defer cancel()
+ reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests get object with s3zip extensions.
+func testGetObjectS3Zip() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{"x-minio-extract": true}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer func() {
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+ }()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip"
+ args["objectName"] = objectName
+
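+	// Build a zip archive in memory containing many small random files plus one large, compressible file.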
+ var zipFile bytes.Buffer
+ zw := zip.NewWriter(&zipFile)
+ rng := rand.New(rand.NewSource(0xc0cac01a))
+ const nFiles = 500
+ for i := 0; i <= nFiles; i++ {
+ if i == nFiles {
+ // Make one large, compressible file.
+ i = 1000000
+ }
+ b := make([]byte, i)
+ if i < nFiles {
+ rng.Read(b)
+ }
+ wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "zw.Create failed", err)
+ return
+ }
+ wc.Write(b)
+ }
+ err = zw.Close()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "zw.Close failed", err)
+ return
+ }
+ buf := zipFile.Bytes()
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", len(buf), st.Size), err)
+ return
+ }
+ r.Close()
+
+ zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "zip.NewReader failed", err)
+ return
+ }
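+	// List the archive contents through the MinIO s3zip extension: with the x-minio-extract header set,
+	// ListObjects reports each file inside the zip as an object under the "<objectName>/" prefix.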
+ lOpts := minio.ListObjectsOptions{}
+ lOpts.Set("x-minio-extract", "true")
+ lOpts.Prefix = objectName + "/"
+ lOpts.Recursive = true
+ list := c.ListObjects(context.Background(), bucketName, lOpts)
+ listed := map[string]minio.ObjectInfo{}
+ for item := range list {
+ if item.Err != nil {
+ break
+ }
+ listed[item.Key] = item
+ }
+ if len(listed) == 0 {
+ // Assume we are running against non-minio.
+ args["SKIPPED"] = true
+ logIgnored(testName, function, args, startTime, "s3zip does not appear to be present")
+ return
+ }
+
+ for _, file := range zr.File {
+ if file.FileInfo().IsDir() {
+ continue
+ }
+ args["zipfile"] = file.Name
+ zfr, err := file.Open()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file.Open failed", err)
+ return
+ }
+ want, err := io.ReadAll(zfr)
+ if err != nil {
+			logError(testName, function, args, startTime, "", "zip file read failed", err)
+ return
+ }
+
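+		// Fetch the same entry through GetObject with x-minio-extract and compare it with the locally unzipped content.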
+ opts := minio.GetObjectOptions{}
+ opts.Set("x-minio-extract", "true")
+ key := path.Join(objectName, file.Name)
+ r, err = c.GetObject(context.Background(), bucketName, key, opts)
+ if err != nil {
+ terr := minio.ToErrorResponse(err)
+ if terr.StatusCode != http.StatusNotFound {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ }
+ return
+ }
+ got, err := io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ r.Close()
+ if !bytes.Equal(want, got) {
+ logError(testName, function, args, startTime, "", "Content mismatch", err)
+ return
+ }
+ oi, ok := listed[key]
+ if !ok {
+ logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key))
+ return
+ }
+ if int(oi.Size) != len(got) {
+ logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got)))
+ return
+ }
+ delete(listed, key)
+ }
+ delete(args, "zipfile")
+ if len(listed) > 0 {
+ logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed))
+ return
+ }
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests get object ReadSeeker interface methods.
+func testGetObjectReadSeekFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer func() {
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+ }()
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", int64(bufSize), st.Size), err)
+ return
+ }
+
+ // This following function helps us to compare data from the reader after seek
+	// The following function helps us compare data from the reader after seek
+ cmpData := func(r io.Reader, start, end int) {
+ if end-start == 0 {
+ return
+ }
+ buffer := bytes.NewBuffer([]byte{})
+ if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "CopyN failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+ }
+
+ // Generic seek error for errors other than io.EOF
+ seekErr := errors.New("seek error")
+
+ testCases := []struct {
+ offset int64
+ whence int
+ pos int64
+ err error
+ shouldCmp bool
+ start int
+ end int
+ }{
+ // Start from offset 0, fetch data and compare
+ {0, 0, 0, nil, true, 0, 0},
+ // Start from offset 2048, fetch data and compare
+ {2048, 0, 2048, nil, true, 2048, bufSize},
+ // Start from offset larger than possible
+ {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
+ // Move to offset 0 without comparing
+ {0, 0, 0, nil, false, 0, 0},
+ // Move one step forward and compare
+ {1, 1, 1, nil, true, 1, bufSize},
+ // Move larger than possible
+ {int64(bufSize), 1, 0, seekErr, false, 0, 0},
+ // Provide negative offset with CUR_SEEK
+ {int64(-1), 1, 0, seekErr, false, 0, 0},
+ // Test with whence SEEK_END and with positive offset
+ {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
+ // Test with whence SEEK_END and with negative offset
+ {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+ // Test with whence SEEK_END and with large negative offset
+ {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
+ }
+
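+	// Run each seek case, checking the returned position and, where requested, comparing the data that follows the new offset.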
+ for i, testCase := range testCases {
+ // Perform seek operation
+ n, err := r.Seek(testCase.offset, testCase.whence)
+ // We expect an error
+		if testCase.err == seekErr && err == nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, expected an error but Seek returned nil", i+1), err)
+			return
+		}
+		// We expect a specific error
+		if testCase.err != seekErr && testCase.err != err {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
+			return
+		}
+ // If we expect an error go to the next loop
+ if testCase.err != nil {
+ continue
+ }
+ // Check the returned seek pos
+ if n != testCase.pos {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err)
+ return
+ }
+ // Compare only if shouldCmp is activated
+ if testCase.shouldCmp {
+ cmpData(r, testCase.start, testCase.end)
+ }
+ }
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ offset := int64(2048)
+
+ // read directly
+ buf1 := make([]byte, 512)
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ // Test readAt before stat is called such that objectInfo doesn't change.
+ m, err := r.ReadAt(buf1, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+ return
+ }
+ if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data at the same offset", err)
+ return
+ }
+ offset += 512
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", int64(bufSize), st.Size), err)
+ return
+ }
+
+ m, err = r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data at the same offset", err)
+ return
+ }
+
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data at the same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data at the same offset", err)
+ return
+ }
+
+ buf5 := make([]byte, len(buf))
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+		logError(testName, function, args, startTime, "", "Data read in GetObject does not match what was previously uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, len(buf)+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Reproduces issue https://github.com/minio/minio-go/issues/1137
+func testGetObjectReadAtWhenEOFWasReached() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
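+	// First drain the object completely so the internal reader reaches EOF, then verify that ReadAt at an earlier offset still works (regression test for issue 1137).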
+ // read directly
+ buf1 := make([]byte, len(buf))
+ buf2 := make([]byte, 512)
+
+ m, err := r.Read(buf1)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ }
+ if m != len(buf1) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Read read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+ return
+ }
+ if !bytes.Equal(buf1, buf) {
+ logError(testName, function, args, startTime, "", "Read data does not match the uploaded data", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ m, err = r.ReadAt(buf2, 512)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[512:1024]) {
+ logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test Presigned Post Policy
+func testPresignedPostPolicy() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PresignedPostPolicy(policy)"
+ args := map[string]interface{}{
+ "policy": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ // Azure requires the key to not start with a number
+ metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
+ metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ policy := minio.NewPostPolicy()
+
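+ // Exercise the post-policy setters with invalid values first; each call below is expected to return an error.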
+ if err := policy.SetBucket(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetKey(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+ logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetContentType(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+ logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetUserMetadata("", ""); err == nil {
+ logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
+ return
+ }
+
+ policy.SetBucket(bucketName)
+ policy.SetKey(objectName)
+ policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+ policy.SetContentType("binary/octet-stream")
+ policy.SetContentLengthRange(10, 1024*1024)
+ policy.SetUserMetadata(metadataKey, metadataValue)
+
+ // Add CRC32C
+ checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
+ policy.SetChecksum(checksum)
+
+ args["policy"] = policy.String()
+
+ presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
+ return
+ }
+
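+ // Build the multipart form body from the form fields returned by PresignedPostPolicy.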
+ var formBuf bytes.Buffer
+ writer := multipart.NewWriter(&formBuf)
+ for k, v := range formData {
+ writer.WriteField(k, v)
+ }
+
+ // Get a 33KB file to upload and test if set post policy works
+ filePath := getMintDataDirFilePath("datafile-33-kB")
+ if filePath == "" {
+ // Make a temp file with 33 KB data.
+ file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+ if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File Close failed", err)
+ return
+ }
+ filePath = file.Name()
+ }
+
+ // add file to post request
+ f, err := os.Open(filePath)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ defer f.Close()
+ w, err := writer.CreateFormFile("file", filePath)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
+ return
+ }
+
+ _, err = io.Copy(w, f)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ writer.Close()
+
+ httpClient := &http.Client{
+ // Set a sensible timeout of 30 seconds; the request is
+ // proactively canceled if no response is received within
+ // that time.
+ Timeout: 30 * time.Second,
+ Transport: createHTTPTransport(),
+ }
+ args["url"] = presignedPostPolicyURL.String()
+
+ req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes()))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Http request failed", err)
+ return
+ }
+
+ req.Header.Set("Content-Type", writer.FormDataContentType())
+
+ // make post request with correct form data
+ res, err := httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Http request failed", err)
+ return
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusNoContent {
+ logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status))
+ return
+ }
+
+ // expected path should be absolute path of the object
+ var scheme string
+ if mustParseBool(os.Getenv(enableHTTPS)) {
+ scheme = "https://"
+ } else {
+ scheme = "http://"
+ }
+
+ expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
+ expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
+
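+ // Either a path-style or a virtual-hosted-style (bucket DNS) Location value is accepted.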
+ if !strings.Contains(expectedLocation, "s3.amazonaws.com/") {
+ // Test when not against AWS S3.
+ if val, ok := res.Header["Location"]; ok {
+ if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err)
+ return
+ }
+ } else {
+ logError(testName, function, args, startTime, "", "Location not found in header response", err)
+ return
+ }
+ }
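+ // The POST response is expected to echo back the CRC32C checksum that was set on the policy.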
+ want := checksum.Encoded()
+ if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests copy object
+func testCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(dst, src)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName+"-copy", c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Stat the source object to fetch the metadata used for the copy conditions below.
+ objInfo, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ // Copy Source
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ // Set copy conditions.
+ MatchETag: objInfo.ETag,
+ MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+ }
+ args["src"] = src
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName + "-copy",
+ Object: objectName + "-copy",
+ }
+
+ // Perform the Copy
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Source object
+ r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", objInfoCopy.Size, objInfo.Size), err)
+ return
+ }
+
+ if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
+ logError(testName, function, args, startTime, "", "data CRC check failed", err)
+ return
+ }
+ if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil {
+ logError(testName, function, args, startTime, "", "copy data CRC check failed", err)
+ return
+ }
+ // Close all the get readers before proceeding with CopyObject operations.
+ r.Close()
+ readerCopy.Close()
+
+ // CopyObject again but with wrong conditions
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+ NoMatchETag: objInfo.ETag,
+ }
+
+ // Perform the Copy which should fail
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+ return
+ }
+
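+ // Copy the object onto itself, replacing its user metadata in place.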
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ }
+
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ ReplaceMetadata: true,
+ UserMetadata: map[string]string{
+ "Copy": "should be same",
+ },
+ }
+ args["dst"] = dst
+ args["src"] = src
+
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err)
+ return
+ }
+
+ oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ stOpts := minio.StatObjectOptions{}
+ stOpts.SetMatchETag(oi.ETag)
+ objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err)
+ return
+ }
+
+ if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" {
+ logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests SSE-C get object ReaderSeeker interface methods.
+func testSSECEncryptedGetObjectReadSeekFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer func() {
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+ }()
+
+ // Generate 129MiB of data.
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
+ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ // The following function compares data read from the reader after a seek
+ // with the corresponding slice of the original buffer.
+ cmpData := func(r io.Reader, start, end int) {
+ if end-start == 0 {
+ return
+ }
+ buffer := bytes.NewBuffer([]byte{})
+ if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "CopyN failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+ }
+
+ testCases := []struct {
+ offset int64
+ whence int
+ pos int64
+ err error
+ shouldCmp bool
+ start int
+ end int
+ }{
+ // Start from offset 0, fetch data and compare
+ {0, 0, 0, nil, true, 0, 0},
+ // Start from offset 2048, fetch data and compare
+ {2048, 0, 2048, nil, true, 2048, bufSize},
+ // Start from offset larger than possible
+ {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
+ // Move to offset 0 without comparing
+ {0, 0, 0, nil, false, 0, 0},
+ // Move one step forward and compare
+ {1, 1, 1, nil, true, 1, bufSize},
+ // Move larger than possible
+ {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
+ // Provide negative offset with CUR_SEEK
+ {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
+ // Test with whence SEEK_END and with positive offset
+ {1024, 2, 0, io.EOF, false, 0, 0},
+ // Test with whence SEEK_END and with negative offset
+ {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+ // Test with whence SEEK_END and with large negative offset
+ {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
+ // Test with invalid whence
+ {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
+ }
+
+ for i, testCase := range testCases {
+ // Perform seek operation
+ n, err := r.Seek(testCase.offset, testCase.whence)
+ if err != nil && testCase.err == nil {
+ // We expected success.
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ if err == nil && testCase.err != nil {
+ // We expected failure, but got success.
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ if err != nil && testCase.err != nil {
+ if err.Error() != testCase.err.Error() {
+ // We expect a specific error
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ }
+ // Check the returned seek pos
+ if n != testCase.pos {
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err)
+ return
+ }
+ // Compare only if shouldCmp is activated
+ if testCase.shouldCmp {
+ cmpData(r, testCase.start, testCase.end)
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests SSE-S3 get object ReaderSeeker interface methods.
+func testSSES3EncryptedGetObjectReadSeekFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer func() {
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+ }()
+
+ // Generate 129MiB of data.
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ ServerSideEncryption: encrypt.NewSSE(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ // The following function compares data read from the reader after a seek
+ // with the corresponding slice of the original buffer.
+ cmpData := func(r io.Reader, start, end int) {
+ if end-start == 0 {
+ return
+ }
+ buffer := bytes.NewBuffer([]byte{})
+ if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "CopyN failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+ }
+
+ testCases := []struct {
+ offset int64
+ whence int
+ pos int64
+ err error
+ shouldCmp bool
+ start int
+ end int
+ }{
+ // Start from offset 0, fetch data and compare
+ {0, 0, 0, nil, true, 0, 0},
+ // Start from offset 2048, fetch data and compare
+ {2048, 0, 2048, nil, true, 2048, bufSize},
+ // Start from offset larger than possible
+ {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
+ // Move to offset 0 without comparing
+ {0, 0, 0, nil, false, 0, 0},
+ // Move one step forward and compare
+ {1, 1, 1, nil, true, 1, bufSize},
+ // Move larger than possible
+ {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
+ // Provide negative offset with CUR_SEEK
+ {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
+ // Test with whence SEEK_END and with positive offset
+ {1024, 2, 0, io.EOF, false, 0, 0},
+ // Test with whence SEEK_END and with negative offset
+ {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+ // Test with whence SEEK_END and with large negative offset
+ {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
+ // Test with invalid whence
+ {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
+ }
+
+ for i, testCase := range testCases {
+ // Perform seek operation
+ n, err := r.Seek(testCase.offset, testCase.whence)
+ if err != nil && testCase.err == nil {
+ // We expected success.
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ if err == nil && testCase.err != nil {
+ // We expected failure, but got success.
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ if err != nil && testCase.err != nil {
+ if err.Error() != testCase.err.Error() {
+ // We expect a specific error
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ }
+ // Check the returned seek pos
+ if n != testCase.pos {
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err)
+ return
+ }
+ // Compare only if shouldCmp is activated
+ if testCase.shouldCmp {
+ cmpData(r, testCase.start, testCase.end)
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests SSE-C get object ReaderAt interface methods.
+func testSSECEncryptedGetObjectReadAtFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 129MiB of data.
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
+ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ offset := int64(2048)
+
+ // read directly
+ buf1 := make([]byte, 512)
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ // Test readAt before stat is called such that objectInfo doesn't change.
+ m, err := r.ReadAt(buf1, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf1) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+ return
+ }
+ if !bytes.Equal(buf1, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ m, err = r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+
+ buf5 := make([]byte, len(buf))
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+ logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, len(buf)+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests SSE-S3 get object ReaderAt interface methods.
+func testSSES3EncryptedGetObjectReadAtFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 129MiB of data.
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ ServerSideEncryption: encrypt.NewSSE(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ offset := int64(2048)
+
+ // read directly
+ buf1 := make([]byte, 512)
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ // Test readAt before stat is called such that objectInfo doesn't change.
+ m, err := r.ReadAt(buf1, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf1) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+ return
+ }
+ if !bytes.Equal(buf1, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ m, err = r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+
+ buf5 := make([]byte, len(buf))
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+ logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, len(buf)+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// testSSECEncryptionPutGet tests SSE-C encryption with customer-provided encryption keys
+func testSSECEncryptionPutGet() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "sse": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
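+ // Payload sizes range from a single byte up to 1 MiB, straddling the 16 and 32 byte marks.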
+ testCases := []struct {
+ buf []byte
+ }{
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ const password = "correct horse battery staple" // https://xkcd.com/936/
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+ args["sse"] = sse
+
+ // Put encrypted data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Encrypted sent is not equal to decrypted, expected %s, got %s", i+1, testCase.buf, recvBuffer.Bytes()), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// testSSECEncryptionFPut tests SSE-C encryption with customer-provided encryption keys using FPutObject
+func testSSECEncryptionFPut() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "filePath": "",
+ "contentType": "",
+ "sse": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+ args["metadata"] = customContentType
+
+ testCases := []struct {
+ buf []byte
+ }{
+ {buf: bytes.Repeat([]byte("F"), 0)},
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ const password = "correct horse battery staple" // https://xkcd.com/936/
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+ args["sse"] = sse
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ _, err = file.Write(testCase.buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ file.Close()
+ // Put encrypted data
+ if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
+ logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Encrypted sent is not equal to decrypted, expected %s, got %s", i+1, testCase.buf, recvBuffer.Bytes()), err)
+ return
+ }
+
+ os.Remove(fileName)
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// testSSES3EncryptionPutGet tests SSE-S3 encryption
+func testSSES3EncryptionPutGet() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "sse": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ testCases := []struct {
+ buf []byte
+ }{
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ sse := encrypt.NewSSE()
+ args["sse"] = sse
+
+ // Put encrypted data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back without any encryption headers
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Encrypted sent is not equal to decrypted, expected %s, got %s", i+1, testCase.buf, recvBuffer.Bytes()), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// testSSES3EncryptionFPut tests SSE-S3 server-side encryption using FPutObject
+func testSSES3EncryptionFPut() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "filePath": "",
+ "contentType": "",
+ "sse": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+ args["metadata"] = customContentType
+
+ testCases := []struct {
+ buf []byte
+ }{
+ {buf: bytes.Repeat([]byte("F"), 0)},
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ sse := encrypt.NewSSE()
+ args["sse"] = sse
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ _, err = file.Write(testCase.buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ file.Close()
+ // Put encrypted data
+ if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
+ logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Encrypted sent is not equal to decrypted, expected %s, got %s", i+1, testCase.buf, recvBuffer.Bytes()), err)
+ return
+ }
+
+ os.Remove(fileName)
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testBucketNotification() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "SetBucketNotification(bucketName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ if os.Getenv("NOTIFY_BUCKET") == "" ||
+ os.Getenv("NOTIFY_SERVICE") == "" ||
+ os.Getenv("NOTIFY_REGION") == "" ||
+ os.Getenv("NOTIFY_ACCOUNTID") == "" ||
+ os.Getenv("NOTIFY_RESOURCE") == "" {
+ logIgnored(testName, function, args, startTime, "Skipped notification test as it is not configured")
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ bucketName := os.Getenv("NOTIFY_BUCKET")
+ args["bucketName"] = bucketName
+
+ topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
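+ // The queue ARN below uses dummy values; it is only used to exercise AddQueue and RemoveQueueByArn.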
+ queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
+
+ topicConfig := notification.NewConfig(topicArn)
+ topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll)
+ topicConfig.AddFilterSuffix("jpg")
+
+ queueConfig := notification.NewConfig(queueArn)
+ queueConfig.AddEvents(notification.ObjectCreatedAll)
+ queueConfig.AddFilterPrefix("photos/")
+
+ config := notification.Configuration{}
+ config.AddTopic(topicConfig)
+
+ // Add the same topicConfig again, should have no effect
+ // because it is duplicated
+ config.AddTopic(topicConfig)
+ if len(config.TopicConfigs) != 1 {
+ logError(testName, function, args, startTime, "", "Duplicate entry added", err)
+ return
+ }
+
+ // Add and remove a queue config
+ config.AddQueue(queueConfig)
+ config.RemoveQueueByArn(queueArn)
+
+ err = c.SetBucketNotification(context.Background(), bucketName, config)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketNotification failed", err)
+ return
+ }
+
+ config, err = c.GetBucketNotification(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketNotification failed", err)
+ return
+ }
+
+ if len(config.TopicConfigs) != 1 {
+ logError(testName, function, args, startTime, "", "Topic config is empty", err)
+ return
+ }
+
+ if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
+ logError(testName, function, args, startTime, "", "Couldn't get the suffix", err)
+ return
+ }
+
+ err = c.RemoveAllBucketNotification(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests comprehensive list of all methods.
+func testFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "testFunctional()"
+ functionAll := ""
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket.
+ function = "MakeBucket(bucketName, region)"
+ functionAll = "MakeBucket(bucketName, region)"
+ args["bucketName"] = bucketName
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+
+ defer cleanupBucket(bucketName, c)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File creation failed", err)
+ return
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File write failed", err)
+ return
+ }
+ }
+ file.Close()
+
+ // Verify the bucket exists and you have access to it.
+ var exists bool
+ function = "BucketExists(bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ exists, err = c.BucketExists(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "BucketExists failed", err)
+ return
+ }
+ if !exists {
+ logError(testName, function, args, startTime, "", "Could not find the bucket", err)
+ return
+ }
+
+ // Asserting the default bucket policy.
+ function = "GetBucketPolicy(ctx, bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
+ }
+ if nilPolicy != "" {
+ logError(testName, function, args, startTime, "", "policy should be set to nil", err)
+ return
+ }
+
+ // Set the bucket policy to 'public readonly'.
+ function = "SetBucketPolicy(bucketName, readOnlyPolicy)"
+ functionAll += ", " + function
+
+ readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "bucketPolicy": readOnlyPolicy,
+ }
+
+ err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+ // should return policy `readonly`.
+ function = "GetBucketPolicy(ctx, bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ _, err = c.GetBucketPolicy(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
+ }
+
+ // Make the bucket 'public writeonly'.
+ function = "SetBucketPolicy(bucketName, writeOnlyPolicy)"
+ functionAll += ", " + function
+
+ writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "bucketPolicy": writeOnlyPolicy,
+ }
+ err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+ // should return policy `writeonly`.
+ function = "GetBucketPolicy(ctx, bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+
+ _, err = c.GetBucketPolicy(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
+ }
+
+ // Make the bucket 'public read/write'.
+ function = "SetBucketPolicy(bucketName, readWritePolicy)"
+ functionAll += ", " + function
+
+ readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
+
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "bucketPolicy": readWritePolicy,
+ }
+ err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+ // should return policy `readwrite`.
+ function = "GetBucketPolicy(bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ _, err = c.GetBucketPolicy(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
+ }
+
+ // List all buckets.
+ function = "ListBuckets()"
+ functionAll += ", " + function
+ args = nil
+ buckets, err := c.ListBuckets(context.Background())
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+ return
+ }
+ if len(buckets) == 0 {
+ logError(testName, function, args, startTime, "", "Found bucket list to be empty", err)
+ return
+ }
+
+ // Verify if previously created bucket is listed in list buckets.
+ bucketFound := false
+ for _, bucket := range buckets {
+ if bucket.Name == bucketName {
+ bucketFound = true
+ }
+ }
+
+ // If bucket not found error out.
+ if !bucketFound {
+ logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err)
+ return
+ }
+
+ objectName := bucketName + "unique"
+
+ // Generate data
+ buf := bytes.Repeat([]byte("f"), 1<<19)
+
+ function = "PutObject(bucketName, objectName, reader, contentType)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "contentType": "",
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-nolength",
+ "contentType": "binary/octet-stream",
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Instantiate a done channel to close all listing.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+
+ objFound := false
+ isRecursive := true // Recursive is true.
+
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
+ return
+ }
+
+ objFound = false
+ isRecursive = true // Recursive is true.
+ function = "ListObjects()"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
+ return
+ }
+
+ incompObjNotFound := true
+
+ function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) {
+ if objIncompl.Key != "" {
+ incompObjNotFound = false
+ break
+ }
+ }
+ if !incompObjNotFound {
+ logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
+ return
+ }
+
+ function = "GetObject(bucketName, objectName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+ newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ newReadBytes, err := io.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err)
+ return
+ }
+ newReader.Close()
+
+ function = "FGetObject(bucketName, objectName, fileName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "fileName": fileName + "-f",
+ }
+ err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObject failed", err)
+ return
+ }
+
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
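+ // PresignedHeadObject with an empty object name is expected to fail.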
+ if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject success", err)
+ return
+ }
+
+ // Generate presigned HEAD object url.
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
+ return
+ }
+
+ transport := createHTTPTransport()
+
+ httpClient := &http.Client{
+ // Set a sensible timeout of 30secs to wait for response
+ // headers. The request is proactively canceled after 30secs
+ // if no response is received.
+ Timeout: 30 * time.Second,
+ Transport: transport,
+ }
+
+ req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err)
+ return
+ }
+ if resp.Header.Get("ETag") == "" {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
+ return
+ }
+ resp.Body.Close()
+
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
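+ // PresignedGetObject with an empty object name is expected to fail.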
+ _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject success", err)
+ return
+ }
+
+ // Generate presigned GET object url.
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ "reqParams": reqParams,
+ }
+ presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err = io.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
+ return
+ }
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err)
+ return
+ }
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
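+ // PresignedPutObject with an empty object name is expected to fail.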
+ _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject success", err)
+ return
+ }
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ "expires": 3600 * time.Second,
+ }
+ presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
+ }
+
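+ // Upload 512 KiB of new data through the presigned PUT URL.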
+ buf = bytes.Repeat([]byte("g"), 1<<19)
+
+ req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
+ }
+
+ newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err)
+ return
+ }
+
+ newReadBytes, err = io.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
+ return
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
+ functionAll += ", " + function
+ presignExtraHeaders := map[string][]string{
+ "mysecret": {"abcxxx"},
+ }
+ args = map[string]interface{}{
+ "method": "PUT",
+ "bucketName": bucketName,
+ "objectName": objectName + "-presign-custom",
+ "expires": 3600 * time.Second,
+ "extraHeaders": presignExtraHeaders,
+ }
+ presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Presigned failed", err)
+ return
+ }
+
+ // Generate more than 32K of data.
+ buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
+
+ req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
+ return
+ }
+
+ req.Header.Add("mysecret", "abcxxx")
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
+ return
+ }
+
+ // Download the uploaded object to verify
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presign-custom",
+ }
+ newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err)
+ return
+ }
+
+ newReadBytes, err = io.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
+ return
+ }
+ newReader.Close()
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err)
+ return
+ }
+
+ function = "RemoveObject(bucketName, objectName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+ err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+ args["objectName"] = objectName + "-f"
+ err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ args["objectName"] = objectName + "-nolength"
+ err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ args["objectName"] = objectName + "-presigned"
+ err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ args["objectName"] = objectName + "-presign-custom"
+ err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ function = "RemoveBucket(bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ err = c.RemoveBucket(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
+ return
+ }
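+ // Removing the same bucket again should fail because it no longer exists.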
+ err = c.RemoveBucket(context.Background(), bucketName)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err)
+ return
+ }
+ if err.Error() != "The specified bucket does not exist" {
+ logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
+ return
+ }
+
+ os.Remove(fileName)
+ os.Remove(fileName + "-f")
+ logSuccess(testName, functionAll, args, startTime)
+}
+
+// Test for validating GetObject Reader* methods functioning when the
+// object is modified in the object store.
+func testGetObjectModified() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload an object.
+ objectName := "myobject"
+ args["objectName"] = objectName
+ content := "helloworld"
+ _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
+ return
+ }
+
+ defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
+
+ reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err)
+ return
+ }
+ defer reader.Close()
+
+ // Read a few bytes of the object.
+ b := make([]byte, 5)
+ n, err := reader.ReadAt(b, 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err)
+ return
+ }
+
+ // Upload different contents to the same object while object is being read.
+ newContent := "goodbyeworld"
+ _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
+ return
+ }
+
+ // Stat() should now fail with a precondition error, because the reader still
+ // holds the ETag of the original object, which has since been overwritten.
+ _, err = reader.Stat()
+ expectedError := "At least one of the pre-conditions you specified did not hold"
+ if err == nil || err.Error() != expectedError {
+ logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err)
+ return
+ }
+
+ // Read again only to find object contents have been modified since last read.
+ _, err = reader.ReadAt(b, int64(n))
+ if err == nil || err.Error() != expectedError {
+ logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test validates putObject to upload a file seeked at a given offset.
+func testPutObjectUploadSeekedObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileToUpload": "",
+ "contentType": "binary/octet-stream",
+ }
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, c)
+
+ var tempfile *os.File
+
+ if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
+ tempfile, err = os.Open(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ args["fileToUpload"] = fileName
+ } else {
+ tempfile, err = os.CreateTemp("", "minio-go-upload-test-")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile create failed", err)
+ return
+ }
+ args["fileToUpload"] = tempfile.Name()
+
+ // Generate 100kB data
+ if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ defer os.Remove(tempfile.Name())
+
+ // Seek back to the beginning of the file.
+ tempfile.Seek(0, 0)
+ }
+ length := 100 * humanize.KiByte
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
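+ // Seek to the middle of the file so that only the second half is uploaded.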
+ offset := length / 2
+ if _, err = tempfile.Seek(int64(offset), 0); err != nil {
+ logError(testName, function, args, startTime, "", "TempFile seek failed", err)
+ return
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ tempfile.Close()
+
+ obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer obj.Close()
+
+ n, err := obj.Seek(int64(offset), 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != int64(offset) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
+ return
+ }
+
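+ // Re-upload the remaining bytes of the downloaded object as a new object
+ // and verify that its reported size is length-offset.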
+ _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if st.Size != int64(length-offset) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketErrorV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ region := "eu-west-1"
+ args["bucketName"] = bucketName
+ args["region"] = region
+
+ // Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil {
+ logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err)
+ return
+ }
+ // Verify valid error response from server.
+ if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+ minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+ logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test get object reader to not throw error on being closed twice.
+func testGetObjectClosedTwiceV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject hidden contentType setting
+func testFPutObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Make a temp file with 11*1024*1024 bytes of data.
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+ n, err := io.CopyN(file, r, 11*1024*1024)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ if n != int64(11*1024*1024) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
+ return
+ }
+
+ // Close the file proactively for Windows.
+ err = file.Close()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName
+ args["fileName"] = file.Name()
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ args["objectName"] = objectName + "-Octet"
+ args["contentType"] = ""
+
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Add extension to temp file name
+ fileName := file.Name()
+ err = os.Rename(fileName, fileName+".gtar")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Rename failed", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ args["objectName"] = objectName + "-Octet"
+ args["contentType"] = ""
+ args["fileName"] = fileName + ".gtar"
+
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Check headers and sizes
+ rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if rStandard.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+
+ if rStandard.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
+ return
+ }
+
+ rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
+ return
+ }
+
+ if rOctet.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+
+ rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rGTar.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+ if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-tar , got "+rGTar.ContentType, err)
+ return
+ }
+
+ os.Remove(fileName + ".gtar")
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests various bucket supported formats.
+func testMakeBucketRegionsV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
+ return
+ }
+
+ // Make a new bucket with '.' in its name, in 'us-west-2'. This
+ // request is internally sent using path-style addressing instead of
+ // virtual-host style.
+ if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil {
+ args["bucketName"] = bucketName + ".withperiod"
+ args["region"] = "us-west-2"
+ logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests get object ReaderSeeker interface methods.
+func testGetObjectReadSeekFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data.
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
+ return
+ }
+
+ offset := int64(2048)
+ n, err := r.Seek(offset, 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != offset {
+ logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
+ return
+ }
+ n, err = r.Seek(0, 1)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != offset {
+ logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
+ return
+ }
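+ // Seeking with a positive offset relative to the end (whence '2') must fail.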
+ _, err = r.Seek(offset, 2)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
+ return
+ }
+ n, err = r.Seek(-offset, 2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != st.Size-offset {
+ logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err)
+ return
+ }
+
+ var buffer1 bytes.Buffer
+ if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+
+ // Seek again and read again.
+ n, err = r.Seek(offset-1, 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != (offset - 1) {
+ logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err)
+ return
+ }
+
+ var buffer2 bytes.Buffer
+ if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ }
+ // Verify that the remaining bytes match the original buffer.
+ if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
+ return
+ }
+
+ offset := int64(2048)
+
+ // Read directly
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ m, err := r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+
+ buf5 := make([]byte, bufSize)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+ logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, bufSize+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests copy object
+func testCopyObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, c)
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName+"-copy", c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ r.Close()
+
+ // Copy Source
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+ MatchETag: objInfo.ETag,
+ }
+ args["source"] = src
+
+ // Set copy conditions.
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName + "-copy",
+ Object: objectName + "-copy",
+ }
+ args["destination"] = dst
+
+ // Perform the Copy
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Source object
+ r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if objInfo.Size != objInfoCopy.Size {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", objInfoCopy.Size, objInfo.Size), err)
+ return
+ }
+
+ // Close all the readers.
+ r.Close()
+ readerCopy.Close()
+
+	// CopyObject again, but with conditions that cannot be satisfied
+	// (unmodified since 2014 and a non-matching ETag), so the copy must fail.
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+ NoMatchETag: objInfo.ETag,
+ }
+
+ // Perform the Copy which should fail
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Test that more than 10K source objects cannot be
+ // concatenated.
+ srcArr := [10001]minio.CopySrcOptions{}
+ srcSlice := srcArr[:]
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "object",
+ }
+
+ args["destination"] = dst
+	// Describe srcArr in args["sourceList"] as a string instead of
+	// embedding it, to avoid logging 10,001 empty entries.
+ args["sourceList"] = "source array of 10,001 elements"
+ if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil {
+ logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
+ return
+ } else if err.Error() != "There must be as least one and up to 10000 source objects." {
+ logError(testName, function, args, startTime, "", "Got unexpected error", err)
+ return
+ }
+
+ // Create a source with invalid offset spec and check that
+ // error is returned:
+ // 1. Create the source object.
+ const badSrcSize = 5 * 1024 * 1024
+ buf := bytes.Repeat([]byte("1"), badSrcSize)
+ _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+	// 2. Set an invalid range spec on the object: offsets are zero-based,
+	// so End == badSrcSize points one byte beyond the object.
+ badSrc := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "badObject",
+ MatchRange: true,
+ Start: 1,
+ End: badSrcSize,
+ }
+
+ // 3. ComposeObject call should fail.
+ if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil {
+ logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
+ return
+ } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
+ logError(testName, function, args, startTime, "", "Got invalid error", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test expected error cases
+func testComposeObjectErrorCasesV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ testComposeObjectErrorCasesWrapper(c)
+}
+
+func testComposeMultipleSources(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{
+ "destination": "",
+ "sourceList": "",
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload a small source object
+ const srcSize = 1024 * 1024 * 5
+ buf := bytes.Repeat([]byte("1"), srcSize)
+ _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // We will append 10 copies of the object.
+ srcs := []minio.CopySrcOptions{}
+ for i := 0; i < 10; i++ {
+ srcs = append(srcs, minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ })
+ }
+
+	// Make the last part very small: MatchRange with the default Start/End
+	// of 0 restricts the copy to byte 0 only, so the composed object is
+	// expected to be 9*srcSize+1 bytes.
+ srcs[9].MatchRange = true
+
+ args["sourceList"] = srcs
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject",
+ }
+ args["destination"] = dst
+
+ ui, err := c.ComposeObject(context.Background(), dst, srcs...)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ if ui.Size != 9*srcSize+1 {
+ logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err)
+ return
+ }
+
+ objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if objProps.Size != 9*srcSize+1 {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Size mismatched! Expected %d got %d", 9*srcSize+1, objProps.Size), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test concatenating multiple 10K objects V2
+func testCompose10KSourcesV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ testComposeMultipleSources(c)
+}
+
+func testEncryptedEmptyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
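+	// Derive an SSE-C key from a passphrase, using the bucket and object
+	// names as the salt.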
+ sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
+
+ // 1. create an sse-c encrypted object to copy by uploading
+ const srcSize = 0
+ var buf []byte // Empty buffer
+ args["objectName"] = "object"
+ _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ // 2. Test CopyObject for an empty object
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "object",
+ Encryption: sse,
+ }
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "new-object",
+ Encryption: sse,
+ }
+
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ function = "CopyObject(dst, src)"
+ logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // 3. Test Key rotation
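+	// Key rotation is a server-side copy of the object onto itself: the old
+	// key is supplied as the source encryption and the new key as the
+	// destination encryption.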
+ newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object"))
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "new-object",
+ Encryption: sse,
+ }
+
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "new-object",
+ Encryption: newSSE,
+ }
+
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ function = "CopyObject(dst, src)"
+ logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err)
+ return
+ }
+
+ // 4. Download the object.
+ reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer reader.Close()
+
+ decBytes, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err)
+ return
+ }
+
+ delete(args, "objectName")
+ logSuccess(testName, function, args, startTime)
+}
+
+func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncNameLoc(2)
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+ var srcEncryption, dstEncryption encrypt.ServerSide
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // 1. create an sse-c encrypted object to copy by uploading
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+ _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+ ServerSideEncryption: sseSrc,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
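+	// Only SSE-C sources need their key material passed along when reading
+	// or copying; SSE-S3 objects are decrypted transparently by the server.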
+ if sseSrc != nil && sseSrc.Type() != encrypt.S3 {
+ srcEncryption = sseSrc
+ }
+
+ // 2. copy object and change encryption key
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ Encryption: srcEncryption,
+ }
+ args["source"] = src
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject",
+ Encryption: sseDst,
+ }
+ args["destination"] = dst
+
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ if sseDst != nil && sseDst.Type() != encrypt.S3 {
+ dstEncryption = sseDst
+ }
+ // 3. get copied object and check if content is equal
+	coreClient := minio.Core{Client: c}
+ reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ decBytes, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+ reader.Close()
+
+ // Test key rotation for source object in-place.
+ var newSSE encrypt.ServerSide
+ if sseSrc != nil && sseSrc.Type() == encrypt.SSEC {
+ newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key
+ }
+ if sseSrc != nil && sseSrc.Type() == encrypt.S3 {
+ newSSE = encrypt.NewSSE()
+ }
+ if newSSE != nil {
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ Encryption: newSSE,
+ }
+ args["destination"] = dst
+
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Get copied object and check if content is equal
+ reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ decBytes, err = io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+ reader.Close()
+
+ // Test in-place decryption.
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ }
+ args["destination"] = dst
+
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ Encryption: newSSE,
+ }
+ args["source"] = src
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err)
+ return
+ }
+ }
+
+ // Get copied decrypted object and check if content is equal
+ reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer reader.Close()
+
+ decBytes, err = io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test encrypted copy object
+func testUnencryptedToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst)
+}
+
+// Test encrypted copy object
+func testUnencryptedToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ var sseSrc encrypt.ServerSide
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testUnencryptedToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ var sseSrc, sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSECToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSECToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSECToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ var sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSES3ToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSES3ToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSES3ToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ var sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedCopyObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+func testDecryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object"
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName))
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{
+ ServerSideEncryption: encryption,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
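+	// encrypt.SSECopy wraps the SSE-C key so it is sent as copy-source
+	// headers; the destination sets no encryption, so the copy is stored
+	// unencrypted.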
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ Encryption: encrypt.SSECopy(encryption),
+ }
+ args["source"] = src
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "decrypted-" + objectName,
+ }
+ args["destination"] = dst
+
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+ if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ logSuccess(testName, function, args, startTime)
+}
+
+func testSSECMultipartEncryptedToSSECCopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+	c := minio.Core{Client: client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 6MB of data
+ buf := bytes.Repeat([]byte("abcdef"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ password := "correct horse battery staple"
+ srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+
+ // Upload a 6MB object using multipart mechanism
+ uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ var completeParts []minio.CompletePart
+
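+	// Upload the first 5 MiB as part 1 (the minimum size for a non-terminal
+	// part) and the remaining 1 MiB as part 2.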
+ part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1,
+ bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024,
+ minio.PutObjectPartOptions{SSE: srcencryption},
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectPart call failed", err)
+ return
+ }
+ completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})
+
+ part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2,
+ bytes.NewReader(buf[5*1024*1024:]), 1024*1024,
+ minio.PutObjectPartOptions{SSE: srcencryption},
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectPart call failed", err)
+ return
+ }
+ completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
+
+ uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+	// The destination object will contain two full copies of the source
+	// object's data concatenated, followed by its first byte.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ encrypt.SSECopy(srcencryption).Marshal(header)
+ dstencryption.Marshal(header)
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = objInfo.ETag
+
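+	// CopyObjectPart takes a start offset and a length; a length of -1
+	// copies the whole source object, so parts 1 and 2 are full copies and
+	// part 3 copies only the first byte (see the size check below).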
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (6*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
+ getOpts.SetRange(0, 6*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 6*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err)
+ return
+ }
+
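+	// SetRange with an end offset of 0 requests everything from the given
+	// start offset through the end of the object.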
+ getOpts.SetRange(6*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 6*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:6*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err)
+ return
+ }
+ if getBuf[6*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+	// No need to remove destBucketName; it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation
+func testSSECEncryptedToSSECCopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+	c := minio.Core{Client: client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ password := "correct horse battery staple"
+ srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+ putmetadata := map[string]string{
+ "Content-Type": "binary/octet-stream",
+ }
+ opts := minio.PutObjectOptions{
+ UserMetadata: putmetadata,
+ ServerSideEncryption: srcencryption,
+ }
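+	// Core.PutObject also takes optional base64 MD5 and hex SHA256 checksums
+	// of the payload; empty strings mean none are supplied.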
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+	// The destination object will contain two full copies of the source
+	// object's data concatenated, followed by its first byte.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ encrypt.SSECopy(srcencryption).Marshal(header)
+ dstencryption.Marshal(header)
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+	// No need to remove destBucketName; it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy
+func testSSECEncryptedToUnencryptedCopyPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+	c := minio.Core{Client: client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ password := "correct horse battery staple"
+ srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcencryption,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ var dstencryption encrypt.ServerSide
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+	// The destination object will contain two full copies of the source
+	// object's data concatenated, followed by its first byte.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ encrypt.SSECopy(srcencryption).Marshal(header)
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+	// No need to remove destBucketName; it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy
+func testSSECEncryptedToSSES3CopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+	c := minio.Core{Client: client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ password := "correct horse battery staple"
+ srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+ putmetadata := map[string]string{
+ "Content-Type": "binary/octet-stream",
+ }
+ opts := minio.PutObjectOptions{
+ UserMetadata: putmetadata,
+ ServerSideEncryption: srcencryption,
+ }
+
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ dstencryption := encrypt.NewSSE()
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+	// The destination object will contain two full copies of the source
+	// object's data concatenated, followed by its first byte.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ encrypt.SSECopy(srcencryption).Marshal(header)
+ dstencryption.Marshal(header)
+
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+	// No need to remove destBucketName; it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part
+func testUnencryptedToSSECCopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+	c := minio.Core{Client: client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ password := "correct horse battery staple"
+ putmetadata := map[string]string{
+ "Content-Type": "binary/octet-stream",
+ }
+ opts := minio.PutObjectOptions{
+ UserMetadata: putmetadata,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
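+ // Derive an SSE-C key for the destination; DefaultPBKDF stretches the password using the bucket and object names as the salt.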
+ dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of the
+ // source object's data concatenated, followed by the first byte
+ // of the source object.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ dstencryption.Marshal(header)
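+ // Flatten the marshaled SSE-C headers into the metadata map so CopyObjectPart applies them to the destination parts.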
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
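+ // Condition the copy on the source ETag so the copy fails if the source object changed after upload.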
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
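+ // A start offset of 0 with length -1 copies the entire 5MiB source object as this part.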
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
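+ // Copy only the first byte of the source object as the final part.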
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+ // No need to remove destBucketName since it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy
+func testUnencryptedToUnencryptedCopyPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ putmetadata := map[string]string{
+ "Content-Type": "binary/octet-stream",
+ }
+ opts := minio.PutObjectOptions{
+ UserMetadata: putmetadata,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of the
+ // source object's data concatenated, followed by the first byte
+ // of the source object.
+ metadata := make(map[string]string)
+ header := make(http.Header)
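+ // The destination is unencrypted, so there are no SSE headers to marshal and the loop below is a no-op.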
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+ // No need to remove destBucketName since it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy
+func testUnencryptedToSSES3CopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
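+ // encrypt.NewSSE() requests SSE-S3 encryption, where the server manages the keys.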
+ dstencryption := encrypt.NewSSE()
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of the
+ // source object's data concatenated, followed by the first byte
+ // of the source object.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ dstencryption.Marshal(header)
+
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+ // No need to remove destBucketName since it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part
+func testSSES3EncryptedToSSECCopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ password := "correct horse battery staple"
+ srcEncryption := encrypt.NewSSE()
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcEncryption,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of the
+ // source object's data concatenated, followed by the first byte
+ // of the source object.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ dstencryption.Marshal(header)
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+ // No need to remove destBucketName since it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 encrypted to unencrypted copy
+func testSSES3EncryptedToUnencryptedCopyPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ srcEncryption := encrypt.NewSSE()
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcEncryption,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of the
+ // source object's data concatenated, followed by the first byte
+ // of the source object.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+ // No need to remove destBucketName since it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 encrypted to SSE-S3 encrypted copy
+func testSSES3EncryptedToSSES3CopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ srcEncryption := encrypt.NewSSE()
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcEncryption,
+ }
+
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ dstencryption := encrypt.NewSSE()
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of the
+ // source object's data concatenated, followed by the first byte
+ // of the source object.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ dstencryption.Marshal(header)
+
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+
+ // No need to remove destBucketName since it is the same as bucketName.
+}
+
+func testUserMetadataCopying() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopyingWrapper(c)
+}
+
+func testUserMetadataCopyingWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
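+ // fetchMeta returns only the user-defined (x-amz-meta-*) headers of the given object.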
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ h.Add(k, vs[0])
+ }
+ }
+ return h
+ }
+
+ // 1. Create a source object with user metadata to copy, by uploading it
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+ metadata := make(http.Header)
+ metadata.Set("x-amz-meta-myheader", "myvalue")
+ m := make(map[string]string)
+ m["x-amz-meta-myheader"] = "myvalue"
+ _, err = c.PutObject(context.Background(), bucketName, "srcObject",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err)
+ return
+ }
+ if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 2. create source
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ }
+
+ // 2.1 create destination with metadata set
+ dst1 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-1",
+ UserMetadata: map[string]string{"notmyheader": "notmyvalue"},
+ ReplaceMetadata: true,
+ }
+
+ // 3. Check that copying with destination metadata set replaces
+ // the metadata headers on the copy.
+ args["source"] = src
+ args["destination"] = dst1
+ _, err = c.CopyObject(context.Background(), dst1, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ expectedHeaders := make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 4. create destination with no metadata set and same source
+ dst2 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-2",
+ }
+
+ // 5. Check that copying to a destination with no metadata set
+ // copies the source metadata.
+ args["source"] = src
+ args["destination"] = dst2
+ _, err = c.CopyObject(context.Background(), dst2, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ expectedHeaders = metadata
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 6. Compose a pair of sources.
+ dst3 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-3",
+ ReplaceMetadata: true,
+ }
+
+ function = "ComposeObject(destination, sources)"
+ args["source"] = []minio.CopySrcOptions{src, src}
+ args["destination"] = dst3
+ _, err = c.ComposeObject(context.Background(), dst3, src, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ // Check that no headers are copied in this case
+ if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 7. Compose a pair of sources with dest user metadata set.
+ dst4 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-4",
+ UserMetadata: map[string]string{"notmyheader": "notmyvalue"},
+ ReplaceMetadata: true,
+ }
+
+ function = "ComposeObject(destination, sources)"
+ args["source"] = []minio.CopySrcOptions{src, src}
+ args["destination"] = dst4
+ _, err = c.ComposeObject(context.Background(), dst4, src, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ // Check that the destination user metadata is set and no source headers are copied in this case
+ expectedHeaders = make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testUserMetadataCopyingV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopyingWrapper(c)
+}
+
+func testStorageClassMetadataPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassMetadataPutObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
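+ // fetchMeta returns only the x-amz-storage-class header of the given object, if the server reports one.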
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ metadata := make(http.Header)
+ metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+ emptyMetadata := make(http.Header)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Get the returned metadata
+ returnedMeta := fetchMeta("srcObjectRRSClass")
+
+ // The response metadata should be equal either to metadata (with REDUCED_REDUNDANCY) or to emptyMetadata (in the case of gateways)
+ if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ metadata = make(http.Header)
+ metadata.Set("x-amz-storage-class", "STANDARD")
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
+ logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testStorageClassInvalidMetadataPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassInvalidMetadataPutObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testStorageClassMetadataCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassMetadataCopyObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ Transport: createHTTPTransport(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ args["bucket"] = bucketName
+ args["object"] = object
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ metadata := make(http.Header)
+ metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+ emptyMetadata := make(http.Header)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize)
+
+ // Put an object with RRS Storage class
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Make server side copy of object uploaded in previous step
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObjectRRSClass",
+ }
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObjectRRSClassCopy",
+ }
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err)
+ return
+ }
+
+ // Get the returned metadata
+ returnedMeta := fetchMeta("srcObjectRRSClassCopy")
+
+ // The response metadata should be equal either to metadata (with REDUCED_REDUNDANCY) or to emptyMetadata (in the case of gateways)
+ if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ metadata = make(http.Header)
+ metadata.Set("x-amz-storage-class", "STANDARD")
+
+ // Put an object with Standard Storage class
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Make server side copy of object uploaded in previous step
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObjectSSClass",
+ }
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObjectSSClassCopy",
+ }
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed on SS", err)
+ return
+ }
+ // Fetch the meta data of copied object
+ if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) {
+ logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test put object with size -1 byte object.
+func testPutObjectNoLengthV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": -1,
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ objectName := bucketName + "unique"
+ args["objectName"] = objectName
+
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+ args["size"] = bufSize
+
+ // Upload an object.
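+ // A size of -1 tells the client that the length is unknown, so the data is streamed as a multipart upload.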
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test put objects of unknown size.
+func testPutObjectsUnknownV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Issues are revealed by trying to upload multiple files of unknown size
+ // sequentially (on 4GB machines)
+ for i := 1; i <= 4; i++ {
+ // Simulate that we could be receiving byte slices of data that we want
+ // to upload as a file
+ rpipe, wpipe := io.Pipe()
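+ // The pipe reader carries no length information, so each PutObject below is an unknown-size upload.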
+ defer rpipe.Close()
+ go func() {
+ b := []byte("test")
+ wpipe.Write(b)
+ wpipe.Close()
+ }()
+
+ // Upload the object.
+ objectName := fmt.Sprintf("%sunique%d", bucketName, i)
+ args["objectName"] = objectName
+
+ ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+ return
+ }
+
+ if ui.Size != 4 {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4, got %d", ui.Size), nil)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err)
+ return
+ }
+
+ if st.Size != int64(4) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4, got %d", st.Size), err)
+ return
+ }
+
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test put object with 0 byte object.
+func testPutObject0ByteV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": 0,
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ objectName := bucketName + "unique"
+ args["objectName"] = objectName
+ args["opts"] = minio.PutObjectOptions{}
+
+ // Upload an object.
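+	// A zero-length reader with an explicit size of 0 exercises the empty-object path.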
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err)
+ return
+ }
+ if st.Size != 0 {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 0 but got %d", st.Size), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test expected error cases
+func testComposeObjectErrorCases() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ testComposeObjectErrorCasesWrapper(c)
+}
+
+// Test concatenating multiple 10K objects V4
+func testCompose10KSources() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ testComposeMultipleSources(c)
+}
+
+// Tests comprehensive list of all methods.
+func testFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "testFunctionalV2()"
+ functionAll := ""
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ Transport: createHTTPTransport(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ location := "us-east-1"
+ // Make a new bucket.
+ function = "MakeBucket(bucketName, location)"
+ functionAll = "MakeBucket(bucketName, location)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "location": location,
+ }
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ }
+ file.Close()
+
+	// Verify the bucket exists and you have access.
+ var exists bool
+ function = "BucketExists(bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ exists, err = c.BucketExists(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "BucketExists failed", err)
+ return
+ }
+ if !exists {
+ logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
+ return
+ }
+
+	// Allow anonymous listing on the bucket.
+ function = "SetBucketPolicy(bucketName, bucketPolicy)"
+ functionAll += ", " + function
+
+ readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
+
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "bucketPolicy": readWritePolicy,
+ }
+ err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+
+ // List all buckets.
+ function = "ListBuckets()"
+ functionAll += ", " + function
+ args = nil
+ buckets, err := c.ListBuckets(context.Background())
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+		return
+	}
+	if len(buckets) == 0 {
+		logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
+		return
+	}
+
+ // Verify if previously created bucket is listed in list buckets.
+ bucketFound := false
+ for _, bucket := range buckets {
+ if bucket.Name == bucketName {
+ bucketFound = true
+ }
+ }
+
+ // If bucket not found error out.
+ if !bucketFound {
+		logError(testName, function, args, startTime, "", "Bucket "+bucketName+" not found", err)
+ return
+ }
+
+ objectName := bucketName + "unique"
+
+ // Generate data
+ buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
+
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "contentType": "",
+ }
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d, got %d", len(buf), st.Size), err)
+ return
+ }
+
+ objectNameNoLength := objectName + "-nolength"
+ args["objectName"] = objectNameNoLength
+ _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d, got %d", len(buf), st.Size), err)
+ return
+ }
+
+ // Instantiate a done channel to close all listing.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+
+ objFound := false
+ isRecursive := true // Recursive is true.
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+ for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err)
+ return
+ }
+
+ incompObjNotFound := true
+ function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
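+	// A freshly created bucket is expected to have no dangling incomplete uploads.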
+ for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) {
+ if objIncompl.Key != "" {
+ incompObjNotFound = false
+ break
+ }
+ }
+ if !incompObjNotFound {
+ logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
+ return
+ }
+
+ function = "GetObject(bucketName, objectName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+ newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ newReadBytes, err := io.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ newReader.Close()
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ function = "FGetObject(bucketName, objectName, fileName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "fileName": fileName + "-f",
+ }
+ err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FgetObject failed", err)
+ return
+ }
+
+ // Generate presigned HEAD object url.
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
+ return
+ }
+
+ httpClient := &http.Client{
+		// Set a sensible timeout of 30s to wait for response
+		// headers. The request is proactively canceled after 30s
+		// with no response.
+ Timeout: 30 * time.Second,
+ Transport: createHTTPTransport(),
+ }
+
+ req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedHeadObject URL returns status %d", resp.StatusCode), err)
+ return
+ }
+ if resp.Header.Get("ETag") == "" {
+ logError(testName, function, args, startTime, "", "Got empty ETag", err)
+ return
+ }
+ resp.Body.Close()
+
+ // Generate presigned GET object url.
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+
+ if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject URL returns status %d", resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
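+	// The response-content-disposition parameter should be echoed back in the Content-Disposition header, verified below.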
+ // Generate presigned GET object url.
+ args["reqParams"] = reqParams
+ presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+
+ if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject URL returns status %d", resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err = io.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+ // Verify content disposition.
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err)
+ return
+ }
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ "expires": 3600 * time.Second,
+ }
+ presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
+ }
+
+ // Generate data more than 32K
+ buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
+
+ req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
+ return
+ }
+
+ // Download the uploaded object to verify
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ }
+ newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err)
+ return
+ }
+
+ newReadBytes, err = io.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
+ return
+ }
+ newReader.Close()
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err)
+ return
+ }
+
+ function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
+ functionAll += ", " + function
+ presignExtraHeaders := map[string][]string{
+ "mysecret": {"abcxxx"},
+ }
+ args = map[string]interface{}{
+ "method": "PUT",
+ "bucketName": bucketName,
+ "objectName": objectName + "-presign-custom",
+ "expires": 3600 * time.Second,
+ "extraHeaders": presignExtraHeaders,
+ }
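+	// Presigning with extra headers is not supported with signature V2, so this call is expected to fail.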
+ _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err)
+ return
+ }
+
+ os.Remove(fileName)
+ os.Remove(fileName + "-f")
+ logSuccess(testName, functionAll, args, startTime)
+}
+
+// Test GetObject with a user-provided context.
+func testGetObjectContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(ctx, bucketName, objectName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ cancel()
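+	// GetObject is lazy: the call itself succeeds, and the canceled context only surfaces on the first read via Stat().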
+
+ r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+ return
+ }
+
+ if _, err = r.Stat(); err == nil {
+ logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+ return
+ }
+ r.Close()
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Read the data back
+ r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "object Stat call failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match: want %d, got %d", bufSize, st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "object Close() call failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test FGetObject with a user-provided context.
+func testFGetObjectContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FGetObject(ctx, bucketName, objectName, fileName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ bufSize := dataFileMap["datafile-1-MB"]
+ reader := getDataReader("datafile-1-MB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ fileName := "tempfile-context"
+ args["fileName"] = fileName
+ // Read the data back
+ err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err)
+ return
+ }
+ if err = os.Remove(fileName + "-fcontext"); err != nil {
+ logError(testName, function, args, startTime, "", "Remove file failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test GetObject with ranged reads.
+func testGetObjectRanges() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(ctx, bucketName, objectName, fileName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+
+ rng := rand.NewSource(time.Now().UnixNano())
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rng, "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rng, "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ tests := []struct {
+ start int64
+ end int64
+ }{
+ {
+ start: 1024,
+ end: 1024 + 1<<20,
+ },
+ {
+ start: 20e6,
+ end: 20e6 + 10000,
+ },
+ {
+ start: 40e6,
+ end: 40e6 + 10000,
+ },
+ {
+ start: 60e6,
+ end: 60e6 + 10000,
+ },
+ {
+ start: 80e6,
+ end: 80e6 + 10000,
+ },
+ {
+ start: 120e6,
+ end: int64(bufSize),
+ },
+ }
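+	// SetRange uses inclusive byte offsets, so each expected slice spans end-start+1 bytes.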
+ for _, test := range tests {
+ wantRC := getDataReader("datafile-129-MB")
+ io.CopyN(io.Discard, wantRC, test.start)
+ want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
+ opts := minio.GetObjectOptions{}
+ opts.SetRange(test.start, test.end)
+ args["opts"] = fmt.Sprintf("%+v", test)
+ obj, err := c.GetObject(ctx, bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err)
+ return
+ }
+ err = crcMatches(obj, want)
+ if err != nil {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test get object ACLs with GetObjectACL with custom provided context
+func testGetObjectACLContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObjectACL(ctx, bucketName, objectName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ bufSize := dataFileMap["datafile-1-MB"]
+ reader := getDataReader("datafile-1-MB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Add meta data to add a canned acl
+ metaData := map[string]string{
+ "X-Amz-Acl": "public-read-write",
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName,
+ objectName, reader, int64(bufSize),
+ minio.PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ UserMetadata: metaData,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Read the data back
+ objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName)
+ if getObjectACLErr != nil {
+ logError(testName, function, args, startTime, "", "GetObjectACL failed. ", getObjectACLErr)
+ return
+ }
+
+ s, ok := objectInfo.Metadata["X-Amz-Acl"]
+ if !ok {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil)
+ return
+ }
+
+ if len(s) != 1 {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+ return
+ }
+
+	// Do very limited testing if this is not AWS S3.
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
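+		// Backends other than AWS S3 (for example MinIO) are expected to report the default "private" ACL here.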
+ if s[0] != "private" {
+			logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got "+fmt.Sprintf("%q", s[0]), nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+ return
+ }
+
+ if s[0] != "public-read-write" {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got "+fmt.Sprintf("%q", s[0]), nil)
+ return
+ }
+
+ bufSize = dataFileMap["datafile-1-MB"]
+ reader2 := getDataReader("datafile-1-MB")
+ defer reader2.Close()
+ // Save the data
+ objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Add meta data to add a canned acl
+ metaData = map[string]string{
+ "X-Amz-Grant-Read": "id=fooread@minio.go",
+ "X-Amz-Grant-Write": "id=foowrite@minio.go",
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Read the data back
+ objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName)
+ if getObjectACLErr == nil {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr)
+ return
+ }
+
+ if len(objectInfo.Metadata) != 3 {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
+ return
+ }
+
+ s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
+ if !ok {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil)
+ return
+ }
+
+ if len(s) != 1 {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+ return
+ }
+
+ if s[0] != "fooread@minio.go" {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil)
+ return
+ }
+
+ s, ok = objectInfo.Metadata["X-Amz-Grant-Write"]
+ if !ok {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil)
+ return
+ }
+
+ if len(s) != 1 {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+ return
+ }
+
+ if s[0] != "foowrite@minio.go" {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test validates PutObject with context to see if request cancellation is honored for V2.
+func testPutObjectContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(ctx, bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "size": "",
+ "opts": "",
+ }
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, c)
+	bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ args["ctx"] = ctx
+ args["size"] = bufSize
+ defer cancel()
+
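+	// A 30-second deadline is ample for a 33 kB upload, so this call should succeed.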
+ _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+
+ defer cancel()
+ reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test GetObject with a custom context (V2).
+func testGetObjectContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(ctx, bucketName, objectName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ cancel()
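+	// The request is deferred until the first read, so the canceled context surfaces at Stat().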
+
+ r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+ return
+ }
+ if _, err = r.Stat(); err == nil {
+ logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+ return
+ }
+ r.Close()
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "object Stat call failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test FGetObject with a custom context (V2).
+func testFGetObjectContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+ reader := getDataReader("datafile-1-MB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ fileName := "tempfile-context"
+ args["fileName"] = fileName
+
+ // Read the data back
+ err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err)
+ return
+ }
+
+ if err = os.Remove(fileName + "-fcontext"); err != nil {
+ logError(testName, function, args, startTime, "", "Remove file failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test object listing with V1, V2, and V2 with metadata.
+func testListObjects() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectPrefix": "",
+ "recursive": "true",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ testObjects := []struct {
+ name string
+ storageClass string
+ }{
+ // Special characters
+ {"foo bar", "STANDARD"},
+ {"foo-%", "STANDARD"},
+ {"random-object-1", "STANDARD"},
+ {"random-object-2", "REDUCED_REDUNDANCY"},
+ }
+
+ for i, object := range testObjects {
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+ _, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
+ minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
+ if err != nil {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err)
+ return
+ }
+ }
+
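+	// Object listings are returned in lexical key order, which matches the order of testObjects above.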
+ testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) {
+ var objCursor int
+
+ // check for object name and storage-class from listing object result
+ for objInfo := range listFn(context.Background(), bucket, opts) {
+ if objInfo.Err != nil {
+ logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err)
+ return
+ }
+ if objInfo.Key != testObjects[objCursor].name {
+ logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err)
+ return
+ }
+ if objInfo.StorageClass != testObjects[objCursor].storageClass {
+				// Ignored as Gateways (Azure/GCS, etc.) won't return the storage class
+ logIgnored(testName, function, args, startTime, "ListObjects doesn't return expected storage class")
+ }
+ objCursor++
+ }
+
+ if objCursor != len(testObjects) {
+ logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New(""))
+ return
+ }
+ }
+
+ testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true})
+ testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true})
+ testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true})
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// testCors is runnable against S3 itself.
+// Just provide the env var MINIO_GO_TEST_BUCKET_CORS with a bucket that is public and WILL BE DELETED.
+// Recreate this manually each time. The minio-go SDK does not support calling
+// SetPublicBucket (put-public-access-block) on S3; otherwise we could script the whole thing.
+func testCors() {
+ ctx := context.Background()
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "SetBucketCors(bucketName, cors)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "cors": "",
+ }
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Create or reuse a bucket that will get cors settings applied to it and deleted when done
+ bucketName := os.Getenv("MINIO_GO_TEST_BUCKET_CORS")
+ if bucketName == "" {
+ bucketName = randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ }
+ args["bucketName"] = bucketName
+ defer cleanupBucket(bucketName, c)
+
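+	// Grant anonymous access so the unauthenticated CORS requests below do not need request signing.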
+ publicPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:*"],"Resource":["arn:aws:s3:::` + bucketName + `", "arn:aws:s3:::` + bucketName + `/*"]}]}`
+ err = c.SetBucketPolicy(ctx, bucketName, publicPolicy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+
+ // Upload an object for testing.
+ objectContents := `some-text-file-contents`
+ reader := strings.NewReader(objectContents)
+ bufSize := int64(len(objectContents))
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ bucketURL := c.EndpointURL().String() + "/" + bucketName + "/"
+ objectURL := bucketURL + objectName
+
+ httpClient := &http.Client{
+ Timeout: 30 * time.Second,
+ Transport: createHTTPTransport(),
+ }
+
+ errStrAccessForbidden := `AccessForbiddenCORSResponse: This CORS request is not allowed. This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted`
+ testCases := []struct {
+ name string
+
+ // Cors rules to apply
+ applyCorsRules []cors.Rule
+
+ // Outbound request info
+ method string
+ url string
+ headers map[string]string
+
+ // Wanted response
+ wantStatus int
+ wantHeaders map[string]string
+ wantBodyContains string
+ }{
+ {
+ name: "apply bucket rules",
+ applyCorsRules: []cors.Rule{
+ {
+ AllowedOrigin: []string{"https"}, // S3 documents 'https' origin, but it does not actually work, see test below.
+ AllowedMethod: []string{"PUT"},
+ AllowedHeader: []string{"*"},
+ },
+ {
+ AllowedOrigin: []string{"http://www.example1.com"},
+ AllowedMethod: []string{"PUT"},
+ AllowedHeader: []string{"*"},
+ ExposeHeader: []string{"x-amz-server-side-encryption", "x-amz-request-id"},
+ MaxAgeSeconds: 3600,
+ },
+ {
+ AllowedOrigin: []string{"http://www.example2.com"},
+ AllowedMethod: []string{"POST"},
+ AllowedHeader: []string{"X-My-Special-Header"},
+ ExposeHeader: []string{"X-AMZ-Request-ID"},
+ },
+ {
+ AllowedOrigin: []string{"http://www.example3.com"},
+ AllowedMethod: []string{"PUT"},
+ AllowedHeader: []string{"X-Example-3-Special-Header"},
+ MaxAgeSeconds: 10,
+ },
+ {
+ AllowedOrigin: []string{"*"},
+ AllowedMethod: []string{"GET"},
+ AllowedHeader: []string{"*"},
+ ExposeHeader: []string{"x-amz-request-id", "X-AMZ-server-side-encryption"},
+ MaxAgeSeconds: 3600,
+ },
+ {
+ AllowedOrigin: []string{"http://multiplemethodstest.com"},
+ AllowedMethod: []string{"POST", "PUT", "DELETE"},
+ AllowedHeader: []string{"x-abc-*", "x-def-*"},
+ },
+ {
+ AllowedOrigin: []string{"http://UPPERCASEEXAMPLE.com"},
+ AllowedMethod: []string{"DELETE"},
+ },
+ {
+ AllowedOrigin: []string{"https://*"},
+ AllowedMethod: []string{"DELETE"},
+ AllowedHeader: []string{"x-abc-*", "x-def-*"},
+ },
+ },
+ },
+ {
+ name: "preflight to object url matches example1 rule",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "PUT",
+ "Access-Control-Request-Headers": "x-another-header,x-could-be-anything",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Methods": "PUT",
+ "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ // S3 additionally sets the following headers here, MinIO follows fetch spec and does not:
+ // "Access-Control-Expose-Headers": "",
+ },
+ },
+ {
+ name: "preflight to bucket url matches example1 rule",
+ method: http.MethodOptions,
+ url: bucketURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "PUT",
+ "Access-Control-Request-Headers": "x-another-header,x-could-be-anything",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Methods": "PUT",
+ "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ },
+ },
+ {
+ name: "preflight matches example2 rule with header given",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example2.com",
+ "Access-Control-Request-Method": "POST",
+ "Access-Control-Request-Headers": "X-My-Special-Header",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example2.com",
+ "Access-Control-Allow-Methods": "POST",
+ "Access-Control-Allow-Headers": "x-my-special-header",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "",
+ "Content-Length": "0",
+ },
+ },
+ {
+ name: "preflight matches example2 rule with no header given",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example2.com",
+ "Access-Control-Request-Method": "POST",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example2.com",
+ "Access-Control-Allow-Methods": "POST",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "",
+ "Content-Length": "0",
+ },
+ },
+ {
+ name: "preflight matches wildcard origin rule",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.couldbeanything.com",
+ "Access-Control-Request-Method": "GET",
+ "Access-Control-Request-Headers": "x-custom-header,x-other-custom-header",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "*",
+ "Access-Control-Allow-Methods": "GET",
+ "Access-Control-Allow-Headers": "x-custom-header,x-other-custom-header",
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ },
+ },
+ {
+ name: "preflight does not match any rule",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.couldbeanything.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight does not match example1 rule because of method",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "POST",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "s3 processes cors rules even when request is not preflight if cors headers present test get",
+ method: http.MethodGet,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Request-Method": "PUT",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ // S3 additionally sets the following headers here, MinIO follows fetch spec and does not:
+ // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ // "Access-Control-Allow-Methods": "PUT",
+ // "Access-Control-Max-Age": "3600",
+ },
+ },
+ {
+ name: "s3 processes cors rules even when request is not preflight if cors headers present test put",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "GET",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Origin": "*",
+ "Access-Control-Expose-Headers": "x-amz-request-id,x-amz-server-side-encryption",
+ // S3 additionally sets the following headers here, MinIO follows fetch spec and does not:
+ // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ // "Access-Control-Allow-Methods": "PUT",
+ // "Access-Control-Max-Age": "3600",
+ },
+ },
+ {
+ name: "s3 processes cors rules even when request is not preflight but there is no rule match",
+ method: http.MethodGet,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "get request matches wildcard origin rule and returns cors headers",
+ method: http.MethodGet,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Origin": "*",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "x-amz-request-id,X-AMZ-server-side-encryption",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Max-Age": "3600",
+ // "Access-Control-Allow-Methods": "GET",
+ },
+ },
+ {
+ name: "head request does not match rule and returns no cors headers",
+ method: http.MethodHead,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.nomatchingdomainfound.com",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "put request with origin does not match rule and returns no cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.nomatchingdomainfound.com",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "put request with no origin does not match rule and returns no cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{},
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "preflight for delete request with wildcard origin does not match",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.notsecureexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight for delete request with wildcard https origin matches secureexample",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "https://www.secureexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Methods": "DELETE",
+ "Access-Control-Allow-Origin": "https://www.secureexample.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "preflight for delete request matches secureexample with wildcard https origin and request headers",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "https://www.secureexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Methods": "DELETE",
+ "Access-Control-Allow-Origin": "https://www.secureexample.com",
+ "Access-Control-Allow-Headers": "x-abc-1,x-abc-second,x-def-1",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "preflight for delete request matches secureexample rejected because request header does not match",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "https://www.secureexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1,x-does-not-match",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight with https origin is documented by s3 as matching but it does not match",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "https://www.securebutdoesnotmatch.com",
+ "Access-Control-Request-Method": "PUT",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "put no origin no match returns no cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{},
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "put with origin match example1 returns cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Max-Age": "3600",
+ // "Access-Control-Allow-Methods": "PUT",
+ },
+ },
+ {
+ name: "put with origin and header match example1 returns cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "x-could-be-anything": "myvalue",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Max-Age": "3600",
+ // "Access-Control-Allow-Methods": "PUT",
+ },
+ },
+ {
+ name: "put no match found returns no cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.unmatchingdomain.com",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "put with origin match example3 returns cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example3.com",
+ "X-My-Special-Header": "myvalue",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://www.example3.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Max-Age": "10",
+ // "Access-Control-Allow-Methods": "PUT",
+ },
+ },
+ {
+ name: "preflight matches example1 rule headers case is incorrect",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "PUT",
+				// The Fetch standard guarantees that these are sent lowercase; here we test what happens when they are not.
+ "Access-Control-Request-Headers": "X-Another-Header,X-Could-Be-Anything",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Methods": "PUT",
+ "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ },
+ },
+ {
+ name: "preflight matches example1 rule headers are not sorted",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "PUT",
+				// The Fetch standard guarantees that these are sorted; test what happens when they are not.
+ "Access-Control-Request-Headers": "a-customer-header,b-should-be-last",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Methods": "PUT",
+ "Access-Control-Allow-Headers": "a-customer-header,b-should-be-last",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ },
+ },
+ {
+ name: "preflight with case sensitivity in origin matches uppercase",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://UPPERCASEEXAMPLE.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Methods": "DELETE",
+ "Access-Control-Allow-Origin": "http://UPPERCASEEXAMPLE.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "preflight with case sensitivity in origin does not match when lowercase",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://uppercaseexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight match upper case with unknown header but no header restrictions",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://UPPERCASEEXAMPLE.com",
+ "Access-Control-Request-Method": "DELETE",
+ "Access-Control-Request-Headers": "x-unknown-1",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight for delete request matches multiplemethodstest.com origin and request headers",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://multiplemethodstest.com",
+ "Access-Control-Request-Method": "DELETE",
+ "Access-Control-Request-Headers": "x-abc-1",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://multiplemethodstest.com",
+ "Access-Control-Allow-Headers": "x-abc-1",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ // S3 returns POST, PUT, DELETE here, MinIO does not as spec does not require it.
+ // "Access-Control-Allow-Methods": "DELETE",
+ },
+ },
+ {
+ name: "delete request goes ahead because cors is only for browsers and does not block on the server side",
+ method: http.MethodDelete,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.justrandom.com",
+ },
+ wantStatus: http.StatusNoContent,
+ },
+ }
+
+ for i, test := range testCases {
+ testName := fmt.Sprintf("%s_%d_%s", testName, i+1, strings.ReplaceAll(test.name, " ", "_"))
+
+ // Apply the CORS rules
+ if test.applyCorsRules != nil {
+ corsConfig := &cors.Config{
+ CORSRules: test.applyCorsRules,
+ }
+ err = c.SetBucketCors(ctx, bucketName, corsConfig)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
+ return
+ }
+ }
+
+ // Make request
+ if test.method != "" && test.url != "" {
+ req, err := http.NewRequestWithContext(ctx, test.method, test.url, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request creation failed", err)
+ return
+ }
+ req.Header.Set("User-Agent", "MinIO-go-FunctionalTest/"+appVersion)
+
+ for k, v := range test.headers {
+ req.Header.Set(k, v)
+ }
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request failed", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ // Check returned status code
+ if resp.StatusCode != test.wantStatus {
+ errStr := fmt.Sprintf(" incorrect status code in response, want: %d, got: %d", test.wantStatus, resp.StatusCode)
+ logError(testName, function, args, startTime, "", errStr, nil)
+ return
+ }
+
+ // Check returned body
+ if test.wantBodyContains != "" {
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to read response body", err)
+ return
+ }
+ if !strings.Contains(string(body), test.wantBodyContains) {
+ errStr := fmt.Sprintf(" incorrect body in response, want: %s, in got: %s", test.wantBodyContains, string(body))
+ logError(testName, function, args, startTime, "", errStr, nil)
+ return
+ }
+ }
+
+ // Check returned response headers
+ for k, v := range test.wantHeaders {
+ gotVal := resp.Header.Get(k)
+ if k == "Access-Control-Expose-Headers" {
+ // MinIO returns this in canonical form, S3 does not.
+ gotVal = strings.ToLower(gotVal)
+ v = strings.ToLower(v)
+ }
+ // Remove all spaces, S3 adds spaces after CSV values in headers, MinIO does not.
+ gotVal = strings.ReplaceAll(gotVal, " ", "")
+ if gotVal != v {
+ errStr := fmt.Sprintf(" incorrect header in response, want: %s: '%s', got: '%s'", k, v, gotVal)
+ logError(testName, function, args, startTime, "", errStr, nil)
+ return
+ }
+ }
+ }
+ logSuccess(testName, function, args, startTime)
+ }
+ logSuccess(testName, function, args, startTime)
+}
+
+func testCorsSetGetDelete() {
+ ctx := context.Background()
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "SetBucketCors(bucketName, cors)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "cors": "",
+ }
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, c)
+
+ // Set the CORS rules on the new bucket
+ corsRules := []cors.Rule{
+ {
+ AllowedOrigin: []string{"http://www.example1.com"},
+ AllowedMethod: []string{"PUT"},
+ AllowedHeader: []string{"*"},
+ },
+ {
+ AllowedOrigin: []string{"http://www.example2.com"},
+ AllowedMethod: []string{"POST"},
+ AllowedHeader: []string{"X-My-Special-Header"},
+ },
+ {
+ AllowedOrigin: []string{"*"},
+ AllowedMethod: []string{"GET"},
+ AllowedHeader: []string{"*"},
+ },
+ }
+ corsConfig := cors.NewConfig(corsRules)
+ err = c.SetBucketCors(ctx, bucketName, corsConfig)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
+ return
+ }
+
+ // Get the rules and check they match what we set
+ gotCorsConfig, err := c.GetBucketCors(ctx, bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketCors failed", err)
+ return
+ }
+ if !reflect.DeepEqual(corsConfig, gotCorsConfig) {
+ msg := fmt.Sprintf("GetBucketCors returned unexpected rules, expected: %+v, got: %+v", corsConfig, gotCorsConfig)
+ logError(testName, function, args, startTime, "", msg, nil)
+ return
+ }
+
+ // Delete the rules
+ err = c.SetBucketCors(ctx, bucketName, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketCors failed to delete", err)
+ return
+ }
+
+ // Get the rules and check they are now empty
+ gotCorsConfig, err = c.GetBucketCors(ctx, bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketCors failed", err)
+ return
+ }
+ if gotCorsConfig != nil {
+ logError(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test deleting multiple objects with object retention set in Governance mode
+func testRemoveObjects() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(bucketName, objectsCh, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectPrefix": "",
+ "recursive": "true",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error uploading object", err)
+ return
+ }
+
+ // Replace with smaller...
+ bufSize = dataFileMap["datafile-10-kB"]
+ reader = getDataReader("datafile-10-kB")
+ defer reader.Close()
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+ if err != nil {
+		logError(testName, function, args, startTime, "", "Error uploading object", err)
+		return
+	}
+
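+	// Lock the object under Governance-mode retention until 2030 so that the
+	// first RemoveObjects call below, made without governance bypass, is
+	// expected to fail.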
+ t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
+ m := minio.RetentionMode(minio.Governance)
+ opts := minio.PutObjectRetentionOptions{
+ GovernanceBypass: false,
+ RetainUntilDate: &t,
+ Mode: &m,
+ }
+ err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error setting retention", err)
+ return
+ }
+
+ objectsCh := make(chan minio.ObjectInfo)
+ // Send object names that are needed to be removed to objectsCh
+ go func() {
+ defer close(objectsCh)
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
+ if object.Err != nil {
+ logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
+ return
+ }
+ objectsCh <- object
+ }
+ }()
+
+ for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
+ // Error is expected here because Retention is set on the object
+ // and RemoveObjects is called without Bypass Governance
+ if rErr.Err == nil {
+ logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
+ return
+ }
+ }
+
+ objectsCh1 := make(chan minio.ObjectInfo)
+
+ // Send object names that are needed to be removed to objectsCh
+ go func() {
+ defer close(objectsCh1)
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
+ if object.Err != nil {
+ logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
+ return
+ }
+ objectsCh1 <- object
+ }
+ }()
+
+ opts1 := minio.RemoveObjectsOptions{
+ GovernanceBypass: true,
+ }
+
+ for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) {
+ // Error is not expected here because Retention is set on the object
+ // and RemoveObjects is called with Bypass Governance
+ logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test get bucket tags
+func testGetBucketTagging() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetBucketTagging(bucketName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
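+	// A freshly created bucket has no tag set, so the server is expected to
+	// return a NoSuchTagSet error here.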
+ _, err = c.GetBucketTagging(context.Background(), bucketName)
+ if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+		logError(testName, function, args, startTime, "", "Invalid error from server", err)
+ return
+ }
+
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test setting tags for bucket
+func testSetBucketTagging() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "SetBucketTagging(bucketName, tags)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "tags": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ _, err = c.GetBucketTagging(context.Background(), bucketName)
+ if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ logError(testName, function, args, startTime, "", "Invalid error from server", err)
+ return
+ }
+
+ tag := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+	t, err := tags.MapToBucketTags(map[string]string{
+		tag: expectedValue,
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err)
+		return
+	}
+	args["tags"] = t.String()
+
+ err = c.SetBucketTagging(context.Background(), bucketName, t)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketTagging failed", err)
+ return
+ }
+
+ tagging, err := c.GetBucketTagging(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketTagging failed", err)
+ return
+ }
+
+ if tagging.ToMap()[tag] != expectedValue {
+ msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue)
+ logError(testName, function, args, startTime, "", msg, err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test removing bucket tags
+func testRemoveBucketTagging() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveBucketTagging(bucketName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ _, err = c.GetBucketTagging(context.Background(), bucketName)
+ if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ logError(testName, function, args, startTime, "", "Invalid error from server", err)
+ return
+ }
+
+ tag := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ t, err := tags.MapToBucketTags(map[string]string{
+ tag: expectedValue,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err)
+ return
+ }
+
+ err = c.SetBucketTagging(context.Background(), bucketName, t)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketTagging failed", err)
+ return
+ }
+
+ tagging, err := c.GetBucketTagging(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketTagging failed", err)
+ return
+ }
+
+ if tagging.ToMap()[tag] != expectedValue {
+ msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue)
+ logError(testName, function, args, startTime, "", msg, err)
+ return
+ }
+
+ err = c.RemoveBucketTagging(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveBucketTagging failed", err)
+ return
+ }
+
+ _, err = c.GetBucketTagging(context.Background(), bucketName)
+ if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ logError(testName, function, args, startTime, "", "Invalid error from server", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// mustParseBool converts a string to a bool, returning false if parsing fails.
+func mustParseBool(str string) bool {
+ b, err := strconv.ParseBool(str)
+ if err != nil {
+ return false
+ }
+ return b
+}
+
+func main() {
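+	// Log as JSON to stdout at Info level, dropping the default message key and
+	// any empty-valued attributes so each log line carries only the populated
+	// test fields.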
+ slog.SetDefault(slog.New(slog.NewJSONHandler(
+ os.Stdout,
+ &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
+ if a.Key == slog.MessageKey || a.Value.String() == "" {
+ return slog.Attr{}
+ }
+
+ return a
+ },
+ },
+ )))
+
+ tls := mustParseBool(os.Getenv(enableHTTPS))
+ kms := mustParseBool(os.Getenv(enableKMS))
+ if os.Getenv(enableKMS) == "" {
+ // Default to KMS tests.
+ kms = true
+ }
+
+ // execute tests
+ if isFullMode() {
+ testCorsSetGetDelete()
+ testCors()
+ testListMultipartUpload()
+ testGetObjectAttributes()
+ testGetObjectAttributesErrorCases()
+ testMakeBucketErrorV2()
+ testGetObjectClosedTwiceV2()
+ testFPutObjectV2()
+ testMakeBucketRegionsV2()
+ testGetObjectReadSeekFunctionalV2()
+ testGetObjectReadAtFunctionalV2()
+ testGetObjectRanges()
+ testCopyObjectV2()
+ testFunctionalV2()
+ testComposeObjectErrorCasesV2()
+ testCompose10KSourcesV2()
+ testUserMetadataCopyingV2()
+ testPutObjectWithChecksums()
+ testPutObjectWithTrailingChecksums()
+ testPutMultipartObjectWithChecksums(false)
+ testPutMultipartObjectWithChecksums(true)
+ testPutObject0ByteV2()
+ testPutObjectNoLengthV2()
+ testPutObjectsUnknownV2()
+ testGetObjectContextV2()
+ testFPutObjectContextV2()
+ testFGetObjectContextV2()
+ testPutObjectContextV2()
+ testPutObjectWithVersioning()
+ testMakeBucketError()
+ testMakeBucketRegions()
+ testPutObjectWithMetadata()
+ testPutObjectReadAt()
+ testPutObjectStreaming()
+ testGetObjectSeekEnd()
+ testGetObjectClosedTwice()
+ testGetObjectS3Zip()
+ testRemoveMultipleObjects()
+ testRemoveMultipleObjectsWithResult()
+ testFPutObjectMultipart()
+ testFPutObject()
+ testGetObjectReadSeekFunctional()
+ testGetObjectReadAtFunctional()
+ testGetObjectReadAtWhenEOFWasReached()
+ testPresignedPostPolicy()
+ testCopyObject()
+ testComposeObjectErrorCases()
+ testCompose10KSources()
+ testUserMetadataCopying()
+ testBucketNotification()
+ testFunctional()
+ testGetObjectModified()
+ testPutObjectUploadSeekedObject()
+ testGetObjectContext()
+ testFPutObjectContext()
+ testFGetObjectContext()
+ testGetObjectACLContext()
+ testPutObjectContext()
+ testStorageClassMetadataPutObject()
+ testStorageClassInvalidMetadataPutObject()
+ testStorageClassMetadataCopyObject()
+ testPutObjectWithContentLanguage()
+ testListObjects()
+ testRemoveObjects()
+ testListObjectVersions()
+ testStatObjectWithVersioning()
+ testGetObjectWithVersioning()
+ testCopyObjectWithVersioning()
+ testConcurrentCopyObjectWithVersioning()
+ testComposeObjectWithVersioning()
+ testRemoveObjectWithVersioning()
+ testRemoveObjectsWithVersioning()
+ testObjectTaggingWithVersioning()
+ testTrailingChecksums()
+ testPutObjectWithAutomaticChecksums()
+ testGetBucketTagging()
+ testSetBucketTagging()
+ testRemoveBucketTagging()
+
+ // SSE-C tests will only work over TLS connection.
+ if tls {
+ testGetObjectAttributesSSECEncryption()
+ testSSECEncryptionPutGet()
+ testSSECEncryptionFPut()
+ testSSECEncryptedGetObjectReadAtFunctional()
+ testSSECEncryptedGetObjectReadSeekFunctional()
+ testEncryptedCopyObjectV2()
+ testEncryptedSSECToSSECCopyObject()
+ testEncryptedSSECToUnencryptedCopyObject()
+ testUnencryptedToSSECCopyObject()
+ testUnencryptedToUnencryptedCopyObject()
+ testEncryptedEmptyObject()
+ testDecryptedCopyObject()
+ testSSECEncryptedToSSECCopyObjectPart()
+ testSSECMultipartEncryptedToSSECCopyObjectPart()
+ testSSECEncryptedToUnencryptedCopyPart()
+ testUnencryptedToSSECCopyObjectPart()
+ testUnencryptedToUnencryptedCopyPart()
+ testEncryptedSSECToSSES3CopyObject()
+ testEncryptedSSES3ToSSECCopyObject()
+ testSSECEncryptedToSSES3CopyObjectPart()
+ testSSES3EncryptedToSSECCopyObjectPart()
+ }
+
+ // KMS tests
+ if kms {
+ testSSES3EncryptionPutGet()
+ testSSES3EncryptionFPut()
+ testSSES3EncryptedGetObjectReadAtFunctional()
+ testSSES3EncryptedGetObjectReadSeekFunctional()
+ testEncryptedSSES3ToSSES3CopyObject()
+ testEncryptedSSES3ToUnencryptedCopyObject()
+ testUnencryptedToSSES3CopyObject()
+ testUnencryptedToSSES3CopyObjectPart()
+ testSSES3EncryptedToUnencryptedCopyPart()
+ testSSES3EncryptedToSSES3CopyObjectPart()
+ }
+ } else {
+ testFunctional()
+ testFunctionalV2()
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go
new file mode 100644
index 000000000..07bc7dbcf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go
@@ -0,0 +1,101 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "io"
+ "sync"
+)
+
+// hookReader hooks an additional reader into the source stream. It is
+// useful for implementing progress bars: the second (hook) reader is
+// notified of the exact number of bytes read from the primary
+// source on each Read operation.
+type hookReader struct {
+ mu sync.RWMutex
+ source io.Reader
+ hook io.Reader
+}
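+
+// Illustrative sketch only (byteCounter and dataReader are hypothetical
+// stand-ins for a counting io.Reader and the actual data source): a hook
+// reader pairs the two so the counter sees every byte read from the source.
+//
+//	counter := &byteCounter{}
+//	r := newHook(dataReader, counter)
+//	_, _ = io.Copy(io.Discard, r) // counter now reflects bytes read from dataReader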
+
+// Seek implements io.Seeker. It seeks the source first and, if the hook
+// also implements io.Seeker, seeks the hook to the same position.
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
+ hr.mu.Lock()
+ defer hr.mu.Unlock()
+
+	// If the source implements io.Seeker, use it.
+ sourceSeeker, ok := hr.source.(io.Seeker)
+ if ok {
+ n, err = sourceSeeker.Seek(offset, whence)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ if hr.hook != nil {
+		// If the hook also implements io.Seeker, seek it as well.
+ hookSeeker, ok := hr.hook.(io.Seeker)
+ if ok {
+ var m int64
+ m, err = hookSeeker.Seek(offset, whence)
+ if err != nil {
+ return 0, err
+ }
+ if n != m {
+ return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
+ }
+ }
+ }
+
+ return n, nil
+}
+
+// Read implements io.Reader. It always reads from the source; the number of
+// bytes read, 'n', is then reported through the hook. Errors other than
+// io.EOF are propagated to the caller.
+func (hr *hookReader) Read(b []byte) (n int, err error) {
+ hr.mu.RLock()
+ defer hr.mu.RUnlock()
+
+ n, err = hr.source.Read(b)
+ if err != nil && err != io.EOF {
+ return n, err
+ }
+ if hr.hook != nil {
+ // Progress the hook with the total read bytes from the source.
+ if _, herr := hr.hook.Read(b[:n]); herr != nil {
+ if herr != io.EOF {
+ return n, herr
+ }
+ }
+ }
+ return n, err
+}
+
+// newHook returns an io.Reader that wraps the source in a hookReader,
+// reporting the data read from the source to the hook.
+func newHook(source, hook io.Reader) io.Reader {
+ if hook == nil {
+ return &hookReader{source: source}
+ }
+ return &hookReader{
+ source: source,
+ hook: hook,
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go
new file mode 100644
index 000000000..e71864ee9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go
@@ -0,0 +1,91 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2024 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cors
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/dustin/go-humanize"
+)
+
+const defaultXMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
+
+// Config is the container for a CORS configuration for a bucket.
+type Config struct {
+ XMLNS string `xml:"xmlns,attr,omitempty"`
+ XMLName xml.Name `xml:"CORSConfiguration"`
+ CORSRules []Rule `xml:"CORSRule"`
+}
+
+// Rule is a single rule in a CORS configuration.
+type Rule struct {
+ AllowedHeader []string `xml:"AllowedHeader,omitempty"`
+ AllowedMethod []string `xml:"AllowedMethod,omitempty"`
+ AllowedOrigin []string `xml:"AllowedOrigin,omitempty"`
+ ExposeHeader []string `xml:"ExposeHeader,omitempty"`
+ ID string `xml:"ID,omitempty"`
+ MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty"`
+}
+
+// NewConfig creates a new CORS configuration with the given rules.
+func NewConfig(rules []Rule) *Config {
+ return &Config{
+ XMLNS: defaultXMLNS,
+ XMLName: xml.Name{
+ Local: "CORSConfiguration",
+ Space: defaultXMLNS,
+ },
+ CORSRules: rules,
+ }
+}
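+
+// For illustration only (the rule values below are arbitrary examples, not
+// defaults), a config allowing GET from any origin can be built and
+// serialized like this:
+//
+//	cfg := NewConfig([]Rule{{
+//		AllowedOrigin: []string{"*"},
+//		AllowedMethod: []string{"GET"},
+//	}})
+//	xmlBody, err := cfg.ToXML()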
+
+// ParseBucketCorsConfig parses a CORS configuration in XML from an io.Reader.
+func ParseBucketCorsConfig(reader io.Reader) (*Config, error) {
+ var c Config
+
+ // Max size of cors document is 64KiB according to https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html
+ // This limiter is just for safety so has a max of 128KiB
+ err := xml.NewDecoder(io.LimitReader(reader, 128*humanize.KiByte)).Decode(&c)
+ if err != nil {
+ return nil, fmt.Errorf("decoding xml: %w", err)
+ }
+ if c.XMLNS == "" {
+ c.XMLNS = defaultXMLNS
+ }
+ for i, rule := range c.CORSRules {
+ for j, method := range rule.AllowedMethod {
+ c.CORSRules[i].AllowedMethod[j] = strings.ToUpper(method)
+ }
+ }
+ return &c, nil
+}
+
+// ToXML marshals the CORS configuration to XML.
+func (c Config) ToXML() ([]byte, error) {
+ if c.XMLNS == "" {
+ c.XMLNS = defaultXMLNS
+ }
+ data, err := xml.Marshal(&c)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling xml: %w", err)
+ }
+ return append([]byte(xml.Header), data...), nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
new file mode 100644
index 000000000..d245bc07a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -0,0 +1,243 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/xml"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/signer"
+)
+
+// AssumeRoleResponse contains the result of successful AssumeRole request.
+type AssumeRoleResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`
+
+ Result AssumeRoleResult `xml:"AssumeRoleResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// AssumeRoleResult - Contains the response to a successful AssumeRole
+// request, including temporary credentials that can be used to make
+// MinIO API requests.
+type AssumeRoleResult struct {
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:",omitempty"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize int `xml:",omitempty"`
+}
+
+// An STSAssumeRole retrieves credentials from the MinIO STS service, and keeps
+// track of whether those credentials have expired.
+type STSAssumeRole struct {
+ Expiry
+
+ // Required http Client to use when connecting to MinIO STS service.
+ Client *http.Client
+
+ // STS endpoint to fetch STS credentials.
+ STSEndpoint string
+
+ // various options for this request.
+ Options STSAssumeRoleOptions
+}
+
+// STSAssumeRoleOptions collection of various input options
+// to obtain AssumeRole credentials.
+type STSAssumeRoleOptions struct {
+ // Mandatory inputs.
+ AccessKey string
+ SecretKey string
+
+ SessionToken string // Optional if the first request is made with temporary credentials.
+ Policy string // Optional to assign a policy to the assumed role
+
+ Location string // Optional commonly needed with AWS STS.
+ DurationSeconds int // Optional defaults to 1 hour.
+
+ // Optional only valid if using with AWS STS
+ RoleARN string
+ RoleSessionName string
+ ExternalID string
+}
+
+// NewSTSAssumeRole returns a pointer to a new
+// Credentials object wrapping the STSAssumeRole.
+func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
+ if stsEndpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if opts.AccessKey == "" || opts.SecretKey == "" {
+ return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
+ }
+ return New(&STSAssumeRole{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ STSEndpoint: stsEndpoint,
+ Options: opts,
+ }), nil
+}
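+
+// Illustrative usage sketch; the endpoint and keys below are placeholders,
+// not real values:
+//
+//	creds, err := NewSTSAssumeRole("https://sts.example.com", STSAssumeRoleOptions{
+//		AccessKey:       "myAccessKey",
+//		SecretKey:       "mySecretKey",
+//		DurationSeconds: 7200,
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	val, err := creds.Get()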
+
+const defaultDurationSeconds = 3600
+
+// closeResponse closes a non-nil response after draining any remaining data
+// from its body. It is a convenient wrapper around this common cleanup.
+//
+// Draining and closing the body allows the Go http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+ // Callers should close resp.Body when done reading from it.
+ // If resp.Body is not closed, the Client's underlying RoundTripper
+ // (typically Transport) may not be able to re-use a persistent TCP
+ // connection to the server for a subsequent "keep-alive" request.
+ if resp != nil && resp.Body != nil {
+ // Drain any remaining Body and then close the connection.
+		// Without draining it first, closing the connection would prevent
+		// re-using the same connection for future requests.
+ // - http://stackoverflow.com/a/17961593/4465767
+ io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ }
+}
+
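+// getAssumeRoleCredentials posts a Signature V4 signed AssumeRole form request
+// to the given STS endpoint and decodes the XML response, converting non-200
+// responses into an error.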
+func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) {
+ v := url.Values{}
+ v.Set("Action", "AssumeRole")
+ v.Set("Version", STSVersion)
+ if opts.RoleARN != "" {
+ v.Set("RoleArn", opts.RoleARN)
+ }
+ if opts.RoleSessionName != "" {
+ v.Set("RoleSessionName", opts.RoleSessionName)
+ }
+ if opts.DurationSeconds > defaultDurationSeconds {
+ v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds))
+ } else {
+ v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds))
+ }
+ if opts.Policy != "" {
+ v.Set("Policy", opts.Policy)
+ }
+ if opts.ExternalID != "" {
+ v.Set("ExternalId", opts.ExternalID)
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ u.Path = "/"
+
+ postBody := strings.NewReader(v.Encode())
+ hash := sha256.New()
+ if _, err = io.Copy(hash, postBody); err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ postBody.Seek(0, 0)
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), postBody)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
+ if opts.SessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", opts.SessionToken)
+ }
+ req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
+
+ resp, err := clnt.Do(req)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ defer closeResponse(resp)
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleResponse{}, errResp
+ }
+
+ a := AssumeRoleResponse{}
+ if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ return a, nil
+}
+
+// Retrieve retrieves credentials from the MinIO service.
+// Error will be returned if the request fails.
+func (m *STSAssumeRole) Retrieve() (Value, error) {
+ a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Expiry window is set to 10secs.
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: a.Result.Credentials.AccessKey,
+ SecretAccessKey: a.Result.Credentials.SecretKey,
+ SessionToken: a.Result.Credentials.SessionToken,
+ Expiration: a.Result.Credentials.Expiration,
+ SignerType: SignatureV4,
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
new file mode 100644
index 000000000..ddccfb173
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
@@ -0,0 +1,88 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, Chain's
+// Retrieve() returns the anonymous (no credentials) value.
+//
+// If a Provider is found that returns a valid credentials Value, Chain
+// caches that Provider for all calls to IsExpired(), until Retrieve is
+// called again after IsExpired() is true.
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvAWSS3{},
+// &credentials.EnvMinio{},
+// })
+//
+// // Usage of ChainCredentials.
+// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
+// if err != nil {
+// log.Fatalln(err)
+// }
+type Chain struct {
+ Providers []Provider
+ curr Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return New(&Chain{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or the anonymous (no credentials)
+// value if no credentials provider returned any value.
+//
+// If a provider is found with credentials, it will be cached and any calls
+// to IsExpired() will return the expired state of the cached provider.
+func (c *Chain) Retrieve() (Value, error) {
+ for _, p := range c.Providers {
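+		// Provider errors are intentionally discarded here so that the chain
+		// can fall through to the next provider in the list.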
+ creds, _ := p.Retrieve()
+ // Always prioritize non-anonymous providers, if any.
+ if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
+ continue
+ }
+ c.curr = p
+ return creds, nil
+ }
+	// At this point we have exhausted all the providers and
+	// are left without any credentials, so return anonymous credentials.
+ return Value{
+ SignerType: SignatureAnonymous,
+ }, nil
+}
+
+// IsExpired returns the expired state of the currently cached provider
+// if there is one. If there is no current provider, true is returned.
+func (c *Chain) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
new file mode 100644
index 000000000..d793c9e0e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
+{
+ "version": "8",
+ "hosts": {
+ "play": {
+ "url": "https://play.min.io",
+ "accessKey": "Q3AM3UQ867SPQQA43P2F",
+ "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+ "api": "S3v2"
+ },
+ "s3": {
+ "url": "https://s3.amazonaws.com",
+ "accessKey": "accessKey",
+ "secretKey": "secret",
+ "api": "S3v4"
+ }
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
new file mode 100644
index 000000000..68f9b3815
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
@@ -0,0 +1,196 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+const (
+ // STSVersion sts version string
+ STSVersion = "2011-06-15"
+
+	// Fraction of a credential's lifetime after which it is treated as
+	// expired when no explicit expiry window is given (see SetExpiration).
+ defaultExpiryWindow = 0.8
+)
+
+// A Value is the S3 credentials value for individual credential fields.
+type Value struct {
+ // S3 Access key ID
+ AccessKeyID string
+
+ // S3 Secret Access Key
+ SecretAccessKey string
+
+ // S3 Session Token
+ SessionToken string
+
+ // Expiration of this credentials - null means no expiration associated
+ Expiration time.Time
+
+ // Signature Type.
+ SignerType SignatureType
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or was empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//
+// type IAMCredentialProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
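+//
+// For illustration (arbitrary values): SetExpiration(time.Now().Add(time.Hour),
+// 10*time.Second) makes IsExpired report true ten seconds before the hour is
+// up, while a negative window falls back to the default cutoff at 80% of the
+// credential's lifetime.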
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ cut := window
+ if cut < 0 {
+ expireIn := expiration.Sub(e.CurrentTime())
+ cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
+ }
+ e.expiration = expiration.Add(-cut)
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// Credentials - A container for synchronous safe retrieval of credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ sync.Mutex
+
+ creds Value
+ forceRefresh bool
+ provider Provider
+}
+
+// New returns a pointer to a new Credentials with the provider set.
+func New(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ if c == nil {
+ return Value{}, nil
+ }
+
+ c.Lock()
+ defer c.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.Lock()
+ defer c.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be refreshed.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
new file mode 100644
index 000000000..afbfad559
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
@@ -0,0 +1,7 @@
+{
+ "Version": 1,
+ "SessionToken": "token",
+ "AccessKeyId": "accessKey",
+ "SecretAccessKey": "secret",
+ "Expiration": "9999-04-27T16:02:25.000Z"
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
new file mode 100644
index 000000000..e2dc1bfec
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
@@ -0,0 +1,15 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
+
+[with_process]
+credential_process = /bin/cat credentials.json
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
new file mode 100644
index 000000000..fbfb10549
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
@@ -0,0 +1,60 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package credentials provides credential retrieval and management
+// for S3 compatible object storage.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when credentials have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := NewEnvAWS()
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := NewIAM("")
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+// # Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider, just
+// create a type which satisfies the Provider interface and pass it to the
+// New function.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := New(&MyProvider{})
+// credValue, err := creds.Get()
+package credentials
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
new file mode 100644
index 000000000..b6e60d0e1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Secret Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+ retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+ return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvAWS) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvAWS) IsExpired() bool {
+ return !e.retrieved
+}
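+
+// Example usage (a minimal sketch using the helpers above):
+//
+//	creds := NewEnvAWS()
+//	v, err := creds.Get() // reads AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY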
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
new file mode 100644
index 000000000..5bfeab140
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
@@ -0,0 +1,68 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+// * Access Key ID: MINIO_ROOT_USER.
+// * Secret Access Key: MINIO_ROOT_PASSWORD.
+type EnvMinio struct {
+ retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+ return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("MINIO_ROOT_USER")
+ secret := os.Getenv("MINIO_ROOT_PASSWORD")
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ id = os.Getenv("MINIO_ACCESS_KEY")
+ secret = os.Getenv("MINIO_SECRET_KEY")
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvMinio) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 000000000..07a9c2f09
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,95 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+)
+
+// ErrorResponse - Is the typed error returned.
+// ErrorResponse struct should be comparable since it is compared inside
+// golang http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"`
+ STSError struct {
+ Type string `xml:"Type"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+ } `xml:"Error"`
+ RequestID string `xml:"RequestId"`
+}
+
+// Error - Is the typed error returned by all API operations.
+type Error struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ Resource string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // Region where the bucket is located. This header is returned
+ // only in HEAD bucket and ListObjects response.
+ Region string
+
+ // Captures the server string returned in response header.
+ Server string
+
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+}
+
+// Error - Returns S3 error string.
+func (e Error) Error() string {
+ if e.Message == "" {
+ return fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return e.Message
+}
+
+// Error - Returns STS error string.
+func (e ErrorResponse) Error() string {
+ if e.STSError.Message == "" {
+ return fmt.Sprintf("Error response code %s.", e.STSError.Code)
+ }
+ return e.STSError.Message
+}
+
+// xmlDecoder provides decoded value in xml.
+func xmlDecoder(body io.Reader, v interface{}) error {
+ d := xml.NewDecoder(body)
+ return d.Decode(v)
+}
+
+// xmlDecodeAndBody reads the whole body up to 1MB and
+// tries to XML decode it into v.
+// The body that was read and any error from reading or decoding is returned.
+func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
+ // read the whole body (up to 1MB)
+ const maxBodyLength = 1 << 20
+ body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ if err != nil {
+ return nil, err
+ }
+ return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 000000000..541e1a72f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,158 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "encoding/json"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/go-ini/ini"
+)
+
+// An externalProcessCredentials stores the output of a credential_process
+type externalProcessCredentials struct {
+ Version int
+ SessionToken string
+ AccessKeyID string `json:"AccessKeyId"`
+ SecretAccessKey string
+ Expiration time.Time
+}
+
+// A FileAWSCredentials retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type FileAWSCredentials struct {
+ Expiry
+
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileAWSCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewFileAWSCredentials(filename, profile string) *Credentials {
+ return New(&FileAWSCredentials{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *FileAWSCredentials) Retrieve() (Value, error) {
+ if p.Filename == "" {
+ p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+ if p.Filename == "" {
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+ }
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+ }
+
+ p.retrieved = false
+
+ iniProfile, err := loadProfile(p.Filename, p.Profile)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Default to empty string if not found.
+ id := iniProfile.Key("aws_access_key_id")
+ // Default to empty string if not found.
+ secret := iniProfile.Key("aws_secret_access_key")
+ // Default to empty string if not found.
+ token := iniProfile.Key("aws_session_token")
+
+ // If credential_process is defined, obtain credentials by executing
+ // the external process
+ credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String())
+ if credentialProcess != "" {
+ args := strings.Fields(credentialProcess)
+ if len(args) <= 1 {
+ return Value{}, errors.New("invalid credential process args")
+ }
+ cmd := exec.Command(args[0], args[1:]...)
+ out, err := cmd.Output()
+ if err != nil {
+ return Value{}, err
+ }
+ var externalProcessCredentials externalProcessCredentials
+ err = json.Unmarshal([]byte(out), &externalProcessCredentials)
+ if err != nil {
+ return Value{}, err
+ }
+ p.retrieved = true
+ p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow)
+ return Value{
+ AccessKeyID: externalProcessCredentials.AccessKeyID,
+ SecretAccessKey: externalProcessCredentials.SecretAccessKey,
+ SessionToken: externalProcessCredentials.SessionToken,
+ Expiration: externalProcessCredentials.Expiration,
+ SignerType: SignatureV4,
+ }, nil
+ }
+ p.retrieved = true
+ return Value{
+ AccessKeyID: id.String(),
+ SecretAccessKey: secret.String(),
+ SessionToken: token.String(),
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// loadProfile loads from the file pointed to by shared credentials filename for profile.
+// The credentials retrieved from the profile will be returned or error. Error will be
+// returned if it fails to read from the file, or the data is invalid.
+func loadProfile(filename, profile string) (*ini.Section, error) {
+ config, err := ini.Load(filename)
+ if err != nil {
+ return nil, err
+ }
+ iniProfile, err := config.GetSection(profile)
+ if err != nil {
+ return nil, err
+ }
+ return iniProfile, nil
+}
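+
+// Example usage (a minimal sketch): NewFileAWSCredentials("", "") falls back to
+// AWS_SHARED_CREDENTIALS_FILE / AWS_PROFILE and then to $HOME/.aws/credentials
+// with the "default" profile, as implemented in Retrieve above.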
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
new file mode 100644
index 000000000..750e26ffa
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -0,0 +1,137 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/goccy/go-json"
+)
+
+// A FileMinioClient retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Configuration file example: $HOME/.mc/config.json
+type FileMinioClient struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.mc/config.json"
+ // Windows: "%USERALIAS%\mc\config.json"
+ Filename string
+
+ // MinIO Alias to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "MINIO_ALIAS" or "s3" if
+ // environment variable is also not set.
+ Alias string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileMinioClient returns a pointer to a new Credentials object
+// wrapping the Alias file provider.
+func NewFileMinioClient(filename, alias string) *Credentials {
+ return New(&FileMinioClient{
+ Filename: filename,
+ Alias: alias,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *FileMinioClient) Retrieve() (Value, error) {
+ if p.Filename == "" {
+ if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
+ p.Filename = value
+ } else {
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.Filename = filepath.Join(homeDir, ".mc", "config.json")
+ if runtime.GOOS == "windows" {
+ p.Filename = filepath.Join(homeDir, "mc", "config.json")
+ }
+ }
+ }
+
+ if p.Alias == "" {
+ p.Alias = os.Getenv("MINIO_ALIAS")
+ if p.Alias == "" {
+ p.Alias = "s3"
+ }
+ }
+
+ p.retrieved = false
+
+ hostCfg, err := loadAlias(p.Filename, p.Alias)
+ if err != nil {
+ return Value{}, err
+ }
+
+ p.retrieved = true
+ return Value{
+ AccessKeyID: hostCfg.AccessKey,
+ SecretAccessKey: hostCfg.SecretKey,
+ SignerType: parseSignatureType(hostCfg.API),
+ }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileMinioClient) IsExpired() bool {
+ return !p.retrieved
+}
+
+// hostConfig configuration of a host.
+type hostConfig struct {
+ URL string `json:"url"`
+ AccessKey string `json:"accessKey"`
+ SecretKey string `json:"secretKey"`
+ API string `json:"api"`
+}
+
+// config represents the configuration file structure, including its version.
+type config struct {
+ Version string `json:"version"`
+ Hosts map[string]hostConfig `json:"hosts"`
+ Aliases map[string]hostConfig `json:"aliases"`
+}
+
+// loadAlias loads from the file pointed to by shared credentials filename for alias.
+// The credentials retrieved from the alias will be returned or error. Error will be
+// returned if it fails to read from the file.
+func loadAlias(filename, alias string) (hostConfig, error) {
+ cfg := &config{}
+ configBytes, err := os.ReadFile(filename)
+ if err != nil {
+ return hostConfig{}, err
+ }
+ if err = json.Unmarshal(configBytes, cfg); err != nil {
+ return hostConfig{}, err
+ }
+
+ if cfg.Version == "10" {
+ return cfg.Aliases[alias], nil
+ }
+
+ return cfg.Hosts[alias], nil
+}
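+
+// Example usage (a minimal sketch): NewFileMinioClient("", "") falls back to
+// MINIO_SHARED_CREDENTIALS_FILE / MINIO_ALIAS and then to $HOME/.mc/config.json
+// with the "s3" alias, as implemented in Retrieve above.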
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
new file mode 100644
index 000000000..ea4b3ef93
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -0,0 +1,456 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/goccy/go-json"
+)
+
+// DefaultExpiryWindow - Default expiry window.
+// ExpiryWindow will allow the credentials to trigger refreshing
+// prior to the credentials actually expiring. This is beneficial
+// so race conditions with expiring credentials do not cause
+// request to fail unexpectedly due to ExpiredTokenException exceptions.
+// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration.
+// When used the tokens refresh will be triggered when 80% of the elapsed
+// time until the actual expiration time is passed.
+const DefaultExpiryWindow = -1
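+
+// Typical provider usage, as seen in the providers of this package (illustrative):
+//
+//	p.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)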
+
+// An IAM retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+type IAM struct {
+ Expiry
+
+ // Required http Client to use when connecting to IAM metadata service.
+ Client *http.Client
+
+ // Custom endpoint to fetch IAM role credentials.
+ Endpoint string
+
+ // Region configurable custom region for STS
+ Region string
+
+ // Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
+ Container struct {
+ AuthorizationToken string
+ AuthorizationTokenFile string
+ CredentialsFullURI string
+ CredentialsRelativeURI string
+ }
+
+ // EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html
+ EKSIdentity struct {
+ TokenFile string
+ RoleARN string
+ RoleSessionName string
+ }
+}
+
+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+ DefaultIAMRoleEndpoint = "http://169.254.169.254"
+ DefaultECSRoleEndpoint = "http://169.254.170.2"
+ DefaultSTSRoleEndpoint = "https://sts.amazonaws.com"
+ DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
+ TokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
+ TokenPath = "/latest/api/token"
+ TokenTTL = "21600"
+ TokenRequestHeader = "X-aws-ec2-metadata-token"
+)
+
+// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
+func NewIAM(endpoint string) *Credentials {
+ return New(&IAM{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ Endpoint: endpoint,
+ })
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or if it is unable to extract
+// the desired credentials.
+func (m *IAM) Retrieve() (Value, error) {
+ token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
+ if token == "" {
+ token = m.Container.AuthorizationToken
+ }
+
+ tokenFile := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE")
+ if tokenFile == "" {
+		tokenFile = m.Container.AuthorizationTokenFile
+ }
+
+ relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
+ if relativeURI == "" {
+ relativeURI = m.Container.CredentialsRelativeURI
+ }
+
+ fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
+ if fullURI == "" {
+ fullURI = m.Container.CredentialsFullURI
+ }
+
+ identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
+ if identityFile == "" {
+ identityFile = m.EKSIdentity.TokenFile
+ }
+
+ roleArn := os.Getenv("AWS_ROLE_ARN")
+ if roleArn == "" {
+ roleArn = m.EKSIdentity.RoleARN
+ }
+
+ roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
+ if roleSessionName == "" {
+ roleSessionName = m.EKSIdentity.RoleSessionName
+ }
+
+ region := os.Getenv("AWS_REGION")
+ if region == "" {
+ region = m.Region
+ }
+
+ var roleCreds ec2RoleCredRespBody
+ var err error
+
+ endpoint := m.Endpoint
+ switch {
+ case identityFile != "":
+ if len(endpoint) == 0 {
+ if region != "" {
+ if strings.HasPrefix(region, "cn-") {
+ endpoint = "https://sts." + region + ".amazonaws.com.cn"
+ } else {
+ endpoint = "https://sts." + region + ".amazonaws.com"
+ }
+ } else {
+ endpoint = DefaultSTSRoleEndpoint
+ }
+ }
+
+ creds := &STSWebIdentity{
+ Client: m.Client,
+ STSEndpoint: endpoint,
+ GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
+ token, err := os.ReadFile(identityFile)
+ if err != nil {
+ return nil, err
+ }
+
+ return &WebIdentityToken{Token: string(token)}, nil
+ },
+ RoleARN: roleArn,
+ roleSessionName: roleSessionName,
+ }
+
+ stsWebIdentityCreds, err := creds.Retrieve()
+ if err == nil {
+ m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
+ }
+ return stsWebIdentityCreds, err
+
+ case relativeURI != "":
+ if len(endpoint) == 0 {
+ endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
+ }
+
+ roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+ case tokenFile != "" && fullURI != "":
+ endpoint = fullURI
+ roleCreds, err = getEKSPodIdentityCredentials(m.Client, endpoint, tokenFile)
+
+ case fullURI != "":
+ if len(endpoint) == 0 {
+ endpoint = fullURI
+ var ok bool
+ if ok, err = isLoopback(endpoint); !ok {
+ if err == nil {
+ err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
+ }
+ break
+ }
+ }
+
+ roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+ default:
+ roleCreds, err = getCredentials(m.Client, endpoint)
+ }
+
+ if err != nil {
+ return Value{}, err
+ }
+	// Refresh is triggered before the actual expiry, per DefaultExpiryWindow.
+ m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ Expiration: roleCreds.Expiration,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+
+ // Unused params.
+ LastUpdated time.Time
+ Type string
+}
+
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = DefaultIAMSecurityCredsPath
+ return u, nil
+}
+
+// listRoleNames lists the credential role names associated
+// with the current EC2 service. An error is returned if there are
+// no credentials, or if there is an error making or receiving the request.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) {
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ if token != "" {
+ req.Header.Add(TokenRequestHeader, token)
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.New(resp.Status)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(resp.Body)
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return credsList, nil
+}
+
+func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) {
+ req, err := http.NewRequest(http.MethodGet, endpoint, nil)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if token != "" {
+ req.Header.Set("Authorization", token)
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return ec2RoleCredRespBody{}, errors.New(resp.Status)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ return respCreds, nil
+}
+
+func getEKSPodIdentityCredentials(client *http.Client, endpoint string, tokenFile string) (ec2RoleCredRespBody, error) {
+ if tokenFile != "" {
+ bytes, err := os.ReadFile(tokenFile)
+ if err != nil {
+ return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: failed to read token file:%s", err)
+ }
+ token := string(bytes)
+ return getEcsTaskCredentials(client, endpoint, token)
+ }
+ return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: no tokenFile found")
+}
+
+func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil)
+ if err != nil {
+ return "", err
+ }
+ req.Header.Add(TokenRequestTTLHeader, TokenTTL)
+ resp, err := client.Do(req)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return "", errors.New(resp.Status)
+ }
+ return string(data), nil
+}
+
+// getCredentials - obtains the credentials from the IAM role name associated with
+// the current EC2 service.
+//
+// If the credentials cannot be found, or there is an error
+// reading the response, an error will be returned.
+func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+ if endpoint == "" {
+ endpoint = DefaultIAMRoleEndpoint
+ }
+
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+ token, err := fetchIMDSToken(client, endpoint)
+ if err != nil {
+		// Return an error only for valid failure situations. If IMDSv2 is not
+		// enabled, we will not be able to get the token; in such a situation we
+		// have to rely on IMDSv1 behavior as a fallback, and this check ensures that.
+ // Refer https://github.com/minio/minio-go/issues/1866
+ if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
+ return ec2RoleCredRespBody{}, err
+ }
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ u, err := getIAMRoleURL(endpoint)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ roleNames, err := listRoleNames(client, u, token)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if len(roleNames) == 0 {
+ return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // - An instance profile can contain only one IAM role. This limit cannot be increased.
+ roleName := roleNames[0]
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // The following command retrieves the security credentials for an
+ // IAM role named `s3access`.
+ //
+ // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
+ //
+ u.Path = path.Join(u.Path, roleName)
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ if token != "" {
+ req.Header.Add(TokenRequestHeader, token)
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return ec2RoleCredRespBody{}, errors.New(resp.Status)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
+ }
+
+ return respCreds, nil
+}
+
+// isLoopback identifies if a uri's host is on a loopback address
+func isLoopback(uri string) (bool, error) {
+ u, err := url.Parse(uri)
+ if err != nil {
+ return false, err
+ }
+
+ host := u.Hostname()
+ if len(host) == 0 {
+ return false, fmt.Errorf("can't parse host from uri: %s", uri)
+ }
+
+ ips, err := net.LookupHost(host)
+ if err != nil {
+ return false, err
+ }
+ for _, ip := range ips {
+ if !net.ParseIP(ip).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
new file mode 100644
index 000000000..b79433305
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
@@ -0,0 +1,77 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "strings"
+
+// SignatureType is type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
+const (
+ // SignatureDefault is always set to v4.
+ SignatureDefault SignatureType = iota
+ SignatureV4
+ SignatureV2
+ SignatureV4Streaming
+ SignatureAnonymous // Anonymous signature signifies, no signature.
+)
+
+// IsV2 - is signature SignatureV2?
+func (s SignatureType) IsV2() bool {
+ return s == SignatureV2
+}
+
+// IsV4 - is signature SignatureV4?
+func (s SignatureType) IsV4() bool {
+ return s == SignatureV4 || s == SignatureDefault
+}
+
+// IsStreamingV4 - is signature SignatureV4Streaming?
+func (s SignatureType) IsStreamingV4() bool {
+ return s == SignatureV4Streaming
+}
+
+// IsAnonymous - is signature empty?
+func (s SignatureType) IsAnonymous() bool {
+ return s == SignatureAnonymous
+}
+
+// String returns a humanized version of the signature type;
+// strings returned here are case insensitive.
+func (s SignatureType) String() string {
+ if s.IsV2() {
+ return "S3v2"
+ } else if s.IsV4() {
+ return "S3v4"
+ } else if s.IsStreamingV4() {
+ return "S3v4Streaming"
+ }
+ return "Anonymous"
+}
+
+func parseSignatureType(str string) SignatureType {
+ if strings.EqualFold(str, "S3v4") {
+ return SignatureV4
+ } else if strings.EqualFold(str, "S3v2") {
+ return SignatureV2
+ } else if strings.EqualFold(str, "S3v4Streaming") {
+ return SignatureV4Streaming
+ }
+ return SignatureAnonymous
+}
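+
+// For example (illustrative, matching the case-insensitive comparisons above):
+//
+//	parseSignatureType("s3v4")  // returns SignatureV4
+//	parseSignatureType("other") // returns SignatureAnonymous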
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
new file mode 100644
index 000000000..7dde00b0a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
@@ -0,0 +1,67 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+ Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider; the signature is
+// set to v2. If access and secret are not specified, then
+// regardless of the signature type set, the Value will be returned
+// as anonymous.
+func NewStaticV2(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is similar to NewStaticV2 with similar considerations.
+func NewStaticV4(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+ return New(&Static{
+ Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ SignerType: signerType,
+ },
+ })
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ // Anonymous is not an error
+ return Value{SignerType: SignatureAnonymous}, nil
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+ return false
+}
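+
+// Example usage (a minimal sketch; the key values are placeholders):
+//
+//	creds := NewStaticV4("accessKey", "secret", "")
+//	v, err := creds.Get() // static credentials, never refreshed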
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
new file mode 100644
index 000000000..62bfbb6b0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -0,0 +1,183 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// AssumedRoleUser - The identifiers for the temporary security credentials that
+// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
+type AssumedRoleUser struct {
+ Arn string
+ AssumedRoleID string `xml:"AssumeRoleId"`
+}
+
+// AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request.
+type AssumeRoleWithClientGrantsResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
+ Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
+// request, including temporary credentials that can be used to make MinIO API requests.
+type ClientGrantsResult struct {
+ AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+ Audience string `xml:",omitempty"`
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:",omitempty"`
+ PackedPolicySize int `xml:",omitempty"`
+ Provider string `xml:",omitempty"`
+ SubjectFromClientGrantsToken string `xml:",omitempty"`
+}
+
+// ClientGrantsToken - client grants token with expiry.
+type ClientGrantsToken struct {
+ Token string
+ Expiry int
+}
+
+// An STSClientGrants retrieves credentials from the MinIO service, and keeps track if
+// those credentials are expired.
+type STSClientGrants struct {
+ Expiry
+
+ // Required http Client to use when connecting to MinIO STS service.
+ Client *http.Client
+
+ // MinIO endpoint to fetch STS credentials.
+ STSEndpoint string
+
+	// GetClientGrantsTokenExpiry is a function to retrieve tokens
+	// from the IDP. This function should return two values: one is
+	// the accessToken, which is a self-contained access token (JWT),
+	// and the second return value is the expiry associated with
+	// this token. This is a customer-provided function and
+	// is mandatory.
+ GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error)
+}
+
+// NewSTSClientGrants returns a pointer to a new
+// Credentials object wrapping the STSClientGrants.
+func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
+ if stsEndpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if getClientGrantsTokenExpiry == nil {
+ return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
+ }
+ return New(&STSClientGrants{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ STSEndpoint: stsEndpoint,
+ GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
+ }), nil
+}
+
+func getClientGrantsCredentials(clnt *http.Client, endpoint string,
+ getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
+) (AssumeRoleWithClientGrantsResponse, error) {
+ accessToken, err := getClientGrantsTokenExpiry()
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithClientGrants")
+ v.Set("Token", accessToken.Token)
+ v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry))
+ v.Set("Version", STSVersion)
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := clnt.Do(req)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleWithClientGrantsResponse{}, errResp
+ }
+
+ a := AssumeRoleWithClientGrantsResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ return a, nil
+}
+
+// Retrieve retrieves credentials from the MinIO service.
+// Error will be returned if the request fails.
+func (m *STSClientGrants) Retrieve() (Value, error) {
+ a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry)
+ if err != nil {
+ return Value{}, err
+ }
+
+	// Refresh is triggered before the actual expiry, per DefaultExpiryWindow.
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: a.Result.Credentials.AccessKey,
+ SecretAccessKey: a.Result.Credentials.SecretKey,
+ SessionToken: a.Result.Credentials.SessionToken,
+ Expiration: a.Result.Credentials.Expiration,
+ SignerType: SignatureV4,
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
new file mode 100644
index 000000000..75e1a77d3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
@@ -0,0 +1,147 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// CustomTokenResult - Contains temporary creds and user metadata.
+type CustomTokenResult struct {
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId"`
+ SecretKey string `xml:"SecretAccessKey"`
+ Expiration time.Time `xml:"Expiration"`
+ SessionToken string `xml:"SessionToken"`
+ } `xml:",omitempty"`
+
+ AssumedUser string `xml:",omitempty"`
+}
+
+// AssumeRoleWithCustomTokenResponse contains the result of a successful
+// AssumeRoleWithCustomToken request.
+type AssumeRoleWithCustomTokenResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"`
+ Result CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"`
+ Metadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// CustomTokenIdentity - satisfies the Provider interface, and retrieves
+// credentials from MinIO using the AssumeRoleWithCustomToken STS API.
+type CustomTokenIdentity struct {
+ Expiry
+
+ Client *http.Client
+
+ // MinIO server STS endpoint to fetch STS credentials.
+ STSEndpoint string
+
+ // The custom token to use with the request.
+ Token string
+
+ // RoleArn associated with the identity
+ RoleArn string
+
+ // RequestedExpiry is to set the validity of the generated credentials
+	// (this value is bounded by the server).
+ RequestedExpiry time.Duration
+}
+
+// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
+func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
+ u, err := url.Parse(c.STSEndpoint)
+ if err != nil {
+ return value, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithCustomToken")
+ v.Set("Version", STSVersion)
+ v.Set("RoleArn", c.RoleArn)
+ v.Set("Token", c.Token)
+ if c.RequestedExpiry != 0 {
+ v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds())))
+ }
+
+ u.RawQuery = v.Encode()
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), nil)
+ if err != nil {
+ return value, err
+ }
+
+ resp, err := c.Client.Do(req)
+ if err != nil {
+ return value, err
+ }
+
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return value, errors.New(resp.Status)
+ }
+
+ r := AssumeRoleWithCustomTokenResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
+ return
+ }
+
+ cr := r.Result.Credentials
+ c.SetExpiration(cr.Expiration, DefaultExpiryWindow)
+ return Value{
+ AccessKeyID: cr.AccessKey,
+ SecretAccessKey: cr.SecretKey,
+ SessionToken: cr.SessionToken,
+ Expiration: cr.Expiration,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// NewCustomTokenCredentials - returns credentials using the
+// AssumeRoleWithCustomToken STS API.
+func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) {
+ c := CustomTokenIdentity{
+ Client: &http.Client{Transport: http.DefaultTransport},
+ STSEndpoint: stsEndpoint,
+ Token: token,
+ RoleArn: roleArn,
+ }
+ for _, optFunc := range optFuncs {
+ optFunc(&c)
+ }
+ return New(&c), nil
+}
+
+// CustomTokenOpt is a function type to configure the custom-token based
+// credentials using NewCustomTokenCredentials.
+type CustomTokenOpt func(*CustomTokenIdentity)
+
+// CustomTokenValidityOpt sets the validity duration of the requested
+// credentials. This value is ignored if the server enforces a lower validity
+// period.
+func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt {
+ return func(c *CustomTokenIdentity) {
+ c.RequestedExpiry = d
+ }
+}
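+
+// Example usage (a minimal sketch; the endpoint, token, and role ARN are placeholders):
+//
+//	creds, err := NewCustomTokenCredentials("https://minio.local:9000",
+//		"my-custom-token", "arn:minio:iam:::role/custom",
+//		CustomTokenValidityOpt(time.Hour))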
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
new file mode 100644
index 000000000..b8df289f2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -0,0 +1,190 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// AssumeRoleWithLDAPResponse contains the result of a successful
+// AssumeRoleWithLDAPIdentity request.
+type AssumeRoleWithLDAPResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
+ Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// LDAPIdentityResult - contains credentials for a successful
+// AssumeRoleWithLDAPIdentity request.
+type LDAPIdentityResult struct {
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:",omitempty"`
+
+ SubjectFromToken string `xml:",omitempty"`
+}
+
+// LDAPIdentity retrieves credentials from MinIO.
+type LDAPIdentity struct {
+ Expiry
+
+ // Required http Client to use when connecting to MinIO STS service.
+ Client *http.Client
+
+ // Exported STS endpoint to fetch STS credentials.
+ STSEndpoint string
+
+ // LDAP username/password used to fetch LDAP STS credentials.
+ LDAPUsername, LDAPPassword string
+
+ // Session policy to apply to the generated credentials. Leave empty to
+ // use the full access policy available to the user.
+ Policy string
+
+ // RequestedExpiry is the configured expiry duration for credentials
+ // requested from LDAP.
+ RequestedExpiry time.Duration
+}
+
+// NewLDAPIdentity returns new credentials object that uses LDAP
+// Identity.
+func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
+ l := LDAPIdentity{
+ Client: &http.Client{Transport: http.DefaultTransport},
+ STSEndpoint: stsEndpoint,
+ LDAPUsername: ldapUsername,
+ LDAPPassword: ldapPassword,
+ }
+ for _, optFunc := range optFuncs {
+ optFunc(&l)
+ }
+ return New(&l), nil
+}
+
+// LDAPIdentityOpt is a function type used to configure the LDAPIdentity
+// instance.
+type LDAPIdentityOpt func(*LDAPIdentity)
+
+// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
+func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
+ return func(k *LDAPIdentity) {
+ k.Policy = policy
+ }
+}
+
+// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
+func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
+ return func(k *LDAPIdentity) {
+ k.RequestedExpiry = d
+ }
+}
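+
+// Example usage (a minimal sketch; the endpoint and LDAP values are placeholders):
+//
+//	creds, err := NewLDAPIdentity("https://minio.local:9000",
+//		"ldap-username", "ldap-password",
+//		LDAPIdentityExpiryOpt(time.Hour))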
+
+// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses
+// LDAP Identity with a specified session policy. The `policy` parameter must be
+// a JSON string specifying the policy document.
+//
+// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
+func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
+ return New(&LDAPIdentity{
+ Client: &http.Client{Transport: http.DefaultTransport},
+ STSEndpoint: stsEndpoint,
+ LDAPUsername: ldapUsername,
+ LDAPPassword: ldapPassword,
+ Policy: policy,
+ }), nil
+}
+
+// Retrieve gets the credential by calling the MinIO STS API for
+// LDAP on the configured stsEndpoint.
+func (k *LDAPIdentity) Retrieve() (value Value, err error) {
+ u, err := url.Parse(k.STSEndpoint)
+ if err != nil {
+ return value, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithLDAPIdentity")
+ v.Set("Version", STSVersion)
+ v.Set("LDAPUsername", k.LDAPUsername)
+ v.Set("LDAPPassword", k.LDAPPassword)
+ if k.Policy != "" {
+ v.Set("Policy", k.Policy)
+ }
+ if k.RequestedExpiry != 0 {
+ v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
+ }
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+ if err != nil {
+ return value, err
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := k.Client.Do(req)
+ if err != nil {
+ return value, err
+ }
+
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return value, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return value, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return value, errResp
+ }
+
+ r := AssumeRoleWithLDAPResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
+ return
+ }
+
+ cr := r.Result.Credentials
+ k.SetExpiration(cr.Expiration, DefaultExpiryWindow)
+ return Value{
+ AccessKeyID: cr.AccessKey,
+ SecretAccessKey: cr.SecretKey,
+ SessionToken: cr.SessionToken,
+ Expiration: cr.Expiration,
+ SignerType: SignatureV4,
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
new file mode 100644
index 000000000..10083502d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -0,0 +1,212 @@
+// MinIO Go Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2021 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/xml"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+)
+
+// CertificateIdentityOption is an optional AssumeRoleWithCertificate
+// parameter - e.g. a custom HTTP transport configuration or S3 credential
+// lifetime.
+type CertificateIdentityOption func(*STSCertificateIdentity)
+
+// CertificateIdentityWithTransport returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given http.RoundTripper.
+func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
+ return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
+}
+
+// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given lifetime.
+//
+// Fetched S3 credentials will have the given lifetime if the STS server
+// allows such credentials.
+func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption {
+ return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime })
+}
+
+// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
+// rotates those credentials once they expire.
+type STSCertificateIdentity struct {
+ Expiry
+
+ // STSEndpoint is the base URL endpoint of the STS API.
+ // For example, https://minio.local:9000
+ STSEndpoint string
+
+	// S3CredentialLivetime is the duration temp. S3 access
+	// credentials should be valid.
+	//
+	// It represents the access credential lifetime requested
+	// by the client. The STS server may choose to issue
+	// temp. S3 credentials that have a different - usually
+	// shorter - lifetime.
+	//
+	// The default lifetime is one hour.
+ S3CredentialLivetime time.Duration
+
+ // Client is the HTTP client used to authenticate and fetch
+ // S3 credentials.
+ //
+ // A custom TLS client configuration can be specified by
+ // using a custom http.Transport:
+ // Client: http.Client {
+ // Transport: &http.Transport{
+ // TLSClientConfig: &tls.Config{},
+ // },
+ // }
+ Client http.Client
+}
+
+var _ Provider = (*STSCertificateIdentity)(nil) // compiler check
+
+// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
+// to the given STS endpoint with the given TLS certificate and retrieves and
+// rotates S3 credentials.
+func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
+ if endpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if _, err := url.Parse(endpoint); err != nil {
+ return nil, err
+ }
+ identity := &STSCertificateIdentity{
+ STSEndpoint: endpoint,
+ Client: http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 5 * time.Second,
+ TLSClientConfig: &tls.Config{
+ Certificates: []tls.Certificate{certificate},
+ },
+ },
+ },
+ }
+ for _, option := range options {
+ option(identity)
+ }
+ return New(identity), nil
+}
+
+// Retrieve fetches a new set of S3 credentials from the configured
+// STS API endpoint.
+func (i *STSCertificateIdentity) Retrieve() (Value, error) {
+ endpointURL, err := url.Parse(i.STSEndpoint)
+ if err != nil {
+ return Value{}, err
+ }
+ livetime := i.S3CredentialLivetime
+ if livetime == 0 {
+ livetime = 1 * time.Hour
+ }
+
+ queryValues := url.Values{}
+ queryValues.Set("Action", "AssumeRoleWithCertificate")
+ queryValues.Set("Version", STSVersion)
+ endpointURL.RawQuery = queryValues.Encode()
+
+ req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil)
+ if err != nil {
+ return Value{}, err
+ }
+ if req.Form == nil {
+ req.Form = url.Values{}
+ }
+ req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
+
+ resp, err := i.Client.Do(req)
+ if err != nil {
+ return Value{}, err
+ }
+ if resp.Body != nil {
+ defer resp.Body.Close()
+ }
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return Value{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return Value{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return Value{}, errResp
+ }
+
+ const MaxSize = 10 * 1 << 20
+ var body io.Reader = resp.Body
+ if resp.ContentLength > 0 && resp.ContentLength < MaxSize {
+ body = io.LimitReader(body, resp.ContentLength)
+ } else {
+ body = io.LimitReader(body, MaxSize)
+ }
+
+ var response assumeRoleWithCertificateResponse
+ if err = xml.NewDecoder(body).Decode(&response); err != nil {
+ return Value{}, err
+ }
+ i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow)
+ return Value{
+ AccessKeyID: response.Result.Credentials.AccessKey,
+ SecretAccessKey: response.Result.Credentials.SecretKey,
+ SessionToken: response.Result.Credentials.SessionToken,
+ Expiration: response.Result.Credentials.Expiration,
+ SignerType: SignatureDefault,
+ }, nil
+}
+
+// Expiration returns the expiration time of the current S3 credentials.
+func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
+
+type assumeRoleWithCertificateResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"`
+ Result struct {
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:"Credentials" json:"credentials,omitempty"`
+ } `xml:"AssumeRoleWithCertificateResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
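For reference, a sketch (not part of the vendored file) of requesting temporary S3 credentials with a client TLS certificate through the provider above; it assumes a MinIO deployment with certificate-based STS enabled, and the certificate paths and endpoint are placeholders.

package main

import (
	"crypto/tls"
	"log"
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical client certificate and key files.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		log.Fatal(err)
	}

	creds, err := credentials.NewSTSCertificateIdentity(
		"https://minio.example.com:9000", // hypothetical STS endpoint
		cert,
		// Request a shorter credential lifetime; the server may cap it.
		credentials.CertificateIdentityWithExpiry(30*time.Minute),
	)
	if err != nil {
		log.Fatal(err)
	}

	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("access key:", v.AccessKeyID, "expires:", v.Expiration)
}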
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
new file mode 100644
index 000000000..596d95152
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -0,0 +1,206 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request.
+type AssumeRoleWithWebIdentityResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"`
+ Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"`
+ ResponseMetadata struct {
+ RequestID string `xml:"RequestId,omitempty"`
+ } `xml:"ResponseMetadata,omitempty"`
+}
+
+// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
+// request, including temporary credentials that can be used to make MinIO API requests.
+type WebIdentityResult struct {
+ AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+ Audience string `xml:",omitempty"`
+ Credentials struct {
+ AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
+ SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+ Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
+ SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
+ } `xml:",omitempty"`
+ PackedPolicySize int `xml:",omitempty"`
+ Provider string `xml:",omitempty"`
+ SubjectFromWebIdentityToken string `xml:",omitempty"`
+}
+
+// WebIdentityToken - web identity token with expiry.
+type WebIdentityToken struct {
+ Token string
+ AccessToken string
+ Expiry int
+}
+
+// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if
+// those credentials are expired.
+type STSWebIdentity struct {
+ Expiry
+
+ // Required http Client to use when connecting to MinIO STS service.
+ Client *http.Client
+
+ // Exported STS endpoint to fetch STS credentials.
+ STSEndpoint string
+
+	// Exported GetWebIDTokenExpiry function which returns an ID
+	// token from the IDP. It should return a WebIdentityToken
+	// containing a self-contained ID token (JWT) together with
+	// the expiry associated with this token.
+	// This is a customer provided function and is mandatory.
+ GetWebIDTokenExpiry func() (*WebIdentityToken, error)
+
+ // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is
+ // assuming.
+ RoleARN string
+
+ // roleSessionName is the identifier for the assumed role session.
+ roleSessionName string
+}
+
+// NewSTSWebIdentity returns a pointer to a new
+// Credentials object wrapping the STSWebIdentity.
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
+ if stsEndpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if getWebIDTokenExpiry == nil {
+ return nil, errors.New("Web ID token and expiry retrieval function should be defined")
+ }
+ return New(&STSWebIdentity{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ STSEndpoint: stsEndpoint,
+ GetWebIDTokenExpiry: getWebIDTokenExpiry,
+ }), nil
+}
+
+func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
+ getWebIDTokenExpiry func() (*WebIdentityToken, error),
+) (AssumeRoleWithWebIdentityResponse, error) {
+ idToken, err := getWebIDTokenExpiry()
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithWebIdentity")
+ if len(roleARN) > 0 {
+ v.Set("RoleArn", roleARN)
+
+ if len(roleSessionName) == 0 {
+ roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10)
+ }
+ v.Set("RoleSessionName", roleSessionName)
+ }
+ v.Set("WebIdentityToken", idToken.Token)
+ if idToken.AccessToken != "" {
+ // Usually set when server is using extended userInfo endpoint.
+ v.Set("WebIdentityAccessToken", idToken.AccessToken)
+ }
+ if idToken.Expiry > 0 {
+ v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
+ }
+ v.Set("Version", STSVersion)
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := clnt.Do(req)
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ var errResp ErrorResponse
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleWithWebIdentityResponse{}, errResp
+ }
+
+ a := AssumeRoleWithWebIdentityResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+
+ return a, nil
+}
+
+// Retrieve retrieves credentials from the MinIO service.
+// Error will be returned if the request fails.
+func (m *STSWebIdentity) Retrieve() (Value, error) {
+ a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
+ if err != nil {
+ return Value{}, err
+ }
+
+	// The expiration is set using the default expiry window.
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: a.Result.Credentials.AccessKey,
+ SecretAccessKey: a.Result.Credentials.SecretKey,
+ SessionToken: a.Result.Credentials.SessionToken,
+ Expiration: a.Result.Credentials.Expiration,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// Expiration returns the expiration time of the credentials
+func (m *STSWebIdentity) Expiration() time.Time {
+ return m.expiration
+}
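For reference, a sketch (not part of the vendored file) of exchanging an OIDC ID token for temporary credentials through NewSTSWebIdentity above; the token path and endpoint are hypothetical, and getIDToken stands in for however the application obtains a JWT from its identity provider.

package main

import (
	"log"
	"os"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

// getIDToken is a hypothetical helper; here it simply reads a token that some
// other component (for example a projected service-account volume) refreshes.
func getIDToken() (*credentials.WebIdentityToken, error) {
	raw, err := os.ReadFile("/var/run/secrets/token") // hypothetical path
	if err != nil {
		return nil, err
	}
	return &credentials.WebIdentityToken{Token: string(raw)}, nil
}

func main() {
	creds, err := credentials.NewSTSWebIdentity("https://minio.example.com:9000", getIDToken)
	if err != nil {
		log.Fatal(err)
	}
	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("temporary access key:", v.AccessKeyID)
}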
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
new file mode 100644
index 000000000..6db26c036
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
@@ -0,0 +1,24 @@
+//go:build !fips
+// +build !fips
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+// FIPS is true if 'fips' build tag was specified.
+const FIPS = false
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
new file mode 100644
index 000000000..640258242
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
@@ -0,0 +1,24 @@
+//go:build fips
+// +build fips
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+// FIPS is true if 'fips' build tag was specified.
+const FIPS = true
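For reference, a tiny sketch (not part of the vendored files) showing how callers can branch on this build-tag-controlled constant.

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	// Prints false for a default build and true when compiled with `go build -tags fips`.
	fmt.Println("FIPS build:", encrypt.FIPS)
}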
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
new file mode 100644
index 000000000..c40e40a1c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
@@ -0,0 +1,197 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "errors"
+ "net/http"
+
+ "github.com/goccy/go-json"
+ "golang.org/x/crypto/argon2"
+)
+
+const (
+ // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
+ SseGenericHeader = "X-Amz-Server-Side-Encryption"
+
+ // SseKmsKeyID is the AWS SSE-KMS key id.
+ SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id"
+ // SseEncryptionContext is the AWS SSE-KMS Encryption Context data.
+ SseEncryptionContext = SseGenericHeader + "-Context"
+
+ // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
+ SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm"
+ // SseCustomerKey is the AWS SSE-C encryption key HTTP header key.
+ SseCustomerKey = SseGenericHeader + "-Customer-Key"
+ // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
+ SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5"
+
+ // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
+ SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
+ SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
+ SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
+)
+
+// PBKDF creates a SSE-C key from the provided password and salt.
+// PBKDF is a password-based key derivation function
+// which can be used to derive a high-entropy cryptographic
+// key from a low-entropy password and a salt.
+type PBKDF func(password, salt []byte) ServerSide
+
+// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
+// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
+var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
+ sse := ssec{}
+ copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
+ return sse
+}
+
+// Type is the server-side-encryption method. It represents one of
+// the following encryption methods:
+// - SSE-C: server-side-encryption with customer provided keys
+// - KMS: server-side-encryption with managed keys
+// - S3: server-side-encryption using S3 storage encryption
+type Type string
+
+const (
+ // SSEC represents server-side-encryption with customer provided keys
+ SSEC Type = "SSE-C"
+ // KMS represents server-side-encryption with managed keys
+ KMS Type = "KMS"
+ // S3 represents server-side-encryption using S3 storage encryption
+ S3 Type = "S3"
+)
+
+// ServerSide is a form of S3 server-side-encryption.
+type ServerSide interface {
+ // Type returns the server-side-encryption method.
+ Type() Type
+
+ // Marshal adds encryption headers to the provided HTTP headers.
+ // It marks an HTTP request as server-side-encryption request
+ // and inserts the required data into the headers.
+ Marshal(h http.Header)
+}
+
+// NewSSE returns a server-side-encryption using S3 storage encryption.
+// Using SSE-S3 the server will encrypt the object with server-managed keys.
+func NewSSE() ServerSide { return s3{} }
+
+// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context.
+func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
+ if context == nil {
+ return kms{key: keyID, hasContext: false}, nil
+ }
+ serializedContext, err := json.Marshal(context)
+ if err != nil {
+ return nil, err
+ }
+ return kms{key: keyID, context: serializedContext, hasContext: true}, nil
+}
+
+// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
+// The key must be 32 bytes long.
+func NewSSEC(key []byte) (ServerSide, error) {
+ if len(key) != 32 {
+ return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
+ }
+ sse := ssec{}
+ copy(sse[:], key)
+ return sse, nil
+}
+
+// SSE transforms a SSE-C copy encryption into a SSE-C encryption.
+// It is the inverse of SSECopy(...).
+//
+// If the provided sse is not an SSE-C copy encryption, SSE returns
+// sse unmodified.
+func SSE(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssecCopy); ok {
+ return ssec(sse)
+ }
+ return sse
+}
+
+// SSECopy transforms a SSE-C encryption into a SSE-C copy
+// encryption. This is required for SSE-C key rotation or a SSE-C
+// copy where the source and the destination should be encrypted.
+//
+// If the provided sse is not an SSE-C encryption, SSECopy returns
+// sse unmodified.
+func SSECopy(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssec); ok {
+ return ssecCopy(sse)
+ }
+ return sse
+}
+
+type ssec [32]byte
+
+func (s ssec) Type() Type { return SSEC }
+
+func (s ssec) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(SseCustomerAlgorithm, "AES256")
+ h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type ssecCopy [32]byte
+
+func (s ssecCopy) Type() Type { return SSEC }
+
+func (s ssecCopy) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(SseCopyCustomerAlgorithm, "AES256")
+ h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type s3 struct{}
+
+func (s s3) Type() Type { return S3 }
+
+func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") }
+
+type kms struct {
+ key string
+ context []byte
+ hasContext bool
+}
+
+func (s kms) Type() Type { return KMS }
+
+func (s kms) Marshal(h http.Header) {
+ h.Set(SseGenericHeader, "aws:kms")
+ if s.key != "" {
+ h.Set(SseKmsKeyID, s.key)
+ }
+ if s.hasContext {
+ h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
+ }
+}
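For reference, a sketch (not part of the vendored file) of constructing the server-side-encryption types defined above and of how Marshal populates the corresponding request headers; the key material and KMS key id are placeholders.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	// SSE-C from a raw 32-byte key (all zeros here purely as a placeholder).
	key := make([]byte, 32)
	sseC, err := encrypt.NewSSEC(key)
	if err != nil {
		log.Fatal(err)
	}

	// SSE-C derived from a password and salt with the default Argon2id KDF.
	_ = encrypt.DefaultPBKDF([]byte("secret password"), []byte("bucket/object"))

	// SSE-KMS with a hypothetical key id and an optional encryption context.
	sseKMS, err := encrypt.NewSSEKMS("my-kms-key-id", map[string]string{"purpose": "demo"})
	if err != nil {
		log.Fatal(err)
	}

	// Marshal only fills request headers; the minio-go object APIs call it
	// internally. Both are applied here just to show which headers each adds.
	h := http.Header{}
	sseC.Marshal(h)
	sseKMS.Marshal(h)
	fmt.Println(h)
}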
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
new file mode 100644
index 000000000..e706b57de
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -0,0 +1,516 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package lifecycle contains all the lifecycle related data types and marshallers.
+package lifecycle
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "time"
+)
+
+var errMissingStorageClass = errors.New("storage-class cannot be empty")
+
+// AbortIncompleteMultipartUpload structure, not supported yet on MinIO
+type AbortIncompleteMultipartUpload struct {
+ XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
+ DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"`
+}
+
+// IsDaysNull returns true if days field is null
+func (n AbortIncompleteMultipartUpload) IsDaysNull() bool {
+ return n.DaysAfterInitiation == ExpirationDays(0)
+}
+
+// MarshalXML if days after initiation is set to non-zero value
+func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if n.IsDaysNull() {
+ return nil
+ }
+ type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload
+ return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start)
+}
+
+// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire.
+// Upon expiration, server permanently deletes the noncurrent object versions.
+// Set this lifecycle configuration action on a bucket that has versioning enabled
+// (or suspended) to request server delete noncurrent object versions at a
+// specific period in the object's lifetime.
+type NoncurrentVersionExpiration struct {
+ XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
+ NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
+ NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
+}
+
+// MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions.
+func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if n.isNull() {
+ return nil
+ }
+ type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
+ return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start)
+}
+
+// IsDaysNull returns true if days field is null
+func (n NoncurrentVersionExpiration) IsDaysNull() bool {
+ return n.NoncurrentDays == ExpirationDays(0)
+}
+
+func (n NoncurrentVersionExpiration) isNull() bool {
+ return n.IsDaysNull() && n.NewerNoncurrentVersions == 0
+}
+
+// NoncurrentVersionTransition structure, set this action to request the server to
+// transition noncurrent object versions to a different storage class
+// at a specific period in the object's lifetime.
+type NoncurrentVersionTransition struct {
+ XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
+ StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+ NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
+ NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
+}
+
+// IsDaysNull returns true if days field is null
+func (n NoncurrentVersionTransition) IsDaysNull() bool {
+ return n.NoncurrentDays == ExpirationDays(0)
+}
+
+// IsStorageClassEmpty returns true if storage class field is empty
+func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool {
+ return n.StorageClass == ""
+}
+
+func (n NoncurrentVersionTransition) isNull() bool {
+ return n.StorageClass == ""
+}
+
+// UnmarshalJSON implements NoncurrentVersionTransition JSONify
+func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error {
+ type noncurrentVersionTransition NoncurrentVersionTransition
+ var nt noncurrentVersionTransition
+ err := json.Unmarshal(b, &nt)
+ if err != nil {
+ return err
+ }
+
+ if nt.StorageClass == "" {
+ return errMissingStorageClass
+ }
+ *n = NoncurrentVersionTransition(nt)
+ return nil
+}
+
+// MarshalXML is extended to leave out
+// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
+func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if n.isNull() {
+ return nil
+ }
+ type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
+ return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start)
+}
+
+// Tag structure key/value pair representing an object tag to apply lifecycle configuration
+type Tag struct {
+ XMLName xml.Name `xml:"Tag,omitempty" json:"-"`
+ Key string `xml:"Key,omitempty" json:"Key,omitempty"`
+ Value string `xml:"Value,omitempty" json:"Value,omitempty"`
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+ return tag.Key == ""
+}
+
+// Transition structure - transition details of lifecycle configuration
+type Transition struct {
+ XMLName xml.Name `xml:"Transition" json:"-"`
+ Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
+ StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+ Days ExpirationDays `xml:"Days" json:"Days"`
+}
+
+// UnmarshalJSON returns an error if storage-class is empty.
+func (t *Transition) UnmarshalJSON(b []byte) error {
+ type transition Transition
+ var tr transition
+ err := json.Unmarshal(b, &tr)
+ if err != nil {
+ return err
+ }
+
+ if tr.StorageClass == "" {
+ return errMissingStorageClass
+ }
+ *t = Transition(tr)
+ return nil
+}
+
+// MarshalJSON customizes json encoding by omitting empty values
+func (t Transition) MarshalJSON() ([]byte, error) {
+ if t.IsNull() {
+ return nil, nil
+ }
+ type transition struct {
+ Date *ExpirationDate `json:"Date,omitempty"`
+ StorageClass string `json:"StorageClass,omitempty"`
+ Days *ExpirationDays `json:"Days"`
+ }
+
+ newt := transition{
+ StorageClass: t.StorageClass,
+ }
+
+ if !t.IsDateNull() {
+ newt.Date = &t.Date
+ } else {
+ newt.Days = &t.Days
+ }
+ return json.Marshal(newt)
+}
+
+// IsDaysNull returns true if days field is null
+func (t Transition) IsDaysNull() bool {
+ return t.Days == ExpirationDays(0)
+}
+
+// IsDateNull returns true if date field is null
+func (t Transition) IsDateNull() bool {
+ return t.Date.Time.IsZero()
+}
+
+// IsNull returns true if no storage-class is set.
+func (t Transition) IsNull() bool {
+ return t.StorageClass == ""
+}
+
+// MarshalXML encodes Transition only if it is non-null
+func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
+ if t.IsNull() {
+ return nil
+ }
+ type transitionWrapper Transition
+ return en.EncodeElement(transitionWrapper(t), startElement)
+}
+
+// And - an And rule for LifecycleTag, to be used in LifecycleRuleFilter
+type And struct {
+ XMLName xml.Name `xml:"And" json:"-"`
+ Prefix string `xml:"Prefix" json:"Prefix,omitempty"`
+ Tags []Tag `xml:"Tag" json:"Tags,omitempty"`
+ ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
+ ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
+}
+
+// IsEmpty returns true if Tags field is null
+func (a And) IsEmpty() bool {
+ return len(a.Tags) == 0 && a.Prefix == "" &&
+ a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0
+}
+
+// Filter will be used in selecting rule(s) for lifecycle configuration
+type Filter struct {
+ XMLName xml.Name `xml:"Filter" json:"-"`
+ And And `xml:"And,omitempty" json:"And,omitempty"`
+ Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+ Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
+ ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
+ ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
+}
+
+// IsNull returns true if all Filter fields are empty.
+func (f Filter) IsNull() bool {
+ return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" &&
+ f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0
+}
+
+// MarshalJSON customizes json encoding by removing empty values.
+func (f Filter) MarshalJSON() ([]byte, error) {
+ type filter struct {
+ And *And `json:"And,omitempty"`
+ Prefix string `json:"Prefix,omitempty"`
+ Tag *Tag `json:"Tag,omitempty"`
+ ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"`
+ ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"`
+ }
+
+ newf := filter{
+ Prefix: f.Prefix,
+ }
+ if !f.Tag.IsEmpty() {
+ newf.Tag = &f.Tag
+ }
+ if !f.And.IsEmpty() {
+ newf.And = &f.And
+ }
+ newf.ObjectSizeLessThan = f.ObjectSizeLessThan
+ newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan
+ return json.Marshal(newf)
+}
+
+// MarshalXML - produces the xml representation of the Filter struct;
+// only one of Prefix, And and Tag should be present in the output.
+func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+
+ switch {
+ case !f.And.IsEmpty():
+ if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
+ return err
+ }
+ case !f.Tag.IsEmpty():
+ if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
+ return err
+ }
+ default:
+ if f.ObjectSizeLessThan > 0 {
+ if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil {
+ return err
+ }
+ break
+ }
+ if f.ObjectSizeGreaterThan > 0 {
+ if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil {
+ return err
+ }
+ break
+ }
+ // Print empty Prefix field only when everything else is empty
+ if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
+ return err
+ }
+ }
+
+ return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// ExpirationDays is a type alias to unmarshal Days in Expiration
+type ExpirationDays int
+
+// MarshalXML encodes number of days to expire if it is non-zero and
+// encodes empty string otherwise
+func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+ if eDays == 0 {
+ return nil
+ }
+ return e.EncodeElement(int(eDays), startElement)
+}
+
+// ExpirationDate is an embedded type containing time.Time to unmarshal
+// Date in Expiration
+type ExpirationDate struct {
+ time.Time
+}
+
+// MarshalXML encodes expiration date if it is non-zero and encodes
+// empty string otherwise
+func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+ if eDate.Time.IsZero() {
+ return nil
+ }
+ return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
+}
+
+// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
+type ExpireDeleteMarker ExpirationBoolean
+
+// IsEnabled returns true if the auto delete-marker expiration is enabled
+func (e ExpireDeleteMarker) IsEnabled() bool {
+ return bool(e)
+}
+
+// ExpirationBoolean represents an XML version of 'bool' type
+type ExpirationBoolean bool
+
+// MarshalXML encodes delete marker boolean into an XML form.
+func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+ if !b {
+ return nil
+ }
+ type booleanWrapper ExpirationBoolean
+ return e.EncodeElement(booleanWrapper(b), startElement)
+}
+
+// IsEnabled returns true if the expiration boolean is enabled
+func (b ExpirationBoolean) IsEnabled() bool {
+ return bool(b)
+}
+
+// Expiration structure - expiration details of lifecycle configuration
+type Expiration struct {
+ XMLName xml.Name `xml:"Expiration,omitempty" json:"-"`
+ Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
+ Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
+ DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
+ DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"`
+}
+
+// MarshalJSON customizes json encoding by removing empty day/date specification.
+func (e Expiration) MarshalJSON() ([]byte, error) {
+ type expiration struct {
+ Date *ExpirationDate `json:"Date,omitempty"`
+ Days *ExpirationDays `json:"Days,omitempty"`
+ DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
+ DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"`
+ }
+
+ newexp := expiration{
+ DeleteMarker: e.DeleteMarker,
+ DeleteAll: e.DeleteAll,
+ }
+ if !e.IsDaysNull() {
+ newexp.Days = &e.Days
+ }
+ if !e.IsDateNull() {
+ newexp.Date = &e.Date
+ }
+ return json.Marshal(newexp)
+}
+
+// IsDaysNull returns true if days field is null
+func (e Expiration) IsDaysNull() bool {
+ return e.Days == ExpirationDays(0)
+}
+
+// IsDateNull returns true if date field is null
+func (e Expiration) IsDateNull() bool {
+ return e.Date.Time.IsZero()
+}
+
+// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled
+func (e Expiration) IsDeleteMarkerExpirationEnabled() bool {
+ return e.DeleteMarker.IsEnabled()
+}
+
+// IsNull returns true if both date and days fields are null
+func (e Expiration) IsNull() bool {
+ return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() && !e.DeleteAll.IsEnabled()
+}
+
+// MarshalXML encodes Expiration only if it is non-null
+func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
+ if e.IsNull() {
+ return nil
+ }
+ type expirationWrapper Expiration
+ return en.EncodeElement(expirationWrapper(e), startElement)
+}
+
+// DelMarkerExpiration represents DelMarkerExpiration actions element in an ILM policy
+type DelMarkerExpiration struct {
+ XMLName xml.Name `xml:"DelMarkerExpiration" json:"-"`
+ Days int `xml:"Days,omitempty" json:"Days,omitempty"`
+}
+
+// IsNull returns true if Days isn't specified and false otherwise.
+func (de DelMarkerExpiration) IsNull() bool {
+ return de.Days == 0
+}
+
+// MarshalXML avoids serializing an empty DelMarkerExpiration element
+func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
+ if de.IsNull() {
+ return nil
+ }
+ type delMarkerExp DelMarkerExpiration
+ return enc.EncodeElement(delMarkerExp(de), start)
+}
+
+// MarshalJSON customizes json encoding by omitting empty values
+func (r Rule) MarshalJSON() ([]byte, error) {
+ type rule struct {
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
+ Expiration *Expiration `json:"Expiration,omitempty"`
+ DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"`
+ ID string `json:"ID"`
+ RuleFilter *Filter `json:"Filter,omitempty"`
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"`
+ NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"`
+ Prefix string `json:"Prefix,omitempty"`
+ Status string `json:"Status"`
+ Transition *Transition `json:"Transition,omitempty"`
+ }
+ newr := rule{
+ Prefix: r.Prefix,
+ Status: r.Status,
+ ID: r.ID,
+ }
+
+ if !r.RuleFilter.IsNull() {
+ newr.RuleFilter = &r.RuleFilter
+ }
+ if !r.AbortIncompleteMultipartUpload.IsDaysNull() {
+ newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload
+ }
+ if !r.Expiration.IsNull() {
+ newr.Expiration = &r.Expiration
+ }
+ if !r.DelMarkerExpiration.IsNull() {
+ newr.DelMarkerExpiration = &r.DelMarkerExpiration
+ }
+ if !r.Transition.IsNull() {
+ newr.Transition = &r.Transition
+ }
+ if !r.NoncurrentVersionExpiration.isNull() {
+ newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
+ }
+ if !r.NoncurrentVersionTransition.isNull() {
+ newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
+ }
+
+ return json.Marshal(newr)
+}
+
+// Rule represents a single rule in lifecycle configuration
+type Rule struct {
+ XMLName xml.Name `xml:"Rule,omitempty" json:"-"`
+ AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
+ Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
+ DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"`
+ ID string `xml:"ID" json:"ID"`
+ RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"`
+ NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
+ NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"`
+ Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+ Status string `xml:"Status" json:"Status"`
+ Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"`
+}
+
+// Configuration is a collection of Rule objects.
+type Configuration struct {
+ XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"`
+ Rules []Rule `xml:"Rule"`
+}
+
+// Empty checks if the lifecycle configuration is empty
+func (c *Configuration) Empty() bool {
+ if c == nil {
+ return true
+ }
+ return len(c.Rules) == 0
+}
+
+// NewConfiguration initializes a fresh lifecycle configuration
+// for manipulation, such as setting and removing lifecycle rules
+// and filters.
+func NewConfiguration() *Configuration {
+ return &Configuration{}
+}
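For reference, a sketch (not part of the vendored file) that builds a one-rule lifecycle configuration with the types above and prints its XML form, which is what a bucket lifecycle API call submits; the rule ID, prefix, and expiry days are placeholders.

package main

import (
	"encoding/xml"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

func main() {
	config := lifecycle.NewConfiguration()
	config.Rules = []lifecycle.Rule{
		{
			ID:     "expire-logs",
			Status: "Enabled",
			RuleFilter: lifecycle.Filter{
				Prefix: "logs/",
			},
			// Objects under logs/ expire 30 days after creation.
			Expiration: lifecycle.Expiration{
				Days: lifecycle.ExpirationDays(30),
			},
		},
	}

	out, err := xml.Marshal(config)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}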
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
new file mode 100644
index 000000000..126661a9e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
@@ -0,0 +1,78 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package notification
+
+// identity represents the user id, this is a compliance field.
+type identity struct {
+ PrincipalID string `json:"principalId"`
+}
+
+// event bucket metadata.
+type bucketMeta struct {
+ Name string `json:"name"`
+ OwnerIdentity identity `json:"ownerIdentity"`
+ ARN string `json:"arn"`
+}
+
+// event object metadata.
+type objectMeta struct {
+ Key string `json:"key"`
+ Size int64 `json:"size,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ ContentType string `json:"contentType,omitempty"`
+ UserMetadata map[string]string `json:"userMetadata,omitempty"`
+ VersionID string `json:"versionId,omitempty"`
+ Sequencer string `json:"sequencer"`
+}
+
+// event server specific metadata.
+type eventMeta struct {
+ SchemaVersion string `json:"s3SchemaVersion"`
+ ConfigurationID string `json:"configurationId"`
+ Bucket bucketMeta `json:"bucket"`
+ Object objectMeta `json:"object"`
+}
+
+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+ Host string `json:"host"`
+ Port string `json:"port"`
+ UserAgent string `json:"userAgent"`
+}
+
+// Event represents an Amazon S3 bucket notification event.
+type Event struct {
+ EventVersion string `json:"eventVersion"`
+ EventSource string `json:"eventSource"`
+ AwsRegion string `json:"awsRegion"`
+ EventTime string `json:"eventTime"`
+ EventName string `json:"eventName"`
+ UserIdentity identity `json:"userIdentity"`
+ RequestParameters map[string]string `json:"requestParameters"`
+ ResponseElements map[string]string `json:"responseElements"`
+ S3 eventMeta `json:"s3"`
+ Source sourceInfo `json:"source"`
+}
+
+// Info - represents the collection of notification events; it additionally
+// reports any errors encountered while listening on bucket notifications.
+type Info struct {
+ Records []Event
+ Err error
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
new file mode 100644
index 000000000..151ca21e8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -0,0 +1,441 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package notification
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/minio/minio-go/v7/pkg/set"
+)
+
+// EventType is a S3 notification event associated to the bucket notification configuration
+type EventType string
+
+// The role of all event types is described in:
+//
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+ ObjectCreatedAll EventType = "s3:ObjectCreated:*"
+ ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
+ ObjectCreatedPost EventType = "s3:ObjectCreated:Post"
+ ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy"
+ ObjectCreatedDeleteTagging EventType = "s3:ObjectCreated:DeleteTagging"
+ ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
+ ObjectCreatedPutLegalHold EventType = "s3:ObjectCreated:PutLegalHold"
+ ObjectCreatedPutRetention EventType = "s3:ObjectCreated:PutRetention"
+ ObjectCreatedPutTagging EventType = "s3:ObjectCreated:PutTagging"
+ ObjectAccessedGet EventType = "s3:ObjectAccessed:Get"
+ ObjectAccessedHead EventType = "s3:ObjectAccessed:Head"
+ ObjectAccessedGetRetention EventType = "s3:ObjectAccessed:GetRetention"
+ ObjectAccessedGetLegalHold EventType = "s3:ObjectAccessed:GetLegalHold"
+ ObjectAccessedAll EventType = "s3:ObjectAccessed:*"
+ ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
+ ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete"
+ ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
+ ILMDelMarkerExpirationDelete EventType = "s3:LifecycleDelMarkerExpiration:Delete"
+ ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject"
+ ObjectTransitionAll EventType = "s3:ObjectTransition:*"
+ ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed"
+ ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete"
+ ObjectTransitionPost EventType = "s3:ObjectRestore:Post"
+ ObjectTransitionCompleted EventType = "s3:ObjectRestore:Completed"
+ ObjectReplicationAll EventType = "s3:Replication:*"
+ ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication"
+ ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication"
+ ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold"
+ ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked"
+ ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold"
+ ObjectScannerManyVersions EventType = "s3:Scanner:ManyVersions"
+ ObjectScannerBigPrefix EventType = "s3:Scanner:BigPrefix"
+ ObjectScannerAll EventType = "s3:Scanner:*"
+ BucketCreatedAll EventType = "s3:BucketCreated:*"
+ BucketRemovedAll EventType = "s3:BucketRemoved:*"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+ Name string `xml:"Name"`
+ Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+ FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+ S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service,
+// ARN description can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+ Partition string
+ Service string
+ Region string
+ AccountID string
+ Resource string
+}
+
+// NewArn creates new ARN based on the given partition, service, region, account id and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+ return Arn{
+ Partition: partition,
+ Service: service,
+ Region: region,
+ AccountID: accountID,
+ Resource: resource,
+ }
+}
+
+var (
+ // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn'
+ ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'")
+ // ErrInvalidArnFormat is returned when ARN string format is not valid
+	ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:<partition>:<service>:<region>:<accountID>:<resource>'")
+)
+
+// NewArnFromString parses string representation of ARN into Arn object.
+// Returns an error if the string format is incorrect.
+func NewArnFromString(arn string) (Arn, error) {
+ parts := strings.Split(arn, ":")
+ if len(parts) != 6 {
+ return Arn{}, ErrInvalidArnFormat
+ }
+ if parts[0] != "arn" {
+ return Arn{}, ErrInvalidArnPrefix
+ }
+
+ return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil
+}
+
+// String returns the string format of the ARN
+func (arn Arn) String() string {
+ return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
+
+// Config - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type Config struct {
+ ID string `xml:"Id,omitempty"`
+ Arn Arn `xml:"-"`
+ Events []EventType `xml:"Event"`
+ Filter *Filter `xml:"Filter,omitempty"`
+}
+
+// NewConfig creates one notification config and sets the given ARN
+func NewConfig(arn Arn) Config {
+ return Config{Arn: arn, Filter: &Filter{}}
+}
+
+// AddEvents adds one event to the current notification config
+func (t *Config) AddEvents(events ...EventType) {
+ t.Events = append(t.Events, events...)
+}
+
+// AddFilterSuffix sets the suffix configuration to the current notification config
+func (t *Config) AddFilterSuffix(suffix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "suffix", Value: suffix}
+ // Replace any suffix rule if existing and add to the list otherwise
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// AddFilterPrefix sets the prefix configuration to the current notification config
+func (t *Config) AddFilterPrefix(prefix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "prefix", Value: prefix}
+ // Replace any prefix rule if existing and add to the list otherwise
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// EqualEventTypeList tells whether a and b contain the same events
+func EqualEventTypeList(a, b []EventType) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ setA := set.NewStringSet()
+ for _, i := range a {
+ setA.Add(string(i))
+ }
+
+ setB := set.NewStringSet()
+ for _, i := range b {
+ setB.Add(string(i))
+ }
+
+ return setA.Difference(setB).IsEmpty()
+}
+
+// EqualFilterRuleList tells whether a and b contain the same filters
+func EqualFilterRuleList(a, b []FilterRule) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ setA := set.NewStringSet()
+ for _, i := range a {
+ setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
+ }
+
+ setB := set.NewStringSet()
+ for _, i := range b {
+ setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
+ }
+
+ return setA.Difference(setB).IsEmpty()
+}
+
+// Equal returns whether this `Config` is equal to another defined by the passed parameters
+func (t *Config) Equal(events []EventType, prefix, suffix string) bool {
+ if t == nil {
+ return false
+ }
+
+ // Compare events
+ passEvents := EqualEventTypeList(t.Events, events)
+
+ // Compare filters
+ var newFilterRules []FilterRule
+ if prefix != "" {
+ newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix})
+ }
+ if suffix != "" {
+ newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix})
+ }
+
+ var currentFilterRules []FilterRule
+ if t.Filter != nil {
+ currentFilterRules = t.Filter.S3Key.FilterRules
+ }
+
+ passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules)
+ return passEvents && passFilters
+}
+
+// TopicConfig carries one single topic notification configuration
+type TopicConfig struct {
+ Config
+ Topic string `xml:"Topic"`
+}
+
+// QueueConfig carries one single queue notification configuration
+type QueueConfig struct {
+ Config
+ Queue string `xml:"Queue"`
+}
+
+// LambdaConfig carries one single cloudfunction notification configuration
+type LambdaConfig struct {
+ Config
+ Lambda string `xml:"CloudFunction"`
+}
+
+// Configuration - the struct that represents the whole XML to be sent to the web service
+type Configuration struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
+ TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
+ QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
+}
+
+// AddTopic adds a given topic config to the general bucket notification config
+func (b *Configuration) AddTopic(topicConfig Config) bool {
+ newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()}
+ for _, n := range b.TopicConfigs {
+ // If new config matches existing one
+ if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range topicConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+ return true
+}
+
+// AddQueue adds a given queue config to the general bucket notification config
+func (b *Configuration) AddQueue(queueConfig Config) bool {
+ newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()}
+ for _, n := range b.QueueConfigs {
+ if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range queueConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+ return true
+}
+
+// AddLambda adds a given lambda config to the general bucket notification config
+func (b *Configuration) AddLambda(lambdaConfig Config) bool {
+ newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+ for _, n := range b.LambdaConfigs {
+ if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range lambdaConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+ return true
+}
+
+// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
+func (b *Configuration) RemoveTopicByArn(arn Arn) {
+ var topics []TopicConfig
+ for _, topic := range b.TopicConfigs {
+ if topic.Topic != arn.String() {
+ topics = append(topics, topic)
+ }
+ }
+ b.TopicConfigs = topics
+}
+
+// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete
+var ErrNoConfigMatch = errors.New("no notification configuration matched")
+
+// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+ removeIndex := -1
+ for i, v := range b.TopicConfigs {
+ // if it matches events and filters, mark the index for deletion
+ if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) {
+ removeIndex = i
+ break // since we have at most one matching config
+ }
+ }
+ if removeIndex >= 0 {
+ b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...)
+ return nil
+ }
+ return ErrNoConfigMatch
+}
+
+// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
+func (b *Configuration) RemoveQueueByArn(arn Arn) {
+ var queues []QueueConfig
+ for _, queue := range b.QueueConfigs {
+ if queue.Queue != arn.String() {
+ queues = append(queues, queue)
+ }
+ }
+ b.QueueConfigs = queues
+}
+
+// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+ removeIndex := -1
+ for i, v := range b.QueueConfigs {
+ // if it matches events and filters, mark the index for deletion
+ if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) {
+ removeIndex = i
+ break // since we have at most one matching config
+ }
+ }
+ if removeIndex >= 0 {
+ b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...)
+ return nil
+ }
+ return ErrNoConfigMatch
+}
+
+// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
+func (b *Configuration) RemoveLambdaByArn(arn Arn) {
+ var lambdas []LambdaConfig
+ for _, lambda := range b.LambdaConfigs {
+ if lambda.Lambda != arn.String() {
+ lambdas = append(lambdas, lambda)
+ }
+ }
+ b.LambdaConfigs = lambdas
+}
+
+// RemoveLambdaByArnEventsPrefixSuffix removes a lambda configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+ removeIndex := -1
+ for i, v := range b.LambdaConfigs {
+ // if it matches events and filters, mark the index for deletion
+ if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) {
+ removeIndex = i
+ break // since we have at most one matching config
+ }
+ }
+ if removeIndex >= 0 {
+ b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...)
+ return nil
+ }
+ return ErrNoConfigMatch
+}
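For context, the Add*/Remove* helpers above are normally driven through the notification package's public constructors rather than by hand-building the structs. A minimal sketch of registering and then removing a topic rule, assuming the `NewArn`, `NewConfig`, `AddEvents`, and `AddFilterPrefix` helpers and the `ObjectCreatedAll` event constant defined elsewhere in this package (the ARN, account ID, and prefix are made up):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/notification"
)

func main() {
	// Build an SNS topic ARN and a config that fires on any object creation under "logs/".
	topicArn := notification.NewArn("aws", "sns", "us-east-1", "123456789012", "my-topic")
	topicCfg := notification.NewConfig(topicArn)
	topicCfg.AddEvents(notification.ObjectCreatedAll)
	topicCfg.AddFilterPrefix("logs/")

	var cfg notification.Configuration
	if !cfg.AddTopic(topicCfg) {
		fmt.Println("events overlap with an existing config for this topic and filter")
	}

	// Removal requires the exact ARN, events, prefix and suffix used above.
	err := cfg.RemoveTopicByArnEventsPrefixSuffix(
		topicArn, []notification.EventType{notification.ObjectCreatedAll}, "logs/", "")
	fmt.Println(err) // <nil> when an exact match was found and removed
}
```

Note that `AddTopic` only rejects a new config whose event set overlaps an existing config for the same ARN and filter; disjoint event sets for the same target are appended as separate configurations.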
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
new file mode 100644
index 000000000..65a2f75e9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -0,0 +1,974 @@
+/*
+ * MinIO Client (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package replication
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/rs/xid"
+)
+
+var errInvalidFilter = fmt.Errorf("invalid filter")
+
+// OptionType specifies operation to be performed on config
+type OptionType string
+
+const (
+ // AddOption specifies addition of rule to config
+ AddOption OptionType = "Add"
+ // SetOption specifies modification of existing rule to config
+ SetOption OptionType = "Set"
+
+ // RemoveOption specifies rule options are for removing a rule
+ RemoveOption OptionType = "Remove"
+ // ImportOption is for getting current config
+ ImportOption OptionType = "Import"
+)
+
+// Options represents options to set a replication configuration rule
+type Options struct {
+ Op OptionType
+ RoleArn string
+ ID string
+ Prefix string
+ RuleStatus string
+ Priority string
+ TagString string
+ StorageClass string
+ DestBucket string
+ IsTagSet bool
+ IsSCSet bool
+ ReplicateDeletes string // replicate versioned deletes
+ ReplicateDeleteMarkers string // replicate soft deletes
+ ReplicaSync string // replicate replica metadata modifications
+ ExistingObjectReplicate string
+}
+
+// Tags returns a slice of tags for a rule
+func (opts Options) Tags() ([]Tag, error) {
+ var tagList []Tag
+ tagTokens := strings.Split(opts.TagString, "&")
+ for _, tok := range tagTokens {
+ if tok == "" {
+ break
+ }
+ kv := strings.SplitN(tok, "=", 2)
+ if len(kv) != 2 {
+			return []Tag{}, fmt.Errorf("tags should be entered as ampersand-separated k=v pairs, e.g. k1=v1&k2=v2")
+ }
+ tagList = append(tagList, Tag{
+ Key: kv[0],
+ Value: kv[1],
+ })
+ }
+ return tagList, nil
+}
+
+// Config - replication configuration specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
+type Config struct {
+ XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"`
+ Rules []Rule `xml:"Rule" json:"Rules"`
+ Role string `xml:"Role" json:"Role"`
+}
+
+// Empty returns true if config is not set
+func (c *Config) Empty() bool {
+ return len(c.Rules) == 0
+}
+
+// AddRule adds a new rule to existing replication config. If a rule exists with the
+// same ID, then the rule is replaced.
+func (c *Config) AddRule(opts Options) error {
+ priority, err := strconv.Atoi(opts.Priority)
+ if err != nil {
+ return err
+ }
+ var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite
+ if opts.RoleArn != "" {
+ tokens := strings.Split(opts.RoleArn, ":")
+ if len(tokens) != 6 {
+ return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
+ }
+ switch {
+ case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0:
+ c.Role = opts.RoleArn
+ compatSw = true
+ case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"):
+ c.Role = opts.RoleArn
+ default:
+ return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
+ }
+ }
+
+ var status Status
+ // toggle rule status for edit option
+ switch opts.RuleStatus {
+ case "enable":
+ status = Enabled
+ case "disable":
+ status = Disabled
+ default:
+ return fmt.Errorf("rule state should be either [enable|disable]")
+ }
+
+ tags, err := opts.Tags()
+ if err != nil {
+ return err
+ }
+ andVal := And{
+ Tags: tags,
+ }
+ filter := Filter{Prefix: opts.Prefix}
+ // only a single tag is set.
+ if opts.Prefix == "" && len(tags) == 1 {
+ filter.Tag = tags[0]
+ }
+ // both prefix and tag are present
+ if len(andVal.Tags) > 1 || opts.Prefix != "" {
+ filter.And = andVal
+ filter.And.Prefix = opts.Prefix
+ filter.Prefix = ""
+ filter.Tag = Tag{}
+ }
+ if opts.ID == "" {
+ opts.ID = xid.New().String()
+ }
+
+ destBucket := opts.DestBucket
+ // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
+ if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
+ if len(btokens) == 1 && compatSw {
+ destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
+ } else {
+ return fmt.Errorf("destination bucket needs to be in Arn format")
+ }
+ }
+ dmStatus := Disabled
+ if opts.ReplicateDeleteMarkers != "" {
+ switch opts.ReplicateDeleteMarkers {
+ case "enable":
+ dmStatus = Enabled
+ case "disable":
+ dmStatus = Disabled
+ default:
+ return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable")
+ }
+ }
+
+ vDeleteStatus := Disabled
+ if opts.ReplicateDeletes != "" {
+ switch opts.ReplicateDeletes {
+ case "enable":
+ vDeleteStatus = Enabled
+ case "disable":
+ vDeleteStatus = Disabled
+ default:
+ return fmt.Errorf("ReplicateDeletes should be either enable|disable")
+ }
+ }
+ var replicaSync Status
+ // replica sync is by default Enabled, unless specified.
+ switch opts.ReplicaSync {
+ case "enable", "":
+ replicaSync = Enabled
+ case "disable":
+ replicaSync = Disabled
+ default:
+ return fmt.Errorf("replica metadata sync should be either [enable|disable]")
+ }
+
+ var existingStatus Status
+ if opts.ExistingObjectReplicate != "" {
+ switch opts.ExistingObjectReplicate {
+ case "enable":
+ existingStatus = Enabled
+ case "disable", "":
+ existingStatus = Disabled
+ default:
+ return fmt.Errorf("existingObjectReplicate should be either enable|disable")
+ }
+ }
+ newRule := Rule{
+ ID: opts.ID,
+ Priority: priority,
+ Status: status,
+ Filter: filter,
+ Destination: Destination{
+ Bucket: destBucket,
+ StorageClass: opts.StorageClass,
+ },
+ DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus},
+ DeleteReplication: DeleteReplication{Status: vDeleteStatus},
+ // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow
+ // automatic failover as the expectation in this case is that replica and source should be identical.
+ // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html
+ SourceSelectionCriteria: SourceSelectionCriteria{
+ ReplicaModifications: ReplicaModifications{
+ Status: replicaSync,
+ },
+ },
+ // By default disable existing object replication unless selected
+ ExistingObjectReplication: ExistingObjectReplication{
+ Status: existingStatus,
+ },
+ }
+
+ // validate rule after overlaying priority for pre-existing rule being disabled.
+ if err := newRule.Validate(); err != nil {
+ return err
+ }
+ // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration
+ if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw {
+ for i := range c.Rules {
+ c.Rules[i].Destination.Bucket = c.Role
+ }
+ c.Role = ""
+ }
+
+ for _, rule := range c.Rules {
+ if rule.Priority == newRule.Priority {
+ return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
+ }
+ if rule.ID == newRule.ID {
+ return fmt.Errorf("a rule exists with this ID")
+ }
+ }
+
+ c.Rules = append(c.Rules, newRule)
+ return nil
+}
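For illustration, a rule can be appended by filling `Options` directly; this is a minimal sketch under the assumption that the destination is a MinIO remote-target ARN (the six-token ARN and bucket names are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	var cfg replication.Config

	err := cfg.AddRule(replication.Options{
		Op:         replication.AddOption,
		ID:         "rule-1",
		Priority:   "1",
		RuleStatus: "enable",
		Prefix:     "images/",
		// Hypothetical remote-target ARN; AddRule only requires the six-token arn:...:... shape here.
		DestBucket:             "arn:minio:replication:us-east-1:f1e2d3c4:destbucket",
		ReplicateDeletes:       "enable",
		ReplicateDeleteMarkers: "enable",
	})
	fmt.Println(err, len(cfg.Rules)) // <nil> 1
}
```

`RoleArn` is only needed for AWS-style configurations; for MinIO targets the destination ARN above is enough, and `AddRule` rejects duplicate priorities and rule IDs.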
+
+// EditRule modifies an existing rule in replication config
+func (c *Config) EditRule(opts Options) error {
+ if opts.ID == "" {
+ return fmt.Errorf("rule ID missing")
+ }
+ // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS.
+ if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 {
+ for i := range c.Rules {
+ c.Rules[i].Destination.Bucket = c.Role
+ }
+ c.Role = ""
+ }
+
+ rIdx := -1
+ var newRule Rule
+ for i, rule := range c.Rules {
+ if rule.ID == opts.ID {
+ rIdx = i
+ newRule = rule
+ break
+ }
+ }
+ if rIdx < 0 {
+ return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID)
+ }
+ prefixChg := opts.Prefix != newRule.Prefix()
+ if opts.IsTagSet || prefixChg {
+ prefix := newRule.Prefix()
+ if prefix != opts.Prefix {
+ prefix = opts.Prefix
+ }
+ tags := []Tag{newRule.Filter.Tag}
+ if len(newRule.Filter.And.Tags) != 0 {
+ tags = newRule.Filter.And.Tags
+ }
+ var err error
+ if opts.IsTagSet {
+ tags, err = opts.Tags()
+ if err != nil {
+ return err
+ }
+ }
+ andVal := And{
+ Tags: tags,
+ }
+
+ filter := Filter{Prefix: prefix}
+ // only a single tag is set.
+ if prefix == "" && len(tags) == 1 {
+ filter.Tag = tags[0]
+ }
+ // both prefix and tag are present
+ if len(andVal.Tags) > 1 || prefix != "" {
+ filter.And = andVal
+ filter.And.Prefix = prefix
+ filter.Prefix = ""
+ filter.Tag = Tag{}
+ }
+ newRule.Filter = filter
+ }
+
+ // toggle rule status for edit option
+ if opts.RuleStatus != "" {
+ switch opts.RuleStatus {
+ case "enable":
+ newRule.Status = Enabled
+ case "disable":
+ newRule.Status = Disabled
+ default:
+ return fmt.Errorf("rule state should be either [enable|disable]")
+ }
+ }
+ // set DeleteMarkerReplication rule status for edit option
+ if opts.ReplicateDeleteMarkers != "" {
+ switch opts.ReplicateDeleteMarkers {
+ case "enable":
+ newRule.DeleteMarkerReplication.Status = Enabled
+ case "disable":
+ newRule.DeleteMarkerReplication.Status = Disabled
+ default:
+ return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]")
+ }
+ }
+
+ // set DeleteReplication rule status for edit option. This is a MinIO specific
+ // option to replicate versioned deletes
+ if opts.ReplicateDeletes != "" {
+ switch opts.ReplicateDeletes {
+ case "enable":
+ newRule.DeleteReplication.Status = Enabled
+ case "disable":
+ newRule.DeleteReplication.Status = Disabled
+ default:
+ return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]")
+ }
+ }
+
+ if opts.ReplicaSync != "" {
+ switch opts.ReplicaSync {
+ case "enable", "":
+ newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled
+ case "disable":
+ newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled
+ default:
+ return fmt.Errorf("replica metadata sync should be either [enable|disable]")
+ }
+ }
+
+ if opts.ExistingObjectReplicate != "" {
+ switch opts.ExistingObjectReplicate {
+ case "enable":
+ newRule.ExistingObjectReplication.Status = Enabled
+ case "disable":
+ newRule.ExistingObjectReplication.Status = Disabled
+ default:
+ return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]")
+ }
+ }
+ if opts.IsSCSet {
+ newRule.Destination.StorageClass = opts.StorageClass
+ }
+ if opts.Priority != "" {
+ priority, err := strconv.Atoi(opts.Priority)
+ if err != nil {
+ return err
+ }
+ newRule.Priority = priority
+ }
+ if opts.DestBucket != "" {
+ destBucket := opts.DestBucket
+ // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
+ if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
+ return fmt.Errorf("destination bucket needs to be in Arn format")
+ }
+ newRule.Destination.Bucket = destBucket
+ }
+ // validate rule
+ if err := newRule.Validate(); err != nil {
+ return err
+ }
+ // ensure priority and destination bucket restrictions are not violated
+ for idx, rule := range c.Rules {
+ if rule.Priority == newRule.Priority && rIdx != idx {
+ return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
+ }
+ if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID {
+ if c.Role == newRule.Destination.Bucket {
+ continue
+ }
+ return fmt.Errorf("invalid destination bucket for this rule")
+ }
+ }
+
+ c.Rules[rIdx] = newRule
+ return nil
+}
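Edits work the same way: only the fields set in `Options` are applied, with one caveat visible above: if the prefix or tags differ from the stored rule, the whole `Filter` block is rebuilt, so leaving `Prefix` empty clears an existing one. A small sketch that passes the current prefix back to keep it intact (the function and package names are illustrative, not part of this patch):

```go
package replutil

import "github.com/minio/minio-go/v7/pkg/replication"

// DisableRule flips an existing rule to Disabled and bumps its priority.
// The current prefix is passed back unchanged so the stored filter is preserved.
func DisableRule(cfg *replication.Config, id, prefix string) error {
	return cfg.EditRule(replication.Options{
		Op:         replication.SetOption,
		ID:         id,
		Prefix:     prefix,
		RuleStatus: "disable",
		Priority:   "5",
	})
}
```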
+
+// RemoveRule removes a rule from replication config.
+func (c *Config) RemoveRule(opts Options) error {
+ var newRules []Rule
+ ruleFound := false
+ for _, rule := range c.Rules {
+ if rule.ID != opts.ID {
+ newRules = append(newRules, rule)
+ continue
+ }
+ ruleFound = true
+ }
+ if !ruleFound {
+ return fmt.Errorf("Rule with ID %s not found", opts.ID)
+ }
+ if len(newRules) == 0 {
+ return fmt.Errorf("replication configuration should have at least one rule")
+ }
+ c.Rules = newRules
+ return nil
+}
+
+// Rule - a rule for replication configuration.
+type Rule struct {
+ XMLName xml.Name `xml:"Rule" json:"-"`
+ ID string `xml:"ID,omitempty"`
+ Status Status `xml:"Status"`
+ Priority int `xml:"Priority"`
+ DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"`
+ DeleteReplication DeleteReplication `xml:"DeleteReplication"`
+ Destination Destination `xml:"Destination"`
+ Filter Filter `xml:"Filter" json:"Filter"`
+ SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
+ ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
+}
+
+// Validate validates the rule for correctness
+func (r Rule) Validate() error {
+ if err := r.validateID(); err != nil {
+ return err
+ }
+ if err := r.validateStatus(); err != nil {
+ return err
+ }
+ if err := r.validateFilter(); err != nil {
+ return err
+ }
+
+ if r.Priority < 0 && r.Status == Enabled {
+ return fmt.Errorf("priority must be set for the rule")
+ }
+
+ if err := r.validateStatus(); err != nil {
+ return err
+ }
+ return r.ExistingObjectReplication.Validate()
+}
+
+// validateID - checks if ID is valid or not.
+func (r Rule) validateID() error {
+ // cannot be longer than 255 characters
+ if len(r.ID) > 255 {
+		return fmt.Errorf("ID must not be longer than 255 characters")
+ }
+ return nil
+}
+
+// validateStatus - checks if status is valid or not.
+func (r Rule) validateStatus() error {
+ // Status can't be empty
+ if len(r.Status) == 0 {
+ return fmt.Errorf("status cannot be empty")
+ }
+
+ // Status must be one of Enabled or Disabled
+ if r.Status != Enabled && r.Status != Disabled {
+ return fmt.Errorf("status must be set to either Enabled or Disabled")
+ }
+ return nil
+}
+
+func (r Rule) validateFilter() error {
+ return r.Filter.Validate()
+}
+
+// Prefix - a rule can either have a prefix directly under <Filter> or under
+// <Filter><And>. This method returns the prefix from whichever location it
+// is available in.
+func (r Rule) Prefix() string {
+ if r.Filter.Prefix != "" {
+ return r.Filter.Prefix
+ }
+ return r.Filter.And.Prefix
+}
+
+// Tags - a rule can either have tags directly under <Filter> or under
+// <Filter><And>. This method returns all the tags from the
+// rule in the format tag1=value1&tag2=value2
+func (r Rule) Tags() string {
+ ts := []Tag{r.Filter.Tag}
+ if len(r.Filter.And.Tags) != 0 {
+ ts = r.Filter.And.Tags
+ }
+
+ var buf bytes.Buffer
+ for _, t := range ts {
+ if buf.Len() > 0 {
+ buf.WriteString("&")
+ }
+ buf.WriteString(t.String())
+ }
+ return buf.String()
+}
+
+// Filter - a filter for a replication configuration Rule.
+type Filter struct {
+ XMLName xml.Name `xml:"Filter" json:"-"`
+ Prefix string `json:"Prefix,omitempty"`
+ And And `xml:"And,omitempty" json:"And,omitempty"`
+ Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
+}
+
+// Validate - validates the filter element
+func (f Filter) Validate() error {
+ // A Filter must have exactly one of Prefix, Tag, or And specified.
+ if !f.And.isEmpty() {
+ if f.Prefix != "" {
+ return errInvalidFilter
+ }
+ if !f.Tag.IsEmpty() {
+ return errInvalidFilter
+ }
+ }
+ if f.Prefix != "" {
+ if !f.Tag.IsEmpty() {
+ return errInvalidFilter
+ }
+ }
+ if !f.Tag.IsEmpty() {
+ if err := f.Tag.Validate(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tag - a tag for a replication configuration Rule filter.
+type Tag struct {
+ XMLName xml.Name `json:"-"`
+ Key string `xml:"Key,omitempty" json:"Key,omitempty"`
+ Value string `xml:"Value,omitempty" json:"Value,omitempty"`
+}
+
+func (tag Tag) String() string {
+ if tag.IsEmpty() {
+ return ""
+ }
+ return tag.Key + "=" + tag.Value
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+ return tag.Key == ""
+}
+
+// Validate checks this tag.
+func (tag Tag) Validate() error {
+ if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 {
+ return fmt.Errorf("invalid Tag Key")
+ }
+
+ if utf8.RuneCountInString(tag.Value) > 256 {
+ return fmt.Errorf("invalid Tag Value")
+ }
+ return nil
+}
+
+// Destination - destination in ReplicationConfiguration.
+type Destination struct {
+ XMLName xml.Name `xml:"Destination" json:"-"`
+ Bucket string `xml:"Bucket" json:"Bucket"`
+ StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+}
+
+// And - a tag to combine a prefix and multiple tags for replication configuration rule.
+type And struct {
+ XMLName xml.Name `xml:"And,omitempty" json:"-"`
+ Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+ Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
+}
+
+// isEmpty returns true if neither Prefix nor Tags are set
+func (a And) isEmpty() bool {
+ return len(a.Tags) == 0 && a.Prefix == ""
+}
+
+// Status represents Enabled/Disabled status
+type Status string
+
+// Supported status types
+const (
+ Enabled Status = "Enabled"
+ Disabled Status = "Disabled"
+)
+
+// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
+type DeleteMarkerReplication struct {
+ Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if DeleteMarkerReplication is not set
+func (d DeleteMarkerReplication) IsEmpty() bool {
+ return len(d.Status) == 0
+}
+
+// DeleteReplication - whether versioned deletes are replicated - this
+// is a MinIO specific extension
+type DeleteReplication struct {
+ Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if DeleteReplication is not set
+func (d DeleteReplication) IsEmpty() bool {
+ return len(d.Status) == 0
+}
+
+// ReplicaModifications specifies if replica modification sync is enabled
+type ReplicaModifications struct {
+ Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default
+}
+
+// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration.
+type SourceSelectionCriteria struct {
+ ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"`
+}
+
+// IsValid - checks whether SourceSelectionCriteria is valid or not.
+func (s SourceSelectionCriteria) IsValid() bool {
+ return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled
+}
+
+// Validate source selection criteria
+func (s SourceSelectionCriteria) Validate() error {
+ if (s == SourceSelectionCriteria{}) {
+ return nil
+ }
+ if !s.IsValid() {
+ return fmt.Errorf("invalid ReplicaModification status")
+ }
+ return nil
+}
+
+// ExistingObjectReplication - whether existing object replication is enabled
+type ExistingObjectReplication struct {
+ Status Status `xml:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if ExistingObjectReplication is not set
+func (e ExistingObjectReplication) IsEmpty() bool {
+ return len(e.Status) == 0
+}
+
+// Validate checks that the status, if set, is either Enabled or Disabled.
+func (e ExistingObjectReplication) Validate() error {
+ if e.IsEmpty() {
+ return nil
+ }
+ if e.Status != Disabled && e.Status != Enabled {
+ return fmt.Errorf("invalid ExistingObjectReplication status")
+ }
+ return nil
+}
+
+// TargetMetrics represents inline replication metrics
+// such as pending, failed and completed bytes in total for a bucket remote target
+type TargetMetrics struct {
+ // Completed count
+ ReplicatedCount uint64 `json:"replicationCount,omitempty"`
+ // Completed size in bytes
+ ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
+ // Bandwidth limit in bytes/sec for this target
+ BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
+ // Current bandwidth used in bytes/sec for this target
+ CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
+ // errors seen in replication in last minute, hour and total
+ Failed TimedErrStats `json:"failed,omitempty"`
+ // Deprecated fields
+ // Pending size in bytes
+ PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
+ // Total Replica size in bytes
+ ReplicaSize uint64 `json:"replicaSize,omitempty"`
+ // Failed size in bytes
+ FailedSize uint64 `json:"failedReplicationSize,omitempty"`
+ // Total number of pending operations including metadata updates
+ PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
+ // Total number of failed operations including metadata updates
+ FailedCount uint64 `json:"failedReplicationCount,omitempty"`
+}
+
+// Metrics represents inline replication metrics for a bucket.
+type Metrics struct {
+ Stats map[string]TargetMetrics
+ // Completed size in bytes across targets
+ ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
+ // Total Replica size in bytes across targets
+ ReplicaSize uint64 `json:"replicaSize,omitempty"`
+ // Total Replica counts
+ ReplicaCount int64 `json:"replicaCount,omitempty"`
+ // Total Replicated count
+ ReplicatedCount int64 `json:"replicationCount,omitempty"`
+ // errors seen in replication in last minute, hour and total
+ Errors TimedErrStats `json:"failed,omitempty"`
+ // Total number of entries that are queued for replication
+ QStats InQueueMetric `json:"queued"`
+ // Deprecated fields
+ // Total Pending size in bytes across targets
+ PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
+ // Failed size in bytes across targets
+ FailedSize uint64 `json:"failedReplicationSize,omitempty"`
+ // Total number of pending operations including metadata updates across targets
+ PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
+ // Total number of failed operations including metadata updates across targets
+ FailedCount uint64 `json:"failedReplicationCount,omitempty"`
+}
+
+// RStat - has count and bytes for replication metrics
+type RStat struct {
+ Count float64 `json:"count"`
+ Bytes int64 `json:"bytes"`
+}
+
+// Add two RStat
+func (r RStat) Add(r1 RStat) RStat {
+ return RStat{
+ Count: r.Count + r1.Count,
+ Bytes: r.Bytes + r1.Bytes,
+ }
+}
+
+// TimedErrStats holds error stats for a time period
+type TimedErrStats struct {
+ LastMinute RStat `json:"lastMinute"`
+ LastHour RStat `json:"lastHour"`
+ Totals RStat `json:"totals"`
+}
+
+// Add two TimedErrStats
+func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
+ return TimedErrStats{
+ LastMinute: te.LastMinute.Add(o.LastMinute),
+ LastHour: te.LastHour.Add(o.LastHour),
+ Totals: te.Totals.Add(o.Totals),
+ }
+}
+
+// ResyncTargetsInfo provides replication target information to resync replicated data.
+type ResyncTargetsInfo struct {
+ Targets []ResyncTarget `json:"target,omitempty"`
+}
+
+// ResyncTarget provides the replica resources and resetID to initiate resync replication.
+type ResyncTarget struct {
+ Arn string `json:"arn"`
+ ResetID string `json:"resetid"`
+ StartTime time.Time `json:"startTime,omitempty"`
+ EndTime time.Time `json:"endTime,omitempty"`
+ // Status of resync operation
+ ResyncStatus string `json:"resyncStatus,omitempty"`
+ // Completed size in bytes
+ ReplicatedSize int64 `json:"completedReplicationSize,omitempty"`
+ // Failed size in bytes
+ FailedSize int64 `json:"failedReplicationSize,omitempty"`
+ // Total number of failed operations
+ FailedCount int64 `json:"failedReplicationCount,omitempty"`
+ // Total number of completed operations
+ ReplicatedCount int64 `json:"replicationCount,omitempty"`
+ // Last bucket/object replicated.
+ Bucket string `json:"bucket,omitempty"`
+ Object string `json:"object,omitempty"`
+}
+
+// XferStats holds transfer rate info for uploads/sec
+type XferStats struct {
+ AvgRate float64 `json:"avgRate"`
+ PeakRate float64 `json:"peakRate"`
+ CurrRate float64 `json:"currRate"`
+}
+
+// Merge two XferStats
+func (x *XferStats) Merge(x1 XferStats) {
+ x.AvgRate += x1.AvgRate
+ x.PeakRate += x1.PeakRate
+ x.CurrRate += x1.CurrRate
+}
+
+// QStat holds count and bytes for objects in replication queue
+type QStat struct {
+ Count float64 `json:"count"`
+ Bytes float64 `json:"bytes"`
+}
+
+// Add 2 QStat entries
+func (q *QStat) Add(q1 QStat) {
+ q.Count += q1.Count
+ q.Bytes += q1.Bytes
+}
+
+// InQueueMetric holds stats for objects in replication queue
+type InQueueMetric struct {
+ Curr QStat `json:"curr" msg:"cq"`
+ Avg QStat `json:"avg" msg:"aq"`
+ Max QStat `json:"peak" msg:"pq"`
+}
+
+// MetricName name of replication metric
+type MetricName string
+
+const (
+ // Large is a metric name for large objects >=128MiB
+ Large MetricName = "Large"
+ // Small is a metric name for objects <128MiB size
+ Small MetricName = "Small"
+ // Total is a metric name for total objects
+ Total MetricName = "Total"
+)
+
+// WorkerStat has stats on number of replication workers
+type WorkerStat struct {
+ Curr int32 `json:"curr"`
+ Avg float32 `json:"avg"`
+ Max int32 `json:"max"`
+}
+
+// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
+// and number of entries that failed replication after 3 retries
+type ReplMRFStats struct {
+ LastFailedCount uint64 `json:"failedCount_last5min"`
+ // Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
+ TotalDroppedCount uint64 `json:"droppedCount_since_uptime"`
+ // Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
+ TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"`
+}
+
+// ReplQNodeStats holds stats for a node in replication queue
+type ReplQNodeStats struct {
+ NodeName string `json:"nodeName"`
+ Uptime int64 `json:"uptime"`
+ Workers WorkerStat `json:"activeWorkers"`
+
+ XferStats map[MetricName]XferStats `json:"transferSummary"`
+ TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
+
+ QStats InQueueMetric `json:"queueStats"`
+ MRFStats ReplMRFStats `json:"mrfStats"`
+}
+
+// ReplQueueStats holds stats for replication queue across nodes
+type ReplQueueStats struct {
+ Nodes []ReplQNodeStats `json:"nodes"`
+}
+
+// Workers returns number of workers across all nodes
+func (q ReplQueueStats) Workers() (tot WorkerStat) {
+ for _, node := range q.Nodes {
+ tot.Avg += node.Workers.Avg
+ tot.Curr += node.Workers.Curr
+ if tot.Max < node.Workers.Max {
+ tot.Max = node.Workers.Max
+ }
+ }
+ if len(q.Nodes) > 0 {
+ tot.Avg /= float32(len(q.Nodes))
+ tot.Curr /= int32(len(q.Nodes))
+ }
+ return tot
+}
+
+// qStatSummary returns cluster level stats for objects in replication queue
+func (q ReplQueueStats) qStatSummary() InQueueMetric {
+ m := InQueueMetric{}
+ for _, v := range q.Nodes {
+ m.Avg.Add(v.QStats.Avg)
+ m.Curr.Add(v.QStats.Curr)
+ if m.Max.Count < v.QStats.Max.Count {
+ m.Max.Add(v.QStats.Max)
+ }
+ }
+ return m
+}
+
+// ReplQStats holds stats for objects in replication queue
+type ReplQStats struct {
+ Uptime int64 `json:"uptime"`
+ Workers WorkerStat `json:"workers"`
+
+ XferStats map[MetricName]XferStats `json:"xferStats"`
+ TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
+
+ QStats InQueueMetric `json:"qStats"`
+ MRFStats ReplMRFStats `json:"mrfStats"`
+}
+
+// QStats returns cluster level stats for objects in replication queue
+func (q ReplQueueStats) QStats() (r ReplQStats) {
+ r.QStats = q.qStatSummary()
+ r.XferStats = make(map[MetricName]XferStats)
+ r.TgtXferStats = make(map[string]map[MetricName]XferStats)
+ r.Workers = q.Workers()
+
+ for _, node := range q.Nodes {
+ for arn := range node.TgtXferStats {
+ xmap, ok := node.TgtXferStats[arn]
+ if !ok {
+ xmap = make(map[MetricName]XferStats)
+ }
+ for m, v := range xmap {
+ st, ok := r.XferStats[m]
+ if !ok {
+ st = XferStats{}
+ }
+ st.AvgRate += v.AvgRate
+ st.CurrRate += v.CurrRate
+ st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
+ if _, ok := r.TgtXferStats[arn]; !ok {
+ r.TgtXferStats[arn] = make(map[MetricName]XferStats)
+ }
+ r.TgtXferStats[arn][m] = st
+ }
+ }
+ for k, v := range node.XferStats {
+ st, ok := r.XferStats[k]
+ if !ok {
+ st = XferStats{}
+ }
+ st.AvgRate += v.AvgRate
+ st.CurrRate += v.CurrRate
+ st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
+ r.XferStats[k] = st
+ }
+ r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount
+ r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount
+ r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes
+ r.Uptime += node.Uptime
+ }
+ if len(q.Nodes) > 0 {
+ r.Uptime /= int64(len(q.Nodes)) // average uptime
+ }
+ return
+}
+
+// MetricsV2 represents replication metrics for a bucket.
+type MetricsV2 struct {
+ Uptime int64 `json:"uptime"`
+ CurrentStats Metrics `json:"currStats"`
+ QueueStats ReplQueueStats `json:"queueStats"`
+}
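The assembled `Config` is what ultimately gets serialized as the bucket's `ReplicationConfiguration` document; for a quick look at the wire format it can be rendered with the standard library encoder. A sketch only (the package and function names are illustrative):

```go
package replutil

import (
	"encoding/xml"

	"github.com/minio/minio-go/v7/pkg/replication"
)

// DumpReplicationXML renders a replication.Config as the XML document the S3 API expects.
func DumpReplicationXML(cfg replication.Config) (string, error) {
	out, err := xml.MarshalIndent(cfg, "", "  ")
	if err != nil {
		return "", err
	}
	// <ReplicationConfiguration><Rule>...</Rule>...</ReplicationConfiguration>
	return string(out), nil
}
```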
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
new file mode 100644
index 000000000..0e63ce2f7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -0,0 +1,411 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start with a "."
+ if host[:1] == "." {
+ return false
+ }
+ // All non alphanumeric characters are invalid.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:>/") {
+ return false
+ }
+	// No need to regexp match, since the list is non-exhaustive.
+	// We treat it as valid here and let it fail later.
+ return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3, Google Cloud Storage, and
+// Aliyun OSS support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ // bucketName can be valid but '.' in the hostname will fail SSL
+ // certificate validation. So do not use host-style for such buckets.
+ if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+ return false
+ }
+ // Return true for all other cases
+ return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL)
+}
+
+// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+
+// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
+var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)
+
+// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
+var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)
+
+// amazonS3HostFIPS - regular expression used to determine if an arg is s3 FIPS host.
+var amazonS3HostFIPS = regexp.MustCompile(`^s3-fips.(.*?).amazonaws.com$`)
+
+// amazonS3HostFIPSDualStack - regular expression used to determine if an arg is s3 FIPS host dualstack.
+var amazonS3HostFIPSDualStack = regexp.MustCompile(`^s3-fips.dualstack.(.*?).amazonaws.com$`)
+
+// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
+var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)
+
+// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
+var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)
+
+// amazonS3ChinaHostDualStack - regular expression used to determine if the arg is s3 china host dualstack.
+var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`)
+
+// Regular expression used to determine if the arg is elb host.
+var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)
+
+// Regular expression used to determine if the arg is elb host in china.
+var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`)
+
+// amazonS3HostPrivateLink - regular expression used to determine if an arg is s3 host in AWS PrivateLink interface endpoints style
+var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`)
+
+// GetRegionFromURL - returns a region from url host.
+func GetRegionFromURL(endpointURL url.URL) string {
+ if endpointURL == sentinelURL {
+ return ""
+ }
+ if endpointURL.Host == "s3-external-1.amazonaws.com" {
+ return ""
+ }
+
+ // if elb's are used we cannot calculate which region it may be, just return empty.
+ if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
+ return ""
+ }
+
+ // We check for FIPS dualstack matching first to avoid the non-greedy
+ // regex for FIPS non-dualstack matching a dualstack URL
+ parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+
+ return ""
+}
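A quick illustration of the matching order implemented above, using `net/url` and a few representative endpoint hosts (expected outputs shown as comments):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	hosts := []string{
		"https://s3.eu-west-1.amazonaws.com",
		"https://s3-fips.dualstack.us-east-2.amazonaws.com",
		"https://s3.dualstack.ap-south-1.amazonaws.com",
		"https://bucket.vpce-0123abcd.s3.us-west-2.vpce.amazonaws.com",
	}
	for _, raw := range hosts {
		u, err := url.Parse(raw)
		if err != nil {
			panic(err)
		}
		fmt.Println(u.Host, "->", s3utils.GetRegionFromURL(*u))
	}
	// Output:
	// s3.eu-west-1.amazonaws.com -> eu-west-1
	// s3-fips.dualstack.us-east-2.amazonaws.com -> us-east-2
	// s3.dualstack.ap-south-1.amazonaws.com -> ap-south-1
	// bucket.vpce-0123abcd.s3.us-west-2.vpce.amazonaws.com -> us-west-2
}
```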
+
+// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint.
+func IsAliyunOSSEndpoint(endpointURL url.URL) bool {
+ return strings.HasSuffix(endpointURL.Host, "aliyuncs.com")
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+ if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
+ return true
+ }
+ return GetRegionFromURL(endpointURL) != ""
+}
+
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+ endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" ||
+ IsAmazonFIPSGovCloudEndpoint(endpointURL))
+}
+
+// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud.
+func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-")
+}
+
+// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
+// See https://aws.amazon.com/compliance/fips.
+func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com")
+}
+
+// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
+// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html.
+func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return amazonS3HostPrivateLink.MatchString(endpointURL.Host)
+}
+
+// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
+func IsGoogleEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Hostname() == "storage.googleapis.com"
+}
+
+// Expects ASCII-encoded strings - from the output of EncodePath
+func percentEncodeSlash(s string) string {
+ return strings.ReplaceAll(s, "/", "%2F")
+}
+
+// QueryEncode - encodes query values in their URL encoded form. In
+// addition to the percent encoding performed by urlEncodePath() used
+// here, it also percent encodes '/' (forward slash)
+func QueryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := percentEncodeSlash(EncodePath(k)) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(percentEncodeSlash(EncodePath(v)))
+ }
+ }
+ return buf.String()
+}
+
+// TagDecode - decodes canonical tag into map of key and value.
+func TagDecode(ctag string) map[string]string {
+ if ctag == "" {
+ return map[string]string{}
+ }
+ tags := strings.Split(ctag, "&")
+ tagMap := make(map[string]string, len(tags))
+ var err error
+ for _, tag := range tags {
+ kvs := strings.SplitN(tag, "=", 2)
+ if len(kvs) == 0 {
+ return map[string]string{}
+ }
+ if len(kvs) == 1 {
+ return map[string]string{}
+ }
+ tagMap[kvs[0]], err = url.PathUnescape(kvs[1])
+ if err != nil {
+ continue
+ }
+ }
+ return tagMap
+}
+
+// TagEncode - encodes tag values in their URL encoded form. In
+// addition to the percent encoding performed by urlEncodePath() used
+// here, it also percent encodes '/' (forward slash)
+func TagEncode(tags map[string]string) string {
+ if tags == nil {
+ return ""
+ }
+ values := url.Values{}
+ for k, v := range tags {
+ values[k] = []string{v}
+ }
+ return QueryEncode(values)
+}
+
+// if object matches reserved string, no need to encode them
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// EncodePath encodes a string from its UTF-8 byte representation to HTML hex escape sequences
+//
+// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8;
+// non-English characters cannot be parsed due to the nature in which url.Encode() is written
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique to support
+// pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname strings.Builder
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname.WriteRune(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname.WriteRune(s)
+ continue
+ default:
+ l := utf8.RuneLen(s)
+ if l < 0 {
+ // if utf8 cannot convert return the same string as is
+ return pathName
+ }
+ u := make([]byte, l)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname.WriteString("%" + strings.ToUpper(hex))
+ }
+ }
+ }
+ return encodedPathname.String()
+}
+
+// We support '.' in bucket names, but we fall back to using path-style
+// requests instead for such buckets.
+var (
+ validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
+ validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+ ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+)
+
+// Common checker for both stricter and basic validation.
+func checkBucketNameCommon(bucketName string, strict bool) (err error) {
+ if strings.TrimSpace(bucketName) == "" {
+ return errors.New("Bucket name cannot be empty")
+ }
+ if len(bucketName) < 3 {
+ return errors.New("Bucket name cannot be shorter than 3 characters")
+ }
+ if len(bucketName) > 63 {
+ return errors.New("Bucket name cannot be longer than 63 characters")
+ }
+ if ipAddress.MatchString(bucketName) {
+ return errors.New("Bucket name cannot be an ip address")
+ }
+ if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") {
+ return errors.New("Bucket name contains invalid characters")
+ }
+ if strict {
+ if !validBucketNameStrict.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+ }
+ if !validBucketName.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+}
+
+// CheckValidBucketName - checks if we have a valid input bucket name.
+func CheckValidBucketName(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, false)
+}
+
+// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
+// This is a stricter version.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func CheckValidBucketNameStrict(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, true)
+}
+
+// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectNamePrefix(objectName string) error {
+ if len(objectName) > 1024 {
+ return errors.New("Object name cannot be longer than 1024 characters")
+ }
+ if !utf8.ValidString(objectName) {
+ return errors.New("Object name with non UTF-8 strings are not supported")
+ }
+ return nil
+}
+
+// CheckValidObjectName - checks if we have a valid input object name.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectName(objectName string) error {
+ if strings.TrimSpace(objectName) == "" {
+ return errors.New("Object name cannot be empty")
+ }
+ return CheckValidObjectNamePrefix(objectName)
+}
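A short sketch of the validation and encoding helpers above; the bucket and object names are made up, and the expected results are shown as comments:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	fmt.Println(s3utils.CheckValidBucketName("my.bucket-1"))       // <nil>
	fmt.Println(s3utils.CheckValidBucketNameStrict("My_Bucket"))   // Bucket name contains invalid characters
	fmt.Println(s3utils.CheckValidObjectName("docs/Ünicode name")) // <nil>

	// Characters outside the unreserved set (RFC 3986 §2.3) are percent-encoded.
	fmt.Println(s3utils.EncodePath("docs/Ünicode name")) // docs/%C3%9Cnicode%20name
}
```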
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
new file mode 100644
index 000000000..c265ce572
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
@@ -0,0 +1,195 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/goccy/go-json"
+)
+
+// StringSet - uses map as set of strings.
+type StringSet map[string]struct{}
+
+// ToSlice - returns StringSet as string slice.
+func (set StringSet) ToSlice() []string {
+ keys := make([]string, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set StringSet) IsEmpty() bool {
+ return len(set) == 0
+}
+
+// Add - adds string to the set.
+func (set StringSet) Add(s string) {
+ set[s] = struct{}{}
+}
+
+// Remove - removes string in the set. It does nothing if string does not exist in the set.
+func (set StringSet) Remove(s string) {
+ delete(set, s)
+}
+
+// Contains - checks if string is in the set.
+func (set StringSet) Contains(s string) bool {
+ _, ok := set[s]
+ return ok
+}
+
+// FuncMatch - returns a new set containing each value that passes the match function.
+// A 'matchFn' should accept an element of the set as its first argument and
+// 'matchString' as its second argument. The function can apply any logic to
+// compare the two arguments and should return true to include the element in
+// the output set; otherwise the element is ignored.
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if matchFn(k, matchString) {
+ nset.Add(k)
+ }
+ }
+ return nset
+}
+
+// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
+// An 'applyFn' should accept an element of the set as an argument and return
+// a processed string. The function can apply any logic to produce the
+// processed string.
+func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(applyFn(k))
+ }
+ return nset
+}
+
+// Equals - checks whether given set is equal to current set or not.
+func (set StringSet) Equals(sset StringSet) bool {
+ // If length of set is not equal to length of given set, the
+ // set is not equal to given set.
+ if len(set) != len(sset) {
+ return false
+ }
+
+ // As both sets are equal in length, check each elements are equal.
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set StringSet) Intersection(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Difference - returns the difference with given set as new set.
+func (set StringSet) Difference(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Union - returns the union with given set as new set.
+func (set StringSet) Union(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(k)
+ }
+
+ for k := range sset {
+ nset.Add(k)
+ }
+
+ return nset
+}
+
+// MarshalJSON - converts to JSON data.
+func (set StringSet) MarshalJSON() ([]byte, error) {
+ return json.Marshal(set.ToSlice())
+}
+
+// UnmarshalJSON - parses JSON data and creates new set with it.
+func (set *StringSet) UnmarshalJSON(data []byte) error {
+ sl := []interface{}{}
+ var err error
+ if err = json.Unmarshal(data, &sl); err == nil {
+ *set = make(StringSet)
+ for _, s := range sl {
+ set.Add(fmt.Sprintf("%v", s))
+ }
+ } else {
+ var s interface{}
+ if err = json.Unmarshal(data, &s); err == nil {
+ *set = make(StringSet)
+ set.Add(fmt.Sprintf("%v", s))
+ }
+ }
+
+ return err
+}
+
+// String - returns printable string of the set.
+func (set StringSet) String() string {
+ return fmt.Sprintf("%s", set.ToSlice())
+}
+
+// NewStringSet - creates new string set.
+func NewStringSet() StringSet {
+ return make(StringSet)
+}
+
+// CreateStringSet - creates new string set with given string values.
+func CreateStringSet(sl ...string) StringSet {
+ set := make(StringSet)
+ for _, k := range sl {
+ set.Add(k)
+ }
+ return set
+}
+
+// CopyStringSet - returns copy of given set.
+func CopyStringSet(set StringSet) StringSet {
+ nset := NewStringSet()
+ for k, v := range set {
+ nset[k] = v
+ }
+ return nset
+}
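For illustration, these are the set operations that the notification `AddTopic`/`AddQueue`/`AddLambda` helpers earlier in this patch use for their overlap check; a non-empty intersection means the incoming events duplicate ones already registered for the same ARN and filter:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	existing := set.CreateStringSet("s3:ObjectCreated:*", "s3:ObjectRemoved:*")
	incoming := set.CreateStringSet("s3:ObjectCreated:*", "s3:ObjectAccessed:*")

	// Overlap check as used by Configuration.AddTopic: string-equal events only.
	overlap := incoming.Intersection(existing)
	fmt.Println(overlap.IsEmpty(), overlap.ToSlice()) // false [s3:ObjectCreated:*]
}
```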
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
new file mode 100644
index 000000000..77540e2d8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
@@ -0,0 +1,224 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// getUnsignedChunkLength - calculates the length of chunk metadata
+func getUnsignedChunkLength(chunkDataSize int64) int64 {
+ return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
+ crlfLen +
+ chunkDataSize +
+ crlfLen
+}
+
+// getUSStreamLength - calculates the length of the overall stream (data + metadata)
+func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
+ if dataLen <= 0 {
+ return 0
+ }
+
+ chunksCount := int64(dataLen / chunkSize)
+ remainingBytes := int64(dataLen % chunkSize)
+ streamLen := int64(0)
+ streamLen += chunksCount * getUnsignedChunkLength(chunkSize)
+ if remainingBytes > 0 {
+ streamLen += getUnsignedChunkLength(remainingBytes)
+ }
+ streamLen += getUnsignedChunkLength(0)
+ if len(trailers) > 0 {
+ for name, placeholder := range trailers {
+ if len(placeholder) > 0 {
+ streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
+ }
+ }
+ streamLen += crlfLen
+ }
+
+ return streamLen
+}
+
+// prepareUSStreamingRequest - prepares a request with the appropriate
+// headers before streaming an unsigned payload.
+func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
+ req.TransferEncoding = []string{"aws-chunked"}
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
+ // Set content length with streaming signature for each chunk included.
+ req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
+}
+
+// StreamingUSReader implements unsigned chunked upload as a reader on
+// top of req.Body's ReadCloser: chunk header;data;... repeated
+type StreamingUSReader struct {
+ contentLen int64 // Content-Length from req header
+ baseReadCloser io.ReadCloser // underlying io.Reader
+ bytesRead int64 // bytes read from underlying io.Reader
+ buf bytes.Buffer // holds signed chunk
+ chunkBuf []byte // holds raw data read from req Body
+ chunkBufLen int // no. of bytes read so far into chunkBuf
+ done bool // done reading the underlying reader to EOF
+ chunkNum int
+ totalChunks int
+ lastChunkSize int
+ trailer http.Header
+}
+
+// writeChunk - writes a chunk of chunkLen size, read from s.baseReader, into the streaming buffer.
+func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) {
+ s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n")
+
+ // Write chunk data into streaming buffer
+ s.buf.Write(s.chunkBuf[:chunkLen])
+
+ // Write the chunk trailer.
+ if addCrLf {
+ s.buf.Write([]byte("\r\n"))
+ }
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// addTrailer - adds a trailer with the provided headers,
+// then writes it to the output buffer.
+func (s *StreamingUSReader) addTrailer(h http.Header) {
+ olen := len(s.chunkBuf)
+ s.chunkBuf = s.chunkBuf[:0]
+ for k, v := range h {
+ s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
+ }
+
+ s.buf.Write(s.chunkBuf)
+ s.buf.WriteString("\r\n\r\n")
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBuf = s.chunkBuf[:olen]
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// StreamingUnsignedV4 - provides chunked upload
+func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request {
+ // Set headers needed for streaming signature.
+ prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)
+
+ if req.Body == nil {
+ req.Body = io.NopCloser(bytes.NewReader([]byte("")))
+ }
+
+ stReader := &StreamingUSReader{
+ baseReadCloser: req.Body,
+ chunkBuf: make([]byte, payloadChunkSize),
+ contentLen: dataLen,
+ chunkNum: 1,
+ totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
+ lastChunkSize: int(dataLen % payloadChunkSize),
+ }
+ if len(req.Trailer) > 0 {
+ stReader.trailer = req.Trailer
+ // Remove...
+ req.Trailer = nil
+ }
+
+ req.Body = stReader
+
+ return req
+}
+
+// Read - this method performs the unsigned chunked-upload framing, providing an
+// io.Reader interface.
+func (s *StreamingUSReader) Read(buf []byte) (int, error) {
+ switch {
+ // After the last chunk is read from underlying reader, we
+ // never re-fill s.buf.
+ case s.done:
+
+	// s.buf will be (re-)filled with the next chunk when it has fewer
+	// bytes than asked for.
+ case s.buf.Len() < len(buf):
+ s.chunkBufLen = 0
+ for {
+ n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
+ // Usually we validate `err` first, but in this case
+ // we are validating n > 0 for the following reasons.
+ //
+ // 1. n > 0, err is one of io.EOF, nil (near end of stream)
+ // A Reader returning a non-zero number of bytes at the end
+ // of the input stream may return either err == EOF or err == nil
+ //
+ // 2. n == 0, err is io.EOF (actual end of stream)
+ //
+ // Callers should always process the n > 0 bytes returned
+ // before considering the error err.
+ if n1 > 0 {
+ s.chunkBufLen += n1
+ s.bytesRead += int64(n1)
+
+ if s.chunkBufLen == payloadChunkSize ||
+ (s.chunkNum == s.totalChunks-1 &&
+ s.chunkBufLen == s.lastChunkSize) {
+					// Frame the chunk and write it to s.buf.
+ s.writeChunk(s.chunkBufLen, true)
+ break
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ // No more data left in baseReader - last chunk.
+ // Done reading the last chunk from baseReader.
+ s.done = true
+
+ // bytes read from baseReader different than
+ // content length provided.
+ if s.bytesRead != s.contentLen {
+ return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
+ }
+
+ // Sign the chunk and write it to s.buf.
+ s.writeChunk(0, len(s.trailer) == 0)
+ if len(s.trailer) > 0 {
+ // Trailer must be set now.
+ s.addTrailer(s.trailer)
+ }
+ break
+ }
+ return 0, err
+ }
+
+ }
+ }
+ return s.buf.Read(buf)
+}
+
+// Close - this method makes underlying io.ReadCloser's Close method available.
+func (s *StreamingUSReader) Close() error {
+ return s.baseReadCloser.Close()
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
new file mode 100644
index 000000000..1c2f1dc9d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
@@ -0,0 +1,403 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ md5simd "github.com/minio/md5-simd"
+)
+
+// Reference for constants used below -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
+const (
+ streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
+ streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
+ streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER"
+ emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ payloadChunkSize = 64 * 1024
+ chunkSigConstLen = 17 // ";chunk-signature="
+ signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
+ crlfLen = 2 // CRLF
+ trailerKVSeparator = ":"
+ trailerSignature = "x-amz-trailer-signature"
+)
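+
+// Illustrative wire layout of a single signed chunk as produced by this package
+// (a sketch for orientation; values are placeholders, not normative):
+//
+//   <hex-chunk-size>;chunk-signature=<64-hex-chars>\r\n
+//   <chunk-data>\r\n
+//
+// The stream ends with a zero-length chunk and, when trailers are present,
+// the trailer headers followed by x-amz-trailer-signature.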
+
+// Request headers to be ignored while calculating seed signature for
+// a request.
+var ignoredStreamingHeaders = map[string]bool{
+ "Authorization": true,
+ "User-Agent": true,
+ "Content-Type": true,
+}
+
+// getSignedChunkLength - calculates the on-wire length of a signed chunk (metadata plus data).
+func getSignedChunkLength(chunkDataSize int64) int64 {
+ return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
+ chunkSigConstLen +
+ signatureStrLen +
+ crlfLen +
+ chunkDataSize +
+ crlfLen
+}
+
+// getStreamLength - calculates the length of the overall stream (data + metadata)
+func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
+ if dataLen <= 0 {
+ return 0
+ }
+
+ chunksCount := int64(dataLen / chunkSize)
+ remainingBytes := int64(dataLen % chunkSize)
+ streamLen := int64(0)
+ streamLen += chunksCount * getSignedChunkLength(chunkSize)
+ if remainingBytes > 0 {
+ streamLen += getSignedChunkLength(remainingBytes)
+ }
+ streamLen += getSignedChunkLength(0)
+ if len(trailers) > 0 {
+ for name, placeholder := range trailers {
+ if len(placeholder) > 0 {
+ streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
+ }
+ }
+ streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen
+ }
+
+ return streamLen
+}
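+
+// Worked example (assuming the default payloadChunkSize of 64 KiB and no trailers):
+// for dataLen = 65536 the stream is one full chunk plus the terminating empty chunk, so
+//
+//   getSignedChunkLength(65536) + getSignedChunkLength(0)
+//     = (5 + 17 + 64 + 2 + 65536 + 2) + (1 + 17 + 64 + 2 + 0 + 2)
+//     = 65626 + 86 = 65712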
+
+// buildChunkStringToSign - returns the string to sign given chunk data
+// and previous signature.
+func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
+ stringToSignParts := []string{
+ streamingPayloadHdr,
+ t.Format(iso8601DateFormat),
+ getScope(region, t, ServiceTypeS3),
+ previousSig,
+ emptySHA256,
+ chunkChecksum,
+ }
+
+ return strings.Join(stringToSignParts, "\n")
+}
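+
+// For example, the resulting string to sign has the following shape
+// (the date, scope, and signatures below are placeholders):
+//
+//   AWS4-HMAC-SHA256-PAYLOAD
+//   20240101T000000Z
+//   20240101/us-east-1/s3/aws4_request
+//   <previous-signature-in-hex>
+//   e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+//   <sha256-of-chunk-data-in-hex>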
+
+// buildTrailerChunkStringToSign - returns the string to sign given chunk data
+// and previous signature.
+func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
+ stringToSignParts := []string{
+ streamingTrailerHdr,
+ t.Format(iso8601DateFormat),
+ getScope(region, t, ServiceTypeS3),
+ previousSig,
+ chunkChecksum,
+ }
+
+ return strings.Join(stringToSignParts, "\n")
+}
+
+// prepareStreamingRequest - prepares a request with appropriate
+// headers before computing the seed signature.
+func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
+ // Set x-amz-content-sha256 header.
+ if len(req.Trailer) == 0 {
+ req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
+ } else {
+ req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm)
+ for k := range req.Trailer {
+ req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+ }
+ req.TransferEncoding = []string{"aws-chunked"}
+ }
+
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
+ // Set content length with streaming signature for each chunk included.
+ req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
+ req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
+}
+
+// buildChunkHeader - returns the chunk header.
+// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
+func buildChunkHeader(chunkLen int64, signature string) []byte {
+ return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
+}
+
+// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
+func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
+ previousSignature, secretAccessKey string,
+) string {
+ chunkStringToSign := buildChunkStringToSign(reqTime, region,
+ previousSignature, chunkCheckSum)
+ signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
+ return getSignature(signingKey, chunkStringToSign)
+}
+
+// buildTrailerChunkSignature - returns the trailer chunk signature for a given trailer checksum and previous signature.
+func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
+ previousSignature, secretAccessKey string,
+) string {
+ chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
+ previousSignature, chunkChecksum)
+ signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
+ return getSignature(signingKey, chunkStringToSign)
+}
+
+// setSeedSignature - computes and stores the seed signature for a given request.
+func (s *StreamingReader) setSeedSignature(req *http.Request) {
+ // Get canonical request
+ canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req))
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3)
+
+ signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3)
+
+ // Calculate signature.
+ s.seedSignature = getSignature(signingKey, stringToSign)
+}
+
+// StreamingReader implements chunked upload signing as a reader on
+// top of req.Body's ReadCloser, emitting chunk header;data;... repeatedly.
+type StreamingReader struct {
+ accessKeyID string
+ secretAccessKey string
+ sessionToken string
+ region string
+ prevSignature string
+ seedSignature string
+ contentLen int64 // Content-Length from req header
+ baseReadCloser io.ReadCloser // underlying io.Reader
+ bytesRead int64 // bytes read from underlying io.Reader
+ buf bytes.Buffer // holds signed chunk
+ chunkBuf []byte // holds raw data read from req Body
+ chunkBufLen int // no. of bytes read so far into chunkBuf
+ done bool // done reading the underlying reader to EOF
+ reqTime time.Time
+ chunkNum int
+ totalChunks int
+ lastChunkSize int
+ trailer http.Header
+ sh256 md5simd.Hasher
+}
+
+// signChunk - signs a chunk read from s.baseReader of chunkLen size.
+func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
+ // Compute chunk signature for next header
+ s.sh256.Reset()
+ s.sh256.Write(s.chunkBuf[:chunkLen])
+ chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
+
+ signature := buildChunkSignature(chunkChecksum, s.reqTime,
+ s.region, s.prevSignature, s.secretAccessKey)
+
+ // For next chunk signature computation
+ s.prevSignature = signature
+
+ // Write chunk header into streaming buffer
+ chunkHdr := buildChunkHeader(int64(chunkLen), signature)
+ s.buf.Write(chunkHdr)
+
+ // Write chunk data into streaming buffer
+ s.buf.Write(s.chunkBuf[:chunkLen])
+
+ // Write the chunk trailer.
+ if addCrLf {
+ s.buf.Write([]byte("\r\n"))
+ }
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// addSignedTrailer - adds a trailer with the provided headers,
+// then signs a chunk and adds it to output.
+func (s *StreamingReader) addSignedTrailer(h http.Header) {
+ olen := len(s.chunkBuf)
+ s.chunkBuf = s.chunkBuf[:0]
+ for k, v := range h {
+ s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
+ }
+
+ s.sh256.Reset()
+ s.sh256.Write(s.chunkBuf)
+ chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
+ // Compute chunk signature
+ signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
+ s.region, s.prevSignature, s.secretAccessKey)
+
+ // For next chunk signature computation
+ s.prevSignature = signature
+
+ s.buf.Write(s.chunkBuf)
+ s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n")
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBuf = s.chunkBuf[:olen]
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// setStreamingAuthHeader - builds and sets authorization header value
+// for streaming signature.
+func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
+ credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3)
+ authParts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
+ "Signature=" + s.seedSignature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(authParts, ",")
+ req.Header.Set("Authorization", auth)
+}
+
+// StreamingSignV4 - provides chunked upload signatureV4 support by
+// implementing io.Reader.
+func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
+ region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
+) *http.Request {
+ // Set headers needed for streaming signature.
+ prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
+
+ if req.Body == nil {
+ req.Body = io.NopCloser(bytes.NewReader([]byte("")))
+ }
+
+ stReader := &StreamingReader{
+ baseReadCloser: req.Body,
+ accessKeyID: accessKeyID,
+ secretAccessKey: secretAccessKey,
+ sessionToken: sessionToken,
+ region: region,
+ reqTime: reqTime,
+ chunkBuf: make([]byte, payloadChunkSize),
+ contentLen: dataLen,
+ chunkNum: 1,
+ totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
+ lastChunkSize: int(dataLen % payloadChunkSize),
+ sh256: sh256,
+ }
+ if len(req.Trailer) > 0 {
+ stReader.trailer = req.Trailer
+ // Remove the trailer from the request; the reader appends a signed trailer to the body instead.
+ req.Trailer = nil
+ }
+
+ // Add the request headers required for chunk upload signing.
+
+ // Compute the seed signature.
+ stReader.setSeedSignature(req)
+
+ // Set the authorization header with the seed signature.
+ stReader.setStreamingAuthHeader(req)
+
+ // Set seed signature as prevSignature for subsequent
+ // streaming signing process.
+ stReader.prevSignature = stReader.seedSignature
+ req.Body = stReader
+
+ return req
+}
+
+// Read - performs chunked upload signing, providing
+// an io.Reader interface.
+func (s *StreamingReader) Read(buf []byte) (int, error) {
+ switch {
+ // After the last chunk is read from underlying reader, we
+ // never re-fill s.buf.
+ case s.done:
+
+ // s.buf will be (re-)filled with the next chunk when it has fewer
+ // bytes than asked for.
+ case s.buf.Len() < len(buf):
+ s.chunkBufLen = 0
+ for {
+ n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
+ // Usually we validate `err` first, but in this case
+ // we are validating n > 0 for the following reasons.
+ //
+ // 1. n > 0, err is one of io.EOF, nil (near end of stream)
+ // A Reader returning a non-zero number of bytes at the end
+ // of the input stream may return either err == EOF or err == nil
+ //
+ // 2. n == 0, err is io.EOF (actual end of stream)
+ //
+ // Callers should always process the n > 0 bytes returned
+ // before considering the error err.
+ if n1 > 0 {
+ s.chunkBufLen += n1
+ s.bytesRead += int64(n1)
+
+ if s.chunkBufLen == payloadChunkSize ||
+ (s.chunkNum == s.totalChunks-1 &&
+ s.chunkBufLen == s.lastChunkSize) {
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(s.chunkBufLen, true)
+ break
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ // No more data left in baseReader - last chunk.
+ // Done reading the last chunk from baseReader.
+ s.done = true
+
+ // bytes read from baseReader different than
+ // content length provided.
+ if s.bytesRead != s.contentLen {
+ return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
+ }
+
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(0, len(s.trailer) == 0)
+ if len(s.trailer) > 0 {
+ // Trailer must be set now.
+ s.addSignedTrailer(s.trailer)
+ }
+ break
+ }
+ return 0, err
+ }
+
+ }
+ }
+ return s.buf.Read(buf)
+}
+
+// Close - this method makes underlying io.ReadCloser's Close method available.
+func (s *StreamingReader) Close() error {
+ if s.sh256 != nil {
+ s.sh256.Close()
+ s.sh256 = nil
+ }
+ return s.baseReadCloser.Close()
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
new file mode 100644
index 000000000..fa4f8c91e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
@@ -0,0 +1,319 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+ signV2Algorithm = "AWS"
+)
+
+// Encode input URL path to URL encoded path.
+func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
+ if virtualHost {
+ reqHost := getHostAddr(req)
+ dotPos := strings.Index(reqHost, ".")
+ if dotPos > -1 {
+ bucketName := reqHost[:dotPos]
+ path = "/" + bucketName
+ path += req.URL.Path
+ path = s3utils.EncodePath(path)
+ return
+ }
+ }
+ path = s3utils.EncodePath(req.URL.Path)
+ return
+}
+
+// PreSignV2 - presign the request in following style.
+// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ d := time.Now().UTC()
+ // Find epoch expires when the request will expire.
+ epochExpires := d.Unix() + expires
+
+ // Add expires header if not present.
+ if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
+ req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
+ }
+
+ // Get presigned string to sign.
+ stringToSign := preStringToSignV2(req, virtualHost)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Calculate signature.
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+
+ query := req.URL.Query()
+ // Handle specially for Google Cloud Storage.
+ if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
+ query.Set("GoogleAccessId", accessKeyID)
+ } else {
+ query.Set("AWSAccessKeyId", accessKeyID)
+ }
+
+ // Fill in Expires for presigned query.
+ query.Set("Expires", strconv.FormatInt(epochExpires, 10))
+
+ // Encode query and save.
+ req.URL.RawQuery = s3utils.QueryEncode(query)
+
+ // Save signature finally.
+ req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
+
+ // Return.
+ return &req
+}
+
+// PostPresignSignatureV2 - presigned signature for PostPolicy
+// request.
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(policyBase64))
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+ return signature
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// SignV2 sign the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ d := time.Now().UTC()
+
+ // Add date if not present.
+ if date := req.Header.Get("Date"); date == "" {
+ req.Header.Set("Date", d.Format(http.TimeFormat))
+ }
+
+ // Calculate HMAC for secretAccessKey.
+ stringToSign := stringToSignV2(req, virtualHost)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Prepare auth header.
+ authHeader := new(bytes.Buffer)
+ authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
+ encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
+ encoder.Write(hm.Sum(nil))
+ encoder.Close()
+
+ // Set Authorization header.
+ req.Header.Set("Authorization", authHeader.String())
+
+ return &req
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+//
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Expires + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func preStringToSignV2(req http.Request, virtualHost bool) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writePreSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ writeCanonicalizedResource(buf, req, virtualHost)
+ return buf.String()
+}
+
+// writePreSignV2Headers - write preSign v2 required headers.
+func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Expires") + "\n")
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+//
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func stringToSignV2(req http.Request, virtualHost bool) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writeSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ writeCanonicalizedResource(buf, req, virtualHost)
+ return buf.String()
+}
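+
+// For example, a GET of /bucket/object with empty Content-Md5 and Content-Type,
+// a Date header, and one x-amz-* header produces a string to sign of this shape
+// (placeholder values):
+//
+//   GET\n
+//   \n
+//   \n
+//   Mon, 01 Jan 2024 00:00:00 GMT\n
+//   x-amz-meta-example:value\n
+//   /bucket/object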
+
+// writeSignV2Headers - write signV2 required headers.
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Date") + "\n")
+}
+
+// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
+ var protoHeaders []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ // All the AMZ headers should be lowercase
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-amz") {
+ protoHeaders = append(protoHeaders, lk)
+ vals[lk] = vv
+ }
+ }
+ sort.Strings(protoHeaders)
+ for _, k := range protoHeaders {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+}
+
+// The AWS S3 Signature V2 calculation rule is given here:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
+
+// Whitelist resource list that will be used in query string for signature-V2 calculation.
+//
+// This list should be kept alphabetically sorted, do not hastily edit.
+var resourceList = []string{
+ "acl",
+ "cors",
+ "delete",
+ "encryption",
+ "legal-hold",
+ "lifecycle",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "replication",
+ "requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+ "retention",
+ "select",
+ "select-type",
+ "tagging",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
+ // Save request URL.
+ requestURL := req.URL
+ // Get encoded URL path.
+ buf.WriteString(encodeURL2Path(&req, virtualHost))
+ if requestURL.RawQuery != "" {
+ var n int
+ vals, _ := url.ParseQuery(requestURL.RawQuery)
+ // Verify whether any sub-resource queries are present; if yes,
+ // canonicalize them.
+ for _, resource := range resourceList {
+ if vv, ok := vals[resource]; ok && len(vv) > 0 {
+ n++
+ // First element
+ switch n {
+ case 1:
+ buf.WriteByte('?')
+ // The rest
+ default:
+ buf.WriteByte('&')
+ }
+ buf.WriteString(resource)
+ // Request parameters
+ if len(vv[0]) > 0 {
+ buf.WriteByte('=')
+ buf.WriteString(vv[0])
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
new file mode 100644
index 000000000..ffd251451
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -0,0 +1,351 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+ yyyymmdd = "20060102"
+)
+
+// Different service types
+const (
+ ServiceTypeS3 = "s3"
+ ServiceTypeSTS = "sts"
+)
+
+// Excerpts from @lsegal -
+// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
+//
+// * User-Agent
+// This is ignored from signing because signing this causes problems with generating pre-signed
+// URLs (that are executed by other agents) or when customers pass requests through proxies, which
+// may modify the user-agent.
+//
+// * Authorization
+// Is skipped for obvious reasons.
+//
+// * Accept-Encoding
+// Some S3 servers like Hitachi Content Platform do not honor this header for signature
+// calculation.
+var v4IgnoredHeaders = map[string]bool{
+ "Accept-Encoding": true,
+ "Authorization": true,
+ "User-Agent": true,
+}
+
+// getSigningKey derives the HMAC signing key used to calculate the final signature.
+func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte {
+ date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
+ location := sumHMAC(date, []byte(loc))
+ service := sumHMAC(location, []byte(serviceType))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+ return signingKey
+}
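+
+// The derivation chain, spelled out (a sketch; date, region, and service are placeholders):
+//
+//   dateKey    = HMAC-SHA256("AWS4" + secretAccessKey, "20240101")
+//   regionKey  = HMAC-SHA256(dateKey, "us-east-1")
+//   serviceKey = HMAC-SHA256(regionKey, "s3")
+//   signingKey = HMAC-SHA256(serviceKey, "aws4_request")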
+
+// getSignature final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getScope generate a string of a specific date, an AWS region, and a
+// service.
+func getScope(location string, t time.Time, serviceType string) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ location,
+ serviceType,
+ "aws4_request",
+ }, "/")
+ return scope
+}
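+
+// For example, for region us-east-1 and the S3 service on 2024-01-01 the scope is
+// "20240101/us-east-1/s3/aws4_request".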
+
+// GetCredential generate a credential string.
+func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string {
+ scope := getScope(location, t, serviceType)
+ return accessKeyID + "/" + scope
+}
+
+// getHashedPayload get the hexadecimal value of the SHA256 hash of
+// the request payload.
+func getHashedPayload(req http.Request) string {
+ hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
+ if hashedPayload == "" {
+ // Presign does not have a payload, use S3 recommended value.
+ hashedPayload = unsignedPayload
+ }
+ return hashedPayload
+}
+
+// getCanonicalHeaders generate a list of request headers for
+// signature.
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+ var headers []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // ignored header
+ }
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ if !headerExists("host", headers) {
+ headers = append(headers, "host")
+ }
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ // Save all the headers in canonical form : newline
+ // separated for each header.
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(getHostAddr(&req))
+ buf.WriteByte('\n')
+ default:
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(signV4TrimAll(v))
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ return buf.String()
+}
+
+func headerExists(key string, headers []string) bool {
+ for _, k := range headers {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// getSignedHeaders generate all signed request headers.
+// i.e lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+ var headers []string
+ for k := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // Ignored header found continue.
+ }
+ headers = append(headers, strings.ToLower(k))
+ }
+ if !headerExists("host", headers) {
+ headers = append(headers, "host")
+ }
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generates a canonical request of the following style.
+//
+// canonicalRequest =
+//
+// <HTTPMethod>\n
+// <CanonicalURI>\n
+// <CanonicalQueryString>\n
+// <CanonicalHeaders>\n
+// <SignedHeaders>\n
+// <HashedPayload>
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
+ req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ s3utils.EncodePath(req.URL.Path),
+ req.URL.RawQuery,
+ getCanonicalHeaders(req, ignoredHeaders),
+ getSignedHeaders(req, ignoredHeaders),
+ hashedPayload,
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSignV4 returns the string to sign, derived from the canonical request, timestamp, and scope.
+func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+ stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
+ stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
+ return stringToSign
+}
+
+// PreSignV4 presign the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", signedHeaders)
+ query.Set("X-Amz-Credential", credential)
+ // Set session token if available.
+ if sessionToken != "" {
+ query.Set("X-Amz-Security-Token", sessionToken)
+ }
+ req.URL.RawQuery = query.Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)
+
+ // Get HMAC signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+ return &req
+}
+
+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+ // Get signing key.
+ signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+ // Calculate signature.
+ signature := getSignature(signingkey, policyBase64)
+ return signature
+}
+
+// SignV4STS - signature v4 for STS request.
+func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+ return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil)
+}
+
+// Internal function called for different service types.
+func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Set x-amz-date.
+ req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+ // Set session token if available.
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ if len(trailer) > 0 {
+ for k := range trailer {
+ req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+ }
+
+ req.Header.Set("Content-Encoding", "aws-chunked")
+ req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
+ }
+
+ hashedPayload := getHashedPayload(req)
+ if serviceType == ServiceTypeSTS {
+ // Content sha256 header is not sent with the request
+ // but it is expected to have sha256 of payload for signature
+ // in STS service type request.
+ req.Header.Del("X-Amz-Content-Sha256")
+ }
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t, serviceType)
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t, serviceType)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // If regular request, construct the final authorization header.
+ parts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ if len(trailer) > 0 {
+ // Use custom chunked encoding.
+ req.Trailer = trailer
+ return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
+ }
+ return &req
+}
+
+// SignV4 sign the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
+ return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil)
+}
+
+// SignV4Trailer sign the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request {
+ return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
new file mode 100644
index 000000000..87c993989
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
@@ -0,0 +1,62 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "net/http"
+ "strings"
+)
+
+// unsignedPayload - value to be set to the X-Amz-Content-Sha256 header when the request payload is not signed.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculate sha256 sum for an input byte array.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumHMAC calculate hmac between two input byte array.
+func sumHMAC(key, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getHostAddr returns host header if available, otherwise returns host from URL
+func getHostAddr(req *http.Request) string {
+ host := req.Header.Get("host")
+ if host != "" && req.Host != host {
+ return host
+ }
+ if req.Host != "" {
+ return req.Host
+ }
+ return req.URL.Host
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+ // Compress adjacent spaces (a space is determined by
+ // unicode.IsSpace() internally here) to one space and return
+ return strings.Join(strings.Fields(input), " ")
+}
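+
+// For example, signV4TrimAll("  foo   bar  baz ") returns "foo bar baz".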
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
new file mode 100644
index 000000000..b5fb9565a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
@@ -0,0 +1,66 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sse
+
+import "encoding/xml"
+
+// ApplySSEByDefault defines the default encryption configuration, KMS or SSE. To activate
+// KMS, SSEAlgorithm needs to be set to "aws:kms".
+// MinIO currently does not support KMS.
+type ApplySSEByDefault struct {
+ KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
+ SSEAlgorithm string `xml:"SSEAlgorithm"`
+}
+
+// Rule layer encapsulates default encryption configuration
+type Rule struct {
+ Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
+}
+
+// Configuration is the default encryption configuration structure
+type Configuration struct {
+ XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
+ Rules []Rule `xml:"Rule"`
+}
+
+// NewConfigurationSSES3 initializes a new SSE-S3 configuration
+func NewConfigurationSSES3() *Configuration {
+ return &Configuration{
+ Rules: []Rule{
+ {
+ Apply: ApplySSEByDefault{
+ SSEAlgorithm: "AES256",
+ },
+ },
+ },
+ }
+}
+
+// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
+func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
+ return &Configuration{
+ Rules: []Rule{
+ {
+ Apply: ApplySSEByDefault{
+ KmsMasterKeyID: kmsMasterKey,
+ SSEAlgorithm: "aws:kms",
+ },
+ },
+ },
+ }
+}
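+
+// For illustration, marshaling NewConfigurationSSES3() with encoding/xml produces
+// (whitespace added for readability):
+//
+//   <ServerSideEncryptionConfiguration>
+//     <Rule>
+//       <ApplyServerSideEncryptionByDefault>
+//         <SSEAlgorithm>AES256</SSEAlgorithm>
+//       </ApplyServerSideEncryptionByDefault>
+//     </Rule>
+//   </ServerSideEncryptionConfiguration>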
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
new file mode 100644
index 000000000..7a84a6f34
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -0,0 +1,413 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tags
+
+import (
+ "encoding/xml"
+ "io"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error contains tag specific error.
+type Error interface {
+ error
+ Code() string
+}
+
+type errTag struct {
+ code string
+ message string
+}
+
+// Code contains error code.
+func (err errTag) Code() string {
+ return err.code
+}
+
+// Error contains error message.
+func (err errTag) Error() string {
+ return err.message
+}
+
+var (
+ errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
+ errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"}
+ errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
+ errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
+ errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
+)
+
+// Tag comes with limitations as per
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
+const (
+ maxKeyLength = 128
+ maxValueLength = 256
+ maxObjectTagCount = 10
+ maxTagCount = 50
+)
+
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
+// Based on the above article, and on testing various ASCII characters, the following regex
+// is supported by AWS S3 for both tag keys and values.
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
+
+func checkKey(key string) error {
+ if len(key) == 0 {
+ return errInvalidTagKey
+ }
+
+ if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) {
+ return errInvalidTagKey
+ }
+
+ return nil
+}
+
+func checkValue(value string) error {
+ if value != "" {
+ if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) {
+ return errInvalidTagValue
+ }
+ }
+
+ return nil
+}
+
+// Tag denotes key and value.
+type Tag struct {
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+func (tag Tag) String() string {
+ return tag.Key + "=" + tag.Value
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+ return tag.Key == ""
+}
+
+// Validate checks this tag.
+func (tag Tag) Validate() error {
+ if err := checkKey(tag.Key); err != nil {
+ return err
+ }
+
+ return checkValue(tag.Value)
+}
+
+// MarshalXML encodes to XML data.
+func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := tag.Validate(); err != nil {
+ return err
+ }
+
+ type subTag Tag // to avoid recursively calling MarshalXML()
+ return e.EncodeElement(subTag(tag), start)
+}
+
+// UnmarshalXML decodes XML data to tag.
+func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type subTag Tag // to avoid recursively calling UnmarshalXML()
+ var st subTag
+ if err := d.DecodeElement(&st, &start); err != nil {
+ return err
+ }
+
+ if err := Tag(st).Validate(); err != nil {
+ return err
+ }
+
+ *tag = Tag(st)
+ return nil
+}
+
+// tagSet represents list of unique tags.
+type tagSet struct {
+ tagMap map[string]string
+ isObject bool
+}
+
+func (tags tagSet) String() string {
+ if len(tags.tagMap) == 0 {
+ return ""
+ }
+ var buf strings.Builder
+ keys := make([]string, 0, len(tags.tagMap))
+ for k := range tags.tagMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ keyEscaped := url.QueryEscape(k)
+ valueEscaped := url.QueryEscape(tags.tagMap[k])
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(keyEscaped)
+ buf.WriteByte('=')
+ buf.WriteString(valueEscaped)
+ }
+ return buf.String()
+}
+
+func (tags *tagSet) remove(key string) {
+ delete(tags.tagMap, key)
+}
+
+func (tags *tagSet) set(key, value string, failOnExist bool) error {
+ if failOnExist {
+ if _, found := tags.tagMap[key]; found {
+ return errDuplicateTagKey
+ }
+ }
+
+ if err := checkKey(key); err != nil {
+ return err
+ }
+
+ if err := checkValue(value); err != nil {
+ return err
+ }
+
+ if tags.isObject {
+ if len(tags.tagMap) == maxObjectTagCount {
+ return errTooManyObjectTags
+ }
+ } else if len(tags.tagMap) == maxTagCount {
+ return errTooManyTags
+ }
+
+ tags.tagMap[key] = value
+ return nil
+}
+
+func (tags tagSet) count() int {
+ return len(tags.tagMap)
+}
+
+func (tags tagSet) toMap() map[string]string {
+ m := make(map[string]string, len(tags.tagMap))
+ for key, value := range tags.tagMap {
+ m[key] = value
+ }
+ return m
+}
+
+// MarshalXML encodes to XML data.
+func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ tagList := struct {
+ Tags []Tag `xml:"Tag"`
+ }{}
+
+ tagList.Tags = make([]Tag, 0, len(tags.tagMap))
+ for key, value := range tags.tagMap {
+ tagList.Tags = append(tagList.Tags, Tag{key, value})
+ }
+
+ return e.EncodeElement(tagList, start)
+}
+
+// UnmarshalXML decodes XML data to tag list.
+func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ tagList := struct {
+ Tags []Tag `xml:"Tag"`
+ }{}
+
+ if err := d.DecodeElement(&tagList, &start); err != nil {
+ return err
+ }
+
+ if tags.isObject {
+ if len(tagList.Tags) > maxObjectTagCount {
+ return errTooManyObjectTags
+ }
+ } else if len(tagList.Tags) > maxTagCount {
+ return errTooManyTags
+ }
+
+ m := make(map[string]string, len(tagList.Tags))
+ for _, tag := range tagList.Tags {
+ if _, found := m[tag.Key]; found {
+ return errDuplicateTagKey
+ }
+
+ m[tag.Key] = tag.Value
+ }
+
+ tags.tagMap = m
+ return nil
+}
+
+type tagging struct {
+ XMLName xml.Name `xml:"Tagging"`
+ TagSet *tagSet `xml:"TagSet"`
+}
+
+// Tags is list of tags of XML request/response as per
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody
+type Tags tagging
+
+func (tags Tags) String() string {
+ return tags.TagSet.String()
+}
+
+// Remove removes a tag by its key.
+func (tags *Tags) Remove(key string) {
+ tags.TagSet.remove(key)
+}
+
+// Set sets new tag.
+func (tags *Tags) Set(key, value string) error {
+ return tags.TagSet.set(key, value, false)
+}
+
+// Count - return number of tags accounted for
+func (tags Tags) Count() int {
+ return tags.TagSet.count()
+}
+
+// ToMap returns copy of tags.
+func (tags Tags) ToMap() map[string]string {
+ return tags.TagSet.toMap()
+}
+
+// MapToObjectTags converts an input map of key and value into
+// *Tags data structure with validation.
+func MapToObjectTags(tagMap map[string]string) (*Tags, error) {
+ return NewTags(tagMap, true)
+}
+
+// MapToBucketTags converts an input map of key and value into
+// *Tags data structure with validation.
+func MapToBucketTags(tagMap map[string]string) (*Tags, error) {
+ return NewTags(tagMap, false)
+}
+
+// NewTags creates Tags from tagMap. If isObject is set, it validates the tags as object tags.
+func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) {
+ tagging := &Tags{
+ TagSet: &tagSet{
+ tagMap: make(map[string]string),
+ isObject: isObject,
+ },
+ }
+
+ for key, value := range tagMap {
+ if err := tagging.TagSet.set(key, value, true); err != nil {
+ return nil, err
+ }
+ }
+
+ return tagging, nil
+}
+
+func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) {
+ tagging := &Tags{
+ TagSet: &tagSet{
+ tagMap: make(map[string]string),
+ isObject: isObject,
+ },
+ }
+
+ if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
+ return nil, err
+ }
+
+ return tagging, nil
+}
+
+// ParseBucketXML decodes XML data of tags in reader specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax.
+func ParseBucketXML(reader io.Reader) (*Tags, error) {
+ return unmarshalXML(reader, false)
+}
+
+// ParseObjectXML decodes XML data of tags in reader specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax
+func ParseObjectXML(reader io.Reader) (*Tags, error) {
+ return unmarshalXML(reader, true)
+}
+
+// stringsCut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, cut returns s, "", false.
+func stringsCut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
+
+func (tags *tagSet) parseTags(tgs string) (err error) {
+ for tgs != "" {
+ var key string
+ key, tgs, _ = stringsCut(tgs, "&")
+ if key == "" {
+ continue
+ }
+ key, value, _ := stringsCut(key, "=")
+ key, err1 := url.QueryUnescape(key)
+ if err1 != nil {
+ if err == nil {
+ err = err1
+ }
+ continue
+ }
+ value, err1 = url.QueryUnescape(value)
+ if err1 != nil {
+ if err == nil {
+ err = err1
+ }
+ continue
+ }
+ if err = tags.set(key, value, true); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Parse decodes an HTTP query formatted string into tags; validation depends on isObject.
+// A query formatted string is like "key1=value1&key2=value2".
+func Parse(s string, isObject bool) (*Tags, error) {
+ tagging := &Tags{
+ TagSet: &tagSet{
+ tagMap: make(map[string]string),
+ isObject: isObject,
+ },
+ }
+
+ if err := tagging.TagSet.parseTags(s); err != nil {
+ return nil, err
+ }
+
+ return tagging, nil
+}
+
+// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2".
+func ParseObjectTags(s string) (*Tags, error) {
+ return Parse(s, true)
+}
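+
+// Usage sketch (hypothetical tag keys and values; error handling elided):
+//
+//   t, err := ParseObjectTags("project=alpha&owner=storage-team")
+//   if err != nil { /* handle error */ }
+//   _ = t.ToMap() // map[string]string{"owner": "storage-team", "project": "alpha"}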
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go
new file mode 100644
index 000000000..19687e027
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/post-policy.go
@@ -0,0 +1,408 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2023 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// expirationDateFormat date format for expiration key in json policy.
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"
+
+// policyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+// policyCondition {
+// matchType: "$eq",
+// key: "$Content-Type",
+// value: "image/png",
+// }
+type policyCondition struct {
+ matchType string
+ condition string
+ value string
+}
+
+// PostPolicy - Provides strict static type conversion and validation
+// for Amazon S3's POST policy JSON string.
+type PostPolicy struct {
+ // Expiration date and time of the POST policy.
+ expiration time.Time
+ // Collection of different policy conditions.
+ conditions []policyCondition
+ // ContentLengthRange minimum and maximum allowable size for the
+ // uploaded content.
+ contentLengthRange struct {
+ min int64
+ max int64
+ }
+
+ // Post form data.
+ formData map[string]string
+}
+
+// NewPostPolicy - Instantiate new post policy.
+func NewPostPolicy() *PostPolicy {
+ p := &PostPolicy{}
+ p.conditions = make([]policyCondition, 0)
+ p.formData = make(map[string]string)
+ return p
+}
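+
+// Usage sketch (hypothetical bucket, key, and limits; error handling elided):
+//
+//   policy := NewPostPolicy()
+//   _ = policy.SetBucket("my-bucket")
+//   _ = policy.SetKey("uploads/photo.jpg")
+//   _ = policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
+//   _ = policy.SetContentLengthRange(1, 10*1024*1024)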
+
+// SetExpires - Sets expiration time for the new policy.
+func (p *PostPolicy) SetExpires(t time.Time) error {
+ if t.IsZero() {
+ return errInvalidArgument("No expiry time set.")
+ }
+ p.expiration = t
+ return nil
+}
+
+// SetKey - Sets an object name for the policy based upload.
+func (p *PostPolicy) SetKey(key string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return errInvalidArgument("Object name is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$key",
+ value: key,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["key"] = key
+ return nil
+}
+
+// SetKeyStartsWith - Sets an object name prefix that a policy-based upload
+// must start with.
+// Can use an empty value ("") to allow any key.
+func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
+ policyCond := policyCondition{
+ matchType: "starts-with",
+ condition: "$key",
+ value: keyStartsWith,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["key"] = keyStartsWith
+ return nil
+}
+
+// SetBucket - Sets the bucket to which objects will be uploaded.
+func (p *PostPolicy) SetBucket(bucketName string) error {
+ if strings.TrimSpace(bucketName) == "" || bucketName == "" {
+ return errInvalidArgument("Bucket name is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$bucket",
+ value: bucketName,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["bucket"] = bucketName
+ return nil
+}
+
+// SetCondition - Sets condition for credentials, date and algorithm
+func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
+ if strings.TrimSpace(value) == "" || value == "" {
+ return errInvalidArgument("No value specified for condition")
+ }
+
+ policyCond := policyCondition{
+ matchType: matchType,
+ condition: "$" + condition,
+ value: value,
+ }
+ if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" {
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[condition] = value
+ return nil
+ }
+ return errInvalidArgument("Invalid condition in policy")
+}
+
+// SetTagging - Sets tagging for the object for this policy based upload.
+func (p *PostPolicy) SetTagging(tagging string) error {
+ if strings.TrimSpace(tagging) == "" || tagging == "" {
+ return errInvalidArgument("No tagging specified.")
+ }
+ _, err := tags.ParseObjectXML(strings.NewReader(tagging))
+ if err != nil {
+ return errors.New("The XML you provided was not well-formed or did not validate against our published schema.") //nolint
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$tagging",
+ value: tagging,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["tagging"] = tagging
+ return nil
+}
+
+// SetContentType - Sets content-type of the object for this policy
+// based upload.
+func (p *PostPolicy) SetContentType(contentType string) error {
+ if strings.TrimSpace(contentType) == "" || contentType == "" {
+ return errInvalidArgument("No content type specified.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$Content-Type",
+ value: contentType,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["Content-Type"] = contentType
+ return nil
+}
+
+// SetContentTypeStartsWith - Sets a content-type prefix that the object for this
+// policy-based upload must start with.
+// Can use an empty value ("") to allow any content-type.
+func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error {
+ policyCond := policyCondition{
+ matchType: "starts-with",
+ condition: "$Content-Type",
+ value: contentTypeStartsWith,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["Content-Type"] = contentTypeStartsWith
+ return nil
+}
+
+// SetContentDisposition - Sets content-disposition of the object for this policy-based upload.
+func (p *PostPolicy) SetContentDisposition(contentDisposition string) error {
+ if strings.TrimSpace(contentDisposition) == "" || contentDisposition == "" {
+ return errInvalidArgument("No content disposition specified.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$Content-Disposition",
+ value: contentDisposition,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["Content-Disposition"] = contentDisposition
+ return nil
+}
+
+// SetContentLengthRange - Set new min and max content length
+// condition for all incoming uploads.
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
+ if min > max {
+ return errInvalidArgument("Minimum limit is larger than maximum limit.")
+ }
+ if min < 0 {
+ return errInvalidArgument("Minimum limit cannot be negative.")
+ }
+ if max <= 0 {
+ return errInvalidArgument("Maximum limit cannot be non-positive.")
+ }
+ p.contentLengthRange.min = min
+ p.contentLengthRange.max = max
+ return nil
+}
+
+// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy
+// based upload.
+func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
+ if strings.TrimSpace(redirect) == "" || redirect == "" {
+ return errInvalidArgument("Redirect is empty")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$success_action_redirect",
+ value: redirect,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["success_action_redirect"] = redirect
+ return nil
+}
+
+// SetSuccessStatusAction - Sets the status success code of the object for this policy
+// based upload.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+ if strings.TrimSpace(status) == "" || status == "" {
+ return errInvalidArgument("Status is empty")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$success_action_status",
+ value: status,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["success_action_status"] = status
+ return nil
+}
+
+// SetUserMetadata - Set user metadata as a key/value couple.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadata(key, value string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return errInvalidArgument("Key is empty")
+ }
+ if strings.TrimSpace(value) == "" || value == "" {
+ return errInvalidArgument("Value is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-meta-%s", key)
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
+// SetUserMetadataStartsWith - Set a prefix that the user metadata value must start with.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return errInvalidArgument("Key is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-meta-%s", key)
+ policyCond := policyCondition{
+ matchType: "starts-with",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
+// SetChecksum sets the checksum of the request.
+func (p *PostPolicy) SetChecksum(c Checksum) {
+ if c.IsSet() {
+ p.formData[amzChecksumAlgo] = c.Type.String()
+ p.formData[c.Type.Key()] = c.Encoded()
+ }
+}
+
+// SetEncryption - sets encryption headers for POST API
+func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) {
+ if sse == nil {
+ return
+ }
+ h := http.Header{}
+ sse.Marshal(h)
+ for k, v := range h {
+ p.formData[k] = v[0]
+ }
+}
+
+// SetUserData - Set user data as a key/value couple.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserData(key, value string) error {
+ if key == "" {
+ return errInvalidArgument("Key is empty")
+ }
+ if value == "" {
+ return errInvalidArgument("Value is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-%s", key)
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
+// addNewPolicy - internal helper to validate adding new policies.
+// Can use starts-with with an empty value ("") to allow any content within a form field.
+func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
+ if policyCond.matchType == "" || policyCond.condition == "" {
+ return errInvalidArgument("Policy fields are empty.")
+ }
+ if policyCond.matchType != "starts-with" && policyCond.value == "" {
+ return errInvalidArgument("Policy value is empty.")
+ }
+ p.conditions = append(p.conditions, policyCond)
+ return nil
+}
+
+// String returns the policy as a JSON-formatted string.
+func (p PostPolicy) String() string {
+ return string(p.marshalJSON())
+}
+
+// marshalJSON - Provides Marshaled JSON in bytes.
+func (p PostPolicy) marshalJSON() []byte {
+ expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+ var conditionsStr string
+ conditions := []string{}
+ for _, po := range p.conditions {
+ conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+ }
+ if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+ conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+ p.contentLengthRange.min, p.contentLengthRange.max))
+ }
+ if len(conditions) > 0 {
+ conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
+ }
+ retStr := "{"
+ retStr = retStr + expirationStr + ","
+ retStr += conditionsStr
+ retStr += "}"
+ return []byte(retStr)
+}
+
+// base64 - Produces base64 of PostPolicy's Marshaled json.
+func (p PostPolicy) base64() string {
+ return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
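+
+// Illustrative usage (not part of the upstream file): from a caller's
+// perspective, a PostPolicy is typically built with the setters above and
+// then passed to Client.PresignedPostPolicy; roughly (bucket, object and
+// expiry below are hypothetical):
+//
+//    policy := minio.NewPostPolicy()
+//    _ = policy.SetBucket("my-bucket")
+//    _ = policy.SetKey("my-object")
+//    _ = policy.SetExpires(time.Now().UTC().Add(10 * time.Minute))
+//    url, formData, err := client.PresignedPostPolicy(context.Background(), policy)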
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go
new file mode 100644
index 000000000..bfeea95f3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go
@@ -0,0 +1,69 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+ attemptCh := make(chan int)
+
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // 1<<uint(attempt) below can overflow, so limit the value of attempt
+ maxAttempt := 30
+ if attempt > maxAttempt {
+ attempt = maxAttempt
+ }
+ // sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1<<uint(attempt))
+ if sleep > cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ var nextBackoff int
+ for {
+ select {
+ // Attempts starts.
+ case attemptCh <- nextBackoff:
+ nextBackoff++
+ case <-doneCh:
+ // Stop the routine.
+ return
+ }
+ time.Sleep(exponentialBackoffWait(nextBackoff))
+ }
+ }()
+ return attemptCh
+}
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
new file mode 100644
index 000000000..d15eb5901
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -0,0 +1,150 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "crypto/x509"
+ "errors"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 10
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// DefaultRetryUnit - default unit multiplicative per retry.
+// defaults to 200 * time.Millisecond
+var DefaultRetryUnit = 200 * time.Millisecond
+
+// DefaultRetryCap - Each retry attempt never waits longer than
+// this maximum time duration.
+var DefaultRetryCap = time.Second
+
+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.
+func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
+ attemptCh := make(chan int)
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ // sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1<<uint(attempt))
+ if sleep > cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ for i := 0; i < maxRetry; i++ {
+ select {
+ case attemptCh <- i + 1:
+ case <-ctx.Done():
+ return
+ }
+
+ select {
+ case <-time.After(exponentialBackoffWait(i)):
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+ return attemptCh
+}
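+
+// Illustrative usage (not part of the upstream file): callers typically range
+// over the returned channel and retry the request on each attempt, breaking
+// out on success or on a non-retryable error; roughly:
+//
+//    for range c.newRetryTimer(ctx, MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
+//        // issue the request; continue only if it failed with a retryable error
+//    }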
+
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "InternalError": {},
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "SlowDown": {},
+ // Add more AWS S3 codes here.
+}
+
+// isS3CodeRetryable - is s3 error code retryable.
+func isS3CodeRetryable(s3Code string) (ok bool) {
+ _, ok = retryableS3Codes[s3Code]
+ return ok
+}
+
+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+ 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+ 499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
+ http.StatusInternalServerError: {},
+ http.StatusBadGateway: {},
+ http.StatusServiceUnavailable: {},
+ http.StatusGatewayTimeout: {},
+ 520: {}, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected.
+ // Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+ _, ok = retryableHTTPStatusCodes[httpStatusCode]
+ return ok
+}
+
+// For now, all http Do() requests are retriable except some well defined errors
+func isRequestErrorRetryable(ctx context.Context, err error) bool {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ // Retry if internal timeout in the HTTP call.
+ return ctx.Err() == nil
+ }
+ if ue, ok := err.(*url.Error); ok {
+ e := ue.Unwrap()
+ switch e.(type) {
+ // x509: certificate signed by unknown authority
+ case x509.UnknownAuthorityError:
+ return false
+ }
+ switch e.Error() {
+ case "http: server gave HTTP response to HTTPS client":
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
new file mode 100644
index 000000000..01cee8a19
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -0,0 +1,175 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2024 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+type awsS3Endpoint struct {
+ endpoint string
+ dualstackEndpoint string
+}
+
+// awsS3EndpointMap Amazon S3 endpoint map.
+var awsS3EndpointMap = map[string]awsS3Endpoint{
+ "us-east-1": {
+ "s3.us-east-1.amazonaws.com",
+ "s3.dualstack.us-east-1.amazonaws.com",
+ },
+ "us-east-2": {
+ "s3.us-east-2.amazonaws.com",
+ "s3.dualstack.us-east-2.amazonaws.com",
+ },
+ "us-west-2": {
+ "s3.us-west-2.amazonaws.com",
+ "s3.dualstack.us-west-2.amazonaws.com",
+ },
+ "us-west-1": {
+ "s3.us-west-1.amazonaws.com",
+ "s3.dualstack.us-west-1.amazonaws.com",
+ },
+ "ca-central-1": {
+ "s3.ca-central-1.amazonaws.com",
+ "s3.dualstack.ca-central-1.amazonaws.com",
+ },
+ "ca-west-1": {
+ "s3.ca-west-1.amazonaws.com",
+ "s3.dualstack.ca-west-1.amazonaws.com",
+ },
+ "eu-west-1": {
+ "s3.eu-west-1.amazonaws.com",
+ "s3.dualstack.eu-west-1.amazonaws.com",
+ },
+ "eu-west-2": {
+ "s3.eu-west-2.amazonaws.com",
+ "s3.dualstack.eu-west-2.amazonaws.com",
+ },
+ "eu-west-3": {
+ "s3.eu-west-3.amazonaws.com",
+ "s3.dualstack.eu-west-3.amazonaws.com",
+ },
+ "eu-central-1": {
+ "s3.eu-central-1.amazonaws.com",
+ "s3.dualstack.eu-central-1.amazonaws.com",
+ },
+ "eu-central-2": {
+ "s3.eu-central-2.amazonaws.com",
+ "s3.dualstack.eu-central-2.amazonaws.com",
+ },
+ "eu-north-1": {
+ "s3.eu-north-1.amazonaws.com",
+ "s3.dualstack.eu-north-1.amazonaws.com",
+ },
+ "eu-south-1": {
+ "s3.eu-south-1.amazonaws.com",
+ "s3.dualstack.eu-south-1.amazonaws.com",
+ },
+ "eu-south-2": {
+ "s3.eu-south-2.amazonaws.com",
+ "s3.dualstack.eu-south-2.amazonaws.com",
+ },
+ "ap-east-1": {
+ "s3.ap-east-1.amazonaws.com",
+ "s3.dualstack.ap-east-1.amazonaws.com",
+ },
+ "ap-south-1": {
+ "s3.ap-south-1.amazonaws.com",
+ "s3.dualstack.ap-south-1.amazonaws.com",
+ },
+ "ap-south-2": {
+ "s3.ap-south-2.amazonaws.com",
+ "s3.dualstack.ap-south-2.amazonaws.com",
+ },
+ "ap-southeast-1": {
+ "s3.ap-southeast-1.amazonaws.com",
+ "s3.dualstack.ap-southeast-1.amazonaws.com",
+ },
+ "ap-southeast-2": {
+ "s3.ap-southeast-2.amazonaws.com",
+ "s3.dualstack.ap-southeast-2.amazonaws.com",
+ },
+ "ap-southeast-3": {
+ "s3.ap-southeast-3.amazonaws.com",
+ "s3.dualstack.ap-southeast-3.amazonaws.com",
+ },
+ "ap-southeast-4": {
+ "s3.ap-southeast-4.amazonaws.com",
+ "s3.dualstack.ap-southeast-4.amazonaws.com",
+ },
+ "ap-northeast-1": {
+ "s3.ap-northeast-1.amazonaws.com",
+ "s3.dualstack.ap-northeast-1.amazonaws.com",
+ },
+ "ap-northeast-2": {
+ "s3.ap-northeast-2.amazonaws.com",
+ "s3.dualstack.ap-northeast-2.amazonaws.com",
+ },
+ "ap-northeast-3": {
+ "s3.ap-northeast-3.amazonaws.com",
+ "s3.dualstack.ap-northeast-3.amazonaws.com",
+ },
+ "af-south-1": {
+ "s3.af-south-1.amazonaws.com",
+ "s3.dualstack.af-south-1.amazonaws.com",
+ },
+ "me-central-1": {
+ "s3.me-central-1.amazonaws.com",
+ "s3.dualstack.me-central-1.amazonaws.com",
+ },
+ "me-south-1": {
+ "s3.me-south-1.amazonaws.com",
+ "s3.dualstack.me-south-1.amazonaws.com",
+ },
+ "sa-east-1": {
+ "s3.sa-east-1.amazonaws.com",
+ "s3.dualstack.sa-east-1.amazonaws.com",
+ },
+ "us-gov-west-1": {
+ "s3.us-gov-west-1.amazonaws.com",
+ "s3.dualstack.us-gov-west-1.amazonaws.com",
+ },
+ "us-gov-east-1": {
+ "s3.us-gov-east-1.amazonaws.com",
+ "s3.dualstack.us-gov-east-1.amazonaws.com",
+ },
+ "cn-north-1": {
+ "s3.cn-north-1.amazonaws.com.cn",
+ "s3.dualstack.cn-north-1.amazonaws.com.cn",
+ },
+ "cn-northwest-1": {
+ "s3.cn-northwest-1.amazonaws.com.cn",
+ "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
+ },
+ "il-central-1": {
+ "s3.il-central-1.amazonaws.com",
+ "s3.dualstack.il-central-1.amazonaws.com",
+ },
+}
+
+// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
+func getS3Endpoint(bucketLocation string, useDualstack bool) (endpoint string) {
+ s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
+ if !ok {
+ // Default to 's3.us-east-1.amazonaws.com' endpoint.
+ if useDualstack {
+ return "s3.dualstack.us-east-1.amazonaws.com"
+ }
+ return "s3.us-east-1.amazonaws.com"
+ }
+ if useDualstack {
+ return s3Endpoint.dualstackEndpoint
+ }
+ return s3Endpoint.endpoint
+}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go
new file mode 100644
index 000000000..f7fad19f6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-error.go
@@ -0,0 +1,62 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// Non exhaustive list of AWS S3 standard error responses -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+var s3ErrorResponseMap = map[string]string{
+ "AccessDenied": "Access Denied.",
+ "BadDigest": "The Content-Md5 you specified did not match what we received.",
+ "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
+ "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
+ "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
+ "InternalError": "We encountered an internal error, please try again.",
+ "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
+ "InvalidBucketName": "The specified bucket is not valid.",
+ "InvalidDigest": "The Content-Md5 you specified is not valid.",
+ "InvalidRange": "The requested range is not satisfiable",
+ "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
+ "MissingContentLength": "You must provide the Content-Length HTTP header.",
+ "MissingContentMD5": "Missing required header for this request: Content-Md5.",
+ "MissingRequestBodyError": "Request body is empty.",
+ "NoSuchBucket": "The specified bucket does not exist.",
+ "NoSuchBucketPolicy": "The bucket policy does not exist",
+ "NoSuchKey": "The specified key does not exist.",
+ "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
+ "NotImplemented": "A header you provided implies functionality that is not implemented",
+ "PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
+ "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
+ "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+ "MethodNotAllowed": "The specified method is not allowed against this resource.",
+ "InvalidPart": "One or more of the specified parts could not be found.",
+ "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
+ "InvalidObjectState": "The operation is not valid for the current state of the object.",
+ "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
+ "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
+ "BucketNotEmpty": "The bucket you tried to delete is not empty",
+ "AllAccessDisabled": "All access to this bucket has been disabled.",
+ "MalformedPolicy": "Policy has invalid resource.",
+ "MissingFields": "Missing fields in request.",
+ "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+ "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+ "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
+ "InvalidDuration": "Duration provided in the request is invalid.",
+ "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ "NoSuchCORSConfiguration": "The specified bucket does not have a CORS configuration.",
+ // Add new API errors here.
+}
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
new file mode 100644
index 000000000..1bff66462
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -0,0 +1,83 @@
+//go:build go1.7 || go1.8
+// +build go1.7 go1.8
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net"
+ "net/http"
+ "os"
+ "time"
+)
+
+// mustGetSystemCertPool - return system CAs or empty pool in case of error (or windows)
+func mustGetSystemCertPool() *x509.CertPool {
+ pool, err := x509.SystemCertPool()
+ if err != nil {
+ return x509.NewCertPool()
+ }
+ return pool
+}
+
+// DefaultTransport - this default transport is similar to
+// http.DefaultTransport but with additional param DisableCompression
+// is set to true to avoid decompressing content with 'gzip' encoding.
+var DefaultTransport = func(secure bool) (*http.Transport, error) {
+ tr := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ MaxIdleConns: 256,
+ MaxIdleConnsPerHost: 16,
+ ResponseHeaderTimeout: time.Minute,
+ IdleConnTimeout: time.Minute,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 10 * time.Second,
+ // Set this value so that the underlying transport round-tripper
+ // doesn't try to auto decode the body of objects with
+ // content-encoding set to `gzip`.
+ //
+ // Refer:
+ // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+ DisableCompression: true,
+ }
+
+ if secure {
+ tr.TLSClientConfig = &tls.Config{
+ // Can't use SSLv3 because of POODLE and BEAST
+ // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+ // Can't use TLSv1.1 because of RC4 cipher usage
+ MinVersion: tls.VersionTLS12,
+ }
+ if f := os.Getenv("SSL_CERT_FILE"); f != "" {
+ rootCAs := mustGetSystemCertPool()
+ data, err := os.ReadFile(f)
+ if err == nil {
+ rootCAs.AppendCertsFromPEM(data)
+ }
+ tr.TLSClientConfig.RootCAs = rootCAs
+ }
+ }
+ return tr, nil
+}
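+
+// Illustrative usage (not part of the upstream file): from a caller's
+// perspective, the transport built here can be tweaked and passed to the
+// client constructor; roughly (endpoint and credentials are hypothetical):
+//
+//    tr, _ := minio.DefaultTransport(true)
+//    tr.MaxIdleConnsPerHost = 32
+//    client, err := minio.New("play.min.io", &minio.Options{
+//        Creds:     credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
+//        Secure:    true,
+//        Transport: tr,
+//    })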
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
new file mode 100644
index 000000000..a5beb371f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -0,0 +1,700 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "crypto/md5"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ md5simd "github.com/minio/md5-simd"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+func trimEtag(etag string) string {
+ etag = strings.TrimPrefix(etag, "\"")
+ return strings.TrimSuffix(etag, "\"")
+}
+
+var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
+
+func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
+ if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
+ expTime, err := parseRFC7231Time(matches[1])
+ if err != nil {
+ return time.Time{}, ""
+ }
+ return expTime, matches[2]
+ }
+ return time.Time{}, ""
+}
+
+var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
+
+func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
+ matches := restoreRegex.FindStringSubmatch(restore)
+ if len(matches) != 4 {
+ return false, time.Time{}, errors.New("unexpected restore header")
+ }
+ ongoing, err = strconv.ParseBool(matches[1])
+ if err != nil {
+ return false, time.Time{}, err
+ }
+ if matches[3] != "" {
+ expTime, err = parseRFC7231Time(matches[3])
+ if err != nil {
+ return false, time.Time{}, err
+ }
+ }
+ return
+}
+
+// xmlDecoder provide decoded value in xml.
+func xmlDecoder(body io.Reader, v interface{}) error {
+ d := xml.NewDecoder(body)
+ return d.Decode(v)
+}
+
+// sum256 calculate sha256sum for an input byte array, returns hex encoded.
+func sum256Hex(data []byte) string {
+ hash := newSHA256Hasher()
+ defer hash.Close()
+ hash.Write(data)
+ return hex.EncodeToString(hash.Sum(nil))
+}
+
+// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded.
+func sumMD5Base64(data []byte) string {
+ hash := newMd5Hasher()
+ defer hash.Close()
+ hash.Write(data)
+ return base64.StdEncoding.EncodeToString(hash.Sum(nil))
+}
+
+// getEndpointURL - construct a new endpoint.
+func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
+ // If secure is false, use 'http' scheme.
+ scheme := "https"
+ if !secure {
+ scheme = "http"
+ }
+
+ // Construct a secured endpoint URL.
+ endpointURLStr := scheme + "://" + endpoint
+ endpointURL, err := url.Parse(endpointURLStr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Validate incoming endpoint URL.
+ if err := isValidEndpointURL(*endpointURL); err != nil {
+ return nil, err
+ }
+ return endpointURL, nil
+}
+
+// closeResponse closes a non-nil response with any response Body.
+// convenient wrapper to drain any remaining data on response body.
+//
+// Subsequently this allows golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+ // Callers should close resp.Body when done reading from it.
+ // If resp.Body is not closed, the Client's underlying RoundTripper
+ // (typically Transport) may not be able to re-use a persistent TCP
+ // connection to the server for a subsequent "keep-alive" request.
+ if resp != nil && resp.Body != nil {
+ // Drain any remaining Body and then close the connection.
+ // Without this closing connection would disallow re-using
+ // the same connection for future uses.
+ // - http://stackoverflow.com/a/17961593/4465767
+ io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ }
+}
+
+var (
+ // Hex encoded string of nil sha256sum bytes.
+ emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+ // Sentinel URL is the default url value which is invalid.
+ sentinelURL = url.URL{}
+)
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL url.URL) error {
+ if endpointURL == sentinelURL {
+ return errInvalidArgument("Endpoint url cannot be empty.")
+ }
+ if endpointURL.Path != "/" && endpointURL.Path != "" {
+ return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
+ }
+ host := endpointURL.Hostname()
+ if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
+ msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards."
+ return errInvalidArgument(msg)
+ }
+
+ if strings.Contains(host, ".s3.amazonaws.com") {
+ if !s3utils.IsAmazonEndpoint(endpointURL) {
+ return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+ }
+ }
+ if strings.Contains(host, ".googleapis.com") {
+ if !s3utils.IsGoogleEndpoint(endpointURL) {
+ return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+ }
+ }
+ return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+ expireSeconds := int64(expires / time.Second)
+ if expireSeconds < 1 {
+ return errInvalidArgument("Expires cannot be lesser than 1 second.")
+ }
+ if expireSeconds > 604800 {
+ return errInvalidArgument("Expires cannot be greater than 7 days.")
+ }
+ return nil
+}
+
+// Extract only necessary metadata header key/values by
+// filtering them out with a list of custom header keys.
+func extractObjMetadata(header http.Header) http.Header {
+ preserveKeys := []string{
+ "Content-Type",
+ "Cache-Control",
+ "Content-Encoding",
+ "Content-Language",
+ "Content-Disposition",
+ "X-Amz-Storage-Class",
+ "X-Amz-Object-Lock-Mode",
+ "X-Amz-Object-Lock-Retain-Until-Date",
+ "X-Amz-Object-Lock-Legal-Hold",
+ "X-Amz-Website-Redirect-Location",
+ "X-Amz-Server-Side-Encryption",
+ "X-Amz-Tagging-Count",
+ "X-Amz-Meta-",
+ // Add new headers to be preserved.
+ // if you add new headers here, please extend
+ // PutObjectOptions{} to preserve them
+ // upon upload as well.
+ }
+ filteredHeader := make(http.Header)
+ for k, v := range header {
+ var found bool
+ for _, prefix := range preserveKeys {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ found = true
+ break
+ }
+ if found {
+ filteredHeader[k] = v
+ }
+ }
+ return filteredHeader
+}
+
+const (
+ // RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
+ rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+ rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
+ rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+func parseTime(t string, formats ...string) (time.Time, error) {
+ for _, format := range formats {
+ tt, err := time.Parse(format, t)
+ if err == nil {
+ return tt, nil
+ }
+ }
+ return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats)
+}
+
+func parseRFC7231Time(lastModified string) (time.Time, error) {
+ return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear)
+}
+
+// ToObjectInfo converts http header values into ObjectInfo type,
+// extracts metadata and fills in all the necessary fields in ObjectInfo.
+func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) {
+ var err error
+ // Trim off the odd double quotes from ETag in the beginning and end.
+ etag := trimEtag(h.Get("ETag"))
+
+ // Parse content length if it exists
+ var size int64 = -1
+ contentLengthStr := h.Get("Content-Length")
+ if contentLengthStr != "" {
+ size, err = strconv.ParseInt(contentLengthStr, 10, 64)
+ if err != nil {
+ // Content-Length is not valid
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: h.Get("x-amz-request-id"),
+ HostID: h.Get("x-amz-id-2"),
+ Region: h.Get("x-amz-bucket-region"),
+ }
+ }
+ }
+
+ // Parse Last-Modified, which has http time format.
+ mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: h.Get("x-amz-request-id"),
+ HostID: h.Get("x-amz-id-2"),
+ Region: h.Get("x-amz-bucket-region"),
+ }
+ }
+
+ // Fetch content type if any present.
+ contentType := strings.TrimSpace(h.Get("Content-Type"))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+
+ expiryStr := h.Get("Expires")
+ var expiry time.Time
+ if expiryStr != "" {
+ expiry, err = parseRFC7231Time(expiryStr)
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err),
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: h.Get("x-amz-request-id"),
+ HostID: h.Get("x-amz-id-2"),
+ Region: h.Get("x-amz-bucket-region"),
+ }
+ }
+ }
+
+ metadata := extractObjMetadata(h)
+ userMetadata := make(map[string]string)
+ for k, v := range metadata {
+ if strings.HasPrefix(k, "X-Amz-Meta-") {
+ userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
+ }
+ }
+ userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))
+
+ var tagCount int
+ if count := h.Get(amzTaggingCount); count != "" {
+ tagCount, err = strconv.Atoi(count)
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: h.Get("x-amz-request-id"),
+ HostID: h.Get("x-amz-id-2"),
+ Region: h.Get("x-amz-bucket-region"),
+ }
+ }
+ }
+
+ // Nil if not found
+ var restore *RestoreInfo
+ if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
+ ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+ restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
+ }
+
+ // extract lifecycle expiry date and rule ID
+ expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
+
+ deleteMarker := h.Get(amzDeleteMarker) == "true"
+
+ // Save object metadata info.
+ return ObjectInfo{
+ ETag: etag,
+ Key: objectName,
+ Size: size,
+ LastModified: mtime,
+ ContentType: contentType,
+ Expires: expiry,
+ VersionID: h.Get(amzVersionID),
+ IsDeleteMarker: deleteMarker,
+ ReplicationStatus: h.Get(amzReplicationStatus),
+ Expiration: expTime,
+ ExpirationRuleID: ruleID,
+ // Extract only the relevant header keys describing the object.
+ // following function filters out a list of standard set of keys
+ // which are not part of object metadata.
+ Metadata: metadata,
+ UserMetadata: userMetadata,
+ UserTags: userTags,
+ UserTagCount: tagCount,
+ Restore: restore,
+
+ // Checksum values
+ ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
+ ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
+ ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
+ ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
+ }, nil
+}
+
+var readFull = func(r io.Reader, buf []byte) (n int, err error) {
+ // ReadFull reads exactly len(buf) bytes from r into buf.
+ // It returns the number of bytes copied and an error if
+ // fewer bytes were read. The error is EOF only if no bytes
+ // were read. If an EOF happens after reading some but not
+ // all the bytes, ReadFull returns ErrUnexpectedEOF.
+ // On return, n == len(buf) if and only if err == nil.
+ // If r returns an error having read at least len(buf) bytes,
+ // the error is dropped.
+ for n < len(buf) && err == nil {
+ var nn int
+ nn, err = r.Read(buf[n:])
+ // Some spurious io.Readers return
+ // io.ErrUnexpectedEOF when nn == 0.
+ // This behavior is undocumented, and
+ // io.ReadFull would surface it to the
+ // caller, forcing custom handling.
+ // To avoid that, we use a slightly
+ // modified copy of the io.ReadFull
+ // implementation that treats
+ // io.ErrUnexpectedEOF with nn == 0
+ // as plain io.EOF.
+ if err == io.ErrUnexpectedEOF && nn == 0 {
+ err = io.EOF
+ }
+ n += nn
+ }
+ if n >= len(buf) {
+ err = nil
+ } else if n > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+ if !strings.HasPrefix(origAuth, signV4Algorithm) {
+ // Set a temporary redacted auth
+ return "AWS **REDACTED**:**REDACTED**"
+ }
+
+ // Signature V4 authorization header.
+
+ // Strip out accessKeyID from:
+ // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+ newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+ // Strip out 256-bit signature from: Signature=<256-bit signature>
+ return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+}
+
+// Get default location returns the location based on the input
+// URL `u`, if region override is provided then all location
+// defaults to regionOverride.
+//
+// If no other cases match then the location is set to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+ if regionOverride != "" {
+ return regionOverride
+ }
+ region := s3utils.GetRegionFromURL(u)
+ if region == "" {
+ region = "us-east-1"
+ }
+ return region
+}
+
+var supportedHeaders = map[string]bool{
+ "content-type": true,
+ "cache-control": true,
+ "content-encoding": true,
+ "content-disposition": true,
+ "content-language": true,
+ "x-amz-website-redirect-location": true,
+ "x-amz-object-lock-mode": true,
+ "x-amz-metadata-directive": true,
+ "x-amz-object-lock-retain-until-date": true,
+ "expires": true,
+ "x-amz-replication-status": true,
+ // Add more supported headers here.
+ // Must be lower case.
+}
+
+// isStorageClassHeader returns true if the header is a supported storage class header
+func isStorageClassHeader(headerKey string) bool {
+ return strings.EqualFold(amzStorageClass, headerKey)
+}
+
+// isStandardHeader returns true if header is a supported header and not a custom header
+func isStandardHeader(headerKey string) bool {
+ return supportedHeaders[strings.ToLower(headerKey)]
+}
+
+// sseHeaders is list of server side encryption headers
+var sseHeaders = map[string]bool{
+ "x-amz-server-side-encryption": true,
+ "x-amz-server-side-encryption-aws-kms-key-id": true,
+ "x-amz-server-side-encryption-context": true,
+ "x-amz-server-side-encryption-customer-algorithm": true,
+ "x-amz-server-side-encryption-customer-key": true,
+ "x-amz-server-side-encryption-customer-key-md5": true,
+ // Add more supported headers here.
+ // Must be lower case.
+}
+
+// isSSEHeader returns true if header is a server side encryption header.
+func isSSEHeader(headerKey string) bool {
+ return sseHeaders[strings.ToLower(headerKey)]
+}
+
+// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
+func isAmzHeader(headerKey string) bool {
+ key := strings.ToLower(headerKey)
+
+ return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
+}
+
+// isMinioHeader returns true if header is x-minio- header.
+func isMinioHeader(headerKey string) bool {
+ return strings.HasPrefix(strings.ToLower(headerKey), "x-minio-")
+}
+
+// supportedQueryValues is a list of query strings that can be passed in when using GetObject.
+var supportedQueryValues = map[string]bool{
+ "attributes": true,
+ "partNumber": true,
+ "versionId": true,
+ "response-cache-control": true,
+ "response-content-disposition": true,
+ "response-content-encoding": true,
+ "response-content-language": true,
+ "response-content-type": true,
+ "response-expires": true,
+}
+
+// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized.
+func isStandardQueryValue(qsKey string) bool {
+ return supportedQueryValues[qsKey]
+}
+
+// Per documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom, the
+// set of query params starting with "x-" are ignored by S3.
+const allowedCustomQueryPrefix = "x-"
+
+func isCustomQueryValue(qsKey string) bool {
+ return strings.HasPrefix(qsKey, allowedCustomQueryPrefix)
+}
+
+var (
+ md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
+ sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
+)
+
+func newMd5Hasher() md5simd.Hasher {
+ return &hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
+}
+
+func newSHA256Hasher() md5simd.Hasher {
+ return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true}
+}
+
+// hashWrapper implements the md5simd.Hasher interface.
+type hashWrapper struct {
+ hash.Hash
+ isMD5 bool
+ isSHA256 bool
+}
+
+// Close will put the hasher back into the pool.
+func (m *hashWrapper) Close() {
+ if m.isMD5 && m.Hash != nil {
+ m.Reset()
+ md5Pool.Put(m.Hash)
+ }
+ if m.isSHA256 && m.Hash != nil {
+ m.Reset()
+ sha256Pool.Put(m.Hash)
+ }
+ m.Hash = nil
+}
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+ letterIdxBits = 6 // 6 bits to represent a letter index
+ letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+ letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
+)
+
+// randString generates random names and prepends them with a known prefix.
+func randString(n int, src rand.Source, prefix string) string {
+ b := make([]byte, n)
+ // A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
+ for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+ if remain == 0 {
+ cache, remain = src.Int63(), letterIdxMax
+ }
+ if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+ b[i] = letterBytes[idx]
+ i--
+ }
+ cache >>= letterIdxBits
+ remain--
+ }
+ return prefix + string(b[0:30-len(prefix)])
+}
+
+// IsNetworkOrHostDown - if there was a network error or if the host is down.
+// expectTimeouts indicates that *context* timeouts are expected and does not
+// indicate a downed host. Other timeouts are still reported as a downed host.
+func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
+ if err == nil {
+ return false
+ }
+
+ if errors.Is(err, context.Canceled) {
+ return false
+ }
+
+ if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
+ return false
+ }
+
+ if errors.Is(err, context.DeadlineExceeded) {
+ return true
+ }
+
+ // We need to figure if the error either a timeout
+ // or a non-temporary error.
+ urlErr := &url.Error{}
+ if errors.As(err, &urlErr) {
+ switch urlErr.Err.(type) {
+ case *net.DNSError, *net.OpError, net.UnknownNetworkError, *tls.CertificateVerificationError:
+ return true
+ }
+ }
+ var e net.Error
+ if errors.As(err, &e) {
+ if e.Timeout() {
+ return true
+ }
+ }
+
+ // Fallback to other mechanisms.
+ switch {
+ case strings.Contains(err.Error(), "Connection closed by foreign host"):
+ return true
+ case strings.Contains(err.Error(), "TLS handshake timeout"):
+ // If error is - tlsHandshakeTimeoutError.
+ return true
+ case strings.Contains(err.Error(), "i/o timeout"):
+ // If error is - tcp timeoutError.
+ return true
+ case strings.Contains(err.Error(), "connection timed out"):
+ // If err is a net.Dial timeout.
+ return true
+ case strings.Contains(err.Error(), "connection refused"):
+ // If err is connection refused
+ return true
+ case strings.Contains(err.Error(), "server gave HTTP response to HTTPS client"):
+ // If err is TLS client is used with HTTP server
+ return true
+ case strings.Contains(err.Error(), "Client sent an HTTP request to an HTTPS server"):
+ // If err is plain-text Client is used with a HTTPS server
+ return true
+ case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
+ // Denial errors
+ return true
+ }
+ return false
+}
+
+// newHashReaderWrapper will hash all reads done through r.
+// When r returns io.EOF the done function will be called with the sum.
+func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper {
+ return &hashReaderWrapper{
+ r: r,
+ h: h,
+ done: done,
+ }
+}
+
+type hashReaderWrapper struct {
+ r io.Reader
+ h hash.Hash
+ done func(hash []byte)
+}
+
+// Read implements the io.Reader interface.
+func (h *hashReaderWrapper) Read(p []byte) (n int, err error) {
+ n, err = h.r.Read(p)
+ if n > 0 {
+ n2, err := h.h.Write(p[:n])
+ if err != nil {
+ return 0, err
+ }
+ if n2 != n {
+ return 0, io.ErrShortWrite
+ }
+ }
+ if err == io.EOF {
+ // Call back
+ h.done(h.h.Sum(nil))
+ }
+ return n, err
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
new file mode 100644
index 000000000..c75823490
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
@@ -0,0 +1,96 @@
+## 1.5.0
+
+* New option `IgnoreUntaggedFields` to ignore decoding to any fields
+ without `mapstructure` (or the configured tag name) set [GH-277]
+* New option `ErrorUnset` which makes it an error if any fields
+ in a target struct are not set by the decoding process. [GH-225]
+* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
+* Decoding to slice from array no longer crashes [GH-265]
+* Decode nested struct pointers to map [GH-271]
+* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
+* Fix issue where fields with `,omitempty` would sometimes decode
+ into a map with an empty string key [GH-281]
+
+## 1.4.3
+
+* Fix cases where `json.Number` didn't decode properly [GH-261]
+
+## 1.4.2
+
+* Custom name matchers to support any sort of casing, formatting, etc. for
+ field names. [GH-250]
+* Fix possible panic in ComposeDecodeHookFunc [GH-251]
+
+## 1.4.1
+
+* Fix regression where `*time.Time` value would be set to empty and not be sent
+ to decode hooks properly [GH-232]
+
+## 1.4.0
+
+* A new decode hook type `DecodeHookFuncValue` has been added that has
+ access to the full values. [GH-183]
+* Squash is now supported with embedded fields that are struct pointers [GH-205]
+* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
+
+## 1.3.3
+
+* Decoding maps from maps creates a settable value for decode hooks [GH-203]
+
+## 1.3.2
+
+* Decode into interface type with a struct value is supported [GH-187]
+
+## 1.3.1
+
+* Squash should only squash embedded structs. [GH-194]
+
+## 1.3.0
+
+* Added `",omitempty"` support. This will ignore zero values in the source
+ structure when encoding. [GH-145]
+
+## 1.2.3
+
+* Fix duplicate entries in Keys list with pointer values. [GH-185]
+
+## 1.2.2
+
+* Do not add unsettable (unexported) values to the unused metadata key
+ or "remain" value. [GH-150]
+
+## 1.2.1
+
+* Go modules checksum mismatch fix
+
+## 1.2.0
+
+* Added support to capture unused values in a field using the `",remain"` value
+ in the mapstructure tag. There is an example to showcase usage.
+* Added `DecoderConfig` option to always squash embedded structs
+* `json.Number` can decode into `uint` types
+* Empty slices are preserved and not replaced with nil slices
+* Fix panic that can occur in when decoding a map into a nil slice of structs
+* Improved package documentation for godoc
+
+## 1.1.2
+
+* Fix error when decode hook decodes interface implementation into interface
+ type. [GH-140]
+
+## 1.1.1
+
+* Fix panic that can happen in `decodePtr`
+
+## 1.1.0
+
+* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
+* Support struct to struct decoding [GH-137]
+* If source map value is nil, then destination map value is nil (instead of empty)
+* If source slice value is nil, then destination slice value is nil (instead of empty)
+* If source pointer is nil, then destination pointer is set to nil (instead of
+ allocated zero value of type)
+
+## 1.0.0
+
+* Initial tagged stable release.
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 000000000..f9c841a51
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644
index 000000000..0018dc7d9
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/README.md
@@ -0,0 +1,46 @@
+# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/mapstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+ "type": "person",
+ "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
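+
+As a rough, hypothetical sketch (not from the upstream README), decoding the
+JSON above, once unmarshalled into a `map[string]interface{}`, could look like
+this:
+
+```go
+// Person is an illustrative target struct.
+type Person struct {
+ Type string `mapstructure:"type"`
+ Name string `mapstructure:"name"`
+}
+
+func decodePerson(input map[string]interface{}) (Person, error) {
+ var p Person
+ // Decode copies matching keys from the map into the struct fields.
+ err := mapstructure.Decode(input, &p)
+ return p, err
+}
+```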
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644
index 000000000..3a754ca72
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -0,0 +1,279 @@
+package mapstructure
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "net"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+ // Create variables here so we can reference them with the reflect pkg
+ var f1 DecodeHookFuncType
+ var f2 DecodeHookFuncKind
+ var f3 DecodeHookFuncValue
+
+ // Fill in the variables into this interface and the rest is done
+ // automatically using the reflect package.
+ potential := []interface{}{f1, f2, f3}
+
+ v := reflect.ValueOf(h)
+ vt := v.Type()
+ for _, raw := range potential {
+ pt := reflect.ValueOf(raw).Type()
+ if vt.ConvertibleTo(pt) {
+ return v.Convert(pt).Interface()
+ }
+ }
+
+ return nil
+}
+
+// DecodeHookExec executes the given decode hook. This should be used
+// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
+// that took reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+ raw DecodeHookFunc,
+ from reflect.Value, to reflect.Value) (interface{}, error) {
+
+ switch f := typedDecodeHook(raw).(type) {
+ case DecodeHookFuncType:
+ return f(from.Type(), to.Type(), from.Interface())
+ case DecodeHookFuncKind:
+ return f(from.Kind(), to.Kind(), from.Interface())
+ case DecodeHookFuncValue:
+ return f(from, to)
+ default:
+ return nil, errors.New("invalid decode hook signature")
+ }
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ var err error
+ data := f.Interface()
+
+ newFrom := f
+ for _, f1 := range fs {
+ data, err = DecodeHookExec(f1, newFrom, t)
+ if err != nil {
+ return nil, err
+ }
+ newFrom = reflect.ValueOf(data)
+ }
+
+ return data, nil
+ }
+}
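+
+// Illustrative usage (not part of the upstream file): from a caller's
+// perspective, composed hooks are typically wired into a DecoderConfig;
+// roughly (the target value is hypothetical):
+//
+//    cfg := &mapstructure.DecoderConfig{
+//        DecodeHook: mapstructure.ComposeDecodeHookFunc(
+//            mapstructure.StringToTimeDurationHookFunc(),
+//            mapstructure.StringToSliceHookFunc(","),
+//        ),
+//        Result: &target,
+//    }
+//    decoder, _ := mapstructure.NewDecoder(cfg)
+//    _ = decoder.Decode(input)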
+
+// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
+// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
+func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
+ return func(a, b reflect.Value) (interface{}, error) {
+ var allErrs string
+ var out interface{}
+ var err error
+
+ for _, f := range ff {
+ out, err = DecodeHookExec(f, a, b)
+ if err != nil {
+ allErrs += err.Error() + "\n"
+ continue
+ }
+
+ return out, nil
+ }
+
+ return nil, errors.New(allErrs)
+ }
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+ return func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ if f != reflect.String || t != reflect.Slice {
+ return data, nil
+ }
+
+ raw := data.(string)
+ if raw == "" {
+ return []string{}, nil
+ }
+
+ return strings.Split(raw, sep), nil
+ }
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Duration(5)) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.ParseDuration(data.(string))
+ }
+}
+
+// StringToIPHookFunc returns a DecodeHookFunc that converts
+// strings to net.IP
+func StringToIPHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(net.IP{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ ip := net.ParseIP(data.(string))
+ if ip == nil {
+ return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
+ }
+
+ return ip, nil
+ }
+}
+
+// StringToIPNetHookFunc returns a DecodeHookFunc that converts
+// strings to net.IPNet
+func StringToIPNetHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(net.IPNet{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ _, net, err := net.ParseCIDR(data.(string))
+ return net, err
+ }
+}
+
+// StringToTimeHookFunc returns a DecodeHookFunc that converts
+// strings to time.Time.
+func StringToTimeHookFunc(layout string) DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Time{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.Parse(layout, data.(string))
+ }
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
+func WeaklyTypedHook(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ dataVal := reflect.ValueOf(data)
+ switch t {
+ case reflect.String:
+ switch f {
+ case reflect.Bool:
+ if dataVal.Bool() {
+ return "1", nil
+ }
+ return "0", nil
+ case reflect.Float32:
+ return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+ case reflect.Int:
+ return strconv.FormatInt(dataVal.Int(), 10), nil
+ case reflect.Slice:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ if elemKind == reflect.Uint8 {
+ return string(dataVal.Interface().([]uint8)), nil
+ }
+ case reflect.Uint:
+ return strconv.FormatUint(dataVal.Uint(), 10), nil
+ }
+ }
+
+ return data, nil
+}
+
+func RecursiveStructToMapHookFunc() DecodeHookFunc {
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ if f.Kind() != reflect.Struct {
+ return f.Interface(), nil
+ }
+
+ var i interface{} = struct{}{}
+ if t.Type() != reflect.TypeOf(&i).Elem() {
+ return f.Interface(), nil
+ }
+
+ m := make(map[string]interface{})
+ t.Set(reflect.ValueOf(m))
+
+ return f.Interface(), nil
+ }
+}
+
+// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
+// strings to the UnmarshalText function, when the target type
+// implements the encoding.TextUnmarshaler interface
+func TextUnmarshallerHookFunc() DecodeHookFuncType {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ result := reflect.New(t).Interface()
+ unmarshaller, ok := result.(encoding.TextUnmarshaler)
+ if !ok {
+ return data, nil
+ }
+ if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+}
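To illustrate the hook above, a small sketch (the Endpoint struct is invented; net.IP is used because its pointer implements encoding.TextUnmarshaler):

package main

import (
	"fmt"
	"net"

	"github.com/mitchellh/mapstructure"
)

// Endpoint is a hypothetical target struct for the example.
type Endpoint struct {
	Addr net.IP `mapstructure:"addr"`
}

func main() {
	var ep Endpoint
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:     &ep,
		DecodeHook: mapstructure.TextUnmarshallerHookFunc(),
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"addr": "10.0.0.7"}); err != nil {
		panic(err)
	}
	fmt.Println(ep.Addr) // 10.0.0.7
}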
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 000000000..47a99e5af
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+ Errors []string
+}
+
+func (e *Error) Error() string {
+ points := make([]string, len(e.Errors))
+ for i, err := range e.Errors {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ sort.Strings(points)
+ return fmt.Sprintf(
+ "%d error(s) decoding:\n\n%s",
+ len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+ if e == nil {
+ return nil
+ }
+
+ result := make([]error, len(e.Errors))
+ for i, e := range e.Errors {
+ result[i] = errors.New(e)
+ }
+
+ return result
+}
+
+func appendErrors(errors []string, err error) []string {
+ switch e := err.(type) {
+ case *Error:
+ return append(errors, e.Errors...)
+ default:
+ return append(errors, e.Error())
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 000000000..1efb22ac3
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,1540 @@
+// Package mapstructure exposes functionality to convert one arbitrary
+// Go type into another, typically to convert a map[string]interface{}
+// into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+//
+// The simplest function to start with is Decode.
+//
+// Field Tags
+//
+// When decoding to a struct, mapstructure will use the field name by
+// default to perform the mapping. For example, if a struct has a field
+// "Username" then mapstructure will look for a key in the source value
+// of "username" (case insensitive).
+//
+// type User struct {
+// Username string
+// }
+//
+// You can change the behavior of mapstructure by using struct tags.
+// The default struct tag that mapstructure looks for is "mapstructure"
+// but you can customize it using DecoderConfig.
+//
+// Renaming Fields
+//
+// To rename the key that mapstructure looks for, use the "mapstructure"
+// tag and set a value directly. For example, to change the "username" example
+// above to "user":
+//
+// type User struct {
+// Username string `mapstructure:"user"`
+// }
+//
+// Embedded Structs and Squashing
+//
+// Embedded structs are treated as if they're another field with that name.
+// By default, the two structs below are equivalent when decoding with
+// mapstructure:
+//
+// type Person struct {
+// Name string
+// }
+//
+// type Friend struct {
+// Person
+// }
+//
+// type Friend struct {
+// Person Person
+// }
+//
+// This would require an input that looks like below:
+//
+// map[string]interface{}{
+// "person": map[string]interface{}{"name": "alice"},
+// }
+//
+// If your "person" value is NOT nested, then you can append ",squash" to
+// your tag value and mapstructure will treat it as if the embedded struct
+// were part of the struct directly. Example:
+//
+// type Friend struct {
+// Person `mapstructure:",squash"`
+// }
+//
+// Now the following input would be accepted:
+//
+// map[string]interface{}{
+// "name": "alice",
+// }
+//
+// When decoding from a struct to a map, the squash tag squashes the struct
+// fields into a single map. Using the example structs from above:
+//
+// Friend{Person: Person{Name: "alice"}}
+//
+// Will be decoded into a map:
+//
+// map[string]interface{}{
+// "name": "alice",
+// }
+//
+// DecoderConfig has a field that changes the behavior of mapstructure
+// to always squash embedded structs.
+//
+// Remainder Values
+//
+// If there are any unmapped keys in the source value, mapstructure by
+// default will silently ignore them. You can turn this into an error by setting ErrorUnused
+// in DecoderConfig. If you're using Metadata you can also maintain a slice
+// of the unused keys.
+//
+// You can also use the ",remain" suffix on your tag to collect all unused
+// values in a map. The field with this tag MUST be a map type and should
+// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
+// See example below:
+//
+// type Friend struct {
+// Name string
+// Other map[string]interface{} `mapstructure:",remain"`
+// }
+//
+// Given the input below, Other would be populated with the other
+// values that weren't used (everything but "name"):
+//
+// map[string]interface{}{
+// "name": "bob",
+// "address": "123 Maple St.",
+// }
+//
+// Omit Empty Values
+//
+// When decoding from a struct to any other value, you may use the
+// ",omitempty" suffix on your tag to omit that value if it equates to
+// the zero value. The zero value of all types is specified in the Go
+// specification.
+//
+// For example, the zero value of a numeric type is zero ("0"). If the struct
+// field value is zero and a numeric type, the field is empty, and it won't
+// be encoded into the destination type.
+//
+// type Source struct {
+// Age int `mapstructure:",omitempty"`
+// }
+//
+// Unexported fields
+//
+// Since unexported (private) struct fields cannot be set outside the package
+// where they are defined, the decoder will simply skip them.
+//
+// For this output type definition:
+//
+// type Exported struct {
+// private string // this unexported field will be skipped
+// Public string
+// }
+//
+// Using this map as input:
+//
+// map[string]interface{}{
+// "private": "I will be ignored",
+// "Public": "I made it through!",
+// }
+//
+// The following struct will be decoded:
+//
+// type Exported struct {
+// private: "" // field is left with an empty string (zero value)
+// Public: "I made it through!"
+// }
+//
+// Other Configuration
+//
+// mapstructure is highly configurable. See the DecoderConfig struct
+// for other features and options that are supported.
+package mapstructure
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
+// DecodeHookFuncValue.
+// Values are a superset of Types (Values can return types), and Types are a
+// superset of Kinds (Types can return Kinds) and are generally a richer thing
+// to use, but Kinds are simpler if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
+// values.
+type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+ // DecodeHook, if set, will be called before any decoding and any
+ // type conversion (if WeaklyTypedInput is on). This lets you modify
+ // the values before they're set down onto the resulting struct. The
+ // DecodeHook is called for every map and value in the input. This means
+ // that if a struct has embedded fields with squash tags the decode hook
+ // is called only once with all of the input data, not once for each
+ // embedded struct.
+ //
+ // If an error is returned, the entire decode will fail with that error.
+ DecodeHook DecodeHookFunc
+
+ // If ErrorUnused is true, then it is an error for there to exist
+ // keys in the original map that were unused in the decoding process
+ // (extra keys).
+ ErrorUnused bool
+
+ // If ErrorUnset is true, then it is an error for there to exist
+ // fields in the result that were not set in the decoding process
+ // (extra fields). This only applies to decoding to a struct. This
+ // will affect all nested structs as well.
+ ErrorUnset bool
+
+ // ZeroFields, if set to true, will zero fields before writing them.
+ // For example, a map will be emptied before decoded values are put in
+ // it. If this is false, a map will be merged.
+ ZeroFields bool
+
+ // If WeaklyTypedInput is true, the decoder will make the following
+ // "weak" conversions:
+ //
+ // - bools to string (true = "1", false = "0")
+ // - numbers to string (base 10)
+ // - bools to int/uint (true = 1, false = 0)
+ // - strings to int/uint (base implied by prefix)
+ // - int to bool (true if value != 0)
+ // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+ // FALSE, false, False. Anything else is an error)
+ // - empty array = empty map and vice versa
+ // - negative numbers to overflowed uint values (base 10)
+ // - slice of maps to a merged map
+ // - single values are converted to slices if required. Each
+ // element is weakly decoded. For example: "4" can become []int{4}
+ // if the target type is an int slice.
+ //
+ WeaklyTypedInput bool
+
+ // Squash will squash embedded structs. A squash tag may also be
+ // added to an individual struct field using a tag. For example:
+ //
+ // type Parent struct {
+ // Child `mapstructure:",squash"`
+ // }
+ Squash bool
+
+ // Metadata is the struct that will contain extra metadata about
+ // the decoding. If this is nil, then no metadata will be tracked.
+ Metadata *Metadata
+
+ // Result is a pointer to the struct that will contain the decoded
+ // value.
+ Result interface{}
+
+ // The tag name that mapstructure reads for field names. This
+ // defaults to "mapstructure"
+ TagName string
+
+	// IgnoreUntaggedFields, if set to true, ignores all struct fields without an
+	// explicit TagName, as if they were tagged `mapstructure:"-"`.
+ IgnoreUntaggedFields bool
+
+ // MatchName is the function used to match the map key to the struct
+ // field name or tag. Defaults to `strings.EqualFold`. This can be used
+ // to implement case-sensitive tag values, support snake casing, etc.
+ MatchName func(mapKey, fieldName string) bool
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+ config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+ // Keys are the keys of the structure which were successfully decoded
+ Keys []string
+
+ // Unused is a slice of keys that were found in the raw value but
+ // weren't decoded since there was no matching field in the result interface
+ Unused []string
+
+ // Unset is a slice of field names that were found in the result interface
+ // but weren't set in the decoding process since there was no matching value
+ // in the input
+ Unset []string
+}
+
+// Decode takes an input structure and uses reflection to translate it to
+// the output structure. output must be a pointer to a map or struct.
+func Decode(input interface{}, output interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
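As a quick, self-contained illustration of Decode together with the ",remain" tag documented above (the Friend struct mirrors the package documentation; the input values are made up):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Friend mirrors the doc example: Other collects all keys not mapped elsewhere.
type Friend struct {
	Name  string                 `mapstructure:"name"`
	Other map[string]interface{} `mapstructure:",remain"`
}

func main() {
	input := map[string]interface{}{
		"name":    "bob",
		"address": "123 Maple St.",
	}

	var f Friend
	if err := mapstructure.Decode(input, &f); err != nil {
		panic(err)
	}
	fmt.Println(f.Name, f.Other) // bob map[address:123 Maple St.]
}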
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// DecodeMetadata is the same as Decode, but is shorthand to
+// enable metadata collection. See DecoderConfig for more info.
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+ config := &DecoderConfig{
+ Metadata: metadata,
+ Result: output,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// WeakDecodeMetadata is the same as Decode, but is shorthand to
+// enable both WeaklyTypedInput and metadata collection. See
+// DecoderConfig for more info.
+func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+ config := &DecoderConfig{
+ Metadata: metadata,
+ Result: output,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+ val := reflect.ValueOf(config.Result)
+ if val.Kind() != reflect.Ptr {
+ return nil, errors.New("result must be a pointer")
+ }
+
+ val = val.Elem()
+ if !val.CanAddr() {
+ return nil, errors.New("result must be addressable (a pointer)")
+ }
+
+ if config.Metadata != nil {
+ if config.Metadata.Keys == nil {
+ config.Metadata.Keys = make([]string, 0)
+ }
+
+ if config.Metadata.Unused == nil {
+ config.Metadata.Unused = make([]string, 0)
+ }
+
+ if config.Metadata.Unset == nil {
+ config.Metadata.Unset = make([]string, 0)
+ }
+ }
+
+ if config.TagName == "" {
+ config.TagName = "mapstructure"
+ }
+
+ if config.MatchName == nil {
+ config.MatchName = strings.EqualFold
+ }
+
+ result := &Decoder{
+ config: config,
+ }
+
+ return result, nil
+}
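For completeness, a sketch of the explicit Decoder path with metadata collection (the Target struct and input are invented; setting ErrorUnused to true would turn the leftover "color" key into an error):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Target struct {
	Name string `mapstructure:"name"`
}

func main() {
	var (
		out  Target
		meta mapstructure.Metadata
	)
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:      &out,
		Metadata:    &meta,
		ErrorUnused: false, // set to true to fail on unknown keys such as "color"
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"name": "eve", "color": "red"}); err != nil {
		panic(err)
	}
	fmt.Println(meta.Keys, meta.Unused) // [name] [color]
}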
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(input interface{}) error {
+ return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
+ var inputVal reflect.Value
+ if input != nil {
+ inputVal = reflect.ValueOf(input)
+
+ // We need to check here if input is a typed nil. Typed nils won't
+ // match the "input == nil" below so we check that here.
+ if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
+ input = nil
+ }
+ }
+
+ if input == nil {
+ // If the data is nil, then we don't set anything, unless ZeroFields is set
+ // to true.
+ if d.config.ZeroFields {
+ outVal.Set(reflect.Zero(outVal.Type()))
+
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+ }
+ return nil
+ }
+
+ if !inputVal.IsValid() {
+ // If the input value is invalid, then we just set the value
+ // to be the zero value.
+ outVal.Set(reflect.Zero(outVal.Type()))
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+ return nil
+ }
+
+ if d.config.DecodeHook != nil {
+ // We have a DecodeHook, so let's pre-process the input.
+ var err error
+ input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
+ if err != nil {
+ return fmt.Errorf("error decoding '%s': %s", name, err)
+ }
+ }
+
+ var err error
+ outputKind := getKind(outVal)
+ addMetaKey := true
+ switch outputKind {
+ case reflect.Bool:
+ err = d.decodeBool(name, input, outVal)
+ case reflect.Interface:
+ err = d.decodeBasic(name, input, outVal)
+ case reflect.String:
+ err = d.decodeString(name, input, outVal)
+ case reflect.Int:
+ err = d.decodeInt(name, input, outVal)
+ case reflect.Uint:
+ err = d.decodeUint(name, input, outVal)
+ case reflect.Float32:
+ err = d.decodeFloat(name, input, outVal)
+ case reflect.Struct:
+ err = d.decodeStruct(name, input, outVal)
+ case reflect.Map:
+ err = d.decodeMap(name, input, outVal)
+ case reflect.Ptr:
+ addMetaKey, err = d.decodePtr(name, input, outVal)
+ case reflect.Slice:
+ err = d.decodeSlice(name, input, outVal)
+ case reflect.Array:
+ err = d.decodeArray(name, input, outVal)
+ case reflect.Func:
+ err = d.decodeFunc(name, input, outVal)
+ default:
+ // If we reached this point then we weren't able to decode it
+ return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
+ }
+
+ // If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+ if addMetaKey && d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+
+ return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+ if val.IsValid() && val.Elem().IsValid() {
+ elem := val.Elem()
+
+		// If we can't address this element, then it's not writable. Instead,
+ // we make a copy of the value (which is a pointer and therefore
+ // writable), decode into that, and replace the whole value.
+ copied := false
+ if !elem.CanAddr() {
+ copied = true
+
+ // Make *T
+ copy := reflect.New(elem.Type())
+
+ // *T = elem
+ copy.Elem().Set(elem)
+
+ // Set elem so we decode into it
+ elem = copy
+ }
+
+ // Decode. If we have an error then return. We also return right
+ // away if we're not a copy because that means we decoded directly.
+ if err := d.decode(name, data, elem); err != nil || !copied {
+ return err
+ }
+
+		// If we're a copy, we need to set the final result
+ val.Set(elem.Elem())
+ return nil
+ }
+
+ dataVal := reflect.ValueOf(data)
+
+ // If the input data is a pointer, and the assigned type is the dereference
+ // of that exact pointer, then indirect it so that we can assign it.
+ // Example: *string to string
+ if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
+ dataVal = reflect.Indirect(dataVal)
+ }
+
+ if !dataVal.IsValid() {
+ dataVal = reflect.Zero(val.Type())
+ }
+
+ dataValType := dataVal.Type()
+ if !dataValType.AssignableTo(val.Type()) {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got '%s'",
+ name, val.Type(), dataValType)
+ }
+
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+
+ converted := true
+ switch {
+ case dataKind == reflect.String:
+ val.SetString(dataVal.String())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetString("1")
+ } else {
+ val.SetString("0")
+ }
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+ case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+ dataKind == reflect.Array && d.config.WeaklyTypedInput:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ switch elemKind {
+ case reflect.Uint8:
+ var uints []uint8
+ if dataKind == reflect.Array {
+ uints = make([]uint8, dataVal.Len(), dataVal.Len())
+ for i := range uints {
+ uints[i] = dataVal.Index(i).Interface().(uint8)
+ }
+ } else {
+ uints = dataVal.Interface().([]uint8)
+ }
+ val.SetString(string(uints))
+ default:
+ converted = false
+ }
+ default:
+ converted = false
+ }
+
+ if !converted {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetInt(dataVal.Int())
+ case dataKind == reflect.Uint:
+ val.SetInt(int64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetInt(int64(dataVal.Float()))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetInt(1)
+ } else {
+ val.SetInt(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ i, err := strconv.ParseInt(str, 0, val.Type().Bits())
+ if err == nil {
+ val.SetInt(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Int64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetInt(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ i := dataVal.Int()
+ if i < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %d overflows uint",
+ name, i)
+ }
+ val.SetUint(uint64(i))
+ case dataKind == reflect.Uint:
+ val.SetUint(dataVal.Uint())
+ case dataKind == reflect.Float32:
+ f := dataVal.Float()
+ if f < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %f overflows uint",
+ name, f)
+ }
+ val.SetUint(uint64(f))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetUint(1)
+ } else {
+ val.SetUint(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ i, err := strconv.ParseUint(str, 0, val.Type().Bits())
+ if err == nil {
+ val.SetUint(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := strconv.ParseUint(string(jn), 0, 64)
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetUint(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Bool:
+ val.SetBool(dataVal.Bool())
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Int() != 0)
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Uint() != 0)
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Float() != 0)
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ b, err := strconv.ParseBool(dataVal.String())
+ if err == nil {
+ val.SetBool(b)
+ } else if dataVal.String() == "" {
+ val.SetBool(false)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetFloat(float64(dataVal.Int()))
+ case dataKind == reflect.Uint:
+ val.SetFloat(float64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetFloat(dataVal.Float())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetFloat(1)
+ } else {
+ val.SetFloat(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ f, err := strconv.ParseFloat(str, val.Type().Bits())
+ if err == nil {
+ val.SetFloat(f)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Float64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetFloat(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+ valType := val.Type()
+ valKeyType := valType.Key()
+ valElemType := valType.Elem()
+
+ // By default we overwrite keys in the current map
+ valMap := val
+
+ // If the map is nil or we're purposely zeroing fields, make a new map
+ if valMap.IsNil() || d.config.ZeroFields {
+ // Make a new map to hold our result
+ mapType := reflect.MapOf(valKeyType, valElemType)
+ valMap = reflect.MakeMap(mapType)
+ }
+
+ // Check input type and based on the input type jump to the proper func
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ switch dataVal.Kind() {
+ case reflect.Map:
+ return d.decodeMapFromMap(name, dataVal, val, valMap)
+
+ case reflect.Struct:
+ return d.decodeMapFromStruct(name, dataVal, val, valMap)
+
+ case reflect.Array, reflect.Slice:
+ if d.config.WeaklyTypedInput {
+ return d.decodeMapFromSlice(name, dataVal, val, valMap)
+ }
+
+ fallthrough
+
+ default:
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+ }
+}
+
+func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ // Special case for BC reasons (covered by tests)
+ if dataVal.Len() == 0 {
+ val.Set(valMap)
+ return nil
+ }
+
+ for i := 0; i < dataVal.Len(); i++ {
+ err := d.decode(
+ name+"["+strconv.Itoa(i)+"]",
+ dataVal.Index(i).Interface(), val)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ valType := val.Type()
+ valKeyType := valType.Key()
+ valElemType := valType.Elem()
+
+ // Accumulate errors
+ errors := make([]string, 0)
+
+	// If the input data is empty, mirror it: keep nil as nil, otherwise set an empty map.
+ if dataVal.Len() == 0 {
+ if dataVal.IsNil() {
+ if !val.IsNil() {
+ val.Set(dataVal)
+ }
+ } else {
+ // Set to empty allocated value
+ val.Set(valMap)
+ }
+
+ return nil
+ }
+
+ for _, k := range dataVal.MapKeys() {
+ fieldName := name + "[" + k.String() + "]"
+
+ // First decode the key into the proper type
+ currentKey := reflect.Indirect(reflect.New(valKeyType))
+ if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+ errors = appendErrors(errors, err)
+ continue
+ }
+
+ // Next decode the data into the proper type
+ v := dataVal.MapIndex(k).Interface()
+ currentVal := reflect.Indirect(reflect.New(valElemType))
+ if err := d.decode(fieldName, v, currentVal); err != nil {
+ errors = appendErrors(errors, err)
+ continue
+ }
+
+ valMap.SetMapIndex(currentKey, currentVal)
+ }
+
+ // Set the built up map to the value
+ val.Set(valMap)
+
+ // If we had errors, return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ typ := dataVal.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ // Get the StructField first since this is a cheap operation. If the
+ // field is unexported, then ignore it.
+ f := typ.Field(i)
+ if f.PkgPath != "" {
+ continue
+ }
+
+ // Next get the actual value of this field and verify it is assignable
+ // to the map value.
+ v := dataVal.Field(i)
+ if !v.Type().AssignableTo(valMap.Type().Elem()) {
+ return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+ }
+
+ tagValue := f.Tag.Get(d.config.TagName)
+ keyName := f.Name
+
+ if tagValue == "" && d.config.IgnoreUntaggedFields {
+ continue
+ }
+
+ // If Squash is set in the config, we squash the field down.
+ squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
+
+ v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
+
+ // Determine the name of the key in the map
+ if index := strings.Index(tagValue, ","); index != -1 {
+ if tagValue[:index] == "-" {
+ continue
+ }
+ // If "omitempty" is specified in the tag, it ignores empty values.
+ if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
+ continue
+ }
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash = squash || strings.Index(tagValue[index+1:], "squash") != -1
+ if squash {
+ // When squashing, the embedded type can be a pointer to a struct.
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
+ v = v.Elem()
+ }
+
+ // The final type must be a struct
+ if v.Kind() != reflect.Struct {
+ return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+ }
+ }
+ if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
+ keyName = keyNameTagValue
+ }
+ } else if len(tagValue) > 0 {
+ if tagValue == "-" {
+ continue
+ }
+ keyName = tagValue
+ }
+
+ switch v.Kind() {
+ // this is an embedded struct, so handle it differently
+ case reflect.Struct:
+ x := reflect.New(v.Type())
+ x.Elem().Set(v)
+
+ vType := valMap.Type()
+ vKeyType := vType.Key()
+ vElemType := vType.Elem()
+ mType := reflect.MapOf(vKeyType, vElemType)
+ vMap := reflect.MakeMap(mType)
+
+ // Creating a pointer to a map so that other methods can completely
+ // overwrite the map if need be (looking at you decodeMapFromMap). The
+ // indirection allows the underlying map to be settable (CanSet() == true)
+			// whereas reflect.MakeMap returns an unsettable map.
+ addrVal := reflect.New(vMap.Type())
+ reflect.Indirect(addrVal).Set(vMap)
+
+ err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
+ if err != nil {
+ return err
+ }
+
+ // the underlying map may have been completely overwritten so pull
+ // it indirectly out of the enclosing value.
+ vMap = reflect.Indirect(addrVal)
+
+ if squash {
+ for _, k := range vMap.MapKeys() {
+ valMap.SetMapIndex(k, vMap.MapIndex(k))
+ }
+ } else {
+ valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+ }
+
+ default:
+ valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+ }
+ }
+
+ if val.CanAddr() {
+ val.Set(valMap)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
+ // If the input data is nil, then we want to just set the output
+ // pointer to be nil as well.
+ isNil := data == nil
+ if !isNil {
+ switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
+ case reflect.Chan,
+ reflect.Func,
+ reflect.Interface,
+ reflect.Map,
+ reflect.Ptr,
+ reflect.Slice:
+ isNil = v.IsNil()
+ }
+ }
+ if isNil {
+ if !val.IsNil() && val.CanSet() {
+ nilValue := reflect.New(val.Type()).Elem()
+ val.Set(nilValue)
+ }
+
+ return true, nil
+ }
+
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ valType := val.Type()
+ valElemType := valType.Elem()
+ if val.CanSet() {
+ realVal := val
+ if realVal.IsNil() || d.config.ZeroFields {
+ realVal = reflect.New(valElemType)
+ }
+
+ if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+ return false, err
+ }
+
+ val.Set(realVal)
+ } else {
+ if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
+ return false, err
+ }
+ }
+ return false, nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+	// Functions cannot be built through reflection here, so the source and
+	// destination function types must match exactly.
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ if val.Type() != dataVal.Type() {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ sliceType := reflect.SliceOf(valElemType)
+
+ // If we have a non array/slice type then we first attempt to convert.
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Slice and array we use the normal logic
+ case dataValKind == reflect.Slice, dataValKind == reflect.Array:
+ break
+
+ // Empty maps turn into empty slices
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.MakeSlice(sliceType, 0, 0))
+ return nil
+ }
+ // Create slice of maps of other sizes
+ return d.decodeSlice(name, []interface{}{data}, val)
+
+ case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
+ return d.decodeSlice(name, []byte(dataVal.String()), val)
+
+ // All other types we try to convert to the slice type
+ // and "lift" it into it. i.e. a string becomes a string slice.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeSlice(name, []interface{}{data}, val)
+ }
+ }
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ }
+
+ // If the input value is nil, then don't allocate since empty != nil
+ if dataValKind != reflect.Array && dataVal.IsNil() {
+ return nil
+ }
+
+ valSlice := val
+ if valSlice.IsNil() || d.config.ZeroFields {
+ // Make a new slice to hold our result, same size as the original data.
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+ }
+
+ // Accumulate any errors
+ errors := make([]string, 0)
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ for valSlice.Len() <= i {
+ valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+ }
+ currentField := valSlice.Index(i)
+
+ fieldName := name + "[" + strconv.Itoa(i) + "]"
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // Finally, set the value to the slice we built up
+ val.Set(valSlice)
+
+ // If there were errors, we return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ arrayType := reflect.ArrayOf(valType.Len(), valElemType)
+
+ valArray := val
+
+ if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
+ // Check input type
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Empty maps turn into empty arrays
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.Zero(arrayType))
+ return nil
+ }
+
+ // All other types we try to convert to the array type
+ // and "lift" it into it. i.e. a string becomes a string array.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeArray(name, []interface{}{data}, val)
+ }
+ }
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+ }
+ if dataVal.Len() > arrayType.Len() {
+ return fmt.Errorf(
+ "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
+
+ }
+
+ // Make a new array to hold our result, same size as the original data.
+ valArray = reflect.New(arrayType).Elem()
+ }
+
+ // Accumulate any errors
+ errors := make([]string, 0)
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ currentField := valArray.Index(i)
+
+ fieldName := name + "[" + strconv.Itoa(i) + "]"
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // Finally, set the value to the array we built up
+ val.Set(valArray)
+
+ // If there were errors, we return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+ // If the type of the value to write to and the data match directly,
+ // then we just set it directly instead of recursing into the structure.
+ if dataVal.Type() == val.Type() {
+ val.Set(dataVal)
+ return nil
+ }
+
+ dataValKind := dataVal.Kind()
+ switch dataValKind {
+ case reflect.Map:
+ return d.decodeStructFromMap(name, dataVal, val)
+
+ case reflect.Struct:
+ // Not the most efficient way to do this but we can optimize later if
+ // we want to. To convert from struct to struct we go to map first
+ // as an intermediary.
+
+ // Make a new map to hold our result
+ mapType := reflect.TypeOf((map[string]interface{})(nil))
+ mval := reflect.MakeMap(mapType)
+
+ // Creating a pointer to a map so that other methods can completely
+ // overwrite the map if need be (looking at you decodeMapFromMap). The
+ // indirection allows the underlying map to be settable (CanSet() == true)
+		// whereas reflect.MakeMap returns an unsettable map.
+ addrVal := reflect.New(mval.Type())
+
+ reflect.Indirect(addrVal).Set(mval)
+ if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
+ return err
+ }
+
+ result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
+ return result
+
+ default:
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+ }
+}
+
+func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
+ dataValType := dataVal.Type()
+ if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+ return fmt.Errorf(
+ "'%s' needs a map with string keys, has '%s' keys",
+ name, dataValType.Key().Kind())
+ }
+
+ dataValKeys := make(map[reflect.Value]struct{})
+ dataValKeysUnused := make(map[interface{}]struct{})
+ for _, dataValKey := range dataVal.MapKeys() {
+ dataValKeys[dataValKey] = struct{}{}
+ dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+ }
+
+ targetValKeysUnused := make(map[interface{}]struct{})
+ errors := make([]string, 0)
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = val
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+
+ // remainField is set to a valid field set with the "remain" tag if
+ // we are keeping track of remaining values.
+ var remainField *field
+
+ fields := []field{}
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ fieldVal := structVal.Field(i)
+ if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
+ // Handle embedded struct pointers as embedded structs.
+ fieldVal = fieldVal.Elem()
+ }
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
+ remain := false
+
+			// We always parse the tags because we're looking for other tags too
+ tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+
+ if tag == "remain" {
+ remain = true
+ break
+ }
+ }
+
+ if squash {
+ if fieldVal.Kind() != reflect.Struct {
+ errors = appendErrors(errors,
+ fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
+ } else {
+ structs = append(structs, fieldVal)
+ }
+ continue
+ }
+
+ // Build our field
+ if remain {
+ remainField = &field{fieldType, fieldVal}
+ } else {
+ // Normal struct field, store it away
+ fields = append(fields, field{fieldType, fieldVal})
+ }
+ }
+ }
+
+ // for fieldType, field := range fields {
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ fieldName := field.Name
+
+ tagValue := field.Tag.Get(d.config.TagName)
+ tagValue = strings.SplitN(tagValue, ",", 2)[0]
+ if tagValue != "" {
+ fieldName = tagValue
+ }
+
+ rawMapKey := reflect.ValueOf(fieldName)
+ rawMapVal := dataVal.MapIndex(rawMapKey)
+ if !rawMapVal.IsValid() {
+ // Do a slower search by iterating over each key and
+ // doing case-insensitive search.
+ for dataValKey := range dataValKeys {
+ mK, ok := dataValKey.Interface().(string)
+ if !ok {
+ // Not a string key
+ continue
+ }
+
+ if d.config.MatchName(mK, fieldName) {
+ rawMapKey = dataValKey
+ rawMapVal = dataVal.MapIndex(dataValKey)
+ break
+ }
+ }
+
+ if !rawMapVal.IsValid() {
+ // There was no matching key in the map for the value in
+ // the struct. Remember it for potential errors and metadata.
+ targetValKeysUnused[fieldName] = struct{}{}
+ continue
+ }
+ }
+
+ if !fieldValue.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !fieldValue.CanSet() {
+ continue
+ }
+
+ // Delete the key we're using from the unused map so we stop tracking
+ delete(dataValKeysUnused, rawMapKey.Interface())
+
+ // If the name is empty string, then we're at the root, and we
+ // don't dot-join the fields.
+ if name != "" {
+ fieldName = name + "." + fieldName
+ }
+
+ if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // If we have a "remain"-tagged field and we have unused keys then
+ // we put the unused keys directly into the remain field.
+ if remainField != nil && len(dataValKeysUnused) > 0 {
+ // Build a map of only the unused values
+ remain := map[interface{}]interface{}{}
+ for key := range dataValKeysUnused {
+ remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
+ }
+
+ // Decode it as-if we were just decoding this map onto our map.
+ if err := d.decodeMap(name, remain, remainField.val); err != nil {
+ errors = appendErrors(errors, err)
+ }
+
+		// Clear the unused-keys map so that the ErrorUnused check below
+		// does not report them as an error.
+ dataValKeysUnused = nil
+ }
+
+ if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+ keys := make([]string, 0, len(dataValKeysUnused))
+ for rawKey := range dataValKeysUnused {
+ keys = append(keys, rawKey.(string))
+ }
+ sort.Strings(keys)
+
+ err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+ errors = appendErrors(errors, err)
+ }
+
+ if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
+ keys := make([]string, 0, len(targetValKeysUnused))
+ for rawKey := range targetValKeysUnused {
+ keys = append(keys, rawKey.(string))
+ }
+ sort.Strings(keys)
+
+ err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
+ errors = appendErrors(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ // Add the unused keys to the list of unused keys if we're tracking metadata
+ if d.config.Metadata != nil {
+ for rawKey := range dataValKeysUnused {
+ key := rawKey.(string)
+ if name != "" {
+ key = name + "." + key
+ }
+
+ d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+ }
+ for rawKey := range targetValKeysUnused {
+ key := rawKey.(string)
+ if name != "" {
+ key = name + "." + key
+ }
+
+ d.config.Metadata.Unset = append(d.config.Metadata.Unset, key)
+ }
+ }
+
+ return nil
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch getKind(v) {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+ kind := val.Kind()
+
+ switch {
+ case kind >= reflect.Int && kind <= reflect.Int64:
+ return reflect.Int
+ case kind >= reflect.Uint && kind <= reflect.Uint64:
+ return reflect.Uint
+ case kind >= reflect.Float32 && kind <= reflect.Float64:
+ return reflect.Float32
+ default:
+ return kind
+ }
+}
+
+func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool {
+ for i := 0; i < typ.NumField(); i++ {
+ f := typ.Field(i)
+ if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields
+ return true
+ }
+ if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside
+ return true
+ }
+ }
+ return false
+}
+
+func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value {
+ if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+ return v
+ }
+ deref := v.Elem()
+ derefT := deref.Type()
+ if isStructTypeConvertibleToMap(derefT, true, tagName) {
+ return deref
+ }
+ return v
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go
new file mode 100644
index 000000000..c4510a695
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package operational
+
+import (
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/heptiolabs/healthcheck"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/server"
+ log "github.com/sirupsen/logrus"
+)
+
+func NewHealthServer(opts *config.Options, isAlive healthcheck.Check, isReady healthcheck.Check) *http.Server {
+ handler := healthcheck.NewHandler()
+ address := net.JoinHostPort(opts.Health.Address, opts.Health.Port)
+ handler.AddLivenessCheck("PipelineCheck", isAlive)
+ handler.AddReadinessCheck("PipelineCheck", isReady)
+
+ server := server.Default(&http.Server{
+ Handler: handler,
+ Addr: address,
+ })
+
+ go func() {
+ for {
+ err := server.ListenAndServe()
+ log.Errorf("http.ListenAndServe error %v", err)
+ time.Sleep(60 * time.Second)
+ }
+ }()
+
+ return server
+}
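A minimal usage sketch (not part of the vendored file): opts is assumed to come from the pipeline's configuration parsing, and healthcheck.Check is a plain func() error, so the caller only supplies liveness and readiness closures.

	// opts is an assumed *config.Options obtained from configuration parsing.
	isAlive := func() error { return nil } // liveness: the process is up
	isReady := func() error {
		// readiness: return an error here until the pipeline is fully initialized
		return nil
	}
	srv := operational.NewHealthServer(opts, isAlive, isReady)
	_ = srv // ListenAndServe is already running in the goroutine started above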
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go
new file mode 100644
index 000000000..d0008aa3f
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package operational
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+type MetricDefinition struct {
+ Name string
+ Help string
+ Type metricType
+ Labels []string
+}
+
+type metricType string
+
+const TypeCounter metricType = "counter"
+const TypeGauge metricType = "gauge"
+const TypeHistogram metricType = "histogram"
+const TypeSummary metricType = "summary"
+
+var allMetrics = []MetricDefinition{}
+
+func DefineMetric(name, help string, t metricType, labels ...string) MetricDefinition {
+ def := MetricDefinition{
+ Name: name,
+ Help: help,
+ Type: t,
+ Labels: labels,
+ }
+ allMetrics = append(allMetrics, def)
+ return def
+}
+
+var (
+ recordsWritten = DefineMetric(
+ "records_written",
+ "Number of output records written",
+ TypeCounter,
+ "stage",
+ )
+ stageInQueueSize = DefineMetric(
+ "stage_in_queue_size",
+ "Pipeline stage input queue size (number of elements in queue)",
+ TypeGauge,
+ "stage",
+ )
+ stageOutQueueSize = DefineMetric(
+ "stage_out_queue_size",
+ "Pipeline stage output queue size (number of elements in queue)",
+ TypeGauge,
+ "stage",
+ )
+ stageDuration = DefineMetric(
+ "stage_duration_ms",
+ "Pipeline stage duration in milliseconds",
+ TypeHistogram,
+ "stage",
+ )
+ indexerHit = DefineMetric(
+ "secondary_network_indexer_hit",
+ "Counter of hits per secondary network index for Kubernetes enrichment",
+ TypeCounter,
+ "kind",
+ "namespace",
+ "network",
+ "warning",
+ )
+)
+
+func (def *MetricDefinition) mapLabels(labels []string) prometheus.Labels {
+ if len(labels) != len(def.Labels) {
+		logrus.Errorf("Could not map labels, lengths differ in def %s [%v / %v]", def.Name, def.Labels, labels)
+ }
+ labelsMap := prometheus.Labels{}
+ for i, label := range labels {
+ labelsMap[def.Labels[i]] = label
+ }
+ return labelsMap
+}
+
+func verifyMetricType(def *MetricDefinition, t metricType) {
+ if def.Type != t {
+ logrus.Panicf("operational metric %q is of type %q but is being registered as %q", def.Name, def.Type, t)
+ }
+}
+
+type Metrics struct {
+ settings *config.MetricsSettings
+ stageDurationHisto *prometheus.HistogramVec
+}
+
+func NewMetrics(settings *config.MetricsSettings) *Metrics {
+ return &Metrics{settings: settings}
+}
+
+// register will register against the default registry. May panic or not depending on settings
+func (o *Metrics) register(c prometheus.Collector, name string) {
+ err := prometheus.DefaultRegisterer.Register(c)
+ if err != nil {
+ var castErr prometheus.AlreadyRegisteredError
+ if errors.As(err, &castErr) {
+ logrus.Warningf("metrics registration error [%s]: %v", name, err)
+ } else if o.settings.NoPanic {
+ logrus.Errorf("metrics registration error [%s]: %v", name, err)
+ } else {
+ logrus.Panicf("metrics registration error [%s]: %v", name, err)
+ }
+ }
+}
+
+func (o *Metrics) NewCounter(def *MetricDefinition, labels ...string) prometheus.Counter {
+ verifyMetricType(def, TypeCounter)
+ fullName := o.settings.Prefix + def.Name
+ c := prometheus.NewCounter(prometheus.CounterOpts{
+ Name: fullName,
+ Help: def.Help,
+ ConstLabels: def.mapLabels(labels),
+ })
+ o.register(c, fullName)
+ return c
+}
+
+func (o *Metrics) NewCounterVec(def *MetricDefinition) *prometheus.CounterVec {
+ verifyMetricType(def, TypeCounter)
+ fullName := o.settings.Prefix + def.Name
+ c := prometheus.NewCounterVec(prometheus.CounterOpts{
+ Name: fullName,
+ Help: def.Help,
+ }, def.Labels)
+ o.register(c, fullName)
+ return c
+}
+
+func (o *Metrics) NewGauge(def *MetricDefinition, labels ...string) prometheus.Gauge {
+ verifyMetricType(def, TypeGauge)
+ fullName := o.settings.Prefix + def.Name
+ c := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: fullName,
+ Help: def.Help,
+ ConstLabels: def.mapLabels(labels),
+ })
+ o.register(c, fullName)
+ return c
+}
+
+func (o *Metrics) NewGaugeVec(def *MetricDefinition) *prometheus.GaugeVec {
+ verifyMetricType(def, TypeGauge)
+ fullName := o.settings.Prefix + def.Name
+ c := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Name: fullName,
+ Help: def.Help,
+ }, def.Labels)
+ o.register(c, fullName)
+ return c
+}
+
+func (o *Metrics) NewGaugeFunc(def *MetricDefinition, f func() float64, labels ...string) {
+ verifyMetricType(def, TypeGauge)
+ fullName := o.settings.Prefix + def.Name
+ c := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Name: fullName,
+ Help: def.Help,
+ ConstLabels: def.mapLabels(labels),
+ }, f)
+ o.register(c, fullName)
+}
+
+func (o *Metrics) NewHistogram(def *MetricDefinition, buckets []float64, labels ...string) prometheus.Histogram {
+ verifyMetricType(def, TypeHistogram)
+ fullName := o.settings.Prefix + def.Name
+ c := prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: fullName,
+ Help: def.Help,
+ Buckets: buckets,
+ ConstLabels: def.mapLabels(labels),
+ })
+ o.register(c, fullName)
+ return c
+}
+
+func (o *Metrics) NewHistogramVec(def *MetricDefinition, buckets []float64) *prometheus.HistogramVec {
+ verifyMetricType(def, TypeHistogram)
+ fullName := o.settings.Prefix + def.Name
+ c := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Name: fullName,
+ Help: def.Help,
+ Buckets: buckets,
+ }, def.Labels)
+ o.register(c, fullName)
+ return c
+}
+
+func (o *Metrics) NewSummary(def *MetricDefinition, labels ...string) prometheus.Summary {
+ verifyMetricType(def, TypeSummary)
+ fullName := o.settings.Prefix + def.Name
+ c := prometheus.NewSummary(prometheus.SummaryOpts{
+ Name: fullName,
+ Help: def.Help,
+ ConstLabels: def.mapLabels(labels),
+ // arbitrary objectives for now
+ Objectives: map[float64]float64{
+ 0.5: 0.02,
+ 0.95: 0.01,
+ },
+ })
+ o.register(c, fullName)
+ return c
+}
+
+func (o *Metrics) CreateRecordsWrittenCounter(stage string) prometheus.Counter {
+ return o.NewCounter(&recordsWritten, stage)
+}
+
+func (o *Metrics) CreateInQueueSizeGauge(stage string, f func() int) {
+ o.NewGaugeFunc(&stageInQueueSize, func() float64 { return float64(f()) }, stage)
+}
+
+func (o *Metrics) CreateOutQueueSizeGauge(stage string, f func() int) {
+ o.NewGaugeFunc(&stageOutQueueSize, func() float64 { return float64(f()) }, stage)
+}
+
+func (o *Metrics) GetOrCreateStageDurationHisto() *prometheus.HistogramVec {
+ if o.stageDurationHisto == nil {
+ o.stageDurationHisto = o.NewHistogramVec(&stageDuration, []float64{.001, .01, .1, 1, 10, 100, 1000, 10000})
+ }
+ return o.stageDurationHisto
+}
+
+func (o *Metrics) CreateIndexerHitCounter() *prometheus.CounterVec {
+ return o.NewCounterVec(&indexerHit)
+}
+
+func GetDocumentation() string {
+ doc := ""
+ sort.Slice(allMetrics, func(i, j int) bool {
+ return allMetrics[i].Name < allMetrics[j].Name
+ })
+ for _, opts := range allMetrics {
+ doc += fmt.Sprintf(
+ `
+### %s
+| **Name** | %s |
+|:---|:---|
+| **Description** | %s |
+| **Type** | %s |
+| **Labels** | %s |
+
+`,
+ opts.Name,
+ opts.Name,
+ opts.Help,
+ opts.Type,
+ strings.Join(opts.Labels, ", "),
+ )
+ }
+
+ return doc
+}
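+
+// Hedged illustration (metric name, help text, and the rendered type string are hypothetical):
+// for a counter defined with name "records_written", help "Number of records written", and label "stage",
+// GetDocumentation would produce a fragment along the lines of:
+//
+//	### records_written
+//	| **Name** | records_written |
+//	|:---|:---|
+//	| **Description** | Number of records written |
+//	| **Type** | counter |
+//	| **Labels** | stage |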
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/timer.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/timer.go
new file mode 100644
index 000000000..6862326b8
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/timer.go
@@ -0,0 +1,51 @@
+package operational
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type Timer struct {
+ startTime *time.Time
+ observer prometheus.Observer
+}
+
+func NewTimer(o prometheus.Observer) *Timer {
+ return &Timer{
+ observer: o,
+ }
+}
+
+// Start starts or restarts the timer, regardless of whether a previous call occurred without being observed first
+func (t *Timer) Start() time.Time {
+ now := time.Now()
+ t.startTime = &now
+ return now
+}
+
+// StartOnce starts the timer only on the first call. Subsequent calls are ignored
+// until the timer is observed.
+func (t *Timer) StartOnce() time.Time {
+ if t.startTime == nil {
+ now := time.Now()
+ t.startTime = &now
+ }
+ return *t.startTime
+}
+
+func (t *Timer) ObserveMilliseconds() {
+ t.observe(func(d time.Duration) float64 { return float64(d.Milliseconds()) })
+}
+
+func (t *Timer) ObserveSeconds() {
+ t.observe(time.Duration.Seconds)
+}
+
+func (t *Timer) observe(f func(d time.Duration) float64) {
+ if t.startTime != nil {
+ duration := time.Since(*t.startTime)
+ t.observer.Observe(f(duration))
+ t.startTime = nil
+ }
+}
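+
+// Illustrative usage sketch (the histogram variable is hypothetical, not part of this file):
+//
+//	t := operational.NewTimer(someHistogramVec.WithLabelValues("ingest"))
+//	t.Start()
+//	// ... timed work ...
+//	t.ObserveSeconds() // records the elapsed duration and resets the timer for the next Start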
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go
new file mode 100644
index 000000000..d85511e61
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package decode
+
+import (
+ "fmt"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/decode"
+)
+
+type Decoder interface {
+ Decode(in []byte) (config.GenericMap, error)
+}
+
+func GetDecoder(params api.Decoder) (Decoder, error) {
+ switch params.Type {
+ case api.DecoderJSON:
+ return NewDecodeJSON()
+ case api.DecoderProtobuf:
+ return decode.NewProtobuf()
+ }
+ panic(fmt.Sprintf("`decode` type %s not defined", params.Type))
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go
new file mode 100644
index 000000000..c3b7481ab
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package decode
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ log "github.com/sirupsen/logrus"
+)
+
+//nolint:revive
+type DecodeJSON struct {
+}
+
+// Decode decodes a JSON-encoded input line into a single flow entry
+func (c *DecodeJSON) Decode(line []byte) (config.GenericMap, error) {
+
+ if log.IsLevelEnabled(log.DebugLevel) {
+ log.Debugf("decodeJSON: line = %v", string(line))
+ }
+ var decodedLine map[string]interface{}
+ if err := json.Unmarshal(line, &decodedLine); err != nil {
+ return nil, err
+ }
+ decodedLine2 := make(config.GenericMap, len(decodedLine))
+ // flows directly ingested by flp-transformer won't have this field, so we need to add it
+ // here. If the received line already contains the field, it will be overridden later
+ decodedLine2["TimeReceived"] = time.Now().Unix()
+ for k, v := range decodedLine {
+ if v == nil {
+ continue
+ }
+ decodedLine2[k] = v
+ }
+ return decodedLine2, nil
+}
+
+// NewDecodeJSON creates a new JSON decoder
+func NewDecodeJSON() (Decoder, error) {
+ log.Debugf("entering NewDecodeJSON")
+ return &DecodeJSON{}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode.go
new file mode 100644
index 000000000..e754ed1d4
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode.go
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encode
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ log "github.com/sirupsen/logrus"
+)
+
+type encodeNone struct {
+ prevRecord config.GenericMap
+}
+
+type Encoder interface {
+ Encode(in config.GenericMap)
+ Update(config.StageParam)
+}
+
+// Encode encodes a flow before being stored
+func (t *encodeNone) Encode(in config.GenericMap) {
+ t.prevRecord = in
+}
+
+func (t *encodeNone) Update(_ config.StageParam) {
+ log.Warn("Encode None, update not supported")
+}
+
+// NewEncodeNone creates a new no-op encoder
+func NewEncodeNone() (Encoder, error) {
+ log.Debugf("entering NewEncodeNone")
+ return &encodeNone{}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go
new file mode 100644
index 000000000..2ead32be6
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encode
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/prometheus/client_golang/prometheus"
+ kafkago "github.com/segmentio/kafka-go"
+ log "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+const (
+ defaultReadTimeoutSeconds = int64(10)
+ defaultWriteTimeoutSeconds = int64(10)
+)
+
+type kafkaWriteMessage interface {
+ WriteMessages(ctx context.Context, msgs ...kafkago.Message) error
+}
+
+type encodeKafka struct {
+ kafkaParams api.EncodeKafka
+ kafkaWriter kafkaWriteMessage
+ recordsWritten prometheus.Counter
+}
+
+// Encode writes entries to the Kafka topic
+func (r *encodeKafka) Encode(entry config.GenericMap) {
+ var entryByteArray []byte
+ var err error
+ entryByteArray, err = json.Marshal(entry)
+ if err != nil {
+ log.Errorf("encodeKafka error: %v", err)
+ return
+ }
+ msg := kafkago.Message{
+ Value: entryByteArray,
+ }
+ err = r.kafkaWriter.WriteMessages(context.Background(), msg)
+ if err != nil {
+ log.Errorf("encodeKafka error: %v", err)
+ } else {
+ r.recordsWritten.Inc()
+ }
+}
+
+func (r *encodeKafka) Update(_ config.StageParam) {
+ log.Warn("Encode Kafka, update not supported")
+}
+
+// NewEncodeKafka creates a new writer to Kafka
+func NewEncodeKafka(opMetrics *operational.Metrics, params config.StageParam) (Encoder, error) {
+ log.Debugf("entering NewEncodeKafka")
+ config := api.EncodeKafka{}
+ if params.Encode != nil && params.Encode.Kafka != nil {
+ config = *params.Encode.Kafka
+ }
+
+ var balancer kafkago.Balancer
+ switch config.Balancer {
+ case api.KafkaRoundRobin:
+ balancer = &kafkago.RoundRobin{}
+ case api.KafkaLeastBytes:
+ balancer = &kafkago.LeastBytes{}
+ case api.KafkaHash:
+ balancer = &kafkago.Hash{}
+ case api.KafkaCrc32:
+ balancer = &kafkago.CRC32Balancer{}
+ case api.KafkaMurmur2:
+ balancer = &kafkago.Murmur2Balancer{}
+ default:
+ balancer = nil
+ }
+
+ readTimeoutSecs := defaultReadTimeoutSeconds
+ if config.ReadTimeout != 0 {
+ readTimeoutSecs = config.ReadTimeout
+ }
+
+ writeTimeoutSecs := defaultWriteTimeoutSeconds
+ if config.WriteTimeout != 0 {
+ writeTimeoutSecs = config.WriteTimeout
+ }
+
+ transport := kafkago.Transport{}
+ if config.TLS != nil {
+ log.Infof("Using TLS configuration: %v", config.TLS)
+ tlsConfig, err := config.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ transport.TLS = tlsConfig
+ }
+
+ if config.SASL != nil {
+ m, err := utils.SetupSASLMechanism(config.SASL)
+ if err != nil {
+ return nil, err
+ }
+ transport.SASL = m
+ }
+
+ // connect to the kafka server
+ kafkaWriter := kafkago.Writer{
+ Addr: kafkago.TCP(config.Address),
+ Topic: config.Topic,
+ Balancer: balancer,
+ ReadTimeout: time.Duration(readTimeoutSecs) * time.Second,
+ WriteTimeout: time.Duration(writeTimeoutSecs) * time.Second,
+ BatchSize: config.BatchSize,
+ BatchBytes: config.BatchBytes,
+ // Temporary fix; maybe we should implement a batching system instead:
+ // https://github.com/segmentio/kafka-go/issues/326#issuecomment-519375403
+ BatchTimeout: time.Nanosecond,
+ Transport: &transport,
+ }
+
+ return &encodeKafka{
+ kafkaParams: config,
+ kafkaWriter: &kafkaWriter,
+ recordsWritten: opMetrics.CreateRecordsWrittenCounter(params.Name),
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
new file mode 100644
index 000000000..dbf974991
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encode
+
+import (
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics"
+ promserver "github.com/netobserv/flowlogs-pipeline/pkg/prometheus"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+var plog = logrus.WithField("component", "encode.Prometheus")
+
+const defaultExpiryTime = time.Duration(2 * time.Minute)
+
+// nolint:revive
+type EncodeProm struct {
+ cfg *api.PromEncode
+ registerer prometheus.Registerer
+ metricCommon *MetricsCommonStruct
+ updateChan chan config.StageParam
+ server *promserver.PromServer
+ regName string
+}
+
+func (e *EncodeProm) Gatherer() prometheus.Gatherer {
+ return e.server
+}
+
+// Encode encodes a metric before it is stored; the heavy lifting is done by MetricCommonEncode
+func (e *EncodeProm) Encode(metricRecord config.GenericMap) {
+ plog.Tracef("entering EncodeMetric. metricRecord = %v", metricRecord)
+ e.metricCommon.MetricCommonEncode(e, metricRecord)
+ e.checkConfUpdate()
+}
+
+func (e *EncodeProm) ProcessCounter(m interface{}, labels map[string]string, value float64) error {
+ counter := m.(*prometheus.CounterVec)
+ mm, err := counter.GetMetricWith(labels)
+ if err != nil {
+ return err
+ }
+ mm.Add(value)
+ return nil
+}
+
+func (e *EncodeProm) ProcessGauge(m interface{}, labels map[string]string, value float64, _ string) error {
+ gauge := m.(*prometheus.GaugeVec)
+ mm, err := gauge.GetMetricWith(labels)
+ if err != nil {
+ return err
+ }
+ mm.Set(value)
+ return nil
+}
+
+func (e *EncodeProm) ProcessHist(m interface{}, labels map[string]string, value float64) error {
+ hist := m.(*prometheus.HistogramVec)
+ mm, err := hist.GetMetricWith(labels)
+ if err != nil {
+ return err
+ }
+ mm.Observe(value)
+ return nil
+}
+
+func (e *EncodeProm) ProcessAggHist(m interface{}, labels map[string]string, values []float64) error {
+ hist := m.(*prometheus.HistogramVec)
+ mm, err := hist.GetMetricWith(labels)
+ if err != nil {
+ return err
+ }
+ for _, v := range values {
+ mm.Observe(v)
+ }
+ return nil
+}
+
+func (e *EncodeProm) GetChacheEntry(entryLabels map[string]string, m interface{}) interface{} {
+ switch mv := m.(type) {
+ case *prometheus.CounterVec:
+ return func() { mv.Delete(entryLabels) }
+ case *prometheus.GaugeVec:
+ return func() { mv.Delete(entryLabels) }
+ case *prometheus.HistogramVec:
+ return func() { mv.Delete(entryLabels) }
+ }
+ return nil
+}
+
+// Cleanup is the callback function invoked by the LRU cache cleanup
+func (e *EncodeProm) Cleanup(cleanupFunc interface{}) {
+ cleanupFunc.(func())()
+}
+
+func (e *EncodeProm) addCounter(fullMetricName string, mInfo *metrics.Preprocessed) prometheus.Collector {
+ counter := prometheus.NewCounterVec(prometheus.CounterOpts{Name: fullMetricName, Help: ""}, mInfo.TargetLabels())
+ e.metricCommon.AddCounter(fullMetricName, counter, mInfo)
+ return counter
+}
+
+func (e *EncodeProm) addGauge(fullMetricName string, mInfo *metrics.Preprocessed) prometheus.Collector {
+ gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: fullMetricName, Help: ""}, mInfo.TargetLabels())
+ e.metricCommon.AddGauge(fullMetricName, gauge, mInfo)
+ return gauge
+}
+
+func (e *EncodeProm) addHistogram(fullMetricName string, mInfo *metrics.Preprocessed) prometheus.Collector {
+ histogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fullMetricName, Help: ""}, mInfo.TargetLabels())
+ e.metricCommon.AddHist(fullMetricName, histogram, mInfo)
+ return histogram
+}
+
+func (e *EncodeProm) addAgghistogram(fullMetricName string, mInfo *metrics.Preprocessed) prometheus.Collector {
+ agghistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fullMetricName, Help: ""}, mInfo.TargetLabels())
+ e.metricCommon.AddAggHist(fullMetricName, agghistogram, mInfo)
+ return agghistogram
+}
+
+func (e *EncodeProm) unregisterMetric(c interface{}) {
+ if c, ok := c.(prometheus.Collector); ok {
+ e.registerer.Unregister(c)
+ }
+}
+
+func (e *EncodeProm) cleanDeletedGeneric(newCfg api.PromEncode, metrics map[string]mInfoStruct) {
+ for fullName, m := range metrics {
+ if !strings.HasPrefix(fullName, newCfg.Prefix) {
+ e.unregisterMetric(m.genericMetric)
+ delete(metrics, fullName)
+ continue
+ }
+ metricName := strings.TrimPrefix(fullName, newCfg.Prefix)
+ found := false
+ for i := range newCfg.Metrics {
+ if metricName == newCfg.Metrics[i].Name {
+ found = true
+ break
+ }
+ }
+ if !found {
+ e.unregisterMetric(m.genericMetric)
+ delete(metrics, fullName)
+ }
+ }
+}
+
+func (e *EncodeProm) cleanDeletedMetrics(newCfg api.PromEncode) {
+ e.cleanDeletedGeneric(newCfg, e.metricCommon.counters)
+ e.cleanDeletedGeneric(newCfg, e.metricCommon.gauges)
+ e.cleanDeletedGeneric(newCfg, e.metricCommon.histos)
+ e.cleanDeletedGeneric(newCfg, e.metricCommon.aggHistos)
+}
+
+// returns true if a registry restart is needed
+func (e *EncodeProm) checkMetricUpdate(prefix string, apiItem *api.MetricsItem, store map[string]mInfoStruct, createMetric func(string, *metrics.Preprocessed) prometheus.Collector) bool {
+ fullMetricName := prefix + apiItem.Name
+ plog.Debugf("Checking metric: %s", fullMetricName)
+ mInfo := metrics.Preprocess(apiItem)
+ if oldMetric, ok := store[fullMetricName]; ok {
+ if !reflect.DeepEqual(mInfo.TargetLabels(), oldMetric.info.TargetLabels()) {
+ plog.Debug("Changes detected in labels")
+ return true
+ }
+ if !reflect.DeepEqual(mInfo.MetricsItem, oldMetric.info.MetricsItem) {
+ plog.Debug("Changes detected: unregistering and replacing")
+ e.unregisterMetric(oldMetric.genericMetric)
+ c := createMetric(fullMetricName, mInfo)
+ err := e.registerer.Register(c)
+ if err != nil {
+ plog.Errorf("error in prometheus.Register: %v", err)
+ }
+ } else {
+ plog.Debug("No changes found")
+ }
+ } else {
+ plog.Debug("New metric")
+ c := createMetric(fullMetricName, mInfo)
+ err := e.registerer.Register(c)
+ if err != nil {
+ plog.Errorf("error in prometheus.Register: %v", err)
+ }
+ }
+ return false
+}
+
+func (e *EncodeProm) checkConfUpdate() {
+ select {
+ case stage := <-e.updateChan:
+ cfg := api.PromEncode{}
+ if stage.Encode != nil && stage.Encode.Prom != nil {
+ cfg = *stage.Encode.Prom
+ }
+ plog.Infof("Received config update: %v", cfg)
+
+ e.cleanDeletedMetrics(cfg)
+
+ needNewRegistry := false
+ for i := range cfg.Metrics {
+ switch cfg.Metrics[i].Type {
+ case api.MetricCounter:
+ needNewRegistry = e.checkMetricUpdate(cfg.Prefix, &cfg.Metrics[i], e.metricCommon.counters, e.addCounter)
+ case api.MetricGauge:
+ needNewRegistry = e.checkMetricUpdate(cfg.Prefix, &cfg.Metrics[i], e.metricCommon.gauges, e.addGauge)
+ case api.MetricHistogram:
+ needNewRegistry = e.checkMetricUpdate(cfg.Prefix, &cfg.Metrics[i], e.metricCommon.histos, e.addHistogram)
+ case api.MetricAggHistogram:
+ needNewRegistry = e.checkMetricUpdate(cfg.Prefix, &cfg.Metrics[i], e.metricCommon.aggHistos, e.addAgghistogram)
+ case "default":
+ plog.Errorf("invalid metric type = %v, skipping", cfg.Metrics[i].Type)
+ continue
+ }
+ if needNewRegistry {
+ break
+ }
+ }
+ e.cfg = &cfg
+ if needNewRegistry {
+ // cf https://pkg.go.dev/github.com/prometheus/client_golang@v1.19.0/prometheus#Registerer.Unregister
+ plog.Info("Changes detected on labels: need registry reset.")
+ e.resetRegistry()
+ break
+ }
+ default:
+ // Nothing to do
+ return
+ }
+}
+
+func (e *EncodeProm) resetRegistry() {
+ e.metricCommon.cleanupInfoStructs()
+ reg := prometheus.NewRegistry()
+ e.registerer = reg
+ for i := range e.cfg.Metrics {
+ mCfg := &e.cfg.Metrics[i]
+ fullMetricName := e.cfg.Prefix + mCfg.Name
+ mInfo := metrics.Preprocess(mCfg)
+ plog.Debugf("Create metric: %s, Labels: %v", fullMetricName, mInfo.TargetLabels())
+ var m prometheus.Collector
+ switch mCfg.Type {
+ case api.MetricCounter:
+ m = e.addCounter(fullMetricName, mInfo)
+ case api.MetricGauge:
+ m = e.addGauge(fullMetricName, mInfo)
+ case api.MetricHistogram:
+ m = e.addHistogram(fullMetricName, mInfo)
+ case api.MetricAggHistogram:
+ m = e.addAgghistogram(fullMetricName, mInfo)
+ case "default":
+ plog.Errorf("invalid metric type = %v, skipping", mCfg.Type)
+ continue
+ }
+ if m != nil {
+ err := e.registerer.Register(m)
+ if err != nil {
+ plog.Errorf("error in prometheus.Register: %v", err)
+ }
+ }
+ }
+ e.server.SetRegistry(e.regName, reg)
+}
+
+func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (Encoder, error) {
+ cfg := api.PromEncode{}
+ if params.Encode != nil && params.Encode.Prom != nil {
+ cfg = *params.Encode.Prom
+ }
+
+ expiryTime := cfg.ExpiryTime
+ if expiryTime.Duration == 0 {
+ expiryTime.Duration = defaultExpiryTime
+ }
+ plog.Debugf("expiryTime = %v", expiryTime)
+
+ registry := prometheus.NewRegistry()
+
+ w := &EncodeProm{
+ cfg: &cfg,
+ registerer: registry,
+ updateChan: make(chan config.StageParam),
+ server: promserver.SharedServer,
+ regName: params.Name,
+ }
+
+ if cfg.PromConnectionInfo != nil {
+ // Start new server
+ w.server = promserver.StartServerAsync(cfg.PromConnectionInfo, params.Name, registry)
+ }
+
+ metricCommon := NewMetricsCommonStruct(opMetrics, cfg.MaxMetrics, params.Name, expiryTime, w.Cleanup)
+ w.metricCommon = metricCommon
+
+ // Init metrics
+ w.resetRegistry()
+
+ return w, nil
+}
+
+func (e *EncodeProm) Update(config config.StageParam) {
+ e.updateChan <- config
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go
new file mode 100644
index 000000000..a1b476c77
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encode
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ minio "github.com/minio/minio-go/v7"
+ "github.com/minio/minio-go/v7/pkg/credentials"
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/prometheus/client_golang/prometheus"
+ log "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+const (
+ flpS3Version = "v0.1"
+ defaultBatchSize = 10
+)
+
+var (
+ defaultTimeOut = api.Duration{Duration: 60 * time.Second}
+)
+
+type encodeS3 struct {
+ s3Params api.EncodeS3
+ s3Writer s3WriteEntries
+ recordsWritten prometheus.Counter
+ pendingEntries []config.GenericMap
+ mutex *sync.Mutex
+ expiryTime time.Time
+ exitChan <-chan struct{}
+ streamID string
+ intervalStartTime time.Time
+ sequenceNumber int64
+}
+
+type s3WriteEntries interface {
+ putObject(bucket string, objectName string, object map[string]interface{}) error
+}
+
+type encodeS3Writer struct {
+ s3Client *minio.Client
+ s3Params *api.EncodeS3
+}
+
+// The mutex must be held when calling writeObject
+func (s *encodeS3) writeObject() error {
+ nLogs := len(s.pendingEntries)
+ if nLogs > s.s3Params.BatchSize {
+ nLogs = s.s3Params.BatchSize
+ }
+ now := time.Now()
+ object := s.GenerateStoreHeader(s.pendingEntries[0:nLogs], s.intervalStartTime, now)
+ year := fmt.Sprintf("%04d", now.Year())
+ month := fmt.Sprintf("%02d", now.Month())
+ day := fmt.Sprintf("%02d", now.Day())
+ hour := fmt.Sprintf("%02d", now.Hour())
+ seq := fmt.Sprintf("%08d", s.sequenceNumber)
+ objectName := s.s3Params.Account + "/year=" + year + "/month=" + month + "/day=" + day + "/hour=" + hour + "/stream-id=" + s.streamID + "/" + seq
+ log.Debugf("S3 writeObject: objectName = %s", objectName)
+ log.Debugf("S3 writeObject: object = %v", object)
+ s.pendingEntries = s.pendingEntries[nLogs:]
+ s.intervalStartTime = now
+ s.expiryTime = now.Add(s.s3Params.WriteTimeout.Duration)
+ s.sequenceNumber++
+ // send object to object store
+ err := s.s3Writer.putObject(s.s3Params.Bucket, objectName, object)
+ if err != nil {
+ log.Errorf("error in writing object: %v", err)
+ }
+ return err
+}
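+
+// For illustration (account and timestamps are hypothetical): with Account "acct1" and a batch
+// flushed on 2024-05-03 at 17:00, the object name built above looks like
+// "acct1/year=2024/month=05/day=03/hour=17/stream-id=<RFC3339 creation time>/00000000",
+// with the trailing sequence number incremented on every flush.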
+
+func (s *encodeS3) GenerateStoreHeader(flows []config.GenericMap, startTime time.Time, endTime time.Time) map[string]interface{} {
+ object := make(map[string]interface{})
+ // copy user defined keys from config to object header
+ for key, value := range s.s3Params.ObjectHeaderParameters {
+ object[key] = value
+ }
+ object["version"] = flpS3Version
+ object["capture_start_time"] = startTime.Format(time.RFC3339)
+ object["capture_end_time"] = endTime.Format(time.RFC3339)
+ object["number_of_flow_logs"] = len(flows)
+ object["flow_logs"] = flows
+
+ return object
+}
+
+func (s *encodeS3) Update(_ config.StageParam) {
+ log.Warn("Encode S3 Writer, update not supported")
+}
+
+func (s *encodeS3) createObjectTimeoutLoop() {
+ log.Debugf("entering createObjectTimeoutLoop")
+ ticker := time.NewTicker(s.s3Params.WriteTimeout.Duration)
+ for {
+ select {
+ case <-s.exitChan:
+ log.Debugf("exiting createObjectTimeoutLoop because of signal")
+ return
+ case <-ticker.C:
+ now := time.Now()
+ log.Debugf("time now = %v, expiryTime = %v", now, s.expiryTime)
+ s.mutex.Lock()
+ _ = s.writeObject()
+ s.mutex.Unlock()
+ }
+ }
+}
+
+// Encode queues entries to be sent to the object store
+func (s *encodeS3) Encode(entry config.GenericMap) {
+ log.Debugf("Encode S3, entry = %v", entry)
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.pendingEntries = append(s.pendingEntries, entry)
+ s.recordsWritten.Inc()
+ if len(s.pendingEntries) >= s.s3Params.BatchSize {
+ _ = s.writeObject()
+ }
+}
+
+// NewEncodeS3 creates a new writer to S3
+func NewEncodeS3(opMetrics *operational.Metrics, params config.StageParam) (Encoder, error) {
+ configParams := api.EncodeS3{}
+ if params.Encode != nil && params.Encode.S3 != nil {
+ configParams = *params.Encode.S3
+ }
+ log.Debugf("NewEncodeS3, config = %v", configParams)
+ s3Writer := &encodeS3Writer{
+ s3Params: &configParams,
+ }
+ if configParams.WriteTimeout.Duration == time.Duration(0) {
+ configParams.WriteTimeout = defaultTimeOut
+ }
+ if configParams.BatchSize == 0 {
+ configParams.BatchSize = defaultBatchSize
+ }
+
+ s := &encodeS3{
+ s3Params: configParams,
+ s3Writer: s3Writer,
+ recordsWritten: opMetrics.CreateRecordsWrittenCounter(params.Name),
+ pendingEntries: make([]config.GenericMap, 0),
+ expiryTime: time.Now().Add(configParams.WriteTimeout.Duration),
+ exitChan: utils.ExitChannel(),
+ streamID: time.Now().Format(time.RFC3339),
+ intervalStartTime: time.Now(),
+ mutex: &sync.Mutex{},
+ }
+ go s.createObjectTimeoutLoop()
+ return s, nil
+}
+
+func (e *encodeS3Writer) connectS3(config *api.EncodeS3) (*minio.Client, error) {
+ // Initialize s3 client object.
+ minioOptions := minio.Options{
+ Creds: credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, ""),
+ Secure: config.Secure,
+ }
+ s3Client, err := minio.New(config.Endpoint, &minioOptions)
+ if err != nil {
+ log.Errorf("Error when creating S3 client: %v", err)
+ return nil, err
+ }
+
+ found, err := s3Client.BucketExists(context.Background(), config.Bucket)
+ if err != nil {
+ log.Errorf("Error accessing S3 bucket: %v", err)
+ return nil, err
+ }
+ if found {
+ log.Infof("S3 Bucket %s found", config.Bucket)
+ }
+ log.Debugf("s3Client = %#v", s3Client) // s3Client is now setup
+ return s3Client, nil
+}
+
+func (e *encodeS3Writer) putObject(bucket string, objectName string, object map[string]interface{}) error {
+ if e.s3Client == nil {
+ s3Client, err := e.connectS3(e.s3Params)
+ if s3Client == nil {
+ return err
+ }
+ e.s3Client = s3Client
+ }
+ b := new(bytes.Buffer)
+ err := json.NewEncoder(b).Encode(object)
+ if err != nil {
+ log.Errorf("error encoding object: %v", err)
+ return err
+ }
+ log.Debugf("encoded object = %v", b)
+ // TBD: add necessary headers such as authorization (token), gzip, md5, etc
+ uploadInfo, err := e.s3Client.PutObject(context.Background(), bucket, objectName, b, int64(b.Len()), minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ log.Debugf("uploadInfo = %v", uploadInfo)
+ return err
+}
+
+func (e *encodeS3Writer) Update(_ config.StageParam) {
+ log.Warn("Encode S3 Writer, update not supported")
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/filtering.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/filtering.go
new file mode 100644
index 000000000..885d4ae2a
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/filtering.go
@@ -0,0 +1,28 @@
+package metrics
+
+import "github.com/netobserv/flowlogs-pipeline/pkg/config"
+
+func (p *Preprocessed) ApplyFilters(flow config.GenericMap, flatParts []config.GenericMap) (bool, []config.GenericMap) {
+ filteredParts := flatParts
+ for _, filter := range p.filters {
+ if filter.useFlat {
+ filteredParts = filter.filterFlatParts(filteredParts)
+ if len(filteredParts) == 0 {
+ return false, nil
+ }
+ } else if !filter.predicate(flow) {
+ return false, nil
+ }
+ }
+ return true, filteredParts
+}
+
+func (pf *preprocessedFilter) filterFlatParts(flatParts []config.GenericMap) []config.GenericMap {
+ var filteredParts []config.GenericMap
+ for _, part := range flatParts {
+ if pf.predicate(part) {
+ filteredParts = append(filteredParts, part)
+ }
+ }
+ return filteredParts
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/flattening.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/flattening.go
new file mode 100644
index 000000000..e9f27ebbd
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/flattening.go
@@ -0,0 +1,88 @@
+package metrics
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+)
+
+func (p *Preprocessed) GenerateFlatParts(flow config.GenericMap) []config.GenericMap {
+ if len(p.MetricsItem.Flatten) == 0 {
+ return nil
+ }
+ // Want to generate sub-flows from {A=foo, B=[{B1=x, B2=y},{B1=z}], C=[foo,bar]}
+ // => {B>B1=x, B>B2=y, C=foo}, {B>B1=z, C=foo}, {B>B1=x, B>B2=y, C=bar}, {B>B1=z, C=bar}
+ var partsPerLabel [][]config.GenericMap
+ for _, fl := range p.MetricsItem.Flatten {
+ if anyVal, ok := flow[fl]; ok {
+ // Intermediate step to get:
+ // [{B>B1=x, B>B2=y}, {B>B1=z}], [C=foo, C=bar]
+ var partsForLabel []config.GenericMap
+ switch v := anyVal.(type) {
+ case []any:
+ prefix := fl + ">"
+ for _, vv := range v {
+ switch vvv := vv.(type) {
+ case config.GenericMap:
+ partsForLabel = append(partsForLabel, flattenNested(prefix, vvv))
+ default:
+ partsForLabel = append(partsForLabel, config.GenericMap{fl: vv})
+ }
+ }
+ case []config.GenericMap:
+ prefix := fl + ">"
+ for _, vv := range v {
+ partsForLabel = append(partsForLabel, flattenNested(prefix, vv))
+ }
+ case []string:
+ for _, vv := range v {
+ partsForLabel = append(partsForLabel, config.GenericMap{fl: vv})
+ }
+ }
+ if len(partsForLabel) > 0 {
+ partsPerLabel = append(partsPerLabel, partsForLabel)
+ }
+ }
+ }
+ return distribute(partsPerLabel)
+}
+
+func distribute(allUnflat [][]config.GenericMap) []config.GenericMap {
+ // turn
+ // [{B>B1=x, B>B2=y}, {B>B1=z}], [{C=foo}, {C=bar}]
+ // into
+ // [{B>B1=x, B>B2=y, C=foo}, {B>B1=z, C=foo}, {B>B1=x, B>B2=y, C=bar}, {B>B1=z, C=bar}]
+ totalCard := 1
+ for _, part := range allUnflat {
+ if len(part) > 1 {
+ totalCard *= len(part)
+ }
+ }
+ ret := make([]config.GenericMap, totalCard)
+ indexes := make([]int, len(allUnflat))
+ for c := range ret {
+ ret[c] = config.GenericMap{}
+ incIndex := false
+ for i, part := range allUnflat {
+ index := indexes[i]
+ for k, v := range part[index] {
+ ret[c][k] = v
+ }
+ if !incIndex {
+ if index+1 == len(part) {
+ indexes[i] = 0
+ } else {
+ indexes[i] = index + 1
+ incIndex = true
+ }
+ }
+ }
+ }
+ return ret
+}
+
+func flattenNested(prefix string, nested config.GenericMap) config.GenericMap {
+ subFlow := config.GenericMap{}
+ for k, v := range nested {
+ subFlow[prefix+k] = v
+ }
+ return subFlow
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/preprocess.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/preprocess.go
new file mode 100644
index 000000000..5aabc8311
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/preprocess.go
@@ -0,0 +1,91 @@
+package metrics
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils/filters"
+)
+
+type Preprocessed struct {
+ *api.MetricsItem
+ filters []preprocessedFilter
+ MappedLabels []MappedLabel
+ FlattenedLabels []MappedLabel
+}
+
+type MappedLabel struct {
+ Source string
+ Target string
+}
+
+type preprocessedFilter struct {
+ predicate filters.Predicate
+ useFlat bool
+}
+
+func (p *Preprocessed) TargetLabels() []string {
+ var targetLabels []string
+ for _, l := range p.FlattenedLabels {
+ targetLabels = append(targetLabels, l.Target)
+ }
+ for _, l := range p.MappedLabels {
+ targetLabels = append(targetLabels, l.Target)
+ }
+ return targetLabels
+}
+
+func filterToPredicate(filter api.MetricsFilter) filters.Predicate {
+ switch filter.Type {
+ case api.MetricFilterEqual:
+ return filters.Equal(filter.Key, filter.Value, true)
+ case api.MetricFilterNotEqual:
+ return filters.NotEqual(filter.Key, filter.Value, true)
+ case api.MetricFilterPresence:
+ return filters.Presence(filter.Key)
+ case api.MetricFilterAbsence:
+ return filters.Absence(filter.Key)
+ case api.MetricFilterRegex:
+ r, _ := regexp.Compile(filter.Value)
+ return filters.Regex(filter.Key, r)
+ case api.MetricFilterNotRegex:
+ r, _ := regexp.Compile(filter.Value)
+ return filters.NotRegex(filter.Key, r)
+ }
+ // Default = Exact
+ return filters.Equal(filter.Key, filter.Value, true)
+}
+
+func Preprocess(def *api.MetricsItem) *Preprocessed {
+ mi := Preprocessed{
+ MetricsItem: def,
+ }
+ for _, l := range def.Labels {
+ ml := MappedLabel{Source: l, Target: l}
+ if as := def.Remap[l]; as != "" {
+ ml.Target = as
+ }
+ if mi.isFlattened(l) {
+ mi.FlattenedLabels = append(mi.FlattenedLabels, ml)
+ } else {
+ mi.MappedLabels = append(mi.MappedLabels, ml)
+ }
+ }
+ for _, f := range def.Filters {
+ mi.filters = append(mi.filters, preprocessedFilter{
+ predicate: filterToPredicate(f),
+ useFlat: mi.isFlattened(f.Key),
+ })
+ }
+ return &mi
+}
+
+func (p *Preprocessed) isFlattened(fieldPath string) bool {
+ for _, flat := range p.Flatten {
+ if fieldPath == flat || strings.HasPrefix(fieldPath, flat+">") {
+ return true
+ }
+ }
+ return false
+}
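+
+// Hedged example (field names are hypothetical): for a MetricsItem with
+// Labels: ["namespace", "interfaces>name"], Flatten: ["interfaces"], and
+// Remap: {"interfaces>name": "interface"}, Preprocess places "namespace" in MappedLabels
+// and "interfaces>name" (remapped to target "interface") in FlattenedLabels,
+// so TargetLabels() returns ["interface", "namespace"].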
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go
new file mode 100644
index 000000000..d420f1af8
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2024 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encode
+
+import (
+ "strings"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics"
+ putils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ "github.com/prometheus/client_golang/prometheus"
+ log "github.com/sirupsen/logrus"
+)
+
+type mInfoStruct struct {
+ genericMetric interface{} // can be a counter, gauge, or histogram pointer
+ info *metrics.Preprocessed
+}
+
+type MetricsCommonStruct struct {
+ gauges map[string]mInfoStruct
+ counters map[string]mInfoStruct
+ histos map[string]mInfoStruct
+ aggHistos map[string]mInfoStruct
+ mCache *putils.TimedCache
+ mChacheLenMetric prometheus.Gauge
+ metricsProcessed prometheus.Counter
+ metricsDropped prometheus.Counter
+ errorsCounter *prometheus.CounterVec
+ expiryTime time.Duration
+ exitChan <-chan struct{}
+}
+
+type MetricsCommonInterface interface {
+ GetChacheEntry(entryLabels map[string]string, m interface{}) interface{}
+ ProcessCounter(m interface{}, labels map[string]string, value float64) error
+ ProcessGauge(m interface{}, labels map[string]string, value float64, key string) error
+ ProcessHist(m interface{}, labels map[string]string, value float64) error
+ ProcessAggHist(m interface{}, labels map[string]string, value []float64) error
+}
+
+var (
+ metricsProcessed = operational.DefineMetric(
+ "metrics_processed",
+ "Number of metrics processed",
+ operational.TypeCounter,
+ "stage",
+ )
+ metricsDropped = operational.DefineMetric(
+ "metrics_dropped",
+ "Number of metrics dropped",
+ operational.TypeCounter,
+ "stage",
+ )
+ encodePromErrors = operational.DefineMetric(
+ "encode_prom_errors",
+ "Total errors during metrics generation",
+ operational.TypeCounter,
+ "error", "metric", "key",
+ )
+ mChacheLen = operational.DefineMetric(
+ "encode_prom_metrics_reported",
+ "Total number of prometheus metrics reported by this stage",
+ operational.TypeGauge,
+ "stage",
+ )
+)
+
+func (m *MetricsCommonStruct) AddCounter(name string, g interface{}, info *metrics.Preprocessed) {
+ mStruct := mInfoStruct{genericMetric: g, info: info}
+ m.counters[name] = mStruct
+}
+
+func (m *MetricsCommonStruct) AddGauge(name string, g interface{}, info *metrics.Preprocessed) {
+ mStruct := mInfoStruct{genericMetric: g, info: info}
+ m.gauges[name] = mStruct
+}
+
+func (m *MetricsCommonStruct) AddHist(name string, g interface{}, info *metrics.Preprocessed) {
+ mStruct := mInfoStruct{genericMetric: g, info: info}
+ m.histos[name] = mStruct
+}
+
+func (m *MetricsCommonStruct) AddAggHist(name string, g interface{}, info *metrics.Preprocessed) {
+ mStruct := mInfoStruct{genericMetric: g, info: info}
+ m.aggHistos[name] = mStruct
+}
+
+func (m *MetricsCommonStruct) MetricCommonEncode(mci MetricsCommonInterface, metricRecord config.GenericMap) {
+ log.Tracef("entering MetricCommonEncode. metricRecord = %v", metricRecord)
+
+ // Process counters
+ for _, mInfo := range m.counters {
+ labelSets, value := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
+ if labelSets == nil {
+ continue
+ }
+ for _, labels := range labelSets {
+ err := mci.ProcessCounter(mInfo.genericMetric, labels.lMap, value)
+ if err != nil {
+ log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err)
+ m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
+ continue
+ }
+ m.metricsProcessed.Inc()
+ }
+ }
+
+ // Process gauges
+ for _, mInfo := range m.gauges {
+ labelSets, value := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
+ if labelSets == nil {
+ continue
+ }
+ for _, labels := range labelSets {
+ err := mci.ProcessGauge(mInfo.genericMetric, labels.lMap, value, labels.key)
+ if err != nil {
+ log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err)
+ m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
+ continue
+ }
+ m.metricsProcessed.Inc()
+ }
+ }
+
+ // Process histograms
+ for _, mInfo := range m.histos {
+ labelSets, value := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
+ if labelSets == nil {
+ continue
+ }
+ for _, labels := range labelSets {
+ err := mci.ProcessHist(mInfo.genericMetric, labels.lMap, value)
+ if err != nil {
+ log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err)
+ m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
+ continue
+ }
+ m.metricsProcessed.Inc()
+ }
+ }
+
+ // Process pre-aggregated histograms
+ for _, mInfo := range m.aggHistos {
+ labelSets, values := m.prepareAggHisto(mci, metricRecord, mInfo.info, mInfo.genericMetric)
+ if labelSets == nil {
+ continue
+ }
+ for _, labels := range labelSets {
+ err := mci.ProcessAggHist(mInfo.genericMetric, labels.lMap, values)
+ if err != nil {
+ log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err)
+ m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
+ continue
+ }
+ m.metricsProcessed.Inc()
+ }
+ }
+}
+
+func (m *MetricsCommonStruct) prepareMetric(mci MetricsCommonInterface, flow config.GenericMap, info *metrics.Preprocessed, mv interface{}) ([]labelsKeyAndMap, float64) {
+ flatParts := info.GenerateFlatParts(flow)
+ ok, flatParts := info.ApplyFilters(flow, flatParts)
+ if !ok {
+ return nil, 0
+ }
+
+ val := m.extractGenericValue(flow, info)
+ if val == nil {
+ return nil, 0
+ }
+ floatVal, err := utils.ConvertToFloat64(val)
+ if err != nil {
+ m.errorsCounter.WithLabelValues("ValueConversionError", info.Name, info.ValueKey).Inc()
+ return nil, 0
+ }
+ if info.ValueScale != 0 {
+ floatVal /= info.ValueScale
+ }
+
+ labelSets := extractLabels(flow, flatParts, info)
+ var lkms []labelsKeyAndMap
+ for _, ls := range labelSets {
+ // Update entry for expiry mechanism (the entry itself is its own cleanup function)
+ lkm := ls.toKeyAndMap(info)
+ lkms = append(lkms, lkm)
+ cacheEntry := mci.GetChacheEntry(lkm.lMap, mv)
+ ok := m.mCache.UpdateCacheEntry(lkm.key, cacheEntry)
+ if !ok {
+ m.metricsDropped.Inc()
+ return nil, 0
+ }
+ }
+ return lkms, floatVal
+}
+
+func (m *MetricsCommonStruct) prepareAggHisto(mci MetricsCommonInterface, flow config.GenericMap, info *metrics.Preprocessed, mc interface{}) ([]labelsKeyAndMap, []float64) {
+ flatParts := info.GenerateFlatParts(flow)
+ ok, flatParts := info.ApplyFilters(flow, flatParts)
+ if !ok {
+ return nil, nil
+ }
+
+ val := m.extractGenericValue(flow, info)
+ if val == nil {
+ return nil, nil
+ }
+ values, ok := val.([]float64)
+ if !ok {
+ m.errorsCounter.WithLabelValues("HistoValueConversionError", info.Name, info.ValueKey).Inc()
+ return nil, nil
+ }
+
+ labelSets := extractLabels(flow, flatParts, info)
+ var lkms []labelsKeyAndMap
+ for _, ls := range labelSets {
+ // Update entry for expiry mechanism (the entry itself is its own cleanup function)
+ lkm := ls.toKeyAndMap(info)
+ lkms = append(lkms, lkm)
+ cacheEntry := mci.GetChacheEntry(lkm.lMap, mc)
+ ok := m.mCache.UpdateCacheEntry(lkm.key, cacheEntry)
+ if !ok {
+ m.metricsDropped.Inc()
+ return nil, nil
+ }
+ }
+ return lkms, values
+}
+
+func (m *MetricsCommonStruct) extractGenericValue(flow config.GenericMap, info *metrics.Preprocessed) interface{} {
+ if info.ValueKey == "" {
+ // No value key means it's a records / flows counter (1 flow = 1 increment), so just return 1
+ return 1
+ }
+ val, found := flow[info.ValueKey]
+ if !found {
+ // No value might mean 0 for counters, to keep storage lightweight - it can safely be ignored
+ return nil
+ }
+ return val
+}
+
+type label struct {
+ key string
+ value string
+}
+
+type labelSet []label
+
+type labelsKeyAndMap struct {
+ key string
+ lMap map[string]string
+}
+
+func (l labelSet) toKeyAndMap(info *metrics.Preprocessed) labelsKeyAndMap {
+ key := strings.Builder{}
+ key.WriteString(info.Name)
+ key.WriteRune('|')
+ m := map[string]string{}
+ for _, kv := range l {
+ key.WriteString(kv.value)
+ key.WriteRune('|')
+ m[kv.key] = kv.value
+ }
+ return labelsKeyAndMap{key: key.String(), lMap: m}
+}
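+
+// For illustration (names and values are hypothetical): with info.Name "flows" and the ordered
+// label set [{key: "namespace", value: "foo"}, {key: "node", value: "n1"}], toKeyAndMap returns
+// the cache key "flows|foo|n1|" and lMap {"namespace": "foo", "node": "n1"}.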
+
+// extractLabels takes the flow and a single metric definition as input.
+// It returns the flattened label sets (label names and values).
+// Most of the time it returns a single set; it may return several when the parsed flow fields are lists (e.g. "interfaces").
+func extractLabels(flow config.GenericMap, flatParts []config.GenericMap, info *metrics.Preprocessed) []labelSet {
+ common := newLabelSet(flow, info.MappedLabels)
+ if len(flatParts) == 0 {
+ return []labelSet{common}
+ }
+ var all []labelSet
+ for _, fp := range flatParts {
+ ls := newLabelSet(fp, info.FlattenedLabels)
+ ls = append(ls, common...)
+ all = append(all, ls)
+ }
+ return all
+}
+
+func newLabelSet(part config.GenericMap, labels []metrics.MappedLabel) labelSet {
+ var ls labelSet
+ for _, t := range labels {
+ label := label{key: t.Target, value: ""}
+ if v, ok := part[t.Source]; ok {
+ label.value = utils.ConvertToString(v)
+ }
+ ls = append(ls, label)
+ }
+ return ls
+}
+
+func (m *MetricsCommonStruct) cleanupExpiredEntriesLoop(callback putils.CacheCallback) {
+ ticker := time.NewTicker(m.expiryTime)
+ for {
+ select {
+ case <-m.exitChan:
+ log.Debugf("exiting cleanupExpiredEntriesLoop because of signal")
+ return
+ case <-ticker.C:
+ m.mCache.CleanupExpiredEntries(m.expiryTime, callback)
+ }
+ }
+}
+
+func (m *MetricsCommonStruct) cleanupInfoStructs() {
+ m.gauges = map[string]mInfoStruct{}
+ m.counters = map[string]mInfoStruct{}
+ m.histos = map[string]mInfoStruct{}
+ m.aggHistos = map[string]mInfoStruct{}
+}
+
+func NewMetricsCommonStruct(opMetrics *operational.Metrics, maxCacheEntries int, name string, expiryTime api.Duration, callback putils.CacheCallback) *MetricsCommonStruct {
+ mChacheLenMetric := opMetrics.NewGauge(&mChacheLen, name)
+ m := &MetricsCommonStruct{
+ mCache: putils.NewTimedCache(maxCacheEntries, mChacheLenMetric),
+ mChacheLenMetric: mChacheLenMetric,
+ metricsProcessed: opMetrics.NewCounter(&metricsProcessed, name),
+ metricsDropped: opMetrics.NewCounter(&metricsDropped, name),
+ errorsCounter: opMetrics.NewCounterVec(&encodePromErrors),
+ expiryTime: expiryTime.Duration,
+ exitChan: putils.ExitChannel(),
+ gauges: map[string]mInfoStruct{},
+ counters: map[string]mInfoStruct{},
+ histos: map[string]mInfoStruct{},
+ aggHistos: map[string]mInfoStruct{},
+ }
+ go m.cleanupExpiredEntriesLoop(callback)
+ return m
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go
new file mode 100644
index 000000000..1eee38912
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package opentelemetry
+
+import (
+ "context"
+
+ sdklog "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode"
+ log "github.com/sirupsen/logrus"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+type EncodeOtlpLogs struct {
+ cfg api.EncodeOtlpLogs
+ ctx context.Context
+ res *resource.Resource
+ lp *sdklog.LoggerProvider
+}
+
+// Encode encodes a log entry to be exported
+func (e *EncodeOtlpLogs) Encode(entry config.GenericMap) {
+ log.Tracef("entering EncodeOtlpLogs. entry = %v", entry)
+ e.LogWrite(entry)
+}
+
+func (e *EncodeOtlpLogs) Update(_ config.StageParam) {
+ log.Warn("EncodeOtlpLogs, update not supported")
+}
+
+func NewEncodeOtlpLogs(_ *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+ log.Tracef("entering NewEncodeOtlpLogs \n")
+ cfg := api.EncodeOtlpLogs{}
+ if params.Encode != nil && params.Encode.OtlpLogs != nil {
+ cfg = *params.Encode.OtlpLogs
+ }
+ log.Debugf("NewEncodeOtlpLogs cfg = %v \n", cfg)
+
+ ctx := context.Background()
+ res := newResource()
+
+ lp, err := NewOtlpLoggerProvider(ctx, params, res)
+ if err != nil {
+ return nil, err
+ }
+
+ w := &EncodeOtlpLogs{
+ cfg: cfg,
+ ctx: ctx,
+ res: res,
+ lp: lp,
+ }
+ return w, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go
new file mode 100644
index 000000000..92200cece
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package opentelemetry
+
+import (
+ "context"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics"
+ log "github.com/sirupsen/logrus"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+const defaultExpiryTime = time.Duration(2 * time.Minute)
+const flpMeterName = "flp_meter"
+
+type EncodeOtlpMetrics struct {
+ cfg api.EncodeOtlpMetrics
+ ctx context.Context
+ res *resource.Resource
+ mp *sdkmetric.MeterProvider
+ meter metric.Meter
+ metricCommon *encode.MetricsCommonStruct
+}
+
+func (e *EncodeOtlpMetrics) Update(_ config.StageParam) {
+ log.Warn("EncodeOtlpMetrics, update not supported")
+}
+
+// Encode encodes a metric to be exported
+func (e *EncodeOtlpMetrics) Encode(metricRecord config.GenericMap) {
+ log.Tracef("entering EncodeOtlpMetrics. entry = %v", metricRecord)
+ e.metricCommon.MetricCommonEncode(e, metricRecord)
+}
+
+func (e *EncodeOtlpMetrics) ProcessCounter(m interface{}, labels map[string]string, value float64) error {
+ counter := m.(metric.Float64Counter)
+ // set attributes using the labels
+ attributes := obtainAttributesFromLabels(labels)
+ counter.Add(e.ctx, value, metric.WithAttributes(attributes...))
+ return nil
+}
+
+func (e *EncodeOtlpMetrics) ProcessGauge(m interface{}, labels map[string]string, value float64, key string) error {
+ obs := m.(Float64Gauge)
+ // set attributes using the labels
+ attributes := obtainAttributesFromLabels(labels)
+ obs.Set(key, value, attributes)
+ return nil
+}
+
+func (e *EncodeOtlpMetrics) ProcessHist(m interface{}, labels map[string]string, value float64) error {
+ histo := m.(metric.Float64Histogram)
+ // set attributes using the labels
+ attributes := obtainAttributesFromLabels(labels)
+ histo.Record(e.ctx, value, metric.WithAttributes(attributes...))
+ return nil
+}
+
+func (e *EncodeOtlpMetrics) ProcessAggHist(m interface{}, labels map[string]string, values []float64) error {
+ histo := m.(metric.Float64Histogram)
+ // set attributes using the labels
+ attributes := obtainAttributesFromLabels(labels)
+ for _, v := range values {
+ histo.Record(e.ctx, v, metric.WithAttributes(attributes...))
+ }
+ return nil
+}
+
+func (e *EncodeOtlpMetrics) GetChacheEntry(entryLabels map[string]string, _ interface{}) interface{} {
+ return entryLabels
+}
+
+func NewEncodeOtlpMetrics(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+ log.Tracef("entering NewEncodeOtlpMetrics \n")
+ cfg := api.EncodeOtlpMetrics{}
+ if params.Encode != nil && params.Encode.OtlpMetrics != nil {
+ cfg = *params.Encode.OtlpMetrics
+ }
+ log.Debugf("NewEncodeOtlpMetrics cfg = %v \n", cfg)
+
+ ctx := context.Background()
+ res := newResource()
+
+ mp, err := NewOtlpMetricsProvider(ctx, params, res)
+ if err != nil {
+ return nil, err
+ }
+ meter := mp.Meter(
+ flpMeterName,
+ )
+
+ expiryTime := cfg.ExpiryTime
+ if expiryTime.Duration == 0 {
+ expiryTime.Duration = defaultExpiryTime
+ }
+
+ meterFactory := otel.Meter(flpMeterName)
+
+ w := &EncodeOtlpMetrics{
+ cfg: cfg,
+ ctx: ctx,
+ res: res,
+ mp: mp,
+ meter: meterFactory,
+ }
+
+ metricCommon := encode.NewMetricsCommonStruct(opMetrics, 0, params.Name, expiryTime, nil)
+ w.metricCommon = metricCommon
+
+ for i := range cfg.Metrics {
+ mCfg := &cfg.Metrics[i]
+ fullMetricName := cfg.Prefix + mCfg.Name
+ log.Debugf("fullMetricName = %v", fullMetricName)
+ log.Debugf("Labels = %v", mCfg.Labels)
+ mInfo := metrics.Preprocess(mCfg)
+ switch mCfg.Type {
+ case api.MetricCounter:
+ counter, err := meter.Float64Counter(fullMetricName)
+ if err != nil {
+ log.Errorf("error during counter creation: %v", err)
+ return nil, err
+ }
+ metricCommon.AddCounter(fullMetricName, counter, mInfo)
+ case api.MetricGauge:
+ // at implementation time, only asynchronous gauges are supported by otel in golang
+ obs := Float64Gauge{observations: make(map[string]Float64GaugeEntry)}
+ gauge, err := meterFactory.Float64ObservableGauge(
+ fullMetricName,
+ metric.WithFloat64Callback(obs.Callback),
+ )
+ if err != nil {
+ log.Errorf("error during gauge creation: %v", err)
+ return nil, err
+ }
+ metricCommon.AddGauge(fullMetricName, gauge, mInfo)
+ case api.MetricHistogram:
+ var histo metric.Float64Histogram
+ if len(mCfg.Buckets) == 0 {
+ histo, err = meter.Float64Histogram(fullMetricName)
+ } else {
+ histo, err = meter.Float64Histogram(fullMetricName,
+ metric.WithExplicitBucketBoundaries(mCfg.Buckets...),
+ )
+
+ }
+ if err != nil {
+ log.Errorf("error during histogram creation: %v", err)
+ return nil, err
+ }
+ metricCommon.AddHist(fullMetricName, histo, mInfo)
+ case api.MetricAggHistogram:
+ fallthrough
+ default:
+ log.Errorf("invalid metric type = %v, skipping", mCfg.Type)
+ continue
+ }
+ }
+
+ return w, nil
+}
+
+// At present, the Go OpenTelemetry SDK only supports asynchronous (observable) gauges, so the helper functions below emulate a synchronous gauge on top of one
+
+type Float64GaugeEntry struct {
+ attributes []attribute.KeyValue
+ value float64
+}
+
+type Float64Gauge struct {
+ observations map[string]Float64GaugeEntry
+}
+
+// Callback implements the callback function for the underlying asynchronous gauge
+// it observes the current state of all previous Set() calls.
+func (f *Float64Gauge) Callback(_ context.Context, o metric.Float64Observer) error {
+ for _, fEntry := range f.observations {
+ o.Observe(fEntry.value, metric.WithAttributes(fEntry.attributes...))
+ }
+ // re-initialize the observed items
+ f.observations = make(map[string]Float64GaugeEntry)
+ return nil
+}
+
+func (f *Float64Gauge) Set(key string, val float64, attrs []attribute.KeyValue) {
+ f.observations[key] = Float64GaugeEntry{
+ value: val,
+ attributes: attrs,
+ }
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go
new file mode 100644
index 000000000..cd5e62c19
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package opentelemetry
+
+import (
+ "context"
+ "strings"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode"
+ log "github.com/sirupsen/logrus"
+ "go.opentelemetry.io/otel/sdk/resource"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+const (
+ flpTracerName = "flp_tracer"
+ flpEncodeSpanName = "flp_encode"
+)
+
+type EncodeOtlpTrace struct {
+ cfg api.EncodeOtlpTraces
+ ctx context.Context
+ res *resource.Resource
+ tp *sdktrace.TracerProvider
+}
+
+// Encode encodes a flow entry as an OTLP trace (span) to be exported
+func (e *EncodeOtlpTrace) Encode(entry config.GenericMap) {
+ log.Tracef("entering EncodeOtlpTrace. entry = %v", entry)
+ tr := e.tp.Tracer(flpTracerName)
+ ll := len(e.cfg.SpanSplitter)
+
+ // create parent span
+ newCtx, span0 := tr.Start(e.ctx, flpEncodeSpanName)
+ attributes := obtainAttributesFromEntry(entry)
+ span0.SetAttributes(*attributes...)
+ defer span0.End()
+ if ll == 0 {
+ return
+ }
+ // for each item in SpanSplitter, make a separate entry for each listed item
+ // do not include fields that belong exclusively to other items
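+	// For example (hypothetical field names), with SpanSplitter ["Src", "Dst"], a field "SrcAddr" is copied only into
+	// the "Src" record under the trimmed key "Addr", while a field matching no prefix is copied into every record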
+ ss := e.cfg.SpanSplitter
+ records := make([]config.GenericMap, ll)
+ keepItem := make([]bool, ll)
+ for i := 0; i < ll; i++ {
+ records[i] = make(config.GenericMap)
+ }
+OUTER:
+ for key, value := range entry {
+ for i := 0; i < ll; i++ {
+ if strings.HasPrefix(key, ss[i]) {
+ trimmed := strings.TrimPrefix(key, ss[i])
+ records[i][trimmed] = value
+ keepItem[i] = true
+ continue OUTER
+ }
+ }
+ // if we reach here, the field did not have any of the prefixes.
+ // copy it into each of the records
+ for i := 0; i < ll; i++ {
+ records[i][key] = value
+ }
+ }
+ // only create child spans for records that have a field directly related to their item
+ for i := 0; i < ll; i++ {
+ if keepItem[i] {
+ _, span := tr.Start(newCtx, ss[i])
+ attributes := obtainAttributesFromEntry(records[i])
+ span.SetAttributes(*attributes...)
+ span.End()
+ }
+ }
+}
+
+func (e *EncodeOtlpTrace) Update(_ config.StageParam) {
+ log.Warn("EncodeOtlpTrace, update not supported")
+}
+
+func NewEncodeOtlpTraces(_ *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+ log.Tracef("entering NewEncodeOtlpTraces \n")
+ cfg := api.EncodeOtlpTraces{}
+ if params.Encode != nil && params.Encode.OtlpTraces != nil {
+ cfg = *params.Encode.OtlpTraces
+ }
+ log.Debugf("NewEncodeOtlpTraces cfg = %v \n", cfg)
+
+ ctx := context.Background()
+ res := newResource()
+
+ tp, err := NewOtlpTracerProvider(ctx, params, res)
+ if err != nil {
+ return nil, err
+ }
+
+ w := &EncodeOtlpTrace{
+ cfg: cfg,
+ ctx: ctx,
+ res: res,
+ tp: tp,
+ }
+ return w, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go
new file mode 100644
index 000000000..2aebf1463
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package opentelemetry
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ otel2 "github.com/agoda-com/opentelemetry-logs-go"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc"
+ "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
+ "github.com/agoda-com/opentelemetry-logs-go/logs"
+ sdklog2 "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ "go.opentelemetry.io/otel"
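+	// copy the aggregated fields, omitting nil values and zero-valued float64 aggregates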
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+ sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/resource"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ "google.golang.org/grpc/credentials"
+)
+
+// Note:
+// As of the writing of this module, go.opentelemetry.io does not provide interfaces for logs.
+// We therefore temporarily use agoda-com/opentelemetry-logs-go for logs.
+// When go.opentelemetry.io provides interfaces for logs, the code here should be updated to use those interfaces.
+
+const (
+ flpOtlpLoggerName = "flp-otlp-logger"
+ defaultTimeInterval = time.Duration(20 * time.Second)
+ flpOtlpResourceVersion = "v0.1.0"
+ flpOtlpResourceName = "netobserv-otlp"
+ grpcType = "grpc"
+ httpType = "http"
+)
+
+func NewOtlpTracerProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdktrace.TracerProvider, error) {
+ cfg := api.EncodeOtlpTraces{}
+ if params.Encode != nil && params.Encode.OtlpTraces != nil {
+ cfg = *params.Encode.OtlpTraces
+ }
+ if cfg.OtlpConnectionInfo == nil {
+ return nil, fmt.Errorf("otlptraces missing connection info")
+ }
+ addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port)
+ var err error
+ var traceProvider *sdktrace.TracerProvider
+ var traceExporter *otlptrace.Exporter
+ if cfg.ConnectionType == grpcType {
+ var expOption otlptracegrpc.Option
+ var tlsOption otlptracegrpc.Option
+ tlsOption = otlptracegrpc.WithInsecure()
+ if cfg.TLS != nil {
+ tlsConfig, err := cfg.OtlpConnectionInfo.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ tlsOption = otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))
+ }
+ expOption = otlptracegrpc.WithEndpoint(addr)
+ traceExporter, err = otlptracegrpc.New(ctx,
+ expOption,
+ tlsOption,
+ otlptracegrpc.WithHeaders(cfg.Headers))
+ if err != nil {
+ return nil, err
+ }
+ } else if cfg.ConnectionType == httpType {
+ var expOption otlptracehttp.Option
+ var tlsOption otlptracehttp.Option
+ tlsOption = otlptracehttp.WithInsecure()
+ if cfg.TLS != nil {
+ tlsConfig, err := cfg.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ tlsOption = otlptracehttp.WithTLSClientConfig(tlsConfig)
+ }
+ expOption = otlptracehttp.WithEndpoint(addr)
+ traceExporter, err = otlptracehttp.New(ctx,
+ expOption,
+ tlsOption,
+ otlptracehttp.WithHeaders(cfg.Headers))
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, fmt.Errorf("must specify grpcaddress or httpaddress")
+ }
+ traceProvider = sdktrace.NewTracerProvider(
+ sdktrace.WithResource(res),
+ sdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(traceExporter)),
+ )
+
+ otel.SetTracerProvider(traceProvider)
+ return traceProvider, nil
+}
+
+func NewOtlpMetricsProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdkmetric.MeterProvider, error) {
+ cfg := api.EncodeOtlpMetrics{}
+ if params.Encode != nil && params.Encode.OtlpMetrics != nil {
+ cfg = *params.Encode.OtlpMetrics
+ }
+ timeInterval := cfg.PushTimeInterval
+ if timeInterval.Duration == 0 {
+ timeInterval.Duration = defaultTimeInterval
+ }
+ if cfg.OtlpConnectionInfo == nil {
+ return nil, fmt.Errorf("otlpmetrics missing connection info")
+ }
+ addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port)
+ var err error
+ var meterProvider *sdkmetric.MeterProvider
+ if cfg.ConnectionType == grpcType {
+ var metricExporter *otlpmetricgrpc.Exporter
+ var expOption otlpmetricgrpc.Option
+ var tlsOption otlpmetricgrpc.Option
+ tlsOption = otlpmetricgrpc.WithInsecure()
+ if cfg.TLS != nil {
+ tlsConfig, err := cfg.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ tlsOption = otlpmetricgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))
+ }
+ expOption = otlpmetricgrpc.WithEndpoint(addr)
+ metricExporter, err = otlpmetricgrpc.New(ctx, expOption, tlsOption)
+ if err != nil {
+ return nil, err
+ }
+ meterProvider = sdkmetric.NewMeterProvider(
+ sdkmetric.WithResource(res),
+ sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter,
+ sdkmetric.WithInterval(timeInterval.Duration))),
+ )
+ } else if cfg.ConnectionType == httpType {
+ var metricExporter *otlpmetrichttp.Exporter
+ var expOption otlpmetrichttp.Option
+ var tlsOption otlpmetrichttp.Option
+ tlsOption = otlpmetrichttp.WithInsecure()
+ if cfg.TLS != nil {
+ tlsConfig, err := cfg.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ tlsOption = otlpmetrichttp.WithTLSClientConfig(tlsConfig)
+ }
+ expOption = otlpmetrichttp.WithEndpoint(addr)
+ metricExporter, err = otlpmetrichttp.New(ctx, expOption, tlsOption)
+ if err != nil {
+ return nil, err
+ }
+ meterProvider = sdkmetric.NewMeterProvider(
+ sdkmetric.WithResource(res),
+ sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter,
+ sdkmetric.WithInterval(timeInterval.Duration))),
+ )
+ } else {
+ return nil, fmt.Errorf("must specify grpcaddress or httpaddress")
+ }
+
+ otel.SetMeterProvider(meterProvider)
+ return meterProvider, nil
+}
+
+func NewOtlpLoggerProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdklog2.LoggerProvider, error) {
+ cfg := api.EncodeOtlpLogs{}
+ if params.Encode != nil && params.Encode.OtlpLogs != nil {
+ cfg = *params.Encode.OtlpLogs
+ }
+ if cfg.OtlpConnectionInfo == nil {
+ return nil, fmt.Errorf("otlplogs missing connection info")
+ }
+ addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port)
+ var expOption otlplogs.ExporterOption
+ if cfg.ConnectionType == grpcType {
+ var tlsOption otlplogsgrpc.Option
+ tlsOption = otlplogsgrpc.WithInsecure()
+ if params.Encode.OtlpLogs.TLS != nil {
+ tlsConfig, err := cfg.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ tlsOption = otlplogsgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))
+ }
+ expOption = otlplogs.WithClient(otlplogsgrpc.NewClient(
+ otlplogsgrpc.WithEndpoint(addr),
+ tlsOption,
+ otlplogsgrpc.WithHeaders(cfg.Headers),
+ ))
+ } else if cfg.ConnectionType == httpType {
+ var tlsOption otlplogshttp.Option
+ tlsOption = otlplogshttp.WithInsecure()
+ if params.Encode.OtlpLogs.TLS != nil {
+ tlsConfig, err := cfg.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ tlsOption = otlplogshttp.WithTLSClientConfig(tlsConfig)
+ }
+ expOption = otlplogs.WithClient(otlplogshttp.NewClient(
+ otlplogshttp.WithEndpoint(addr),
+ tlsOption,
+ otlplogshttp.WithHeaders(cfg.Headers),
+ ))
+ } else {
+ return nil, fmt.Errorf("must specify grpcaddress or httpaddress")
+ }
+ logExporter, err := otlplogs.NewExporter(ctx, expOption)
+ if err != nil {
+ return nil, err
+ }
+
+ loggerProvider := sdklog2.NewLoggerProvider(
+ sdklog2.WithBatcher(logExporter),
+ sdklog2.WithResource(res),
+ )
+ otel2.SetLoggerProvider(loggerProvider)
+ return loggerProvider, nil
+}
+
+func (e *EncodeOtlpLogs) LogWrite(entry config.GenericMap) {
+ now := time.Now()
+ sn := logs.INFO
+ st := "INFO"
+ msgByteArray, _ := json.Marshal(entry)
+ msg := string(msgByteArray)
+ // TODO: Decide whether the content should be delivered as Body or as Attributes
+ lrc := logs.LogRecordConfig{
+ // Timestamp: &now, // take timestamp from entry, if present?
+ ObservedTimestamp: now,
+ SeverityNumber: &sn,
+ SeverityText: &st,
+ Resource: e.res,
+ Body: &msg,
+ Attributes: obtainAttributesFromEntry(entry),
+ }
+ logRecord := logs.NewLogRecord(lrc)
+
+ logger := otel2.GetLoggerProvider().Logger(
+ flpOtlpLoggerName,
+ logs.WithSchemaURL(semconv.SchemaURL),
+ )
+ logger.Emit(logRecord)
+}
+
+func obtainAttributesFromEntry(entry config.GenericMap) *[]attribute.KeyValue {
+ // convert the entry fields to Attributes of the message
+ var att = make([]attribute.KeyValue, len(entry))
+ index := 0
+ for k, v := range entry {
+ switch v := v.(type) {
+ case []string:
+ att[index] = attribute.StringSlice(k, v)
+ case string:
+ att[index] = attribute.String(k, v)
+ case []int:
+ att[index] = attribute.IntSlice(k, v)
+ case []int32:
+ valInt64Slice := []int64{}
+ for _, valInt32 := range v {
+ valInt64, _ := utils.ConvertToInt64(valInt32)
+ valInt64Slice = append(valInt64Slice, valInt64)
+ }
+ att[index] = attribute.Int64Slice(k, valInt64Slice)
+ case []int64:
+ att[index] = attribute.Int64Slice(k, v)
+ case int:
+ att[index] = attribute.Int(k, v)
+ case int32, int64, int16, uint, uint8, uint16, uint32, uint64:
+ valInt, _ := utils.ConvertToInt64(v)
+ att[index] = attribute.Int64(k, valInt)
+ case []float32:
+ valFloat64Slice := []float64{}
+ for _, valFloat32 := range v {
+ valFloat64, _ := utils.ConvertToFloat64(valFloat32)
+ valFloat64Slice = append(valFloat64Slice, valFloat64)
+ }
+ att[index] = attribute.Float64Slice(k, valFloat64Slice)
+ case []float64:
+ att[index] = attribute.Float64Slice(k, v)
+ case float32:
+ valFloat, _ := utils.ConvertToFloat64(v)
+ att[index] = attribute.Float64(k, valFloat)
+ case float64:
+ att[index] = attribute.Float64(k, v)
+ case []bool:
+ att[index] = attribute.BoolSlice(k, v)
+ case bool:
+ att[index] = attribute.Bool(k, v)
+ case nil:
+ // skip this field
+ continue
+ }
+ index++
+ }
+	adjustedAtt := att[0:index]
+	return &adjustedAtt
+}
+
+func obtainAttributesFromLabels(labels map[string]string) []attribute.KeyValue {
+	// convert the label key/value pairs to Attributes
+ var att = make([]attribute.KeyValue, len(labels))
+ index := 0
+ for k, v := range labels {
+ att[index] = attribute.String(k, v)
+ index++
+ }
+ return att
+}
+
+func (e *EncodeOtlpMetrics) MetricWrite(_ config.GenericMap) {
+ // nothing more to do at present
+}
+
+// newResource returns a resource describing this application.
+func newResource() *resource.Resource {
+ r, _ := resource.Merge(
+ resource.Default(),
+ resource.NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ServiceName(flpOtlpResourceName),
+ semconv.ServiceVersion(flpOtlpResourceVersion),
+ ),
+ )
+ return r
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go
new file mode 100644
index 000000000..22aa4292e
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package aggregate
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ util "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+const (
+ OperationSum = "sum"
+ OperationAvg = "avg"
+ OperationMax = "max"
+ OperationMin = "min"
+ OperationCount = "count"
+ OperationRawValues = "raw_values"
+)
+
+type Labels map[string]string
+type NormalizedValues string
+
+type Aggregate struct {
+ definition *api.AggregateDefinition
+ cache *utils.TimedCache
+ mutex *sync.Mutex
+ expiryTime time.Duration
+}
+
+type GroupState struct {
+ normalizedValues NormalizedValues
+ labels Labels
+ recentRawValues []float64
+ recentOpValue float64
+ recentCount int
+ totalValue float64
+ totalCount int
+}
+
+func (aggregate *Aggregate) LabelsFromEntry(entry config.GenericMap) (Labels, bool) {
+ allLabelsFound := true
+ labels := Labels{}
+
+ for _, key := range aggregate.definition.GroupByKeys {
+ value, ok := entry[key]
+ if !ok {
+ allLabelsFound = false
+ }
+ labels[key] = util.ConvertToString(value)
+ }
+
+ return labels, allLabelsFound
+}
+
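+// getNormalizedValues concatenates the label values in sorted-key order into one comma-separated string,
+// for example (hypothetical labels) {"dstIP": "10.0.0.1", "srcIP": "10.0.0.2"} becomes "10.0.0.1,10.0.0.2".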
+func (labels Labels) getNormalizedValues() NormalizedValues {
+ var normalizedAsString string
+
+ keys := make([]string, 0, len(labels))
+ for k := range labels {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ normalizedAsString += labels[k] + ","
+ }
+
+ if len(normalizedAsString) > 0 {
+ normalizedAsString = normalizedAsString[:len(normalizedAsString)-1]
+ }
+
+ return NormalizedValues(normalizedAsString)
+}
+
+func (aggregate *Aggregate) filterEntry(entry config.GenericMap) (NormalizedValues, Labels, error) {
+ labels, allLabelsFound := aggregate.LabelsFromEntry(entry)
+ if !allLabelsFound {
+ return "", nil, fmt.Errorf("missing keys in entry")
+ }
+
+ normalizedValues := labels.getNormalizedValues()
+ return normalizedValues, labels, nil
+}
+
+func getInitValue(operation string) float64 {
+ switch operation {
+ case OperationSum, OperationAvg, OperationMax, OperationCount:
+ return 0
+ case OperationMin:
+ return math.MaxFloat64
+ case OperationRawValues:
+ // Actually, in OperationRawValues the value is ignored.
+ return 0
+ default:
+ log.Panicf("unknown operation %v", operation)
+ return 0
+ }
+}
+
+func (aggregate *Aggregate) UpdateByEntry(entry config.GenericMap, normalizedValues NormalizedValues, labels Labels) error {
+
+ aggregate.mutex.Lock()
+ defer aggregate.mutex.Unlock()
+
+ var groupState *GroupState
+ oldEntry, ok := aggregate.cache.GetCacheEntry(string(normalizedValues))
+ if !ok {
+ groupState = &GroupState{normalizedValues: normalizedValues, labels: labels}
+ initVal := getInitValue(string(aggregate.definition.OperationType))
+ groupState.totalValue = initVal
+ groupState.recentOpValue = initVal
+ if aggregate.definition.OperationType == OperationRawValues {
+ groupState.recentRawValues = make([]float64, 0)
+ }
+ } else {
+ groupState = oldEntry.(*GroupState)
+ }
+ aggregate.cache.UpdateCacheEntry(string(normalizedValues), groupState)
+
+ // update value
+ operationKey := aggregate.definition.OperationKey
+ operation := aggregate.definition.OperationType
+
+ if operation == OperationCount {
+ groupState.totalValue = float64(groupState.totalCount + 1)
+ groupState.recentOpValue = float64(groupState.recentCount + 1)
+ } else if operationKey != "" {
+ value, ok := entry[operationKey]
+ if ok {
+ valueString := util.ConvertToString(value)
+ if valueFloat64, err := strconv.ParseFloat(valueString, 64); err != nil {
+ // Log as debug to avoid performance impact
+ log.Debugf("UpdateByEntry error when parsing float '%s': %v", valueString, err)
+ } else {
+ switch operation {
+ case OperationSum:
+ groupState.totalValue += valueFloat64
+ groupState.recentOpValue += valueFloat64
+ case OperationMax:
+ groupState.totalValue = math.Max(groupState.totalValue, valueFloat64)
+ groupState.recentOpValue = math.Max(groupState.recentOpValue, valueFloat64)
+ case OperationMin:
+ groupState.totalValue = math.Min(groupState.totalValue, valueFloat64)
+ groupState.recentOpValue = math.Min(groupState.recentOpValue, valueFloat64)
+ case OperationAvg:
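+					// incremental mean: newAvg = (oldAvg*count + value) / (count+1), applied to both the total and recent windows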
+ groupState.totalValue = (groupState.totalValue*float64(groupState.totalCount) + valueFloat64) / float64(groupState.totalCount+1)
+ groupState.recentOpValue = (groupState.recentOpValue*float64(groupState.recentCount) + valueFloat64) / float64(groupState.recentCount+1)
+ case OperationRawValues:
+ groupState.recentRawValues = append(groupState.recentRawValues, valueFloat64)
+ }
+ }
+ }
+ }
+
+ // update count
+ groupState.totalCount++
+ groupState.recentCount++
+
+ return nil
+}
+
+func (aggregate *Aggregate) Evaluate(entries []config.GenericMap) error {
+ for _, entry := range entries {
+		// extract the group-by labels for this entry; entries missing any of the group-by keys are skipped
+ normalizedValues, labels, err := aggregate.filterEntry(entry)
+ if err != nil {
+ continue
+ }
+
+ // update aggregate group by entry
+ err = aggregate.UpdateByEntry(entry, normalizedValues, labels)
+ if err != nil {
+ log.Debugf("UpdateByEntry error %v", err)
+ continue
+ }
+ }
+
+ return nil
+}
+
+func (aggregate *Aggregate) GetMetrics() []config.GenericMap {
+ aggregate.mutex.Lock()
+ defer aggregate.mutex.Unlock()
+
+ var metrics []config.GenericMap
+
+ // iterate over the items in the cache
+ aggregate.cache.Iterate(func(_ string, value interface{}) {
+ group := value.(*GroupState)
+ newEntry := config.GenericMap{
+ "name": aggregate.definition.Name,
+ "operation_type": aggregate.definition.OperationType,
+ "operation_key": aggregate.definition.OperationKey,
+ "by": strings.Join(aggregate.definition.GroupByKeys, ","),
+ "aggregate": string(group.normalizedValues),
+ "total_value": group.totalValue,
+ "total_count": group.totalCount,
+ "recent_raw_values": group.recentRawValues,
+ "recent_op_value": group.recentOpValue,
+ "recent_count": group.recentCount,
+ strings.Join(aggregate.definition.GroupByKeys, "_"): string(group.normalizedValues),
+ }
+ // add the items in aggregate.definition.GroupByKeys individually to the entry
+ for _, key := range aggregate.definition.GroupByKeys {
+ newEntry[key] = group.labels[key]
+ }
+ metrics = append(metrics, newEntry)
+ // Once reported, we reset the recentXXX fields
+ if aggregate.definition.OperationType == OperationRawValues {
+ group.recentRawValues = make([]float64, 0)
+ }
+ group.recentCount = 0
+ group.recentOpValue = getInitValue(string(aggregate.definition.OperationType))
+ })
+
+ return metrics
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go
new file mode 100644
index 000000000..848e9bf26
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package aggregate
+
+import (
+ "sync"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+var defaultExpiryTime = 2 * time.Minute
+var cleanupLoopTime = 2 * time.Minute
+
+type Aggregates struct {
+ Aggregates []Aggregate
+ cleanupLoopTime time.Duration
+ defaultExpiryTime time.Duration
+}
+
+func (aggregates *Aggregates) Evaluate(entries []config.GenericMap) error {
+ for _, aggregate := range aggregates.Aggregates {
+ err := aggregate.Evaluate(entries)
+ if err != nil {
+ log.Debugf("Evaluate error %v", err)
+ continue
+ }
+ }
+
+ return nil
+}
+
+func (aggregates *Aggregates) GetMetrics() []config.GenericMap {
+ var metrics []config.GenericMap
+ for _, aggregate := range aggregates.Aggregates {
+ aggregateMetrics := aggregate.GetMetrics()
+ metrics = append(metrics, aggregateMetrics...)
+ }
+
+ return metrics
+}
+
+func (aggregates *Aggregates) addAggregate(aggregateDefinition *api.AggregateDefinition) []Aggregate {
+ expiryTime := aggregateDefinition.ExpiryTime
+ if expiryTime.Duration == 0 {
+		expiryTime.Duration = aggregates.defaultExpiryTime
+ }
+ aggregate := Aggregate{
+ definition: aggregateDefinition,
+ cache: utils.NewTimedCache(0, nil),
+ mutex: &sync.Mutex{},
+ expiryTime: expiryTime.Duration,
+ }
+
+ return append(aggregates.Aggregates, aggregate)
+}
+
+func (aggregates *Aggregates) cleanupExpiredEntriesLoop() {
+
+ ticker := time.NewTicker(aggregates.cleanupLoopTime)
+ go func() {
+ for {
+ select {
+ case <-utils.ExitChannel():
+ return
+ case <-ticker.C:
+ aggregates.cleanupExpiredEntries()
+ }
+ }
+ }()
+}
+
+func (aggregates *Aggregates) cleanupExpiredEntries() {
+ for _, aggregate := range aggregates.Aggregates {
+ aggregate.mutex.Lock()
+ aggregate.cache.CleanupExpiredEntries(aggregate.expiryTime, func(_ interface{}) {})
+ aggregate.mutex.Unlock()
+ }
+}
+
+func NewAggregatesFromConfig(aggConfig *api.Aggregates) (Aggregates, error) {
+ aggregates := Aggregates{
+ cleanupLoopTime: cleanupLoopTime,
+ defaultExpiryTime: aggConfig.DefaultExpiryTime.Duration,
+ }
+ if aggregates.defaultExpiryTime == 0 {
+ aggregates.defaultExpiryTime = defaultExpiryTime
+ }
+
+ for i := range aggConfig.Rules {
+ aggregates.Aggregates = aggregates.addAggregate(&aggConfig.Rules[i])
+ }
+
+ aggregates.cleanupExpiredEntriesLoop()
+
+ return aggregates, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go
new file mode 100644
index 000000000..cf5b1de01
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+	// connections for which an end was detected from the TCP FIN flag. These no longer trigger updates until popped;
+	// see the expireConnection func
+
+package conntrack
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+// aggregator represents a single aggregate field in a connection. The aggregated values are stored in the connection
+// but managed by the aggregator.
+type aggregator interface {
+ // addField adds an aggregate field to the connection
+ addField(conn connection)
+ // update updates the aggregate field in the connection based on the flow log.
+ update(conn connection, flowLog config.GenericMap, d direction, isFirst bool)
+}
+
+type aggregateBase struct {
+ inputField string
+ outputField string
+ splitAB bool
+ initVal interface{}
+ metrics *metricsType
+ reportMissing bool
+}
+
+type aSum struct{ aggregateBase }
+type aCount struct{ aggregateBase }
+type aMin struct{ aggregateBase }
+type aMax struct{ aggregateBase }
+type aFirst struct{ aggregateBase }
+type aLast struct{ aggregateBase }
+
+// TODO: think of adding a more complex operation such as Average Packet Size which involves 2 input fields: Bytes/Packets
+
+// newAggregator returns a new aggregator depending on the output field operation
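+// For example, an output field such as OutputField{Name: "Bytes", Operation: api.ConnTrackSum, SplitAB: true}
+// (hypothetical values) yields an aSum aggregator that maintains the "Bytes_AB" and "Bytes_BA" fields per connection.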
+func newAggregator(of api.OutputField, metrics *metricsType) (aggregator, error) {
+ if of.Name == "" {
+ return nil, fmt.Errorf("empty name %v", of)
+ }
+ var inputField string
+ if of.Input != "" {
+ inputField = of.Input
+ } else {
+ inputField = of.Name
+ }
+ aggBase := aggregateBase{inputField: inputField, outputField: of.Name, splitAB: of.SplitAB, metrics: metrics, reportMissing: of.ReportMissing}
+ var agg aggregator
+ switch of.Operation {
+ case api.ConnTrackSum:
+ aggBase.initVal = float64(0)
+ agg = &aSum{aggBase}
+ case api.ConnTrackCount:
+ aggBase.initVal = float64(0)
+ agg = &aCount{aggBase}
+ case api.ConnTrackMin:
+ aggBase.initVal = math.MaxFloat64
+ agg = &aMin{aggBase}
+ case api.ConnTrackMax:
+ aggBase.initVal = -math.MaxFloat64
+ agg = &aMax{aggBase}
+ case api.ConnTrackFirst:
+ aggBase.initVal = nil
+ agg = &aFirst{aggBase}
+ case api.ConnTrackLast:
+ aggBase.initVal = nil
+ agg = &aLast{aggBase}
+ default:
+ return nil, fmt.Errorf("unknown operation: %q", of.Operation)
+ }
+ return agg, nil
+}
+
+func (agg *aggregateBase) getOutputField(d direction) string {
+ outputField := agg.outputField
+ if agg.splitAB {
+ switch d {
+ case dirAB:
+ outputField += "_AB"
+ case dirBA:
+ outputField += "_BA"
+ case dirNA:
+ fallthrough
+ default:
+ log.Panicf("splitAB aggregator %v cannot determine outputField because direction is missing. Check configuration.", outputField)
+ }
+ }
+ return outputField
+}
+
+func (agg *aggregateBase) getInputFieldValue(flowLog config.GenericMap) (float64, error) {
+ rawValue, ok := flowLog[agg.inputField]
+ if !ok {
+		// report an error only when explicitly requested, since FLP skips empty fields by default to reduce storage size
+ if agg.reportMissing {
+ if agg.metrics != nil {
+ agg.metrics.aggregatorErrors.WithLabelValues("MissingFieldError", agg.inputField).Inc()
+ }
+ return 0, fmt.Errorf("missing field %v", agg.inputField)
+ }
+ // fallback on 0 without error
+ return 0, nil
+ }
+ floatValue, err := utils.ConvertToFloat64(rawValue)
+ if err != nil {
+ if agg.metrics != nil {
+ agg.metrics.aggregatorErrors.WithLabelValues("Float64ConversionError", agg.inputField).Inc()
+ }
+ return 0, fmt.Errorf("cannot convert %q to float64: %w", rawValue, err)
+ }
+ return floatValue, nil
+}
+
+func (agg *aggregateBase) addField(conn connection) {
+ if agg.splitAB {
+ conn.addAgg(agg.getOutputField(dirAB), agg.initVal)
+ conn.addAgg(agg.getOutputField(dirBA), agg.initVal)
+ } else {
+ conn.addAgg(agg.getOutputField(dirNA), agg.initVal)
+ }
+}
+
+func (agg *aSum) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
+ outputField := agg.getOutputField(d)
+ v, err := agg.getInputFieldValue(flowLog)
+ if err != nil {
+ log.Errorf("error updating connection %x: %v", conn.getHash().hashTotal, err)
+ return
+ }
+ conn.updateAggFnValue(outputField, func(curr float64) float64 {
+ return curr + v
+ })
+}
+
+func (agg *aCount) update(conn connection, _ config.GenericMap, d direction, _ bool) {
+ outputField := agg.getOutputField(d)
+ conn.updateAggFnValue(outputField, func(curr float64) float64 {
+ return curr + 1
+ })
+}
+
+func (agg *aMin) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
+ outputField := agg.getOutputField(d)
+ v, err := agg.getInputFieldValue(flowLog)
+ if err != nil {
+ log.Errorf("error updating connection %x: %v", conn.getHash().hashTotal, err)
+ return
+ }
+
+ conn.updateAggFnValue(outputField, func(curr float64) float64 {
+ return math.Min(curr, v)
+ })
+}
+
+func (agg *aMax) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
+ outputField := agg.getOutputField(d)
+ v, err := agg.getInputFieldValue(flowLog)
+ if err != nil {
+ log.Errorf("error updating connection %x: %v", conn.getHash().hashTotal, err)
+ return
+ }
+
+ conn.updateAggFnValue(outputField, func(curr float64) float64 {
+ return math.Max(curr, v)
+ })
+}
+
+func (cp *aFirst) update(conn connection, flowLog config.GenericMap, _ direction, isNew bool) {
+ if isNew {
+ conn.updateAggValue(cp.outputField, flowLog[cp.inputField])
+ }
+}
+
+func (cp *aLast) update(conn connection, flowLog config.GenericMap, _ direction, _ bool) {
+ if flowLog[cp.inputField] != nil {
+ conn.updateAggValue(cp.outputField, flowLog[cp.inputField])
+ }
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go
new file mode 100644
index 000000000..e67c5125b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+)
+
+type connection interface {
+ addAgg(fieldName string, initValue interface{})
+ updateAggValue(fieldName string, newValue interface{})
+ updateAggFnValue(fieldName string, newValueFn func(curr float64) float64)
+ setExpiryTime(t time.Time)
+ getExpiryTime() time.Time
+ setNextHeartbeatTime(t time.Time)
+ getNextHeartbeatTime() time.Time
+ toGenericMap() config.GenericMap
+ getHash() totalHashType
+	// markReported marks the connection as having been reported. That is, at least one connection record has been emitted
+ // for this connection (i.e. newConnection, heartbeat, endConnection).
+ // It returns true on the first invocation to indicate the first report. Otherwise, it returns false.
+ markReported() bool
+ isMatchSelector(map[string]interface{}) bool
+}
+
+type connType struct {
+ hash totalHashType
+ keys config.GenericMap
+ aggFields map[string]interface{}
+ expiryTime time.Time
+ nextHeartbeatTime time.Time
+ isReported bool
+}
+
+func (c *connType) addAgg(fieldName string, initValue interface{}) {
+ c.aggFields[fieldName] = initValue
+}
+
+func (c *connType) updateAggValue(fieldName string, newValue interface{}) {
+ _, ok := c.aggFields[fieldName]
+ if !ok {
+ log.Panicf("tried updating missing field %v", fieldName)
+ }
+ c.aggFields[fieldName] = newValue
+}
+
+func (c *connType) updateAggFnValue(fieldName string, newValueFn func(curr float64) float64) {
+ v, ok := c.aggFields[fieldName]
+ if !ok {
+ log.Panicf("tried updating missing field %v", fieldName)
+ }
+
+ // existing value must be float64 for function aggregation
+ switch value := v.(type) {
+ case float64:
+ c.aggFields[fieldName] = newValueFn(value)
+ default:
+ log.Panicf("tried to aggregate non float64 field %v value %v", fieldName, v)
+ }
+}
+
+func (c *connType) setExpiryTime(t time.Time) {
+ c.expiryTime = t
+}
+
+func (c *connType) getExpiryTime() time.Time {
+ return c.expiryTime
+}
+
+func (c *connType) setNextHeartbeatTime(t time.Time) {
+ c.nextHeartbeatTime = t
+}
+
+func (c *connType) getNextHeartbeatTime() time.Time {
+ return c.nextHeartbeatTime
+}
+
+func (c *connType) toGenericMap() config.GenericMap {
+ gm := config.GenericMap{}
+ for k, v := range c.aggFields {
+ if v != nil && (reflect.TypeOf(v).Kind() != reflect.Float64 || v.(float64) != 0) {
+ gm[k] = v
+ }
+ }
+
+ // In case of a conflict between the keys and the aggFields / cpFields, the keys should prevail.
+ for k, v := range c.keys {
+ gm[k] = v
+ }
+ return gm
+}
+
+func (c *connType) getHash() totalHashType {
+ return c.hash
+}
+
+func (c *connType) markReported() bool {
+ isFirst := !c.isReported
+ c.isReported = true
+ return isFirst
+}
+
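+// isMatchSelector reports whether every key/value pair of the scheduling-group selector matches the connection keys,
+// converting each selector value to the type of the corresponding connection key before comparing.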
+//nolint:cyclop
+func (c *connType) isMatchSelector(selector map[string]interface{}) bool {
+ for k, v := range selector {
+ connValueRaw, found := c.keys[k]
+ if !found {
+ return false
+ }
+ switch connValue := connValueRaw.(type) {
+ case int:
+ selectorValue, err := utils.ConvertToInt(v)
+ if err != nil || connValue != selectorValue {
+ return false
+ }
+ case uint32:
+ selectorValue, err := utils.ConvertToUint32(v)
+ if err != nil || connValue != selectorValue {
+ return false
+ }
+ case uint64:
+ selectorValue, err := utils.ConvertToUint64(v)
+ if err != nil || connValue != selectorValue {
+ return false
+ }
+ case int64:
+ selectorValue, err := utils.ConvertToInt64(v)
+ if err != nil || connValue != selectorValue {
+ return false
+ }
+ case float64:
+ selectorValue, err := utils.ConvertToFloat64(v)
+ if err != nil || connValue != selectorValue {
+ return false
+ }
+ case bool:
+ selectorValue, err := utils.ConvertToBool(v)
+ if err != nil || connValue != selectorValue {
+ return false
+ }
+ case string:
+ selectorValue := utils.ConvertToString(v)
+ if connValue != selectorValue {
+ return false
+ }
+ default:
+ connValue = utils.ConvertToString(connValue)
+ selectorValue := fmt.Sprintf("%v", v)
+ if connValue != selectorValue {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+type connBuilder struct {
+ conn *connType
+ shouldSwapAB bool
+ metrics *metricsType
+}
+
+func newConnBuilder(metrics *metricsType) *connBuilder {
+ return &connBuilder{
+ conn: &connType{
+ aggFields: make(map[string]interface{}),
+ keys: config.GenericMap{},
+ isReported: false,
+ },
+ metrics: metrics,
+ }
+}
+
+func (cb *connBuilder) Hash(h totalHashType) *connBuilder {
+ if cb.shouldSwapAB {
+ h.hashA, h.hashB = h.hashB, h.hashA
+ }
+ cb.conn.hash = h
+ return cb
+}
+
+func (cb *connBuilder) ShouldSwapAB(b bool) *connBuilder {
+ cb.shouldSwapAB = b
+ return cb
+}
+
+func (cb *connBuilder) keysFrom(flowLog config.GenericMap, kd *api.KeyDefinition, endpointAFields, endpointBFields []string) *connBuilder {
+ for _, fg := range kd.FieldGroups {
+ for _, f := range fg.Fields {
+ cb.conn.keys[f] = flowLog[f]
+ }
+ }
+ if cb.shouldSwapAB {
+ for i := range endpointAFields {
+ fieldA := endpointAFields[i]
+ fieldB := endpointBFields[i]
+ cb.conn.keys[fieldA] = flowLog[fieldB]
+ cb.conn.keys[fieldB] = flowLog[fieldA]
+ }
+ cb.metrics.tcpFlags.WithLabelValues("swapAB").Inc()
+ }
+ return cb
+}
+
+func (cb *connBuilder) Aggregators(aggs []aggregator) *connBuilder {
+ for _, agg := range aggs {
+ agg.addField(cb.conn)
+ }
+ return cb
+}
+
+func (cb *connBuilder) Build() connection {
+ return cb.conn
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go
new file mode 100644
index 000000000..47d67a493
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+import (
+ "fmt"
+ "hash"
+ "hash/fnv"
+ "strconv"
+
+ "github.com/benbjohnson/clock"
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+// direction indicates the direction of a flow log in a connection. It's used by aggregators to determine which split
+// of the aggregator should be updated, xxx_AB or xxx_BA.
+type direction uint8
+
+const (
+ dirNA direction = iota
+ dirAB
+ dirBA
+)
+
+type conntrackImpl struct {
+ clock clock.Clock
+ config *api.ConnTrack
+ endpointAFields, endpointBFields []string
+ hashProvider func() hash.Hash64
+ connStore *connectionStore
+ aggregators []aggregator
+ shouldOutputFlowLogs bool
+ shouldOutputNewConnection bool
+ shouldOutputEndConnection bool
+ shouldOutputHeartbeats bool
+ metrics *metricsType
+}
+
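+// filterFlowLog reports whether the flow log should be discarded, which is the case when its protocol is invalid or not a transport protocol.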
+func (ct *conntrackImpl) filterFlowLog(fl config.GenericMap) bool {
+ if !fl.IsValidProtocol() || !fl.IsTransportProtocol() {
+ return true
+ }
+ return false
+}
+
+func (ct *conntrackImpl) Extract(flowLogs []config.GenericMap) []config.GenericMap {
+ log.Debugf("entering Extract conntrack, in = %v", flowLogs)
+
+ var outputRecords []config.GenericMap
+ for _, fl := range flowLogs {
+ if ct.filterFlowLog(fl) {
+ ct.metrics.inputRecords.WithLabelValues("discarded").Inc()
+ continue
+ }
+ computedHash, err := computeHash(fl, &ct.config.KeyDefinition, ct.hashProvider(), ct.metrics)
+ if err != nil {
+ log.Warningf("skipping flow log %v: %v", fl, err)
+ ct.metrics.inputRecords.WithLabelValues("rejected").Inc()
+ continue
+ }
+
+ if fl.IsDuplicate() {
+ log.Debugf("skipping duplicated flow log %v", fl)
+ ct.metrics.inputRecords.WithLabelValues("duplicate").Inc()
+ } else {
+ conn, exists, _ := ct.connStore.getConnection(computedHash.hashTotal)
+ if !exists {
+ if (ct.config.MaxConnectionsTracked > 0) && (ct.connStore.len() >= ct.config.MaxConnectionsTracked) {
+					log.Warningf("too many connections; skipping flow log %v", fl)
+ ct.metrics.inputRecords.WithLabelValues("discarded").Inc()
+ } else {
+ builder := newConnBuilder(ct.metrics)
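+					// when TCPFlags.SwapAB is enabled and the first flow log carries SYN-ACK, swap endpoints A and B,
+					// presumably so that endpoint A consistently denotes the connection initiator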
+ conn = builder.
+ ShouldSwapAB(ct.config.TCPFlags.SwapAB && ct.containsTCPFlag(fl, SYNACKFlag)).
+ Hash(computedHash).
+ keysFrom(fl, &ct.config.KeyDefinition, ct.endpointAFields, ct.endpointBFields).
+ Aggregators(ct.aggregators).
+ Hash(computedHash).
+ Build()
+ ct.connStore.addConnection(computedHash.hashTotal, conn)
+ ct.connStore.updateNextHeartbeatTime(computedHash.hashTotal)
+ ct.updateConnection(conn, fl, computedHash, true)
+ ct.metrics.inputRecords.WithLabelValues("newConnection").Inc()
+ if ct.shouldOutputNewConnection {
+ record := conn.toGenericMap()
+ addHashField(record, computedHash.hashTotal)
+ addTypeField(record, api.ConnTrackNewConnection)
+ isFirst := conn.markReported()
+ addIsFirstField(record, isFirst)
+ outputRecords = append(outputRecords, record)
+ ct.metrics.outputRecords.WithLabelValues("newConnection").Inc()
+ }
+ }
+ } else {
+ ct.updateConnection(conn, fl, computedHash, false)
+ ct.metrics.inputRecords.WithLabelValues("update").Inc()
+ }
+ }
+
+ if ct.shouldOutputFlowLogs {
+ record := fl.Copy()
+ addHashField(record, computedHash.hashTotal)
+ addTypeField(record, api.ConnTrackFlowLog)
+ outputRecords = append(outputRecords, record)
+ ct.metrics.outputRecords.WithLabelValues("flowLog").Inc()
+ }
+ }
+
+ endConnectionRecords := ct.popEndConnections()
+ if ct.shouldOutputEndConnection {
+ outputRecords = append(outputRecords, endConnectionRecords...)
+ ct.metrics.outputRecords.WithLabelValues("endConnection").Add(float64(len(endConnectionRecords)))
+ }
+
+ if ct.shouldOutputHeartbeats {
+ heartbeatRecords := ct.prepareHeartbeatRecords()
+ outputRecords = append(outputRecords, heartbeatRecords...)
+ ct.metrics.outputRecords.WithLabelValues("heartbeat").Add(float64(len(heartbeatRecords)))
+ }
+
+ return outputRecords
+}
+
+func (ct *conntrackImpl) popEndConnections() []config.GenericMap {
+ connections := ct.connStore.popEndConnections()
+
+ var outputRecords []config.GenericMap
+ // Convert the connections to GenericMaps and add meta fields
+ for _, conn := range connections {
+ record := conn.toGenericMap()
+ addHashField(record, conn.getHash().hashTotal)
+ addTypeField(record, api.ConnTrackEndConnection)
+ var isFirst bool
+ if ct.shouldOutputEndConnection {
+ isFirst = conn.markReported()
+ }
+ addIsFirstField(record, isFirst)
+ outputRecords = append(outputRecords, record)
+ }
+ return outputRecords
+}
+
+func (ct *conntrackImpl) prepareHeartbeatRecords() []config.GenericMap {
+ connections := ct.connStore.prepareHeartbeats()
+
+ var outputRecords []config.GenericMap
+ // Convert the connections to GenericMaps and add meta fields
+ for _, conn := range connections {
+ record := conn.toGenericMap()
+ addHashField(record, conn.getHash().hashTotal)
+ addTypeField(record, api.ConnTrackHeartbeat)
+ var isFirst bool
+ if ct.shouldOutputHeartbeats {
+ isFirst = conn.markReported()
+ }
+ addIsFirstField(record, isFirst)
+ outputRecords = append(outputRecords, record)
+ }
+ return outputRecords
+}
+
+func (ct *conntrackImpl) updateConnection(conn connection, flowLog config.GenericMap, flowLogHash totalHashType, isNew bool) {
+ d := ct.getFlowLogDirection(conn, flowLogHash)
+ for _, agg := range ct.aggregators {
+ agg.update(conn, flowLog, d, isNew)
+ }
+
+ if ct.config.TCPFlags.DetectEndConnection && ct.containsTCPFlag(flowLog, FINFlag) {
+ ct.metrics.tcpFlags.WithLabelValues("detectEndConnection").Inc()
+ ct.connStore.setConnectionTerminating(flowLogHash.hashTotal)
+ } else {
+ ct.connStore.updateConnectionExpiryTime(flowLogHash.hashTotal)
+ }
+}
+
+func (ct *conntrackImpl) containsTCPFlag(flowLog config.GenericMap, queryFlag uint32) bool {
+ tcpFlagsRaw, ok := flowLog[ct.config.TCPFlags.FieldName]
+ if ok {
+ tcpFlags, err := utils.ConvertToUint32(tcpFlagsRaw)
+ if err != nil {
+ log.Warningf("cannot convert TCP flag %q to uint32: %v", tcpFlagsRaw, err)
+ return false
+ }
+ containsFlag := (tcpFlags & queryFlag) == queryFlag
+ if containsFlag {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (ct *conntrackImpl) getFlowLogDirection(conn connection, flowLogHash totalHashType) direction {
+ d := dirNA
+ if ct.config.KeyDefinition.Hash.FieldGroupARef != "" {
+ if conn.getHash().hashA == flowLogHash.hashA {
+ // A -> B
+ d = dirAB
+ } else {
+ // B -> A
+ d = dirBA
+ }
+ }
+ return d
+}
+
+// NewConnectionTrack creates a new connection track instance
+func NewConnectionTrack(opMetrics *operational.Metrics, params config.StageParam, clock clock.Clock) (extract.Extractor, error) {
+ cfg := params.Extract.ConnTrack
+ if err := cfg.Validate(); err != nil {
+ return nil, fmt.Errorf("ConnectionTrack config is invalid: %w", err)
+ }
+
+ metrics := newMetrics(opMetrics)
+
+ var aggregators []aggregator
+ for _, of := range cfg.OutputFields {
+ agg, err := newAggregator(of, metrics)
+ if err != nil {
+ return nil, fmt.Errorf("error creating aggregator: %w", err)
+ }
+ aggregators = append(aggregators, agg)
+ }
+ shouldOutputFlowLogs := false
+ shouldOutputNewConnection := false
+ shouldOutputEndConnection := false
+ shouldOutputHeartbeats := false
+ for _, option := range cfg.OutputRecordTypes {
+ switch option {
+ case api.ConnTrackFlowLog:
+ shouldOutputFlowLogs = true
+ case api.ConnTrackNewConnection:
+ shouldOutputNewConnection = true
+ case api.ConnTrackEndConnection:
+ shouldOutputEndConnection = true
+ case api.ConnTrackHeartbeat:
+ shouldOutputHeartbeats = true
+ default:
+ return nil, fmt.Errorf("unknown OutputRecordTypes: %v", option)
+ }
+ }
+
+ endpointAFields, endpointBFields := cfg.GetABFields()
+ conntrack := &conntrackImpl{
+ clock: clock,
+ connStore: newConnectionStore(cfg.Scheduling, metrics, clock.Now),
+ config: cfg,
+ endpointAFields: endpointAFields,
+ endpointBFields: endpointBFields,
+ hashProvider: fnv.New64a,
+ aggregators: aggregators,
+ shouldOutputFlowLogs: shouldOutputFlowLogs,
+ shouldOutputNewConnection: shouldOutputNewConnection,
+ shouldOutputEndConnection: shouldOutputEndConnection,
+ shouldOutputHeartbeats: shouldOutputHeartbeats,
+ metrics: metrics,
+ }
+ return conntrack, nil
+}
+
+func addHashField(record config.GenericMap, hashID uint64) {
+ record[api.HashIDFieldName] = strconv.FormatUint(hashID, 16)
+}
+
+func addTypeField(record config.GenericMap, recordType api.ConnTrackOutputRecordTypeEnum) {
+ record[api.RecordTypeFieldName] = recordType
+}
+
+func addIsFirstField(record config.GenericMap, isFirst bool) {
+ record[api.IsFirstFieldName] = isFirst
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go
new file mode 100644
index 000000000..042331b73
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/gob"
+ "fmt"
+ "hash"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ log "github.com/sirupsen/logrus"
+)
+
+// TODO: what's a better name for this struct?
+type totalHashType struct {
+ hashA uint64
+ hashB uint64
+ hashTotal uint64
+}
+
+// computeHash computes the hash of a flow log according to keyDefinition.
+// Two flow logs will have the same hash if they belong to the same connection.
+func computeHash(flowLog config.GenericMap, keyDefinition *api.KeyDefinition, hasher hash.Hash64, metrics *metricsType) (totalHashType, error) {
+ fieldGroup2hash := make(map[string]uint64)
+
+ // Compute the hash of each field group
+ for _, fg := range keyDefinition.FieldGroups {
+ h, err := computeHashFields(flowLog, fg.Fields, hasher, metrics)
+ if err != nil {
+ return totalHashType{}, fmt.Errorf("compute hash: %w", err)
+ }
+ fieldGroup2hash[fg.Name] = h
+ }
+
+ // Compute the total hash
+ th := totalHashType{}
+ hasher.Reset()
+ for _, fgName := range keyDefinition.Hash.FieldGroupRefs {
+ hasher.Write(uint64ToBytes(fieldGroup2hash[fgName]))
+ }
+ if keyDefinition.Hash.FieldGroupARef != "" {
+ th.hashA = fieldGroup2hash[keyDefinition.Hash.FieldGroupARef]
+ th.hashB = fieldGroup2hash[keyDefinition.Hash.FieldGroupBRef]
+ // Determine order between A's and B's hash to get the same hash for both flow logs from A to B and from B to A.
+ if th.hashA < th.hashB {
+ hasher.Write(uint64ToBytes(th.hashA))
+ hasher.Write(uint64ToBytes(th.hashB))
+ } else {
+ hasher.Write(uint64ToBytes(th.hashB))
+ hasher.Write(uint64ToBytes(th.hashA))
+ }
+ }
+ th.hashTotal = hasher.Sum64()
+ return th, nil
+}
+
+func computeHashFields(flowLog config.GenericMap, fieldNames []string, hasher hash.Hash64, metrics *metricsType) (uint64, error) {
+ hasher.Reset()
+ for _, fn := range fieldNames {
+ f, ok := flowLog[fn]
+ if !ok {
+ log.Warningf("Missing field %v", fn)
+ if metrics != nil {
+ metrics.hashErrors.WithLabelValues("MissingFieldError", fn).Inc()
+ }
+ continue
+ }
+ bytes, err := toBytes(f)
+ if err != nil {
+ return 0, err
+ }
+ hasher.Write(bytes)
+ }
+ return hasher.Sum64(), nil
+}
+
+func uint64ToBytes(data uint64) []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, data)
+ return b
+}
+
+func toBytes(data interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ err := enc.Encode(data)
+ if err != nil {
+ return nil, err
+ }
+ bytes := buf.Bytes()
+ return bytes, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/metrics.go
new file mode 100644
index 000000000..66baaa71f
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/metrics.go
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ connStoreLengthDef = operational.DefineMetric(
+ "conntrack_memory_connections",
+ "The total number of tracked connections in memory per group and phase",
+ operational.TypeGauge,
+ "group", "phase",
+ )
+
+ inputRecordsDef = operational.DefineMetric(
+ "conntrack_input_records",
+ "The total number of input records per classification",
+ operational.TypeCounter,
+ "classification",
+ )
+
+ outputRecordsDef = operational.DefineMetric(
+ "conntrack_output_records",
+ "The total number of output records",
+ operational.TypeCounter,
+ "type",
+ )
+
+ tcpFlagsDef = operational.DefineMetric(
+ "conntrack_tcp_flags",
+ "The total number of actions taken based on TCP flags",
+ operational.TypeCounter,
+ "action",
+ )
+
+ hashErrorsDef = operational.DefineMetric(
+ "conntrack_hash_errors",
+ "The total number of errors during hash computation",
+ operational.TypeCounter,
+ "error", "field",
+ )
+
+ aggregatorErrorsDef = operational.DefineMetric(
+ "conntrack_aggregator_errors",
+ "The total number of errors during aggregation",
+ operational.TypeCounter,
+ "error", "field",
+ )
+
+ endConnectionsDef = operational.DefineMetric(
+ "conntrack_end_connections",
+ "The total number of connections ended per group and reason",
+ operational.TypeCounter,
+ "group", "reason",
+ )
+)
+
+type metricsType struct {
+ connStoreLength *prometheus.GaugeVec
+ inputRecords *prometheus.CounterVec
+ outputRecords *prometheus.CounterVec
+ tcpFlags *prometheus.CounterVec
+ hashErrors *prometheus.CounterVec
+ aggregatorErrors *prometheus.CounterVec
+ endConnections *prometheus.CounterVec
+}
+
+func newMetrics(opMetrics *operational.Metrics) *metricsType {
+ return &metricsType{
+ connStoreLength: opMetrics.NewGaugeVec(&connStoreLengthDef),
+ inputRecords: opMetrics.NewCounterVec(&inputRecordsDef),
+ outputRecords: opMetrics.NewCounterVec(&outputRecordsDef),
+ tcpFlags: opMetrics.NewCounterVec(&tcpFlagsDef),
+ hashErrors: opMetrics.NewCounterVec(&hashErrorsDef),
+ aggregatorErrors: opMetrics.NewCounterVec(&aggregatorErrorsDef),
+ endConnections: opMetrics.NewCounterVec(&endConnectionsDef),
+ }
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go
new file mode 100644
index 000000000..bc7040d64
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+const (
+ expiryOrder = utils.OrderID("expiryOrder")
+ nextHeartbeatTimeOrder = utils.OrderID("nextHeartbeatTimeOrder")
+ activeLabel = "active"
+ terminatingLabel = "terminating"
+)
+
+// connectionStore provides the means to manage connections, such as retrieving a connection by its hash and organizing
+// connections in groups sorted by expiry time and next heartbeat time.
+// This allows efficient retrieval and removal of connections.
+type connectionStore struct {
+ groups []*groupType
+ hashID2groupIdx map[uint64]int
+ metrics *metricsType
+ now func() time.Time
+}
+
+type groupType struct {
+ scheduling api.ConnTrackSchedulingGroup
+ // active connections
+ activeMom *utils.MultiOrderedMap
+ // connections for which an EndConnection was detected from the TCP FIN flag. These no longer trigger updates
+ // until they are popped; see the expireConnection func
+ terminatingMom *utils.MultiOrderedMap
+ labelValue string
+}
+
+func (cs *connectionStore) getGroupIdx(conn connection) (groupIdx int) {
+ for i, group := range cs.groups {
+ if conn.isMatchSelector(group.scheduling.Selector) {
+ // connection belongs to scheduling group i
+ return i
+ }
+ }
+ // Shouldn't get here since the last scheduling group should have a selector that matches any connection.
+ log.Errorf("BUG. connection with hash %x doesn't match any selector", conn.getHash().hashTotal)
+ lastGroupIdx := len(cs.groups) - 1
+ return lastGroupIdx
+}
+
+func (cs *connectionStore) addConnection(hashID uint64, conn connection) {
+ groupIdx := cs.getGroupIdx(conn)
+ mom := cs.groups[groupIdx].activeMom
+
+ err := mom.AddRecord(utils.Key(hashID), conn)
+ if err != nil {
+ log.Errorf("BUG. connection with hash %x already exists in store. %v", hashID, conn)
+ }
+ cs.hashID2groupIdx[hashID] = groupIdx
+
+ groupLabel := cs.groups[groupIdx].labelValue
+ activeLen := cs.groups[groupIdx].activeMom.Len()
+ cs.metrics.connStoreLength.WithLabelValues(groupLabel, activeLabel).Set(float64(activeLen))
+}
+
+func (cs *connectionStore) getConnection(hashID uint64) (connection, bool, bool) {
+ groupIdx, found := cs.hashID2groupIdx[hashID]
+ if !found {
+ return nil, false, false
+ }
+ mom := cs.groups[groupIdx].activeMom
+
+ // get connection from active map
+ isRunning := true
+ record, ok := mom.GetRecord(utils.Key(hashID))
+ if !ok {
+ // fallback on terminating map if not found
+ isRunning = false
+ mom := cs.groups[groupIdx].terminatingMom
+ record, ok = mom.GetRecord(utils.Key(hashID))
+ if !ok {
+ return nil, false, false
+ }
+ }
+ conn := record.(connection)
+ return conn, true, isRunning
+}
+
+func (cs *connectionStore) setConnectionTerminating(hashID uint64) {
+ conn, ok, active := cs.getConnection(hashID)
+ if !ok {
+ log.Panicf("BUG. connection hash %x doesn't exist", hashID)
+ return
+ } else if !active {
+ // connection is terminating
+ return
+ }
+ groupIdx := cs.hashID2groupIdx[hashID]
+ groupLabel := cs.groups[groupIdx].labelValue
+ activeMom := cs.groups[groupIdx].activeMom
+ terminatingMom := cs.groups[groupIdx].terminatingMom
+ timeout := cs.groups[groupIdx].scheduling.TerminatingTimeout.Duration
+ newExpiryTime := cs.now().Add(timeout)
+ conn.setExpiryTime(newExpiryTime)
+ // Remove connection from active map
+ activeMom.RemoveRecord(utils.Key(hashID))
+ activeLen := cs.groups[groupIdx].activeMom.Len()
+ cs.metrics.connStoreLength.WithLabelValues(groupLabel, activeLabel).Set(float64(activeLen))
+ // Add connection to terminating map
+ err := terminatingMom.AddRecord(utils.Key(hashID), conn)
+ if err != nil {
+ log.Errorf("BUG. connection with hash %x already exists in store. %v", hashID, conn)
+ }
+ terminatingLen := cs.groups[groupIdx].terminatingMom.Len()
+ cs.metrics.connStoreLength.WithLabelValues(groupLabel, terminatingLabel).Set(float64(terminatingLen))
+}
+
+func (cs *connectionStore) updateConnectionExpiryTime(hashID uint64) {
+ conn, ok, active := cs.getConnection(hashID)
+ if !ok {
+ log.Panicf("BUG. connection hash %x doesn't exist", hashID)
+ return
+ } else if !active {
+ // connection is terminating. expiry time can't be updated anymore
+ return
+ }
+ groupIdx := cs.hashID2groupIdx[hashID]
+ mom := cs.groups[groupIdx].activeMom
+ timeout := cs.groups[groupIdx].scheduling.EndConnectionTimeout.Duration
+ newExpiryTime := cs.now().Add(timeout)
+ conn.setExpiryTime(newExpiryTime)
+ // Move to the back of the list
+ err := mom.MoveToBack(utils.Key(hashID), expiryOrder)
+ if err != nil {
+ log.Panicf("BUG. Can't update connection expiry time for hash %x: %v", hashID, err)
+ return
+ }
+}
+
+func (cs *connectionStore) updateNextHeartbeatTime(hashID uint64) {
+ conn, ok, active := cs.getConnection(hashID)
+ if !ok {
+ log.Panicf("BUG. connection hash %x doesn't exist", hashID)
+ return
+ } else if !active {
+ // connection is terminating. Heartbeats are disabled
+ return
+ }
+ groupIdx := cs.hashID2groupIdx[hashID]
+ mom := cs.groups[groupIdx].activeMom
+ timeout := cs.groups[groupIdx].scheduling.HeartbeatInterval.Duration
+ newNextHeartbeatTime := cs.now().Add(timeout)
+ conn.setNextHeartbeatTime(newNextHeartbeatTime)
+ // Move to the back of the list
+ err := mom.MoveToBack(utils.Key(hashID), nextHeartbeatTimeOrder)
+ if err != nil {
+ log.Panicf("BUG. Can't update next heartbeat time for hash %x: %v", hashID, err)
+ return
+ }
+}
+
+func (cs *connectionStore) popEndConnectionOfMap(mom *utils.MultiOrderedMap, group *groupType) []connection {
+ var poppedConnections []connection
+
+ mom.IterateFrontToBack(expiryOrder, func(r utils.Record) (shouldDelete, shouldStop bool) {
+ conn := r.(connection)
+ expiryTime := conn.getExpiryTime()
+ if cs.now().After(expiryTime) {
+ // The connection has expired. We want to pop it.
+ poppedConnections = append(poppedConnections, conn)
+ shouldDelete, shouldStop = true, false
+ delete(cs.hashID2groupIdx, conn.getHash().hashTotal)
+ } else {
+ // No more expired connections
+ shouldDelete, shouldStop = false, true
+ }
+ return
+ })
+ groupLabel := group.labelValue
+ momLen := mom.Len()
+ var phaseLabel string
+ switch mom {
+ case group.activeMom:
+ phaseLabel = activeLabel
+ case group.terminatingMom:
+ phaseLabel = terminatingLabel
+ }
+ cs.metrics.connStoreLength.WithLabelValues(groupLabel, phaseLabel).Set(float64(momLen))
+
+ return poppedConnections
+}
+
+func (cs *connectionStore) popEndConnections() []connection {
+ // Iterate over the connections by scheduling groups.
+ // In each scheduling group iterate over them by their expiry time from old to new.
+ var poppedConnections []connection
+ for _, group := range cs.groups {
+ // Pop terminating connections first
+ terminatedConnections := cs.popEndConnectionOfMap(group.terminatingMom, group)
+ poppedConnections = append(poppedConnections, terminatedConnections...)
+ cs.metrics.endConnections.WithLabelValues(group.labelValue, "FIN_flag").Add(float64(len(terminatedConnections)))
+
+ // Pop active connections that expired without TCP flag
+ timedoutConnections := cs.popEndConnectionOfMap(group.activeMom, group)
+ poppedConnections = append(poppedConnections, timedoutConnections...)
+ cs.metrics.endConnections.WithLabelValues(group.labelValue, "timeout").Add(float64(len(timedoutConnections)))
+ }
+ return poppedConnections
+}
+
+func (cs *connectionStore) prepareHeartbeats() []connection {
+ var connections []connection
+ // Iterate over the connections by scheduling groups.
+ // In each scheduling group iterate over them by their next heartbeat time from old to new.
+ for _, group := range cs.groups {
+ group.activeMom.IterateFrontToBack(nextHeartbeatTimeOrder, func(r utils.Record) (shouldDelete, shouldStop bool) {
+ conn := r.(connection)
+ nextHeartbeat := conn.getNextHeartbeatTime()
+ needToReport := cs.now().After(nextHeartbeat)
+ if needToReport {
+ connections = append(connections, conn)
+ cs.updateNextHeartbeatTime(conn.getHash().hashTotal)
+ shouldDelete, shouldStop = false, false
+ } else {
+ shouldDelete, shouldStop = false, true
+ }
+ return
+ })
+ }
+ return connections
+}
+
+func (cs *connectionStore) len() int {
+ return len(cs.hashID2groupIdx)
+}
+
+// schedulingGroupToLabelValue returns a string representation of a scheduling group to be used as a Prometheus label
+// value.
+func schedulingGroupToLabelValue(groupIdx int, group api.ConnTrackSchedulingGroup) string {
+ sb := strings.Builder{}
+ sb.WriteString(fmt.Sprintf("%v: ", groupIdx))
+ var keys []string
+ for k := range group.Selector {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ sb.WriteString(fmt.Sprintf("%s=%v, ", k, group.Selector[k]))
+ }
+ if len(group.Selector) == 0 {
+ sb.WriteString("DEFAULT")
+ }
+ return sb.String()
+}
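+
+// For example, a scheduling group at index 0 with Selector {"Proto": 6} is rendered as
+// "0: Proto=6, ", and a catch-all group with an empty selector at index 1 is rendered as
+// "1: DEFAULT".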
+
+func newConnectionStore(scheduling []api.ConnTrackSchedulingGroup, metrics *metricsType, nowFunc func() time.Time) *connectionStore {
+ groups := make([]*groupType, len(scheduling))
+ for groupIdx, sg := range scheduling {
+ groups[groupIdx] = &groupType{
+ scheduling: sg,
+ activeMom: utils.NewMultiOrderedMap(expiryOrder, nextHeartbeatTimeOrder),
+ terminatingMom: utils.NewMultiOrderedMap(expiryOrder, nextHeartbeatTimeOrder),
+ labelValue: schedulingGroupToLabelValue(groupIdx, sg),
+ }
+ }
+
+ cs := &connectionStore{
+ groups: groups,
+ hashID2groupIdx: map[uint64]int{},
+ metrics: metrics,
+ now: nowFunc,
+ }
+ return cs
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go
new file mode 100644
index 000000000..2e695129b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+// From: https://github.com/netobserv/netobserv-ebpf-agent/blob/c54e7eb9e37e8ef5bb948eff6141cdddf584a6f9/bpf/flows.c#L45-L56
+const (
+ FINFlag = uint32(0x01)
+ SYNFlag = uint32(0x02)
+ RSTFlag = uint32(0x04)
+ PSHFlag = uint32(0x08)
+ ACKFlag = uint32(0x10)
+ URGFlag = uint32(0x20)
+ ECEFlag = uint32(0x40)
+ CWRFlag = uint32(0x80)
+ // Custom flags
+ SYNACKFlag = uint32(0x100)
+ FINACKFlag = uint32(0x200)
+ RSTACKFlag = uint32(0x400)
+ // Note: The difference between SYNFlag | ACKFlag (0x12) and SYNACKFlag (0x100) is that the former indicates
+ // that a flowlog contains TCP packets with the SYN flag set and the ACK flag set, but not necessarily in the same packet,
+ // while the latter indicates that a flowlog contains a TCP packet with both flags set.
+)
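+
+// For example, an aggregated flags value of SYNFlag|ACKFlag (0x12) means the flowlog saw at
+// least one packet with SYN and at least one packet with ACK, whereas a value with the
+// SYNACKFlag bit (0x100) set means a single packet carried both flags. A typical check is a
+// bitmask test such as flags&SYNACKFlag != 0, where flags stands for the aggregated TCP
+// flags field of a flowlog.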
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract.go
new file mode 100644
index 000000000..e33ba87f8
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract.go
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package extract
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ log "github.com/sirupsen/logrus"
+)
+
+type Extractor interface {
+ Extract(in []config.GenericMap) []config.GenericMap
+}
+
+type extractNone struct {
+}
+
+// Extract extracts flows before they are stored
+func (t *extractNone) Extract(f []config.GenericMap) []config.GenericMap {
+ return f
+}
+
+// NewExtractNone creates a new no-op extractor
+func NewExtractNone() (Extractor, error) {
+ log.Debugf("entering NewExtractNone")
+ return &extractNone{}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go
new file mode 100644
index 000000000..cc982ae93
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package extract
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ agg "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate"
+ log "github.com/sirupsen/logrus"
+)
+
+type aggregates struct {
+ agg.Aggregates
+}
+
+// Extract extracts flows before they are stored
+func (ea *aggregates) Extract(entries []config.GenericMap) []config.GenericMap {
+ err := ea.Aggregates.Evaluate(entries)
+ if err != nil {
+ log.Debugf("Evaluate error %v", err)
+ }
+
+ // TODO: This needs to be an async function that is called for the metrics rather than
+ // TODO: being invoked synchronously from the pipeline.
+ return ea.Aggregates.GetMetrics()
+}
+
+// NewExtractAggregate creates a new extractor
+func NewExtractAggregate(params config.StageParam) (Extractor, error) {
+ log.Debugf("entering NewExtractAggregate")
+ cfg, err := agg.NewAggregatesFromConfig(params.Extract.Aggregates)
+ if err != nil {
+ log.Errorf("error in NewAggregatesFromConfig: %v", err)
+ return nil, err
+ }
+
+ return &aggregates{
+ Aggregates: cfg,
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go
new file mode 100644
index 000000000..4d93b0485
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package extract
+
+import (
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ tb "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased"
+ log "github.com/sirupsen/logrus"
+)
+
+type timebased struct {
+ Filters []tb.FilterStruct
+ IndexKeyStructs map[string]*tb.IndexKeyTable
+}
+
+// Extract extracts flows before they are stored
+func (et *timebased) Extract(entries []config.GenericMap) []config.GenericMap {
+ log.Debugf("entering timebased Extract")
+ nowInSecs := time.Now()
+ // Populate the Table with the current entries
+ for _, entry := range entries {
+ log.Debugf("timebased Extract, entry = %v", entry)
+ tb.AddEntryToTables(et.IndexKeyStructs, entry, nowInSecs)
+ }
+
+ output := make([]config.GenericMap, 0)
+ // Calculate Filters based on time windows
+ for i := range et.Filters {
+ filter := &et.Filters[i]
+ filter.CalculateResults(nowInSecs)
+ filter.ComputeTopkBotk()
+ genMap := filter.CreateGenericMap()
+ output = append(output, genMap...)
+ }
+ log.Debugf("output of extract timebased: %v", output)
+
+ // delete entries from tables that are outside time windows
+ tb.DeleteOldEntriesFromTables(et.IndexKeyStructs, nowInSecs)
+
+ return output
+}
+
+// NewExtractTimebased creates a new extractor
+func NewExtractTimebased(params config.StageParam) (Extractor, error) {
+ var rules []api.TimebasedFilterRule
+ if params.Extract != nil && params.Extract.Timebased.Rules != nil {
+ rules = params.Extract.Timebased.Rules
+ }
+ log.Debugf("NewExtractTimebased; rules = %v", rules)
+
+ tmpIndexKeyStructs, tmpFilters := tb.CreateIndexKeysAndFilters(rules)
+
+ return &timebased{
+ Filters: tmpFilters,
+ IndexKeyStructs: tmpIndexKeyStructs,
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go
new file mode 100644
index 000000000..e753c3b31
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package timebased
+
+import (
+ "container/list"
+ "math"
+ "strings"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+func (fs *FilterStruct) CalculateResults(nowInSecs time.Time) {
+ log.Debugf("CalculateResults nowInSecs = %v", nowInSecs)
+ oldestValidTime := nowInSecs.Add(-fs.Rule.TimeInterval.Duration)
+ for tableKey, l := range fs.IndexKeyDataTable.dataTableMap {
+ var valueFloat64 = float64(0)
+ var err error
+ //nolint:exhaustive
+ switch fs.Rule.OperationType {
+ case api.FilterOperationLast:
+ // handle empty list
+ if l.Len() == 0 {
+ continue
+ }
+ valueFloat64, err = utils.ConvertToFloat64(l.Back().Value.(*TableEntry).entry[fs.Rule.OperationKey])
+ if err != nil {
+ continue
+ }
+ case api.FilterOperationDiff:
+ for e := l.Front(); e != nil; e = e.Next() {
+ cEntry := e.Value.(*TableEntry)
+ if cEntry.timeStamp.Before(oldestValidTime) {
+ // entry is out of time range; ignore it
+ continue
+ }
+ first, err := utils.ConvertToFloat64(e.Value.(*TableEntry).entry[fs.Rule.OperationKey])
+ if err != nil {
+ continue
+ }
+ last, err := utils.ConvertToFloat64(l.Back().Value.(*TableEntry).entry[fs.Rule.OperationKey])
+ if err != nil {
+ continue
+ }
+ valueFloat64 = last - first
+ break
+ }
+ default:
+ valueFloat64 = fs.CalculateValue(l, oldestValidTime)
+ }
+ fs.Results[tableKey] = &filterOperationResult{
+ values: tableKey,
+ operationResult: valueFloat64,
+ }
+ }
+ log.Debugf("CalculateResults Results = %v", fs.Results)
+}
+
+func (fs *FilterStruct) CalculateValue(l *list.List, oldestValidTime time.Time) float64 {
+ log.Debugf("CalculateValue nowInSecs = %v", oldestValidTime)
+ currentValue := getInitValue(fs.Rule.OperationType)
+ nItems := 0
+ for e := l.Front(); e != nil; e = e.Next() {
+ cEntry := e.Value.(*TableEntry)
+ if cEntry.timeStamp.Before(oldestValidTime) {
+ // entry is out of time range; ignore it
+ continue
+ }
+ if valueFloat64, err := utils.ConvertToFloat64(cEntry.entry[fs.Rule.OperationKey]); err != nil {
+ // Log as debug to avoid performance impact
+ log.Debugf("CalculateValue error with OperationKey %s: %v", fs.Rule.OperationKey, err)
+ } else {
+ nItems++
+ switch fs.Rule.OperationType {
+ case api.FilterOperationSum, api.FilterOperationAvg:
+ currentValue += valueFloat64
+ case api.FilterOperationMax:
+ currentValue = math.Max(currentValue, valueFloat64)
+ case api.FilterOperationMin:
+ currentValue = math.Min(currentValue, valueFloat64)
+ case api.FilterOperationCnt, api.FilterOperationLast, api.FilterOperationDiff:
+ }
+ }
+ }
+ if fs.Rule.OperationType == api.FilterOperationAvg && nItems > 0 {
+ currentValue /= float64(nItems)
+ }
+ if fs.Rule.OperationType == api.FilterOperationCnt {
+ currentValue = float64(nItems)
+ }
+ return currentValue
+}
+
+func getInitValue(operation api.FilterOperationEnum) float64 {
+ switch operation {
+ case api.FilterOperationSum,
+ api.FilterOperationAvg,
+ api.FilterOperationCnt,
+ api.FilterOperationLast,
+ api.FilterOperationDiff:
+ return 0
+ case api.FilterOperationMax:
+ return (-math.MaxFloat64)
+ case api.FilterOperationMin:
+ return math.MaxFloat64
+ default:
+ log.Panicf("unknown operation %v", operation)
+ return 0
+ }
+}
+
+func (fs *FilterStruct) ComputeTopkBotk() {
+ var output []filterOperationResult
+ if fs.Rule.TopK > 0 {
+ if fs.Rule.Reversed {
+ output = fs.computeBotK(fs.Results)
+ } else {
+ output = fs.computeTopK(fs.Results)
+ }
+ } else {
+ // return all Results; convert map to array
+ output = make([]filterOperationResult, len(fs.Results))
+ i := 0
+ for _, item := range fs.Results {
+ output[i] = *item
+ i++
+ }
+ }
+ fs.Output = output
+}
+
+func (fs *FilterStruct) CreateGenericMap() []config.GenericMap {
+ output := make([]config.GenericMap, 0)
+ for _, result := range fs.Output {
+ t := config.GenericMap{
+ "name": fs.Rule.Name,
+ "index_key": fs.Rule.IndexKey,
+ "operation": fs.Rule.OperationType,
+ }
+
+ // append operation key and result as key / value
+ t[fs.Rule.OperationKey] = result.operationResult
+
+ // append index key / value pairs
+ values := strings.Split(result.values, ",")
+ if len(fs.Rule.IndexKeys) == len(values) {
+ for i, k := range fs.Rule.IndexKeys {
+ t[k] = values[i]
+ }
+ }
+
+ log.Debugf("FilterStruct CreateGenericMap: %v", t)
+ output = append(output, t)
+ }
+ log.Debugf("FilterStruct CreateGenericMap: output = %v \n", output)
+ return output
+}
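+
+// Illustrative output entry, assuming a rule named "top_senders" with IndexKeys ["SrcAddr"]
+// and OperationKey "Bytes" (all hypothetical names):
+// {"name": "top_senders", "index_key": "SrcAddr", "operation": <operation>, "Bytes": <result>, "SrcAddr": "10.0.0.1"}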
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go
new file mode 100644
index 000000000..5c52dc656
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package timebased
+
+import (
+ "container/heap"
+ "math"
+
+ log "github.com/sirupsen/logrus"
+)
+
+// functions to manipulate a heap to generate TopK/BotK entries
+// We need to implement the heap interface: Len(), Less(), Swap(), Push(), Pop()
+
+type heapItem struct {
+ value float64
+ result *filterOperationResult
+}
+
+type topkHeap []heapItem
+type botkHeap []heapItem
+
+func (h topkHeap) Len() int {
+ return len(h)
+}
+
+func (h topkHeap) Less(i, j int) bool {
+ return h[i].value < h[j].value
+}
+
+func (h topkHeap) Swap(i, j int) {
+ h[i], h[j] = h[j], h[i]
+}
+
+func (h *topkHeap) Push(x interface{}) {
+ *h = append(*h, x.(heapItem))
+}
+
+func (h *topkHeap) Pop() interface{} {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+ return x
+}
+
+func (fs *FilterStruct) computeTopK(inputs filterOperationResults) []filterOperationResult {
+ // maintain a heap with k items, always dropping the lowest
+ // we will be left with the TopK items
+ var prevMin float64
+ prevMin = -math.MaxFloat64
+ topk := fs.Rule.TopK
+ h := &topkHeap{}
+ for key, metricMap := range inputs {
+ val := metricMap.operationResult
+ if val < prevMin {
+ continue
+ }
+ item := heapItem{
+ result: inputs[key],
+ value: val,
+ }
+ heap.Push(h, item)
+ if h.Len() > topk {
+ x := heap.Pop(h)
+ prevMin = x.(heapItem).value
+ }
+ }
+ log.Debugf("heap: %v", h)
+
+ // convert the remaining heap to a sorted array
+ result := make([]filterOperationResult, h.Len())
+ heapLen := h.Len()
+ for i := heapLen; i > 0; i-- {
+ poppedItem := heap.Pop(h).(heapItem)
+ log.Debugf("poppedItem: %v", poppedItem)
+ result[i-1] = *poppedItem.result
+ }
+ log.Debugf("topk items: %v", result)
+ return result
+}
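+
+// Illustrative example: with Rule.TopK = 2 and operation results of 5, 1 and 3, the min-heap
+// retains 5 and 3, and the returned slice is ordered highest first: [5, 3].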
+
+func (h botkHeap) Len() int {
+ return len(h)
+}
+
+// For a botk heap, we reverse the order of the Less() operation
+func (h botkHeap) Less(i, j int) bool {
+ return h[i].value > h[j].value
+}
+
+func (h botkHeap) Swap(i, j int) {
+ h[i], h[j] = h[j], h[i]
+}
+
+func (h *botkHeap) Push(x interface{}) {
+ *h = append(*h, x.(heapItem))
+}
+
+func (h *botkHeap) Pop() interface{} {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+ return x
+}
+
+func (fs *FilterStruct) computeBotK(inputs filterOperationResults) []filterOperationResult {
+ // maintain a heap with k items, always dropping the highest
+ // we will be left with the BotK items
+ var prevMax float64
+ prevMax = math.MaxFloat64
+ botk := fs.Rule.TopK
+ h := &botkHeap{}
+ for key, metricMap := range inputs {
+ val := metricMap.operationResult
+ if val > prevMax {
+ continue
+ }
+ item := heapItem{
+ result: inputs[key],
+ value: val,
+ }
+ heap.Push(h, item)
+ if h.Len() > botk {
+ x := heap.Pop(h)
+ prevMax = x.(heapItem).value
+ }
+ }
+ log.Debugf("heap: %v", h)
+
+ // convert the remaining heap to a sorted array
+ result := make([]filterOperationResult, h.Len())
+ heapLen := h.Len()
+ for i := heapLen; i > 0; i-- {
+ poppedItem := heap.Pop(h).(heapItem)
+ log.Debugf("poppedItem: %v", poppedItem)
+ result[i-1] = *poppedItem.result
+ }
+ log.Debugf("botk items: %v", result)
+ return result
+}
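+
+// Illustrative example: with Rule.TopK = 2 and the same operation results of 5, 1 and 3, the
+// reversed heap retains 1 and 3, and the returned slice is ordered lowest first: [1, 3].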
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/tables.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/tables.go
new file mode 100644
index 000000000..3bdb41dfe
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/tables.go
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package timebased
+
+import (
+ "bytes"
+ "container/list"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+func AddEntryToTables(indexKeyStructs map[string]*IndexKeyTable, entry config.GenericMap, nowInSecs time.Time) {
+ for tableKey, recordTable := range indexKeyStructs {
+ keys := strings.Split(tableKey, ",")
+
+ validValuesCount := 0
+ var b bytes.Buffer
+ for _, key := range keys {
+ if b.Len() > 0 {
+ b.WriteRune(',')
+ }
+ if val, ok := entry[key]; ok {
+ valStr := utils.ConvertToString(val)
+ if len(valStr) > 0 {
+ b.WriteString(valStr)
+ validValuesCount++
+ }
+ }
+ }
+
+ // add entry to the table only if all values are non-empty
+ if len(keys) == validValuesCount {
+ val := b.String()
+ log.Debugf("ExtractTimebased addEntryToTables: key = %s, recordTable = %v", tableKey, recordTable)
+ cEntry := &TableEntry{
+ timeStamp: nowInSecs,
+ entry: entry,
+ }
+ // allocate list if it does not yet exist
+ if recordTable.dataTableMap[val] == nil {
+ recordTable.dataTableMap[val] = list.New()
+ }
+ log.Debugf("ExtractTimebased addEntryToTables: adding to table %s", val)
+ AddEntryToTable(cEntry, recordTable.dataTableMap[val])
+ }
+ }
+}
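+
+// For example, with a table key of "SrcAddr,DstAddr" (hypothetical field names) and an entry
+// containing SrcAddr=10.0.0.1 and DstAddr=10.0.0.2, the entry is appended to the list stored
+// under the composite value "10.0.0.1,10.0.0.2".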
+
+func AddEntryToTable(cEntry *TableEntry, tableList *list.List) {
+ log.Debugf("AddEntryToTable: adding table entry %v", cEntry)
+ tableList.PushBack(cEntry)
+}
+
+func DeleteOldEntriesFromTables(indexKeyStructs map[string]*IndexKeyTable, nowInSecs time.Time) {
+ for _, recordTable := range indexKeyStructs {
+ oldestTime := nowInSecs.Add(-recordTable.maxTimeInterval)
+ for _, tableMap := range recordTable.dataTableMap {
+ for {
+ head := tableMap.Front()
+ if head == nil {
+ break
+ }
+ tableEntry := head.Value.(*TableEntry)
+ if tableEntry.timeStamp.Before(oldestTime) {
+ tableMap.Remove(head)
+ continue
+ }
+ break
+ }
+ // TODO: if tableMap is empty, we should clean it up and remove it from recordTable.dataTableMap
+ }
+ }
+}
+
+func PrintTable(l *list.List) {
+ fmt.Printf("start PrintTable: \n")
+ for e := l.Front(); e != nil; e = e.Next() {
+ fmt.Printf("PrintTable: e = %v, Value = %v \n", e, e.Value)
+ }
+ fmt.Printf("end PrintTable: \n")
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go
new file mode 100644
index 000000000..d6fded1d6
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package timebased
+
+import (
+ "container/list"
+ "strings"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ log "github.com/sirupsen/logrus"
+)
+
+type FilterStruct struct {
+ Rule api.TimebasedFilterRule
+ IndexKeyDataTable *IndexKeyTable
+ Results filterOperationResults
+ Output []filterOperationResult
+}
+
+type filterOperationResults map[string]*filterOperationResult
+
+type filterOperationResult struct {
+ values string
+ operationResult float64
+}
+
+type DataTableMap map[string]*list.List
+
+type IndexKeyTable struct {
+ maxTimeInterval time.Duration
+ dataTableMap DataTableMap
+}
+
+type TableEntry struct {
+ timeStamp time.Time
+ entry config.GenericMap
+}
+
+// CreateIndexKeysAndFilters creates structures for each IndexKey that appears in the rules.
+// Note that the same IndexKey might appear in more than one Rule.
+// It connects each IndexKey structure to its filters.
+// For each IndexKey, a single history table is kept, sized to handle the largest TimeInterval among its rules.
+func CreateIndexKeysAndFilters(rules []api.TimebasedFilterRule) (map[string]*IndexKeyTable, []FilterStruct) {
+ tmpIndexKeyStructs := make(map[string]*IndexKeyTable)
+ tmpFilters := make([]FilterStruct, 0)
+ for _, filterRule := range rules {
+ log.Debugf("CreateIndexKeysAndFilters: filterRule = %v", filterRule)
+ if len(filterRule.IndexKeys) > 0 {
+ // reuse indexKey as table index
+ filterRule.IndexKey = strings.Join(filterRule.IndexKeys, ",")
+ } else if len(filterRule.IndexKey) > 0 {
+ // append indexKey to indexKeys
+ filterRule.IndexKeys = append(filterRule.IndexKeys, filterRule.IndexKey)
+ } else {
+ log.Errorf("missing IndexKey(s) for filter %s", filterRule.Name)
+ continue
+ }
+ rStruct, ok := tmpIndexKeyStructs[filterRule.IndexKey]
+ if !ok {
+ rStruct = &IndexKeyTable{
+ maxTimeInterval: filterRule.TimeInterval.Duration,
+ dataTableMap: make(DataTableMap),
+ }
+ tmpIndexKeyStructs[filterRule.IndexKey] = rStruct
+ log.Debugf("new IndexKeyTable: name = %s = %v", filterRule.IndexKey, *rStruct)
+ } else if filterRule.TimeInterval.Duration > rStruct.maxTimeInterval {
+ rStruct.maxTimeInterval = filterRule.TimeInterval.Duration
+ }
+ // verify the validity of the OperationType field in the filterRule
+ switch filterRule.OperationType {
+ case api.FilterOperationLast,
+ api.FilterOperationDiff,
+ api.FilterOperationCnt,
+ api.FilterOperationAvg,
+ api.FilterOperationMax,
+ api.FilterOperationMin,
+ api.FilterOperationSum:
+ // OK; nothing to do
+ default:
+ log.Errorf("illegal operation type %s", filterRule.OperationType)
+ continue
+ }
+ tmpFilter := FilterStruct{
+ Rule: filterRule,
+ IndexKeyDataTable: rStruct,
+ Results: make(filterOperationResults),
+ }
+ log.Debugf("new Rule = %v", tmpFilter)
+ tmpFilters = append(tmpFilters, tmpFilter)
+ }
+ return tmpIndexKeyStructs, tmpFilters
+}
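+
+// For instance, two rules that both use IndexKey "SrcAddr" (a hypothetical field name) but
+// declare TimeIntervals of 30s and 60s share a single IndexKeyTable whose maxTimeInterval
+// becomes 60s, while each rule keeps its own FilterStruct and Results map.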
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest.go
new file mode 100644
index 000000000..de804f1ac
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest.go
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package ingest
+
+import "github.com/netobserv/flowlogs-pipeline/pkg/config"
+
+type Ingester interface {
+ Ingest(out chan<- config.GenericMap)
+}
+type IngesterNone struct {
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go
new file mode 100644
index 000000000..bbe1a356d
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package ingest
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "net"
+
+ ms "github.com/mitchellh/mapstructure"
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/netsampler/goflow2/decoders/netflow/templates"
+ _ "github.com/netsampler/goflow2/decoders/netflow/templates/memory" // required for goflow in-memory templates
+ goflowFormat "github.com/netsampler/goflow2/format"
+ goflowCommonFormat "github.com/netsampler/goflow2/format/common"
+ _ "github.com/netsampler/goflow2/format/protobuf" // required for goflow protobuf
+ goflowpb "github.com/netsampler/goflow2/pb"
+ "github.com/netsampler/goflow2/utils"
+ log "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/proto"
+)
+
+const (
+ channelSize = 1000
+)
+
+type ingestCollector struct {
+ hostname string
+ port int
+ portLegacy int
+ in chan map[string]interface{}
+ exitChan <-chan struct{}
+ metrics *metrics
+}
+
+// TransportWrapper is an implementation of the goflow2 transport interface
+type TransportWrapper struct {
+ c chan map[string]interface{}
+}
+
+func NewWrapper(c chan map[string]interface{}) *TransportWrapper {
+ tw := TransportWrapper{c: c}
+ return &tw
+}
+
+func RenderMessage(message *goflowpb.FlowMessage) (map[string]interface{}, error) {
+ outputMap := make(map[string]interface{})
+ err := ms.Decode(message, &outputMap)
+ if err != nil {
+ return nil, err
+ }
+ outputMap["DstAddr"] = goflowCommonFormat.RenderIP(message.DstAddr)
+ outputMap["SrcAddr"] = goflowCommonFormat.RenderIP(message.SrcAddr)
+ outputMap["DstMac"] = renderMac(message.DstMac)
+ outputMap["SrcMac"] = renderMac(message.SrcMac)
+ return outputMap, nil
+}
+
+func renderMac(macValue uint64) string {
+ mac := make([]byte, 8)
+ binary.BigEndian.PutUint64(mac, macValue)
+ return net.HardwareAddr(mac[2:]).String()
+}
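+
+// For example, renderMac(0x0000AABBCCDDEEFF) returns "aa:bb:cc:dd:ee:ff": the uint64 is laid
+// out big-endian and the two leading zero bytes are dropped.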
+
+func (w *TransportWrapper) Send(_, data []byte) error {
+ message := goflowpb.FlowMessage{}
+ err := proto.Unmarshal(data, &message)
+ if err != nil {
+ // temporary fix
+ // A PR was submitted to log this error from goflow2:
+ // https://github.com/netsampler/goflow2/pull/86
+ log.Error(err)
+ return err
+ }
+ renderedMsg, err := RenderMessage(&message)
+ if err == nil {
+ w.c <- renderedMsg
+ }
+ return err
+}
+
+// Ingest ingests entries from a network collector using goflow2 library (https://github.com/netsampler/goflow2)
+func (c *ingestCollector) Ingest(out chan<- config.GenericMap) {
+ ctx := context.Background()
+ c.metrics.createOutQueueLen(out)
+
+ // initialize background listeners (a.k.a. netflow + legacy netflow collectors)
+ c.initCollectorListener(ctx)
+
+ // forever process log lines received by collector
+ c.processLogLines(out)
+}
+
+func (c *ingestCollector) initCollectorListener(ctx context.Context) {
+ transporter := NewWrapper(c.in)
+ formatter, err := goflowFormat.FindFormat(ctx, "pb")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if c.port > 0 {
+ // cf https://github.com/netsampler/goflow2/pull/49
+ tpl, err := templates.FindTemplateSystem(ctx, "memory")
+ if err != nil {
+ log.Fatalf("goflow2 error: could not find memory template system: %v", err)
+ }
+ defer tpl.Close(ctx)
+
+ go func() {
+ sNF := utils.NewStateNetFlow()
+ sNF.Format = formatter
+ sNF.Transport = transporter
+ sNF.Logger = log.StandardLogger()
+ sNF.TemplateSystem = tpl
+
+ log.Infof("listening for netflow on host %s, port = %d", c.hostname, c.port)
+ err = sNF.FlowRoutine(1, c.hostname, c.port, false)
+ log.Fatal(err)
+ }()
+ }
+
+ if c.portLegacy > 0 {
+ go func() {
+ sLegacyNF := utils.NewStateNFLegacy()
+ sLegacyNF.Format = formatter
+ sLegacyNF.Transport = transporter
+ sLegacyNF.Logger = log.StandardLogger()
+
+ log.Infof("listening for legacy netflow on host %s, port = %d", c.hostname, c.portLegacy)
+ err = sLegacyNF.FlowRoutine(1, c.hostname, c.portLegacy, false)
+ log.Fatal(err)
+ }()
+ }
+}
+
+func (c *ingestCollector) processLogLines(out chan<- config.GenericMap) {
+ for {
+ select {
+ case <-c.exitChan:
+ log.Debugf("exiting ingestCollector because of signal")
+ return
+ case record := <-c.in:
+ out <- record
+ }
+ }
+}
+
+// NewIngestCollector creates a new ingester
+func NewIngestCollector(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) {
+ jsonIngestCollector := api.IngestCollector{}
+ if params.Ingest != nil && params.Ingest.Collector != nil {
+ jsonIngestCollector = *params.Ingest.Collector
+ }
+ if jsonIngestCollector.HostName == "" {
+ return nil, fmt.Errorf("ingest hostname not specified")
+ }
+ if jsonIngestCollector.Port == 0 && jsonIngestCollector.PortLegacy == 0 {
+ return nil, fmt.Errorf("no ingest port specified")
+ }
+
+ log.Infof("hostname = %s", jsonIngestCollector.HostName)
+ log.Infof("port = %d", jsonIngestCollector.Port)
+ log.Infof("portLegacy = %d", jsonIngestCollector.PortLegacy)
+
+ in := make(chan map[string]interface{}, channelSize)
+ metrics := newMetrics(opMetrics, params.Name, params.Ingest.Type, func() int { return len(in) })
+
+ return &ingestCollector{
+ hostname: jsonIngestCollector.HostName,
+ port: jsonIngestCollector.Port,
+ portLegacy: jsonIngestCollector.PortLegacy,
+ exitChan: pUtils.ExitChannel(),
+ in: in,
+ metrics: metrics,
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go
new file mode 100644
index 000000000..f4d7ef3b5
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package ingest
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+type Fake struct {
+ Count int64
+ params config.Ingest
+ In chan config.GenericMap
+ exitChan <-chan struct{}
+}
+
+// Ingest reads records from an input channel and writes them as-is to the output channel
+func (inf *Fake) Ingest(out chan<- config.GenericMap) {
+ for {
+ select {
+ case <-inf.exitChan:
+ log.Debugf("exiting IngestFake because of signal")
+ return
+ case records := <-inf.In:
+ out <- records
+ atomic.AddInt64(&inf.Count, 1)
+ }
+ }
+}
+
+// NewIngestFake creates a new ingester
+func NewIngestFake(params config.StageParam) (Ingester, error) {
+ log.Debugf("entering NewIngestFake")
+ if params.Ingest == nil {
+ return nil, fmt.Errorf("ingest not specified")
+ }
+
+ return &Fake{
+ params: *params.Ingest,
+ In: make(chan config.GenericMap),
+ exitChan: utils.ExitChannel(),
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go
new file mode 100644
index 000000000..4d1127b6c
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package ingest
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ log "github.com/sirupsen/logrus"
+)
+
+type ingestFile struct {
+ params config.Ingest
+ decoder decode.Decoder
+ exitChan <-chan struct{}
+ PrevRecords []config.GenericMap
+ TotalRecords int
+}
+
+const (
+ delaySeconds = 10
+ chunkLines = 100
+)
+
+// Ingest ingests entries from a file and, when the stage type is file_loop, resends the same data every delaySeconds seconds
+func (ingestF *ingestFile) Ingest(out chan<- config.GenericMap) {
+ var filename string
+ if ingestF.params.File != nil {
+ filename = ingestF.params.File.Filename
+ }
+ var lines [][]byte
+ file, err := os.Open(filename)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func() {
+ _ = file.Close()
+ }()
+
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ text := scanner.Text()
+ log.Debugf("%s", text)
+ lines = append(lines, []byte(text))
+ }
+
+ log.Debugf("Ingesting %d log lines from %s", len(lines), filename)
+ switch ingestF.params.Type {
+ case "file":
+ ingestF.sendAllLines(lines, out)
+ case "file_loop":
+ // loop forever
+ ticker := time.NewTicker(time.Duration(delaySeconds) * time.Second)
+ for {
+ select {
+ case <-ingestF.exitChan:
+ log.Debugf("exiting ingestFile because of signal")
+ return
+ case <-ticker.C:
+ ingestF.sendAllLines(lines, out)
+ }
+ }
+ case "file_chunks":
+ // sends the lines in chunks. Useful for testing parallelization
+ ingestF.TotalRecords = len(lines)
+ for len(lines) > 0 {
+ if len(lines) > chunkLines {
+ ingestF.sendAllLines(lines[:chunkLines], out)
+ lines = lines[chunkLines:]
+ } else {
+ ingestF.sendAllLines(lines, out)
+ lines = nil
+ }
+ }
+ }
+}
+
+func (ingestF *ingestFile) sendAllLines(lines [][]byte, out chan<- config.GenericMap) {
+ log.Debugf("ingestFile sending %d lines", len(lines))
+ ingestF.TotalRecords = len(lines)
+ for _, line := range lines {
+ decoded, err := ingestF.decoder.Decode(line)
+ if err != nil {
+ log.WithError(err).Warnf("ignoring line")
+ continue
+ }
+ out <- decoded
+ }
+}
+
+// NewIngestFile creates a new ingester
+func NewIngestFile(params config.StageParam) (Ingester, error) {
+ log.Debugf("entering NewIngestFile")
+ if params.Ingest == nil || params.Ingest.File == nil || params.Ingest.File.Filename == "" {
+ return nil, fmt.Errorf("ingest filename not specified")
+ }
+
+ log.Debugf("input file name = %s", params.Ingest.File.Filename)
+ decoder, err := decode.GetDecoder(params.Ingest.File.Decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ingestFile{
+ params: *params.Ingest,
+ exitChan: utils.ExitChannel(),
+ decoder: decoder,
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_grpc.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_grpc.go
new file mode 100644
index 000000000..16afb8d76
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_grpc.go
@@ -0,0 +1,120 @@
+package ingest
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/decode"
+ grpc "github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
+
+ "github.com/sirupsen/logrus"
+ grpc2 "google.golang.org/grpc"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+var glog = logrus.WithField("component", "ingest.GRPCProtobuf")
+
+const (
+ defaultBufferLen = 100
+)
+
+// GRPCProtobuf ingests data from the NetObserv eBPF Agent, using Protocol Buffers over gRPC
+type GRPCProtobuf struct {
+ collector *grpc.CollectorServer
+ flowPackets chan *pbflow.Records
+ metrics *metrics
+}
+
+func NewGRPCProtobuf(opMetrics *operational.Metrics, params config.StageParam) (*GRPCProtobuf, error) {
+ netObserv := api.IngestGRPCProto{}
+ if params.Ingest != nil && params.Ingest.GRPC != nil {
+ netObserv = *params.Ingest.GRPC
+ }
+ if netObserv.Port == 0 {
+ return nil, fmt.Errorf("ingest port not specified")
+ }
+ bufLen := netObserv.BufferLen
+ if bufLen == 0 {
+ bufLen = defaultBufferLen
+ }
+ flowPackets := make(chan *pbflow.Records, bufLen)
+ metrics := newMetrics(opMetrics, params.Name, params.Ingest.Type, func() int { return len(flowPackets) })
+ collector, err := grpc.StartCollector(netObserv.Port, flowPackets,
+ grpc.WithGRPCServerOptions(grpc2.UnaryInterceptor(instrumentGRPC(metrics))))
+ if err != nil {
+ return nil, err
+ }
+ return &GRPCProtobuf{
+ collector: collector,
+ flowPackets: flowPackets,
+ metrics: metrics,
+ }, nil
+}
+
+func (no *GRPCProtobuf) Ingest(out chan<- config.GenericMap) {
+ no.metrics.createOutQueueLen(out)
+ go func() {
+ <-pUtils.ExitChannel()
+ close(no.flowPackets)
+ no.collector.Close()
+ }()
+ for fp := range no.flowPackets {
+ glog.Debugf("Ingested %v records", len(fp.Entries))
+ for _, entry := range fp.Entries {
+ out <- decode.PBFlowToMap(entry)
+ }
+ }
+}
+
+func (no *GRPCProtobuf) Close() error {
+ err := no.collector.Close()
+ close(no.flowPackets)
+ return err
+}
+
+func instrumentGRPC(m *metrics) grpc2.UnaryServerInterceptor {
+ return func(
+ ctx context.Context,
+ req interface{},
+ info *grpc2.UnaryServerInfo,
+ handler grpc2.UnaryHandler,
+ ) (resp interface{}, err error) {
+ timer := m.stageDurationTimer()
+ timeReceived := timer.Start()
+ if info.FullMethod != "/pbflow.Collector/Send" {
+ return handler(ctx, req)
+ }
+ flowRecords := req.(*pbflow.Records)
+
+ // instrument difference between flow time and ingest time
+ for _, entry := range flowRecords.Entries {
+ delay := timeReceived.Sub(entry.TimeFlowEnd.AsTime()).Seconds()
+ m.latency.Observe(delay)
+ }
+
+ // instrument flows processed counter
+ m.flowsProcessed.Add(float64(len(flowRecords.Entries)))
+
+ // instrument message bytes
+ m.batchSizeBytes.Observe(float64(proto.Size(flowRecords)))
+
+ resp, err = handler(ctx, req)
+ if err != nil {
+ // "trace" level used to minimize performance impact
+ glog.Tracef("Reporting metric error: %v", err)
+ m.error(utils.ConvertToString(status.Code(err)))
+ }
+
+ // Stage duration
+ timer.ObserveMilliseconds()
+
+ return resp, err
+ }
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_inprocess.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_inprocess.go
new file mode 100644
index 000000000..266b26247
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_inprocess.go
@@ -0,0 +1,29 @@
+package ingest
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+)
+
+// InProcess ingester is meant to be imported and used from another program
+// via pipeline.StartFLPInProcess
+type InProcess struct {
+ in chan config.GenericMap
+}
+
+func NewInProcess(in chan config.GenericMap) *InProcess {
+ return &InProcess{in: in}
+}
+
+func (d *InProcess) Ingest(out chan<- config.GenericMap) {
+ go func() {
+ <-utils.ExitChannel()
+ d.Close()
+ }()
+ for rec := range d.in {
+ out <- rec
+ }
+}
+
+func (d *InProcess) Close() {
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go
new file mode 100644
index 000000000..c5dac331a
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package ingest
+
+import (
+ "errors"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ kafkago "github.com/segmentio/kafka-go"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+var klog = logrus.WithField("component", "ingest.Kafka")
+
+type kafkaReadMessage interface {
+ ReadMessage(ctx context.Context) (kafkago.Message, error)
+ Config() kafkago.ReaderConfig
+ Stats() kafkago.ReaderStats
+}
+
+type ingestKafka struct {
+ kafkaReader kafkaReadMessage
+ decoder decode.Decoder
+ in chan []byte
+ exitChan <-chan struct{}
+ batchReadTimeout int64
+ batchMaxLength int
+ metrics *metrics
+ canLogMessages bool
+}
+
+const defaultBatchReadTimeout = int64(1000)
+const defaultKafkaBatchMaxLength = 500
+const defaultKafkaCommitInterval = 500
+
+const kafkaStatsPeriod = 15 * time.Second
+
+// Ingest ingests entries from a Kafka topic
+func (k *ingestKafka) Ingest(out chan<- config.GenericMap) {
+ klog.Debugf("entering ingestKafka.Ingest")
+ k.metrics.createOutQueueLen(out)
+
+ // initialize background listener
+ k.kafkaListener()
+
+ // forever process log lines received from Kafka
+ k.processLogLines(out)
+}
+
+// background thread to read kafka messages; place received items into ingestKafka input channel
+func (k *ingestKafka) kafkaListener() {
+ klog.Debugf("entering kafkaListener")
+
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ go k.reportStats()
+ }
+
+ go func() {
+ for {
+ if k.isStopped() {
+ klog.Info("gracefully exiting")
+ return
+ }
+ klog.Trace("fetching messages from Kafka")
+ // block until a message arrives
+ kafkaMessage, err := k.kafkaReader.ReadMessage(context.Background())
+ if err != nil {
+ klog.Errorln(err)
+ k.metrics.error("Cannot read message")
+ continue
+ }
+ if k.canLogMessages && logrus.IsLevelEnabled(logrus.TraceLevel) {
+ klog.Tracef("string(kafkaMessage) = %s\n", string(kafkaMessage.Value))
+ }
+ k.metrics.flowsProcessed.Inc()
+ messageLen := len(kafkaMessage.Value)
+ k.metrics.batchSizeBytes.Observe(float64(messageLen) + float64(len(kafkaMessage.Key)))
+ if messageLen > 0 {
+ // process message
+ k.in <- kafkaMessage.Value
+ }
+ }
+ }()
+}
+
+func (k *ingestKafka) isStopped() bool {
+ select {
+ case <-k.exitChan:
+ return true
+ default:
+ return false
+ }
+}
+
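+// processRecordDelay feeds the ingest latency metric with the delay between the flow end time and the current time.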
+func (k *ingestKafka) processRecordDelay(record config.GenericMap) {
+ timeFlowEndInterface, ok := record["TimeFlowEndMs"]
+ if !ok {
+ // "trace" level used to minimize performance impact
+ klog.Tracef("TimeFlowEndMs missing in record %v", record)
+ k.metrics.error("TimeFlowEndMs missing")
+ return
+ }
+ timeFlowEnd, ok := timeFlowEndInterface.(int64)
+ if !ok {
+ // "trace" level used to minimize performance impact
+ klog.Tracef("Cannot parse TimeFlowEndMs of record %v", record)
+ k.metrics.error("Cannot parse TimeFlowEndMs")
+ return
+ }
+ delay := time.Since(time.UnixMilli(timeFlowEnd)).Seconds()
+ k.metrics.latency.Observe(delay)
+}
+
+func (k *ingestKafka) processRecord(record []byte, out chan<- config.GenericMap) {
+ // Decode batch
+ decoded, err := k.decoder.Decode(record)
+ if err != nil {
+ klog.WithError(err).Warnf("ignoring flow")
+ return
+ }
+ k.processRecordDelay(decoded)
+
+ // Send batch
+ out <- decoded
+}
+
+// read items from the ingestKafka input channel and send them down the pipeline
+func (k *ingestKafka) processLogLines(out chan<- config.GenericMap) {
+ for {
+ select {
+ case <-k.exitChan:
+ klog.Debugf("exiting ingestKafka because of signal")
+ return
+ case record := <-k.in:
+ k.processRecord(record, out)
+ }
+ }
+}
+
+// reportStats periodically reports kafka stats
+func (k *ingestKafka) reportStats() {
+ ticker := time.NewTicker(kafkaStatsPeriod)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-k.exitChan:
+			klog.Debug("gracefully exiting stats reporter")
+			return
+ case <-ticker.C:
+ klog.Debugf("reader stats: %#v", k.kafkaReader.Stats())
+ }
+ }
+}
+
+// NewIngestKafka creates a new Kafka ingester
+// nolint:cyclop
+func NewIngestKafka(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) {
+ klog.Debugf("entering NewIngestKafka")
+ jsonIngestKafka := api.IngestKafka{}
+ var ingestType string
+ if params.Ingest != nil {
+ ingestType = params.Ingest.Type
+ if params.Ingest.Kafka != nil {
+ jsonIngestKafka = *params.Ingest.Kafka
+ }
+ }
+
+ // connect to the kafka server
+ startOffsetString := jsonIngestKafka.StartOffset
+ var startOffset int64
+ switch startOffsetString {
+ case "FirstOffset", "":
+ startOffset = kafkago.FirstOffset
+ case "LastOffset":
+ startOffset = kafkago.LastOffset
+ default:
+ startOffset = kafkago.FirstOffset
+ klog.Errorf("illegal value for StartOffset: %s\n", startOffsetString)
+ }
+ klog.Debugf("startOffset = %v", startOffset)
+ groupBalancers := make([]kafkago.GroupBalancer, 0)
+ for _, gb := range jsonIngestKafka.GroupBalancers {
+ switch gb {
+ case "range":
+ groupBalancers = append(groupBalancers, &kafkago.RangeGroupBalancer{})
+ case "roundRobin":
+ groupBalancers = append(groupBalancers, &kafkago.RoundRobinGroupBalancer{})
+ case "rackAffinity":
+ groupBalancers = append(groupBalancers, &kafkago.RackAffinityGroupBalancer{})
+ default:
+			klog.Warningf("unknown group balancer %s; defaulting to roundRobin", gb)
+ groupBalancers = append(groupBalancers, &kafkago.RoundRobinGroupBalancer{})
+ }
+ }
+
+ batchReadTimeout := defaultBatchReadTimeout
+ if jsonIngestKafka.BatchReadTimeout != 0 {
+ batchReadTimeout = jsonIngestKafka.BatchReadTimeout
+ }
+ klog.Infof("batchReadTimeout = %d", batchReadTimeout)
+
+ commitInterval := int64(defaultKafkaCommitInterval)
+ if jsonIngestKafka.CommitInterval != 0 {
+ commitInterval = jsonIngestKafka.CommitInterval
+ }
+	klog.Infof("commitInterval = %d", commitInterval)
+
+ dialer := &kafkago.Dialer{
+ Timeout: kafkago.DefaultDialer.Timeout,
+ DualStack: kafkago.DefaultDialer.DualStack,
+ }
+ if jsonIngestKafka.TLS != nil {
+ klog.Infof("Using TLS configuration: %v", jsonIngestKafka.TLS)
+ tlsConfig, err := jsonIngestKafka.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ dialer.TLS = tlsConfig
+ }
+
+ if jsonIngestKafka.SASL != nil {
+ m, err := utils.SetupSASLMechanism(jsonIngestKafka.SASL)
+ if err != nil {
+ return nil, err
+ }
+ dialer.SASLMechanism = m
+ }
+
+ readerConfig := kafkago.ReaderConfig{
+ Brokers: jsonIngestKafka.Brokers,
+ Topic: jsonIngestKafka.Topic,
+ GroupID: jsonIngestKafka.GroupID,
+ GroupBalancers: groupBalancers,
+ StartOffset: startOffset,
+ CommitInterval: time.Duration(commitInterval) * time.Millisecond,
+ Dialer: dialer,
+ }
+
+ if jsonIngestKafka.PullQueueCapacity > 0 {
+ readerConfig.QueueCapacity = jsonIngestKafka.PullQueueCapacity
+ }
+
+ if jsonIngestKafka.PullMaxBytes > 0 {
+ readerConfig.MaxBytes = jsonIngestKafka.PullMaxBytes
+ }
+
+ klog.Debugf("reader config: %#v", readerConfig)
+
+ kafkaReader := kafkago.NewReader(readerConfig)
+ if kafkaReader == nil {
+ errMsg := "NewIngestKafka: failed to create kafka-go reader"
+ klog.Errorf("%s", errMsg)
+ return nil, errors.New(errMsg)
+ }
+
+ decoder, err := decode.GetDecoder(jsonIngestKafka.Decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ bml := defaultKafkaBatchMaxLength
+ if jsonIngestKafka.BatchMaxLen != 0 {
+ bml = jsonIngestKafka.BatchMaxLen
+ }
+
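+	// the input channel is buffered at twice the max batch length, presumably to absorb short bursts from the listener goroutine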
+ in := make(chan []byte, 2*bml)
+ metrics := newMetrics(opMetrics, params.Name, ingestType, func() int { return len(in) })
+
+ return &ingestKafka{
+ kafkaReader: kafkaReader,
+ decoder: decoder,
+ exitChan: utils.ExitChannel(),
+ in: in,
+ batchMaxLength: bml,
+ batchReadTimeout: batchReadTimeout,
+ metrics: metrics,
+ canLogMessages: jsonIngestKafka.Decoder.Type == api.DecoderJSON,
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go
new file mode 100644
index 000000000..1b425cf4d
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package ingest
+
+import (
+ "bufio"
+ "os"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode"
+ pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ stdinChannelSize = 1000
+)
+
+var slog = logrus.WithField("component", "ingest.Stdin")
+
+type ingestStdin struct {
+ in chan string
+ eof chan struct{}
+ exitChan <-chan struct{}
+ metrics *metrics
+ decoder decode.Decoder
+}
+
+// Ingest ingests entries from stdin
+func (s *ingestStdin) Ingest(out chan<- config.GenericMap) {
+ slog.Debugf("entering ingestStdin.Ingest")
+ s.metrics.createOutQueueLen(out)
+
+ go s.getStdinInput()
+
+	// process log lines received on stdin
+ s.processLogLines(out)
+}
+
+func (s *ingestStdin) getStdinInput() {
+ scanner := bufio.NewScanner(os.Stdin)
+ // Loop to read lines from stdin until an error or EOF is encountered
+ for scanner.Scan() {
+ s.in <- scanner.Text()
+ }
+
+ // Check for errors
+ if err := scanner.Err(); err != nil {
+ slog.WithError(err).Errorf("Error reading standard input")
+ }
+ close(s.eof)
+}
+
+func (s *ingestStdin) processLogLines(out chan<- config.GenericMap) {
+ for {
+ select {
+ case <-s.exitChan:
+ slog.Debugf("exiting ingestStdin because of signal")
+ return
+ case <-s.eof:
+ slog.Debugf("exiting ingestStdin because of EOF")
+ return
+ case line := <-s.in:
+ s.processRecord(out, line)
+ }
+ }
+}
+
+func (s *ingestStdin) processRecord(out chan<- config.GenericMap, line string) {
+ slog.Debugf("Decoding %s", line)
+ decoded, err := s.decoder.Decode([]byte(line))
+ if err != nil {
+ slog.WithError(err).Warnf("ignoring line %v", line)
+ s.metrics.error("Ignoring line")
+ return
+ }
+ s.metrics.flowsProcessed.Inc()
+ out <- decoded
+}
+
+// NewIngestStdin creates a new stdin ingester
+func NewIngestStdin(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) {
+ slog.Debugf("Entering NewIngestStdin")
+
+ in := make(chan string, stdinChannelSize)
+ eof := make(chan struct{})
+ metrics := newMetrics(opMetrics, params.Name, params.Ingest.Type, func() int { return len(in) })
+ decoderParams := api.Decoder{Type: api.DecoderJSON}
+ decoder, err := decode.GetDecoder(decoderParams)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ingestStdin{
+ exitChan: pUtils.ExitChannel(),
+ in: in,
+ eof: eof,
+ metrics: metrics,
+ decoder: decoder,
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go
new file mode 100644
index 000000000..66ee85724
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package ingest
+
+import (
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/prometheus/client_golang/prometheus"
+ log "github.com/sirupsen/logrus"
+)
+
+type ingestSynthetic struct {
+ params api.IngestSynthetic
+ exitChan <-chan struct{}
+ flowLogsProcessed prometheus.Counter
+}
+
+const (
+ defaultConnections = 100
+ defaultBatchLen = 10
+ defaultFlowLogsPerMin = 2000
+)
+
+var (
+ flowLogsProcessed = operational.DefineMetric(
+ "ingest_synthetic_flows_processed",
+ "Number of flow logs processed",
+ operational.TypeCounter,
+ "stage",
+ )
+)
+
+// Ingest generates flow logs according to provided parameters
+func (ingestS *ingestSynthetic) Ingest(out chan<- config.GenericMap) {
+ log.Debugf("entering IngestSynthetic Ingest, params = %v", ingestS.params)
+ // get a list of flow log entries, one per desired connection
+ // these flow logs will be sent again and again to simulate ongoing traffic on those connections
+ flowLogs := utils.GenerateConnectionFlowEntries(ingestS.params.Connections)
+ nLogs := len(flowLogs)
+ next := 0
+
+ // compute time interval between batches; divide BatchMaxLen by FlowLogsPerMin and adjust the types
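+	// for example, with the defaults BatchMaxLen=10 and FlowLogsPerMin=2000, this gives 10*60s/2000 = 300ms between batches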
+ ticker := time.NewTicker(time.Duration(int(time.Minute*time.Duration(ingestS.params.BatchMaxLen)) / ingestS.params.FlowLogsPerMin))
+
+ // loop forever
+ for {
+ select {
+ case <-ingestS.exitChan:
+ log.Debugf("exiting IngestSynthetic because of signal")
+ return
+ case <-ticker.C:
+ log.Debugf("sending a batch of %d flow logs from index %d", ingestS.params.BatchMaxLen, next)
+ for i := 0; i < ingestS.params.BatchMaxLen; i++ {
+ out <- flowLogs[next]
+ ingestS.flowLogsProcessed.Inc()
+ next++
+ if next >= nLogs {
+ next = 0
+ }
+ }
+ }
+ }
+}
+
+// NewIngestSynthetic creates a new synthetic-flows ingester
+func NewIngestSynthetic(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) {
+ log.Debugf("entering NewIngestSynthetic")
+ confIngestSynthetic := api.IngestSynthetic{}
+ if params.Ingest != nil && params.Ingest.Synthetic != nil {
+ confIngestSynthetic = *params.Ingest.Synthetic
+ }
+ if confIngestSynthetic.Connections == 0 {
+ confIngestSynthetic.Connections = defaultConnections
+ }
+ if confIngestSynthetic.FlowLogsPerMin == 0 {
+ confIngestSynthetic.FlowLogsPerMin = defaultFlowLogsPerMin
+ }
+ if confIngestSynthetic.BatchMaxLen == 0 {
+ confIngestSynthetic.BatchMaxLen = defaultBatchLen
+ }
+ log.Debugf("params = %v", confIngestSynthetic)
+
+ return &ingestSynthetic{
+ params: confIngestSynthetic,
+ exitChan: utils.ExitChannel(),
+ flowLogsProcessed: opMetrics.NewCounter(&flowLogsProcessed, params.Name),
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/metrics.go
new file mode 100644
index 000000000..d59f761f2
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/metrics.go
@@ -0,0 +1,74 @@
+package ingest
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ latencyHistogram = operational.DefineMetric(
+ "ingest_latency_ms",
+ "Latency between flow end time and ingest time, in milliseconds",
+ operational.TypeHistogram,
+ "stage",
+ )
+ flowsProcessedCounter = operational.DefineMetric(
+ "ingest_flows_processed",
+ "Number of flows received by the ingester",
+ operational.TypeCounter,
+ "stage",
+ )
+ batchSizeBytesSummary = operational.DefineMetric(
+ "ingest_batch_size_bytes",
+ "Ingested batch size distribution, in bytes",
+ operational.TypeSummary,
+ "stage",
+ )
+ errorsCounter = operational.DefineMetric(
+ "ingest_errors",
+ "Counter of errors during ingestion",
+ operational.TypeCounter,
+ "stage", "type", "code",
+ )
+)
+
+type metrics struct {
+ *operational.Metrics
+ stage string
+ stageType string
+ stageDuration prometheus.Observer
+ latency prometheus.Histogram
+ flowsProcessed prometheus.Counter
+ batchSizeBytes prometheus.Summary
+ errors *prometheus.CounterVec
+}
+
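+// newMetrics registers the per-stage ingest metrics and the input queue size gauge.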
+func newMetrics(opMetrics *operational.Metrics, stage, stageType string, inGaugeFunc func() int) *metrics {
+ opMetrics.CreateInQueueSizeGauge(stage, inGaugeFunc)
+ return &metrics{
+ Metrics: opMetrics,
+ stage: stage,
+ stageType: stageType,
+ latency: opMetrics.NewHistogram(&latencyHistogram, []float64{.001, .01, .1, 1, 10, 100, 1000, 10000}, stage),
+ stageDuration: opMetrics.GetOrCreateStageDurationHisto().WithLabelValues(stage),
+ flowsProcessed: opMetrics.NewCounter(&flowsProcessedCounter, stage),
+ batchSizeBytes: opMetrics.NewSummary(&batchSizeBytesSummary, stage),
+ errors: opMetrics.NewCounterVec(&errorsCounter),
+ }
+}
+
+func (m *metrics) createOutQueueLen(out chan<- config.GenericMap) {
+ m.CreateOutQueueSizeGauge(m.stage, func() int { return len(out) })
+}
+
+// Increment error counter
+// `code` should reflect any error code relative to this type. It can be a short string message,
+// but make sure to not include any dynamic value with high cardinality
+func (m *metrics) error(code string) {
+ m.errors.WithLabelValues(m.stage, m.stageType, code).Inc()
+}
+
+func (m *metrics) stageDurationTimer() *operational.Timer {
+ return operational.NewTimer(m.stageDuration)
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/inprocess.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/inprocess.go
new file mode 100644
index 000000000..9b578610b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/inprocess.go
@@ -0,0 +1,32 @@
+package pipeline
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest"
+ "github.com/netobserv/flowlogs-pipeline/pkg/prometheus"
+)
+
+// StartFLPInProcess is an entry point for starting the whole FLP pipeline from code that imports this package
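+// A minimal usage sketch from the caller side (hypothetical names, not part of this package):
+//
+//	in := make(chan config.GenericMap, 100)
+//	if err := pipeline.StartFLPInProcess(cfg, in); err != nil {
+//		// handle the error
+//	}
+//	in <- someFlow // records sent here traverse the configured pipeline stages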
+func StartFLPInProcess(cfg *config.ConfigFileStruct, in chan config.GenericMap) error {
+ promServer := prometheus.InitializePrometheus(&cfg.MetricsSettings)
+
+ // Create new flows pipeline
+ ingester := ingest.NewInProcess(in)
+ flp, err := newPipelineFromIngester(cfg, ingester)
+ if err != nil {
+		return fmt.Errorf("failed to initialize pipeline: %w", err)
+ }
+
+	// flp.Run() is a blocking call, so run it in a goroutine and return immediately
+ go func() {
+ flp.Run()
+ if promServer != nil {
+ _ = promServer.Shutdown(context.Background())
+ }
+ }()
+
+ return nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline.go
new file mode 100644
index 000000000..35cde905a
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline.go
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2019 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package pipeline
+
+import (
+ "fmt"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest"
+ "github.com/netobserv/gopipes/pkg/node"
+ log "github.com/sirupsen/logrus"
+)
+
+// names of the pipeline stage types
+const (
+ StageIngest = "ingest"
+ StageTransform = "transform"
+ StageExtract = "extract"
+ StageEncode = "encode"
+ StageWrite = "write"
+)
+
+// Pipeline manager
+type Pipeline struct {
+ startNodes []*node.Start[config.GenericMap]
+ terminalNodes []*node.Terminal[config.GenericMap]
+ pipelineEntryMap map[string]*pipelineEntry
+ IsRunning bool
+ // TODO: this field is only used for test verification. We should rewrite the build process
+ // to be able to remove it from here
+ pipelineStages []*pipelineEntry
+ Metrics *operational.Metrics
+ configWatcher *pipelineConfigWatcher
+}
+
+// NewPipeline builds the pipeline elements from the given configuration
+func NewPipeline(cfg *config.ConfigFileStruct) (*Pipeline, error) {
+ return newPipelineFromIngester(cfg, nil)
+}
+
+// newPipelineFromIngester builds the pipeline elements, optionally using a preset ingester (e.g. for the in-process receiver)
+func newPipelineFromIngester(cfg *config.ConfigFileStruct, ing ingest.Ingester) (*Pipeline, error) {
+ log.Debugf("entering newPipelineFromIngester")
+
+ log.Debugf("stages = %v ", cfg.Pipeline)
+ log.Debugf("configParams = %v ", cfg.Parameters)
+
+ builder := newBuilder(cfg)
+ if ing != nil {
+ builder.presetIngester(ing)
+ }
+ if err := builder.readStages(); err != nil {
+ return nil, err
+ }
+ pipeline, err := builder.build()
+ if err != nil {
+ return nil, err
+ }
+ pipeline.configWatcher, err = newPipelineConfigWatcher(cfg, pipeline.pipelineEntryMap)
+ return pipeline, err
+}
+
+func (p *Pipeline) Run() {
+ // starting the graph
+ for _, s := range p.startNodes {
+ s.Start()
+ }
+ p.IsRunning = true
+
+ if p.configWatcher != nil {
+ go p.configWatcher.Run()
+ }
+
+ // blocking the execution until the graph terminal stages end
+ for _, t := range p.terminalNodes {
+ <-t.Done()
+ }
+ p.IsRunning = false
+}
+
+func (p *Pipeline) IsReady() error {
+ if !p.IsRunning {
+ return fmt.Errorf("pipeline is not running")
+ }
+ return nil
+}
+
+func (p *Pipeline) IsAlive() error {
+ if !p.IsRunning {
+ return fmt.Errorf("pipeline is not running")
+ }
+ return nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go
new file mode 100644
index 000000000..cfffc894f
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go
@@ -0,0 +1,516 @@
+package pipeline
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write"
+ k8sutils "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ "github.com/netobserv/gopipes/pkg/node"
+ "github.com/prometheus/client_golang/prometheus"
+ log "github.com/sirupsen/logrus"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+)
+
+const (
+ defaultNodeBufferLen = 1000
+ defaultExtractBatching = 1000
+ defaultExtractBatchingTimeout = 5 * time.Second
+)
+
+// Error wraps any error caused by a malformed pipeline definition
+type Error struct {
+ StageName string
+ wrapped error
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("pipeline stage %q: %s", e.StageName, e.wrapped.Error())
+}
+
+func (e *Error) Unwrap() error {
+ return e.wrapped
+}
+
+// builder stores the information that is only required during the build of the pipeline
+type builder struct {
+ pipelineStages []*pipelineEntry
+ configStages []config.Stage
+ configParams []config.StageParam
+ pipelineEntryMap map[string]*pipelineEntry
+ createdStages map[string]interface{}
+ startNodes []*node.Start[config.GenericMap]
+ terminalNodes []*node.Terminal[config.GenericMap]
+ opMetrics *operational.Metrics
+ stageDuration *prometheus.HistogramVec
+ batchMaxLen int
+ batchTimeout time.Duration
+ nodeBufferLen int
+ updtChans map[string]chan config.StageParam
+}
+
+type pipelineEntry struct {
+ stageName string
+ stageType string
+ Ingester ingest.Ingester
+ Transformer transform.Transformer
+ Extractor extract.Extractor
+ Encoder encode.Encoder
+ Writer write.Writer
+}
+
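+// getDynConfig fetches additional stage parameters from the ConfigMap referenced by DynamicParameters.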
+func getDynConfig(cfg *config.ConfigFileStruct) ([]config.StageParam, error) {
+ k8sconfig, err := k8sutils.LoadK8sConfig(cfg.DynamicParameters.KubeConfigPath)
+ if err != nil {
+ log.Errorf("Cannot get k8s config: %v", err)
+ return nil, err
+ }
+
+ clientset, err := kubernetes.NewForConfig(k8sconfig)
+ if err != nil {
+ log.Errorf("Cannot init k8s config: %v", err)
+ return nil, err
+ }
+ cm, err := clientset.CoreV1().ConfigMaps(cfg.DynamicParameters.Namespace).Get(context.TODO(), cfg.DynamicParameters.Name, metav1.GetOptions{})
+ if err != nil {
+ log.Errorf("Cannot get dynamic config: %v", err)
+ return nil, err
+ }
+ rawConfig, ok := cm.Data[cfg.DynamicParameters.FileName]
+ if !ok {
+		err = fmt.Errorf("cannot find file %q in ConfigMap %q", cfg.DynamicParameters.FileName, cfg.DynamicParameters.Name)
+		log.Error(err)
+		return nil, err
+ }
+ dynConfig := config.HotReloadStruct{}
+ err = json.Unmarshal([]byte(rawConfig), &dynConfig)
+ if err != nil {
+ log.Errorf("Cannot parse config: %v", err)
+ return nil, err
+ }
+ return dynConfig.Parameters, nil
+}
+
+func newBuilder(cfg *config.ConfigFileStruct) *builder {
+ // Get global metrics settings
+ opMetrics := operational.NewMetrics(&cfg.MetricsSettings)
+ stageDuration := opMetrics.GetOrCreateStageDurationHisto()
+
+ bl := cfg.PerfSettings.BatcherMaxLen
+ if bl == 0 {
+ bl = defaultExtractBatching
+ }
+ bt := cfg.PerfSettings.BatcherTimeout
+ if bt == 0 {
+ bt = defaultExtractBatchingTimeout
+ }
+ nb := cfg.PerfSettings.NodeBufferLen
+ if nb == 0 {
+ nb = defaultNodeBufferLen
+ }
+
+ if cfg.DynamicParameters.Name != "" &&
+ cfg.DynamicParameters.Namespace != "" &&
+ cfg.DynamicParameters.FileName != "" {
+ dynParameters, err := getDynConfig(cfg)
+ if err == nil {
+ cfg.Parameters = append(cfg.Parameters, dynParameters...)
+ }
+ }
+
+ return &builder{
+ pipelineEntryMap: map[string]*pipelineEntry{},
+ createdStages: map[string]interface{}{},
+ configStages: cfg.Pipeline,
+ configParams: cfg.Parameters,
+ opMetrics: opMetrics,
+ stageDuration: stageDuration,
+ batchMaxLen: bl,
+ batchTimeout: bt,
+ nodeBufferLen: nb,
+ updtChans: map[string]chan config.StageParam{},
+ }
+}
+
+// use a preset ingester
+func (b *builder) presetIngester(ing ingest.Ingester) {
+ name := config.PresetIngesterStage
+ log.Debugf("stage = %v", name)
+ b.appendEntry(&pipelineEntry{
+ stageName: name,
+ stageType: StageIngest,
+ Ingester: ing,
+ })
+}
+
+// read the configuration stages definition and instantiate the corresponding native Go objects
+func (b *builder) readStages() error {
+ for _, param := range b.configParams {
+ log.Debugf("stage = %v", param.Name)
+ pEntry := pipelineEntry{
+ stageName: param.Name,
+ stageType: findStageType(¶m),
+ }
+ var err error
+ switch pEntry.stageType {
+ case StageIngest:
+ pEntry.Ingester, err = getIngester(b.opMetrics, param)
+ case StageTransform:
+ pEntry.Transformer, err = getTransformer(b.opMetrics, param)
+ case StageExtract:
+ pEntry.Extractor, err = getExtractor(b.opMetrics, param)
+ case StageEncode:
+ pEntry.Encoder, err = getEncoder(b.opMetrics, param)
+ case StageWrite:
+ pEntry.Writer, err = getWriter(b.opMetrics, param)
+ default:
+ err = fmt.Errorf("invalid stage type: %v, stage name: %v", pEntry.stageType, pEntry.stageName)
+ }
+ if err != nil {
+ return err
+ }
+ b.appendEntry(&pEntry)
+ }
+ log.Debugf("pipeline = %v", b.pipelineStages)
+ return nil
+}
+
+func (b *builder) appendEntry(pEntry *pipelineEntry) {
+ b.pipelineEntryMap[pEntry.stageName] = pEntry
+ b.pipelineStages = append(b.pipelineStages, pEntry)
+ log.Debugf("pipeline = %v", b.pipelineStages)
+}
+
+// build connects the instantiated Go stages to each other.
+// readStages must be invoked before this.
+func (b *builder) build() (*Pipeline, error) {
+	// tracks start and middle nodes that send data to another node
+ sendingNodes := map[string]struct{}{}
+	// tracks middle and terminal nodes that receive data from another node
+ receivingNodes := map[string]struct{}{}
+ for _, connection := range b.configStages {
+ if connection.Name == "" || connection.Follows == "" {
+ // ignore entries that do not represent a connection
+ continue
+ }
+ // instantiates (or loads from cache) the destination node of a connection
+ dstEntry, ok := b.pipelineEntryMap[connection.Name]
+ if !ok {
+ return nil, fmt.Errorf("unknown pipeline stage: %s", connection.Name)
+ }
+ dstNode, err := b.getStageNode(dstEntry, connection.Name)
+ if err != nil {
+ return nil, err
+ }
+ dst, ok := dstNode.(node.Receiver[config.GenericMap])
+ if !ok {
+ return nil, fmt.Errorf("stage %q of type %q can't receive data",
+ connection.Name, dstEntry.stageType)
+ }
+ // instantiates (or loads from cache) the source node of a connection
+ srcEntry, ok := b.pipelineEntryMap[connection.Follows]
+ if !ok {
+ return nil, fmt.Errorf("unknown pipeline stage: %s", connection.Follows)
+ }
+ srcNode, err := b.getStageNode(srcEntry, connection.Follows)
+ if err != nil {
+ return nil, err
+ }
+ src, ok := srcNode.(node.Sender[config.GenericMap])
+ if !ok {
+ return nil, fmt.Errorf("stage %q of type %q can't send data",
+ connection.Follows, srcEntry.stageType)
+ }
+ log.Infof("connecting stages: %s --> %s", connection.Follows, connection.Name)
+
+ sendingNodes[connection.Follows] = struct{}{}
+ receivingNodes[connection.Name] = struct{}{}
+ // connects source and destination node, and catches any panic from the Go-Pipes library.
+ var catchErr *Error
+ func() {
+ defer func() {
+ if msg := recover(); msg != nil {
+ catchErr = &Error{
+ StageName: connection.Name,
+						wrapped: fmt.Errorf("%q and %q stages have incompatible inputs/outputs: %v",
+ connection.Follows, connection.Name, msg),
+ }
+ }
+ }()
+ src.SendsTo(dst)
+ }()
+ if catchErr != nil {
+ return nil, catchErr
+ }
+ }
+
+ if err := b.verifyConnections(sendingNodes, receivingNodes); err != nil {
+ return nil, err
+ }
+ if len(b.startNodes) == 0 {
+ return nil, errors.New("no ingesters have been defined")
+ }
+ if len(b.terminalNodes) == 0 {
+ return nil, errors.New("no writers have been defined")
+ }
+ return &Pipeline{
+ startNodes: b.startNodes,
+ terminalNodes: b.terminalNodes,
+ pipelineStages: b.pipelineStages,
+ pipelineEntryMap: b.pipelineEntryMap,
+ Metrics: b.opMetrics,
+ }, nil
+}
+
+// verifies that all the start and middle nodes send data to another node
+// verifies that all the middle and terminal nodes receive data from another node
+func (b *builder) verifyConnections(sendingNodes, receivingNodes map[string]struct{}) error {
+ for _, stg := range b.pipelineStages {
+ if isReceptor(stg) {
+ if _, ok := receivingNodes[stg.stageName]; !ok {
+ return &Error{
+ StageName: stg.stageName,
+					wrapped: fmt.Errorf("pipeline stage of type %q"+
+						" should receive data from at least one other stage", stg.stageType),
+ }
+ }
+ }
+ if isSender(stg) {
+ if _, ok := sendingNodes[stg.stageName]; !ok {
+ return &Error{
+ StageName: stg.stageName,
+					wrapped: fmt.Errorf("pipeline stage of type %q"+
+						" should send data to at least one other stage", stg.stageType),
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func isReceptor(p *pipelineEntry) bool {
+ return p.stageType != StageIngest
+}
+
+func isSender(p *pipelineEntry) bool {
+ return p.stageType != StageWrite && p.stageType != StageEncode
+}
+
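+// runMeasured runs f and records its duration, in milliseconds, in the per-stage duration histogram.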
+func (b *builder) runMeasured(name string, f func()) {
+ start := time.Now()
+ f()
+ duration := time.Since(start)
+ b.stageDuration.WithLabelValues(name).Observe(float64(duration.Milliseconds()))
+}
+
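+// getStageNode instantiates, or returns from cache, the gopipes node wrapping the given pipeline entry.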
+func (b *builder) getStageNode(pe *pipelineEntry, stageID string) (interface{}, error) {
+ if stg, ok := b.createdStages[stageID]; ok {
+ return stg, nil
+ }
+ var stage interface{}
+ // TODO: modify all the types' interfaces to not need to write loops here, the same
+ // as we do with Ingest
+ switch pe.stageType {
+ case StageIngest:
+ init := node.AsStart(pe.Ingester.Ingest)
+ b.startNodes = append(b.startNodes, init)
+ stage = init
+ case StageWrite:
+ term := node.AsTerminal(func(in <-chan config.GenericMap) {
+ b.opMetrics.CreateInQueueSizeGauge(stageID, func() int { return len(in) })
+ for i := range in {
+ b.runMeasured(stageID, func() {
+ pe.Writer.Write(i)
+ })
+ }
+ }, node.ChannelBufferLen(b.nodeBufferLen))
+ b.terminalNodes = append(b.terminalNodes, term)
+ stage = term
+ case StageEncode:
+ encode := node.AsTerminal(func(in <-chan config.GenericMap) {
+ b.opMetrics.CreateInQueueSizeGauge(stageID, func() int { return len(in) })
+ for i := range in {
+ b.runMeasured(stageID, func() {
+ pe.Encoder.Encode(i)
+ })
+ }
+ }, node.ChannelBufferLen(b.nodeBufferLen))
+ b.terminalNodes = append(b.terminalNodes, encode)
+ stage = encode
+ case StageTransform:
+ stage = node.AsMiddle(func(in <-chan config.GenericMap, out chan<- config.GenericMap) {
+ b.opMetrics.CreateInQueueSizeGauge(stageID, func() int { return len(in) })
+ b.opMetrics.CreateOutQueueSizeGauge(stageID, func() int { return len(out) })
+ for i := range in {
+ b.runMeasured(stageID, func() {
+ if transformed, ok := pe.Transformer.Transform(i); ok {
+ out <- transformed
+ }
+ })
+ }
+ }, node.ChannelBufferLen(b.nodeBufferLen))
+ case StageExtract:
+ stage = node.AsMiddle(func(in <-chan config.GenericMap, out chan<- config.GenericMap) {
+ b.opMetrics.CreateInQueueSizeGauge(stageID, func() int { return len(in) })
+ b.opMetrics.CreateOutQueueSizeGauge(stageID, func() int { return len(out) })
+ // TODO: replace batcher by rewriting the different extractor implementations
+ // to keep the status while processing flows one by one
+ utils.Batcher(utils.ExitChannel(), b.batchMaxLen, b.batchTimeout, in,
+ func(maps []config.GenericMap) {
+ outs := pe.Extractor.Extract(maps)
+ for _, o := range outs {
+ out <- o
+ }
+ },
+ )
+ }, node.ChannelBufferLen(b.nodeBufferLen))
+ default:
+ return nil, &Error{
+ StageName: stageID,
+ wrapped: fmt.Errorf("invalid stage type: %s", pe.stageType),
+ }
+ }
+ b.createdStages[stageID] = stage
+ return stage, nil
+}
+
+func getIngester(opMetrics *operational.Metrics, params config.StageParam) (ingest.Ingester, error) {
+ var ingester ingest.Ingester
+ var err error
+ switch params.Ingest.Type {
+ case api.FileType, api.FileLoopType, api.FileChunksType:
+ ingester, err = ingest.NewIngestFile(params)
+ case api.SyntheticType:
+ ingester, err = ingest.NewIngestSynthetic(opMetrics, params)
+ case api.CollectorType:
+ ingester, err = ingest.NewIngestCollector(opMetrics, params)
+ case api.StdinType:
+ ingester, err = ingest.NewIngestStdin(opMetrics, params)
+ case api.KafkaType:
+ ingester, err = ingest.NewIngestKafka(opMetrics, params)
+ case api.GRPCType:
+ ingester, err = ingest.NewGRPCProtobuf(opMetrics, params)
+ case api.FakeType:
+ ingester, err = ingest.NewIngestFake(params)
+ default:
+ panic(fmt.Sprintf("`ingest` type %s not defined", params.Ingest.Type))
+ }
+ return ingester, err
+}
+
+func getWriter(opMetrics *operational.Metrics, params config.StageParam) (write.Writer, error) {
+ var writer write.Writer
+ var err error
+ switch params.Write.Type {
+ case api.GRPCType:
+ writer, err = write.NewWriteGRPC(params)
+ case api.StdoutType:
+ writer, err = write.NewWriteStdout(params)
+ case api.NoneType:
+ writer, err = write.NewWriteNone()
+ case api.LokiType:
+ writer, err = write.NewWriteLoki(opMetrics, params)
+ case api.IpfixType:
+ writer, err = write.NewWriteIpfix(params)
+ case api.FakeType:
+ writer, err = write.NewWriteFake(params)
+ default:
+ panic(fmt.Sprintf("`write` type %s not defined; if no writer needed, specify `none`", params.Write.Type))
+ }
+ return writer, err
+}
+
+func getTransformer(opMetrics *operational.Metrics, params config.StageParam) (transform.Transformer, error) {
+ var transformer transform.Transformer
+ var err error
+ switch params.Transform.Type {
+ case api.GenericType:
+ transformer, err = transform.NewTransformGeneric(params)
+ case api.FilterType:
+ transformer, err = transform.NewTransformFilter(params)
+ case api.NetworkType:
+ transformer, err = transform.NewTransformNetwork(params, opMetrics)
+ case api.NoneType:
+ transformer, err = transform.NewTransformNone()
+ default:
+ panic(fmt.Sprintf("`transform` type %s not defined; if no transformer needed, specify `none`", params.Transform.Type))
+ }
+ return transformer, err
+}
+
+func getExtractor(opMetrics *operational.Metrics, params config.StageParam) (extract.Extractor, error) {
+ var extractor extract.Extractor
+ var err error
+ switch params.Extract.Type {
+ case api.NoneType:
+ extractor, _ = extract.NewExtractNone()
+ case api.AggregateType:
+ extractor, err = extract.NewExtractAggregate(params)
+ case api.ConnTrackType:
+ extractor, err = conntrack.NewConnectionTrack(opMetrics, params, clock.New())
+ case api.TimebasedType:
+ extractor, err = extract.NewExtractTimebased(params)
+ default:
+ panic(fmt.Sprintf("`extract` type %s not defined; if no extractor needed, specify `none`", params.Extract.Type))
+ }
+ return extractor, err
+}
+
+func getEncoder(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+ var encoder encode.Encoder
+ var err error
+ switch params.Encode.Type {
+ case api.PromType:
+ encoder, err = encode.NewEncodeProm(opMetrics, params)
+ case api.KafkaType:
+ encoder, err = encode.NewEncodeKafka(opMetrics, params)
+ case api.S3Type:
+ encoder, err = encode.NewEncodeS3(opMetrics, params)
+ case api.OtlpLogsType:
+ encoder, err = opentelemetry.NewEncodeOtlpLogs(opMetrics, params)
+ case api.OtlpMetricsType:
+ encoder, err = opentelemetry.NewEncodeOtlpMetrics(opMetrics, params)
+ case api.OtlpTracesType:
+ encoder, err = opentelemetry.NewEncodeOtlpTraces(opMetrics, params)
+ case api.NoneType:
+ encoder, _ = encode.NewEncodeNone()
+ default:
+ panic(fmt.Sprintf("`encode` type %s not defined; if no encoder needed, specify `none`", params.Encode.Type))
+ }
+ return encoder, err
+}
+
+// findStageType identifies the stage type from the given config.StageParam structure
+func findStageType(param *config.StageParam) string {
+ log.Debugf("findStageType: stage = %v", param.Name)
+ if param.Ingest != nil && param.Ingest.Type != "" {
+ return StageIngest
+ }
+ if param.Transform != nil && param.Transform.Type != "" {
+ return StageTransform
+ }
+ if param.Extract != nil && param.Extract.Type != "" {
+ return StageExtract
+ }
+ if param.Encode != nil && param.Encode.Type != "" {
+ return StageEncode
+ }
+ if param.Write != nil && param.Write.Type != "" {
+ return StageWrite
+ }
+ return "unknown"
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_watcher.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_watcher.go
new file mode 100644
index 000000000..35eaf1a41
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_watcher.go
@@ -0,0 +1,114 @@
+package pipeline
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ log "github.com/sirupsen/logrus"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/kubernetes"
+)
+
+type pipelineConfigWatcher struct {
+ clientSet kubernetes.Clientset
+ cmName string
+ cmNamespace string
+ configFile string
+ pipelineEntryMap map[string]*pipelineEntry
+}
+
+func newPipelineConfigWatcher(cfg *config.ConfigFileStruct, pipelineEntryMap map[string]*pipelineEntry) (*pipelineConfigWatcher, error) {
+ if cfg.DynamicParameters.Name == "" ||
+ cfg.DynamicParameters.Namespace == "" ||
+ cfg.DynamicParameters.FileName == "" {
+ return nil, nil
+ }
+
+ config, err := utils.LoadK8sConfig(cfg.DynamicParameters.KubeConfigPath)
+ if err != nil {
+ return nil, err
+ }
+
+ clientset, err := kubernetes.NewForConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ pipelineCW := pipelineConfigWatcher{
+ clientSet: *clientset,
+ pipelineEntryMap: pipelineEntryMap,
+ cmName: cfg.DynamicParameters.Name,
+ cmNamespace: cfg.DynamicParameters.Namespace,
+ configFile: cfg.DynamicParameters.FileName,
+ }
+
+ return &pipelineCW, nil
+
+}
+
+func (pcw *pipelineConfigWatcher) Run() {
+ for {
+ watcher, err := pcw.clientSet.CoreV1().ConfigMaps(pcw.cmNamespace).Watch(context.TODO(),
+ metav1.SingleObject(metav1.ObjectMeta{Name: pcw.cmName, Namespace: pcw.cmNamespace}))
+ if err != nil {
+ log.Errorf("Unable to create watcher: %s", err)
+ return
+ }
+ pcw.handleEvent(watcher.ResultChan())
+ }
+}
+
+func (pcw *pipelineConfigWatcher) handleEvent(eventChannel <-chan watch.Event) {
+ for {
+ event, open := <-eventChannel
+ if open {
+ switch event.Type {
+ case watch.Added:
+ fallthrough
+ case watch.Modified:
+ // Update our endpoint
+ if updatedMap, ok := event.Object.(*corev1.ConfigMap); ok {
+ pcw.updateFromConfigmap(updatedMap)
+ }
+ case watch.Deleted:
+ fallthrough
+ case watch.Bookmark:
+ case watch.Error:
+ default:
+ // Do nothing
+ }
+ } else {
+ // If eventChannel is closed, it means the server has closed the connection
+ return
+ }
+ }
+}
+
+func (pcw *pipelineConfigWatcher) updateFromConfigmap(cm *corev1.ConfigMap) {
+ if rawConfig, ok := cm.Data[pcw.configFile]; ok {
+ config := config.HotReloadStruct{}
+ err := json.Unmarshal([]byte(rawConfig), &config)
+ if err != nil {
+ log.Errorf("Cannot parse config: %v", err)
+ return
+ }
+ for _, param := range config.Parameters {
+ if pentry, ok := pcw.pipelineEntryMap[param.Name]; ok {
+ pcw.updateEntry(pentry, param)
+ }
+ }
+ }
+}
+
+func (pcw *pipelineConfigWatcher) updateEntry(pEntry *pipelineEntry, param config.StageParam) {
+ switch pEntry.stageType {
+ case StageEncode:
+ pEntry.Encoder.Update(param)
+ default:
+ log.Warningf("Hot reloading not supported for: %s", pEntry.stageType)
+ }
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/cni.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/cni.go
new file mode 100644
index 000000000..05533886d
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/cni.go
@@ -0,0 +1,9 @@
+package cni
+
+import (
+ v1 "k8s.io/api/core/v1"
+)
+
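+// Plugin abstracts CNI-specific logic, such as extracting extra node IPs.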
+type Plugin interface {
+ GetNodeIPs(node *v1.Node) []string
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/multus.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/multus.go
new file mode 100644
index 000000000..b37d45ea9
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/multus.go
@@ -0,0 +1,134 @@
+package cni
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ v1 "k8s.io/api/core/v1"
+)
+
+const (
+ statusAnnotation = "k8s.v1.cni.cncf.io/network-status"
+ // Index names
+ indexIP = "ip"
+ indexMAC = "mac"
+ indexInterface = "interface"
+)
+
+type MultusHandler struct {
+}
+
+type SecondaryNetKey struct {
+ NetworkName string
+ Key string
+}
+
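+// BuildKeys returns the candidate secondary-network lookup keys for the given flow and rule, possibly several per configured secondary network.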
+func (m *MultusHandler) BuildKeys(flow config.GenericMap, rule *api.K8sRule, secNets []api.SecondaryNetwork) []SecondaryNetKey {
+ if len(secNets) == 0 {
+ return nil
+ }
+ var keys []SecondaryNetKey
+ for _, sn := range secNets {
+ snKeys := m.buildSNKeys(flow, rule, &sn)
+ if snKeys != nil {
+ keys = append(keys, snKeys...)
+ }
+ }
+ return keys
+}
+
+func (m *MultusHandler) buildSNKeys(flow config.GenericMap, rule *api.K8sRule, sn *api.SecondaryNetwork) []SecondaryNetKey {
+ var keys []SecondaryNetKey
+
+ var ip, mac string
+ var interfaces []string
+ if _, ok := sn.Index[indexIP]; ok && len(rule.IPField) > 0 {
+ ip, ok = flow.LookupString(rule.IPField)
+ if !ok {
+ return nil
+ }
+ }
+ if _, ok := sn.Index[indexMAC]; ok && len(rule.MACField) > 0 {
+ mac, ok = flow.LookupString(rule.MACField)
+ if !ok {
+ return nil
+ }
+ }
+ if _, ok := sn.Index[indexInterface]; ok && len(rule.InterfacesField) > 0 {
+ v, ok := flow[rule.InterfacesField]
+ if !ok {
+ return nil
+ }
+ interfaces, ok = v.([]string)
+ if !ok {
+ return nil
+ }
+ }
+
+ macIP := "~" + ip + "~" + mac
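+	// without a configured interface index, the key keeps an empty interface prefix: "~<ip>~<mac>"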
+ if interfaces == nil {
+ return []SecondaryNetKey{{NetworkName: sn.Name, Key: macIP}}
+ }
+ for _, intf := range interfaces {
+ keys = append(keys, SecondaryNetKey{NetworkName: sn.Name, Key: intf + macIP})
+ }
+
+ return keys
+}
+
+func (m *MultusHandler) GetPodUniqueKeys(pod *v1.Pod, secNets []api.SecondaryNetwork) ([]string, error) {
+ if len(secNets) == 0 {
+ return nil, nil
+ }
+ // Cf https://k8snetworkplumbingwg.github.io/multus-cni/docs/quickstart.html#network-status-annotations
+ if statusAnnotationJSON, ok := pod.Annotations[statusAnnotation]; ok {
+ var networks []NetStatItem
+ if err := json.Unmarshal([]byte(statusAnnotationJSON), &networks); err != nil {
+ return nil, fmt.Errorf("failed to index from network-status annotation, cannot read annotation %s: %w", statusAnnotation, err)
+ }
+ var keys []string
+ for _, network := range networks {
+ for _, snConfig := range secNets {
+ if snConfig.Name == network.Name {
+ keys = append(keys, network.Keys(snConfig)...)
+ }
+ }
+ }
+ return keys, nil
+ }
+ // Annotation not present => just ignore, no error
+ return nil, nil
+}
+
+type NetStatItem struct {
+ Name string `json:"name"`
+ Interface string `json:"interface"`
+ IPs []string `json:"ips"`
+ MAC string `json:"mac"`
+}
+
+func (n *NetStatItem) Keys(snConfig api.SecondaryNetwork) []string {
+ var mac, intf string
+ if _, ok := snConfig.Index[indexMAC]; ok {
+ mac = n.MAC
+ }
+ if _, ok := snConfig.Index[indexInterface]; ok {
+ intf = n.Interface
+ }
+ if _, ok := snConfig.Index[indexIP]; ok {
+ var keys []string
+ for _, ip := range n.IPs {
+ keys = append(keys, key(intf, ip, mac))
+ }
+ return keys
+ }
+ // Ignore IP
+ return []string{key(intf, "", mac)}
+}
+
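+// key builds a secondary-network index key in the "<interface>~<ip>~<MAC>" format,
+// for example "eth0~10.1.2.3~0A:58:0A:80:00:05" (illustrative values).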
+func key(intf, ip, mac string) string {
+ return intf + "~" + ip + "~" + strings.ToUpper(mac)
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/ovn_kubernetes.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/ovn_kubernetes.go
new file mode 100644
index 000000000..ae5701de1
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/ovn_kubernetes.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package cni
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+
+ log "github.com/sirupsen/logrus"
+ v1 "k8s.io/api/core/v1"
+)
+
+const (
+ ovnSubnetAnnotation = "k8s.ovn.org/node-subnets"
+)
+
+type OVNPlugin struct {
+ Plugin
+}
+
+func (o *OVNPlugin) GetNodeIPs(node *v1.Node) []string {
+ // Add IP that is used in OVN for some traffic on mp0 interface
+ // (no IP / error returned when not using ovn-k)
+ ip, err := findOvnMp0IP(node.Annotations)
+ if err != nil {
+ // Log the error as Info, do not block other ips indexing
+ log.Infof("failed to index OVN mp0 IP: %v", err)
+ } else if ip != "" {
+ return []string{ip}
+ }
+ return nil
+}
+
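+// unmarshalOVNAnnotation extracts the default node subnet from the k8s.ovn.org/node-subnets annotation value.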
+func unmarshalOVNAnnotation(annot []byte) (string, error) {
+ // Depending on OVN (OCP) version, the annotation might be JSON-encoded as a string (legacy), or an array of strings
+ var subnetsAsArray map[string][]string
+ err := json.Unmarshal(annot, &subnetsAsArray)
+ if err == nil {
+ if subnets, ok := subnetsAsArray["default"]; ok {
+ if len(subnets) > 0 {
+ return subnets[0], nil
+ }
+ }
+ return "", fmt.Errorf("unexpected content for annotation %s: %s", ovnSubnetAnnotation, annot)
+ }
+
+ var subnetsAsString map[string]string
+ err = json.Unmarshal(annot, &subnetsAsString)
+ if err == nil {
+ if subnet, ok := subnetsAsString["default"]; ok {
+ return subnet, nil
+ }
+ return "", fmt.Errorf("unexpected content for annotation %s: %s", ovnSubnetAnnotation, annot)
+ }
+
+ return "", fmt.Errorf("cannot read annotation %s: %w", ovnSubnetAnnotation, err)
+}
+
+func findOvnMp0IP(annotations map[string]string) (string, error) {
+ if subnetsJSON, ok := annotations[ovnSubnetAnnotation]; ok {
+ subnet, err := unmarshalOVNAnnotation([]byte(subnetsJSON))
+ if err != nil {
+ return "", err
+ }
+ // From subnet like 10.128.0.0/23, we want to index IP 10.128.0.2
+ ip0, _, err := net.ParseCIDR(subnet)
+ if err != nil {
+ return "", err
+ }
+ ip4 := ip0.To4()
+ if ip4 == nil {
+ // TODO: what's the rule with ipv6?
+ return "", nil
+ }
+ return fmt.Sprintf("%d.%d.%d.%d", ip4[0], ip4[1], ip4[2], ip4[3]+2), nil
+ }
+ // Annotation not present (expected if not using ovn-kubernetes) => just ignore, no error
+ return "", nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go
new file mode 100644
index 000000000..b6a79df7a
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go
@@ -0,0 +1,154 @@
+package kubernetes
+
+import (
+ "strings"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ inf "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers"
+ "github.com/sirupsen/logrus"
+)
+
+var informers inf.InformersInterface = &inf.Informers{}
+
+// MockInformers replaces the package-level informers with a mock; used for testing only.
+func MockInformers() {
+ informers = inf.NewInformersMock()
+}
+
+func InitFromConfig(config api.NetworkTransformKubeConfig, opMetrics *operational.Metrics) error {
+ return informers.InitFromConfig(config, opMetrics)
+}
+
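+// Enrich adds Kubernetes metadata (namespace, name, owner, host, zone...) to the flow entry,
+// resolving the object from the secondary-network keys or the IP found in rule.IPField.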
+func Enrich(outputEntry config.GenericMap, rule *api.K8sRule) {
+ ip, ok := outputEntry.LookupString(rule.IPField)
+ if !ok {
+ return
+ }
+ potentialKeys := informers.BuildSecondaryNetworkKeys(outputEntry, rule)
+ kubeInfo, err := informers.GetInfo(potentialKeys, ip)
+ if err != nil {
+ logrus.WithError(err).Tracef("can't find kubernetes info for keys %v and IP %s", potentialKeys, ip)
+ return
+ }
+ if rule.Assignee != "otel" {
+		// NETOBSERV-666: avoid setting empty namespaces, otherwise Loki aggregation queries would
+		// differentiate between empty and nil namespaces.
+ if kubeInfo.Namespace != "" {
+ outputEntry[rule.Output+"_Namespace"] = kubeInfo.Namespace
+ }
+ outputEntry[rule.Output+"_Name"] = kubeInfo.Name
+ outputEntry[rule.Output+"_Type"] = kubeInfo.Type
+ outputEntry[rule.Output+"_OwnerName"] = kubeInfo.Owner.Name
+ outputEntry[rule.Output+"_OwnerType"] = kubeInfo.Owner.Type
+ outputEntry[rule.Output+"_NetworkName"] = kubeInfo.NetworkName
+ if rule.LabelsPrefix != "" {
+ for labelKey, labelValue := range kubeInfo.Labels {
+ outputEntry[rule.LabelsPrefix+"_"+labelKey] = labelValue
+ }
+ }
+ if kubeInfo.HostIP != "" {
+ outputEntry[rule.Output+"_HostIP"] = kubeInfo.HostIP
+ if kubeInfo.HostName != "" {
+ outputEntry[rule.Output+"_HostName"] = kubeInfo.HostName
+ }
+ }
+ fillInK8sZone(outputEntry, rule, kubeInfo, "_Zone")
+ } else {
+ // NOTE: Some of these fields are taken from opentelemetry specs.
+ // See https://opentelemetry.io/docs/specs/semconv/resource/k8s/
+ // Other fields (not specified in the specs) are named similarly
+ if kubeInfo.Namespace != "" {
+ outputEntry[rule.Output+"k8s.namespace.name"] = kubeInfo.Namespace
+ }
+ switch kubeInfo.Type {
+ case inf.TypeNode:
+ outputEntry[rule.Output+"k8s.node.name"] = kubeInfo.Name
+ outputEntry[rule.Output+"k8s.node.uid"] = kubeInfo.UID
+ case inf.TypePod:
+ outputEntry[rule.Output+"k8s.pod.name"] = kubeInfo.Name
+ outputEntry[rule.Output+"k8s.pod.uid"] = kubeInfo.UID
+ case inf.TypeService:
+ outputEntry[rule.Output+"k8s.service.name"] = kubeInfo.Name
+ outputEntry[rule.Output+"k8s.service.uid"] = kubeInfo.UID
+ }
+ outputEntry[rule.Output+"k8s.name"] = kubeInfo.Name
+ outputEntry[rule.Output+"k8s.type"] = kubeInfo.Type
+ outputEntry[rule.Output+"k8s.owner.name"] = kubeInfo.Owner.Name
+ outputEntry[rule.Output+"k8s.owner.type"] = kubeInfo.Owner.Type
+ if rule.LabelsPrefix != "" {
+ for labelKey, labelValue := range kubeInfo.Labels {
+ outputEntry[rule.LabelsPrefix+"."+labelKey] = labelValue
+ }
+ }
+ if kubeInfo.HostIP != "" {
+ outputEntry[rule.Output+"k8s.host.ip"] = kubeInfo.HostIP
+ if kubeInfo.HostName != "" {
+ outputEntry[rule.Output+"k8s.host.name"] = kubeInfo.HostName
+ }
+ }
+ fillInK8sZone(outputEntry, rule, kubeInfo, "k8s.zone")
+ }
+}
+
+const nodeZoneLabelName = "topology.kubernetes.io/zone"
+
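+// fillInK8sZone adds the topology zone, taken from the node labels, to the output entry when rule.AddZone is set.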
+func fillInK8sZone(outputEntry config.GenericMap, rule *api.K8sRule, kubeInfo *inf.Info, zonePrefix string) {
+ if !rule.AddZone {
+ // Nothing to do
+ return
+ }
+ switch kubeInfo.Type {
+ case inf.TypeNode:
+ zone, ok := kubeInfo.Labels[nodeZoneLabelName]
+ if ok {
+ outputEntry[rule.Output+zonePrefix] = zone
+ }
+ return
+ case inf.TypePod:
+ nodeInfo, err := informers.GetNodeInfo(kubeInfo.HostName)
+ if err != nil {
+			logrus.WithError(err).Tracef("can't find node info for node %v", kubeInfo.HostName)
+ return
+ }
+ if nodeInfo != nil {
+ zone, ok := nodeInfo.Labels[nodeZoneLabelName]
+ if ok {
+ outputEntry[rule.Output+zonePrefix] = zone
+ }
+ }
+ return
+
+ case inf.TypeService:
+ // A service is not assigned to a dedicated zone, skipping
+ return
+ }
+}
+
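+// EnrichLayer sets rule.Output to "infra" or "app", depending on whether the flow namespace/name
+// matches one of the configured infrastructure prefixes or references.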
+func EnrichLayer(outputEntry config.GenericMap, rule *api.K8sInfraRule) {
+ outputEntry[rule.Output] = "infra"
+ for _, nsnameFields := range rule.NamespaceNameFields {
+ if namespace, _ := outputEntry.LookupString(nsnameFields.Namespace); namespace != "" {
+ name, _ := outputEntry.LookupString(nsnameFields.Name)
+ if objectIsApp(namespace, name, rule) {
+ outputEntry[rule.Output] = "app"
+ return
+ }
+ }
+ }
+}
+
+func objectIsApp(namespace, name string, rule *api.K8sInfraRule) bool {
+ for _, prefix := range rule.InfraPrefixes {
+ if strings.HasPrefix(namespace, prefix) {
+ return false
+ }
+ }
+ for _, ref := range rule.InfraRefs {
+ if namespace == ref.Namespace && name == ref.Name {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go
new file mode 100644
index 000000000..513d32ecc
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go
@@ -0,0 +1,204 @@
+package informers
+
+import (
+ "errors"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni"
+ "github.com/stretchr/testify/mock"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/cache"
+)
+
+var (
+ secondaryNetConfig = []api.SecondaryNetwork{
+ {
+ Name: "my-network",
+ Index: map[string]any{"mac": nil},
+ },
+ }
+)
+
+type Mock struct {
+ mock.Mock
+ InformersInterface
+}
+
+func NewInformersMock() *Mock {
+ inf := new(Mock)
+ inf.On("InitFromConfig", mock.Anything, mock.Anything).Return(nil)
+ return inf
+}
+
+func (o *Mock) InitFromConfig(cfg api.NetworkTransformKubeConfig, opMetrics *operational.Metrics) error {
+ args := o.Called(cfg, opMetrics)
+ return args.Error(0)
+}
+
+type IndexerMock struct {
+ mock.Mock
+ cache.Indexer
+}
+
+type InformerMock struct {
+ mock.Mock
+ InformerInterface
+}
+
+type InformerInterface interface {
+ cache.SharedInformer
+ AddIndexers(indexers cache.Indexers) error
+ GetIndexer() cache.Indexer
+}
+
+func (m *IndexerMock) ByIndex(indexName, indexedValue string) ([]interface{}, error) {
+ args := m.Called(indexName, indexedValue)
+ return args.Get(0).([]interface{}), args.Error(1)
+}
+
+func (m *IndexerMock) GetByKey(key string) (interface{}, bool, error) {
+ args := m.Called(key)
+ return args.Get(0), args.Bool(1), args.Error(2)
+}
+
+func (m *InformerMock) GetIndexer() cache.Indexer {
+ args := m.Called()
+ return args.Get(0).(cache.Indexer)
+}
+
+func (m *IndexerMock) MockPod(ip, mac, intf, name, namespace, nodeIP string, owner *Owner) {
+ var ownerRef []metav1.OwnerReference
+ if owner != nil {
+ ownerRef = []metav1.OwnerReference{{
+ Kind: owner.Type,
+ Name: owner.Name,
+ }}
+ }
+ info := Info{
+ Type: "Pod",
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ OwnerReferences: ownerRef,
+ },
+ HostIP: nodeIP,
+ ips: []string{},
+ secondaryNetKeys: []string{},
+ }
+ if len(mac) > 0 {
+ nsi := cni.NetStatItem{
+ Interface: intf,
+ MAC: mac,
+ IPs: []string{ip},
+ }
+ info.secondaryNetKeys = nsi.Keys(secondaryNetConfig[0])
+ m.On("ByIndex", IndexCustom, info.secondaryNetKeys[0]).Return([]interface{}{&info}, nil)
+ }
+ if len(ip) > 0 {
+ info.ips = []string{ip}
+ m.On("ByIndex", IndexIP, ip).Return([]interface{}{&info}, nil)
+ }
+}
+
+func (m *IndexerMock) MockNode(ip, name string) {
+ m.On("ByIndex", IndexIP, ip).Return([]interface{}{&Info{
+ Type: "Node",
+ ObjectMeta: metav1.ObjectMeta{Name: name},
+ ips: []string{ip},
+ }}, nil)
+}
+
+func (m *IndexerMock) MockService(ip, name, namespace string) {
+ m.On("ByIndex", IndexIP, ip).Return([]interface{}{&Info{
+ Type: "Service",
+ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
+ ips: []string{ip},
+ }}, nil)
+}
+
+func (m *IndexerMock) MockReplicaSet(name, namespace string, owner Owner) {
+ m.On("GetByKey", namespace+"/"+name).Return(&metav1.ObjectMeta{
+ Name: name,
+ OwnerReferences: []metav1.OwnerReference{{
+ Kind: owner.Type,
+ Name: owner.Name,
+ }},
+ }, true, nil)
+}
+
+func (m *IndexerMock) FallbackNotFound() {
+ m.On("ByIndex", IndexIP, mock.Anything).Return([]interface{}{}, nil)
+}
+
+func SetupIndexerMocks(kd *Informers) (pods, nodes, svc, rs *IndexerMock) {
+ // pods informer
+ pods = &IndexerMock{}
+ pim := InformerMock{}
+ pim.On("GetIndexer").Return(pods)
+ kd.pods = &pim
+ // nodes informer
+ nodes = &IndexerMock{}
+ him := InformerMock{}
+ him.On("GetIndexer").Return(nodes)
+ kd.nodes = &him
+ // svc informer
+ svc = &IndexerMock{}
+ sim := InformerMock{}
+ sim.On("GetIndexer").Return(svc)
+ kd.services = &sim
+ // rs informer
+ rs = &IndexerMock{}
+ rim := InformerMock{}
+ rim.On("GetIndexer").Return(rs)
+ kd.replicaSets = &rim
+ return
+}
+
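+// FakeInformers is a stub implementation of InformersInterface backed by static maps,
+// keyed by IP, by secondary-network key and by node name.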
+type FakeInformers struct {
+ InformersInterface
+ ipInfo map[string]*Info
+ customKeysInfo map[string]*Info
+ nodes map[string]*Info
+}
+
+func SetupStubs(ipInfo map[string]*Info, customKeysInfo map[string]*Info, nodes map[string]*Info) *FakeInformers {
+ return &FakeInformers{
+ ipInfo: ipInfo,
+ customKeysInfo: customKeysInfo,
+ nodes: nodes,
+ }
+}
+
+func (f *FakeInformers) InitFromConfig(_ api.NetworkTransformKubeConfig, _ *operational.Metrics) error {
+ return nil
+}
+
+func (f *FakeInformers) GetInfo(keys []cni.SecondaryNetKey, ip string) (*Info, error) {
+ if len(keys) > 0 {
+ i := f.customKeysInfo[keys[0].Key]
+ if i != nil {
+ return i, nil
+ }
+ }
+
+ i := f.ipInfo[ip]
+ if i != nil {
+ return i, nil
+ }
+ return nil, errors.New("notFound")
+}
+
+func (f *FakeInformers) BuildSecondaryNetworkKeys(flow config.GenericMap, rule *api.K8sRule) []cni.SecondaryNetKey {
+ m := cni.MultusHandler{}
+ return m.BuildKeys(flow, rule, secondaryNetConfig)
+}
+
+func (f *FakeInformers) GetNodeInfo(n string) (*Info, error) {
+ i := f.nodes[n]
+ if i != nil {
+ return i, nil
+ }
+ return nil, errors.New("notFound")
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go
new file mode 100644
index 000000000..2c7a4a4ce
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package informers
+
+import (
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ "github.com/sirupsen/logrus"
+
+ "github.com/prometheus/client_golang/prometheus"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ inf "k8s.io/client-go/informers"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/metadata"
+ "k8s.io/client-go/metadata/metadatainformer"
+ "k8s.io/client-go/tools/cache"
+)
+
+const (
+ kubeConfigEnvVariable = "KUBECONFIG"
+ syncTime = 10 * time.Minute
+ IndexCustom = "byCustomKey"
+ IndexIP = "byIP"
+ TypeNode = "Node"
+ TypePod = "Pod"
+ TypeService = "Service"
+)
+
+var (
+ log = logrus.WithField("component", "transform.Network.Kubernetes")
+ cniPlugins = map[string]cni.Plugin{
+ api.OVN: &cni.OVNPlugin{},
+ }
+ multus = cni.MultusHandler{}
+)
+
+//nolint:revive
+type InformersInterface interface {
+ BuildSecondaryNetworkKeys(flow config.GenericMap, rule *api.K8sRule) []cni.SecondaryNetKey
+ GetInfo([]cni.SecondaryNetKey, string) (*Info, error)
+ GetNodeInfo(string) (*Info, error)
+ InitFromConfig(api.NetworkTransformKubeConfig, *operational.Metrics) error
+}
+
+type Informers struct {
+ InformersInterface
+ // pods, nodes and services cache the different object types as *Info pointers
+ pods cache.SharedIndexInformer
+ nodes cache.SharedIndexInformer
+ services cache.SharedIndexInformer
+ // replicaSets caches the ReplicaSets as partially-filled *ObjectMeta pointers
+ replicaSets cache.SharedIndexInformer
+ stopChan chan struct{}
+ mdStopChan chan struct{}
+ managedCNI []string
+ secondaryNetworks []api.SecondaryNetwork
+ indexerHitMetric *prometheus.CounterVec
+}
+
+type Owner struct {
+ Type string
+ Name string
+}
+
+// Info contains precollected metadata for Pods, Nodes and Services.
+// Not all the fields are populated for all the above types. To save
+// memory, only the necessary data is kept for each Type.
+// For more information about which fields are set for each type, please
+// refer to the instantiation function of the respective informers.
+type Info struct {
+ // Informers require the internal object to embed an ObjectMeta instance
+ metav1.ObjectMeta
+ Type string
+ Owner Owner
+ HostName string
+ HostIP string
+ NetworkName string
+ ips []string
+ secondaryNetKeys []string
+}
+
+var (
+ ipIndexer = func(obj interface{}) ([]string, error) {
+ return obj.(*Info).ips, nil
+ }
+ customKeyIndexer = func(obj interface{}) ([]string, error) {
+ return obj.(*Info).secondaryNetKeys, nil
+ }
+)
+
+func (k *Informers) BuildSecondaryNetworkKeys(flow config.GenericMap, rule *api.K8sRule) []cni.SecondaryNetKey {
+ return multus.BuildKeys(flow, rule, k.secondaryNetworks)
+}
+
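+// GetInfo returns the enrichment Info matching the given secondary-network keys or IP.
+// Pods are looked up first (by secondary-network key, then by IP), then Nodes and
+// Services by IP. The owner is resolved lazily, since it might be discovered after
+// the owned object.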
+func (k *Informers) GetInfo(potentialKeys []cni.SecondaryNetKey, ip string) (*Info, error) {
+ if info, ok := k.fetchInformers(potentialKeys, ip); ok {
+ // Owner data might be discovered after the owned object, so we fetch it
+ // at the last moment
+ if info.Owner.Name == "" {
+ info.Owner = k.getOwner(info)
+ }
+ return info, nil
+ }
+
+ return nil, fmt.Errorf("informers can't find IP %s", ip)
+}
+
+func (k *Informers) fetchInformers(potentialKeys []cni.SecondaryNetKey, ip string) (*Info, bool) {
+ if info, ok := k.fetchPodInformer(potentialKeys, ip); ok {
+ // it might happen that the Host is discovered after the Pod
+ if info.HostName == "" {
+ info.HostName = k.getHostName(info.HostIP)
+ }
+ return info, true
+ }
+ // Nodes are only indexed by IP
+ if info, ok := k.infoForIP(k.nodes.GetIndexer(), "Node", ip); ok {
+ return info, true
+ }
+ // Services are only indexed by IP
+ if info, ok := k.infoForIP(k.services.GetIndexer(), "Service", ip); ok {
+ return info, true
+ }
+ return nil, false
+}
+
+func (k *Informers) fetchPodInformer(potentialKeys []cni.SecondaryNetKey, ip string) (*Info, bool) {
+ // 1. Check if the unique key matches any Pod (secondary networks / multus case)
+ if info, ok := k.infoForCustomKeys(k.pods.GetIndexer(), "Pod", potentialKeys); ok {
+ return info, ok
+ }
+ // 2. Check if the IP matches any Pod (primary network)
+ return k.infoForIP(k.pods.GetIndexer(), "Pod", ip)
+}
+
+func (k *Informers) increaseIndexerHits(kind, namespace, network, warn string) {
+ k.indexerHitMetric.WithLabelValues(kind, namespace, network, warn).Inc()
+}
+
+func (k *Informers) infoForCustomKeys(idx cache.Indexer, kind string, potentialKeys []cni.SecondaryNetKey) (*Info, bool) {
+ for _, key := range potentialKeys {
+ objs, err := idx.ByIndex(IndexCustom, key.Key)
+ if err != nil {
+ k.increaseIndexerHits(kind, "", key.NetworkName, "informer error")
+ log.WithError(err).WithField("key", key).Debug("error accessing unique key index, ignoring")
+ return nil, false
+ }
+ if len(objs) > 0 {
+ info := objs[0].(*Info)
+ info.NetworkName = key.NetworkName
+ if len(objs) > 1 {
+ k.increaseIndexerHits(kind, info.Namespace, key.NetworkName, "multiple matches")
+ log.WithField("key", key).Debugf("found %d objects matching this key, returning first", len(objs))
+ } else {
+ k.increaseIndexerHits(kind, info.Namespace, key.NetworkName, "")
+ }
+ log.Tracef("infoForUniqueKey found key %v", info)
+ return info, true
+ }
+ }
+ return nil, false
+}
+
+func (k *Informers) infoForIP(idx cache.Indexer, kind string, ip string) (*Info, bool) {
+ objs, err := idx.ByIndex(IndexIP, ip)
+ if err != nil {
+ k.increaseIndexerHits(kind, "", "primary", "informer error")
+ log.WithError(err).WithField("ip", ip).Debug("error accessing IP index, ignoring")
+ return nil, false
+ }
+ if len(objs) > 0 {
+ info := objs[0].(*Info)
+ info.NetworkName = "primary"
+ if len(objs) > 1 {
+ k.increaseIndexerHits(kind, info.Namespace, "primary", "multiple matches")
+ log.WithField("ip", ip).Debugf("found %d objects matching this IP, returning first", len(objs))
+ } else {
+ k.increaseIndexerHits(kind, info.Namespace, "primary", "")
+ }
+ log.Tracef("infoForIP found ip %v", info)
+ return info, true
+ }
+ return nil, false
+}
+
+func (k *Informers) GetNodeInfo(name string) (*Info, error) {
+ item, ok, err := k.nodes.GetIndexer().GetByKey(name)
+ if err != nil {
+ return nil, err
+ } else if ok {
+ return item.(*Info), nil
+ }
+ return nil, nil
+}
+
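+// getOwner resolves the controller owning the given object from its OwnerReferences,
+// dereferencing ReplicaSets to their own owner (e.g. a Deployment) when possible.
+// When no owner reference is found, the object itself is returned as its owner.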
+func (k *Informers) getOwner(info *Info) Owner {
+ if len(info.OwnerReferences) != 0 {
+ ownerReference := info.OwnerReferences[0]
+ if ownerReference.Kind != "ReplicaSet" {
+ return Owner{
+ Name: ownerReference.Name,
+ Type: ownerReference.Kind,
+ }
+ }
+
+ item, ok, err := k.replicaSets.GetIndexer().GetByKey(info.Namespace + "/" + ownerReference.Name)
+ if err != nil {
+ log.WithError(err).WithField("key", info.Namespace+"/"+ownerReference.Name).
+ Debug("can't get ReplicaSet info from informer. Ignoring")
+ } else if ok {
+ rsInfo := item.(*metav1.ObjectMeta)
+ if len(rsInfo.OwnerReferences) > 0 {
+ return Owner{
+ Name: rsInfo.OwnerReferences[0].Name,
+ Type: rsInfo.OwnerReferences[0].Kind,
+ }
+ }
+ }
+ }
+ // If no owner references found, return itself as owner
+ return Owner{
+ Name: info.Name,
+ Type: info.Type,
+ }
+}
+
+func (k *Informers) getHostName(hostIP string) string {
+ if hostIP != "" {
+ if info, ok := k.infoForIP(k.nodes.GetIndexer(), "Node (indirect)", hostIP); ok {
+ return info.Name
+ }
+ }
+ return ""
+}
+
+func (k *Informers) initNodeInformer(informerFactory inf.SharedInformerFactory) error {
+ nodes := informerFactory.Core().V1().Nodes().Informer()
+ // Transform any *v1.Node instance into a *Info instance to save space
+ // in the informer's cache
+ if err := nodes.SetTransform(func(i interface{}) (interface{}, error) {
+ node, ok := i.(*v1.Node)
+ if !ok {
+ return nil, fmt.Errorf("was expecting a Node. Got: %T", i)
+ }
+ ips := make([]string, 0, len(node.Status.Addresses))
+ hostIP := ""
+ for _, address := range node.Status.Addresses {
+ ip := net.ParseIP(address.Address)
+ if ip != nil {
+ ips = append(ips, ip.String())
+ if hostIP == "" {
+ hostIP = ip.String()
+ }
+ }
+ }
+ // CNI-dependent logic (must not fail when the CNI is not installed)
+ for _, name := range k.managedCNI {
+ if plugin := cniPlugins[name]; plugin != nil {
+ moreIPs := plugin.GetNodeIPs(node)
+ if moreIPs != nil {
+ ips = append(ips, moreIPs...)
+ }
+ }
+ }
+
+ return &Info{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: node.Name,
+ Labels: node.Labels,
+ },
+ ips: ips,
+ Type: TypeNode,
+ // We duplicate HostIP and HostName information to simplify later filtering e.g. by
+ // Host IP, where we want to get all the Pod flows by src/dst host, but also the actual
+ // host-to-host flows by the same field.
+ HostIP: hostIP,
+ HostName: node.Name,
+ }, nil
+ }); err != nil {
+ return fmt.Errorf("can't set nodes transform: %w", err)
+ }
+ indexers := cache.Indexers{IndexIP: ipIndexer}
+ if err := nodes.AddIndexers(indexers); err != nil {
+ return fmt.Errorf("can't add %s indexer to Nodes informer: %w", IndexIP, err)
+ }
+ k.nodes = nodes
+ return nil
+}
+
+func (k *Informers) initPodInformer(informerFactory inf.SharedInformerFactory) error {
+ pods := informerFactory.Core().V1().Pods().Informer()
+ // Transform any *v1.Pod instance into a *Info instance to save space
+ // in the informer's cache
+ if err := pods.SetTransform(func(i interface{}) (interface{}, error) {
+ pod, ok := i.(*v1.Pod)
+ if !ok {
+ return nil, fmt.Errorf("was expecting a Pod. Got: %T", i)
+ }
+ ips := make([]string, 0, len(pod.Status.PodIPs))
+ for _, ip := range pod.Status.PodIPs {
+ // ignoring host-networked Pod IPs
+ if ip.IP != pod.Status.HostIP {
+ ips = append(ips, ip.IP)
+ }
+ }
+ // Index from secondary network info
+ keys, err := multus.GetPodUniqueKeys(pod, k.secondaryNetworks)
+ if err != nil {
+ // Log the error at Info level; do not block indexing of the other IPs
+ log.WithError(err).Infof("Secondary network cannot be identified")
+ }
+
+ return &Info{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: pod.Name,
+ Namespace: pod.Namespace,
+ Labels: pod.Labels,
+ OwnerReferences: pod.OwnerReferences,
+ },
+ Type: TypePod,
+ HostIP: pod.Status.HostIP,
+ HostName: pod.Spec.NodeName,
+ secondaryNetKeys: keys,
+ ips: ips,
+ }, nil
+ }); err != nil {
+ return fmt.Errorf("can't set pods transform: %w", err)
+ }
+ indexers := cache.Indexers{
+ IndexIP: ipIndexer,
+ IndexCustom: customKeyIndexer,
+ }
+ if err := pods.AddIndexers(indexers); err != nil {
+ return fmt.Errorf("can't add indexers to Pods informer: %w", err)
+ }
+
+ k.pods = pods
+ return nil
+}
+
+func (k *Informers) initServiceInformer(informerFactory inf.SharedInformerFactory) error {
+ services := informerFactory.Core().V1().Services().Informer()
+ // Transform any *v1.Service instance into a *Info instance to save space
+ // in the informer's cache
+ if err := services.SetTransform(func(i interface{}) (interface{}, error) {
+ svc, ok := i.(*v1.Service)
+ if !ok {
+ return nil, fmt.Errorf("was expecting a Service. Got: %T", i)
+ }
+ ips := make([]string, 0, len(svc.Spec.ClusterIPs))
+ for _, ip := range svc.Spec.ClusterIPs {
+ // ignoring None IPs
+ if isServiceIPSet(ip) {
+ ips = append(ips, ip)
+ }
+ }
+ return &Info{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: svc.Name,
+ Namespace: svc.Namespace,
+ Labels: svc.Labels,
+ },
+ Type: TypeService,
+ ips: ips,
+ }, nil
+ }); err != nil {
+ return fmt.Errorf("can't set services transform: %w", err)
+ }
+ indexers := cache.Indexers{IndexIP: ipIndexer}
+ if err := services.AddIndexers(indexers); err != nil {
+ return fmt.Errorf("can't add %s indexer to Pods informer: %w", IndexIP, err)
+ }
+
+ k.services = services
+ return nil
+}
+
+func (k *Informers) initReplicaSetInformer(informerFactory metadatainformer.SharedInformerFactory) error {
+ k.replicaSets = informerFactory.ForResource(
+ schema.GroupVersionResource{
+ Group: "apps",
+ Version: "v1",
+ Resource: "replicasets",
+ }).Informer()
+ // To save space, instead of storing a complete *metav1.ObjectMeta instance, the
+ // informer's cache will store only the minimal required fields
+ if err := k.replicaSets.SetTransform(func(i interface{}) (interface{}, error) {
+ rs, ok := i.(*metav1.PartialObjectMetadata)
+ if !ok {
+ return nil, fmt.Errorf("was expecting a ReplicaSet. Got: %T", i)
+ }
+ return &metav1.ObjectMeta{
+ Name: rs.Name,
+ Namespace: rs.Namespace,
+ OwnerReferences: rs.OwnerReferences,
+ }, nil
+ }); err != nil {
+ return fmt.Errorf("can't set ReplicaSets transform: %w", err)
+ }
+ return nil
+}
+
+func (k *Informers) InitFromConfig(cfg api.NetworkTransformKubeConfig, opMetrics *operational.Metrics) error {
+ // Initialization variables
+ k.stopChan = make(chan struct{})
+ k.mdStopChan = make(chan struct{})
+
+ kconf, err := utils.LoadK8sConfig(cfg.ConfigPath)
+ if err != nil {
+ return err
+ }
+
+ kubeClient, err := kubernetes.NewForConfig(kconf)
+ if err != nil {
+ return err
+ }
+
+ metaKubeClient, err := metadata.NewForConfig(kconf)
+ if err != nil {
+ return err
+ }
+
+ k.managedCNI = cfg.ManagedCNI
+ if k.managedCNI == nil {
+ k.managedCNI = []string{api.OVN}
+ }
+ k.secondaryNetworks = cfg.SecondaryNetworks
+ k.indexerHitMetric = opMetrics.CreateIndexerHitCounter()
+ err = k.initInformers(kubeClient, metaKubeClient)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (k *Informers) initInformers(client kubernetes.Interface, metaClient metadata.Interface) error {
+ informerFactory := inf.NewSharedInformerFactory(client, syncTime)
+ metadataInformerFactory := metadatainformer.NewSharedInformerFactory(metaClient, syncTime)
+ err := k.initNodeInformer(informerFactory)
+ if err != nil {
+ return err
+ }
+ err = k.initPodInformer(informerFactory)
+ if err != nil {
+ return err
+ }
+ err = k.initServiceInformer(informerFactory)
+ if err != nil {
+ return err
+ }
+ err = k.initReplicaSetInformer(metadataInformerFactory)
+ if err != nil {
+ return err
+ }
+
+ log.Debugf("starting kubernetes informers, waiting for synchronization")
+ informerFactory.Start(k.stopChan)
+ informerFactory.WaitForCacheSync(k.stopChan)
+ log.Debugf("kubernetes informers started")
+
+ log.Debugf("starting kubernetes metadata informers, waiting for synchronization")
+ metadataInformerFactory.Start(k.mdStopChan)
+ metadataInformerFactory.WaitForCacheSync(k.mdStopChan)
+ log.Debugf("kubernetes metadata informers started")
+ return nil
+}
+
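+// isServiceIPSet tells whether the given ClusterIP is an actual IP, that is, neither empty
+// nor the special "None" value used by headless Services.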
+func isServiceIPSet(ip string) bool {
+ return ip != v1.ClusterIPNone && ip != ""
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go
new file mode 100644
index 000000000..42f9ab4a7
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package location
+
+import (
+ "archive/zip"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ip2location/ip2location-go/v9"
+ log "github.com/sirupsen/logrus"
+)
+
+type Info struct {
+ CountryName string `json:"country_name"`
+ CountryLongName string `json:"country_long"`
+ RegionName string `json:"region_name"`
+ CityName string `json:"city_name"`
+ Latitude string `json:"latitude"`
+ Longitude string `json:"longitude"`
+}
+
+const (
+ dbFilename = "IP2LOCATION-LITE-DB9.BIN"
+ dbFileLocation = "/tmp/location_db.bin"
+ dbZIPFileLocation = "/tmp/location_db.bin" + ".zip"
+ // REF: Original location from ip2location DB is: "https://www.ip2location.com/download/?token=OpOljbgT6K2WJnFrFBBmBzRVNpHlcYqNN4CMeGavvh0pPOpyu16gKQyqvDMxTDF4&file=DB9LITEBIN"
+ dbURL = "https://raw.githubusercontent.com/netobserv/flowlogs-pipeline/main/contrib/location/location.db"
+)
+
+var locationDB *ip2location.DB
+
+type OSIO struct {
+ Stat func(string) (os.FileInfo, error)
+ Create func(string) (*os.File, error)
+ MkdirAll func(string, os.FileMode) error
+ OpenFile func(string, int, os.FileMode) (*os.File, error)
+ Copy func(io.Writer, io.Reader) (int64, error)
+}
+
+var _osio = OSIO{}
+var _dbURL string
+var locationDBMutex *sync.Mutex
+
+func init() {
+ _osio.Stat = os.Stat
+ _osio.Create = os.Create
+ _osio.MkdirAll = os.MkdirAll
+ _osio.OpenFile = os.OpenFile
+ _osio.Copy = io.Copy
+ _dbURL = dbURL
+ locationDBMutex = &sync.Mutex{}
+}
+
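+// InitLocationDB ensures the ip2location database is available locally, downloading and
+// unzipping it on first use, then opens it for subsequent GetLocation calls. A mutex
+// guards the initialization so it is safe for concurrent use.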
+func InitLocationDB() error {
+ locationDBMutex.Lock()
+ defer locationDBMutex.Unlock()
+
+ if _, statErr := _osio.Stat(dbFileLocation); errors.Is(statErr, os.ErrNotExist) {
+ log.Infof("Downloading location DB into local file %s ", dbFileLocation)
+ out, createErr := _osio.Create(dbZIPFileLocation)
+ if createErr != nil {
+ return fmt.Errorf("failed os.Create %w ", createErr)
+ }
+
+ timeout := time.Minute
+ tr := &http.Transport{IdleConnTimeout: timeout}
+ client := &http.Client{Transport: tr, Timeout: timeout}
+ resp, getErr := client.Get(_dbURL)
+ if getErr != nil {
+ return fmt.Errorf("failed http.Get %w ", getErr)
+ }
+
+ log.Infof("Got response %s", resp.Status)
+
+ written, copyErr := io.Copy(out, resp.Body)
+ if copyErr != nil {
+ return fmt.Errorf("failed io.Copy %w ", copyErr)
+ }
+
+ log.Infof("Wrote %d bytes to %s", written, dbZIPFileLocation)
+
+ bodyCloseErr := resp.Body.Close()
+ if bodyCloseErr != nil {
+ return fmt.Errorf("failed resp.Body.Close %w ", bodyCloseErr)
+ }
+
+ outCloseErr := out.Close()
+ if outCloseErr != nil {
+ return fmt.Errorf("failed out.Close %w ", outCloseErr)
+ }
+
+ unzipErr := unzip(dbZIPFileLocation, dbFileLocation)
+ if unzipErr != nil {
+ file, openErr := os.Open(dbFileLocation + "/" + dbFilename)
+ if openErr == nil {
+ fi, fileStatErr := file.Stat()
+ if fileStatErr == nil {
+ log.Infof("length of %s is: %d", dbFileLocation+"/"+dbFilename, fi.Size())
+ _ = file.Close()
+ } else {
+ log.Infof("file.Stat err %v", fileStatErr)
+ }
+ } else {
+ log.Infof("os.Open err %v", openErr)
+ }
+
+ fileContent, readFileErr := os.ReadFile(dbFileLocation + "/" + dbFilename)
+ if readFileErr == nil {
+ log.Infof("content of first 100 bytes of %s is: %s", dbFileLocation+"/"+dbFilename, fileContent[:100])
+ } else {
+ log.Infof("os.ReadFile err %v", readFileErr)
+ }
+
+ return fmt.Errorf("failed unzip %w ", unzipErr)
+ }
+
+ log.Infof("Download completed successfully")
+ }
+
+ log.Debugf("Loading location DB")
+ db, openDBErr := ip2location.OpenDB(dbFileLocation + "/" + dbFilename)
+ if openDBErr != nil {
+ return fmt.Errorf("OpenDB err - %w ", openDBErr)
+ }
+
+ locationDB = db
+ return nil
+}
+
+func GetLocation(ip string) (*Info, error) {
+
+ if locationDB == nil {
+ return nil, fmt.Errorf("no location DB available")
+ }
+
+ res, err := locationDB.Get_all(ip)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Info{
+ CountryName: res.Country_short,
+ CountryLongName: res.Country_long,
+ RegionName: res.Region,
+ CityName: res.City,
+ Latitude: fmt.Sprintf("%f", res.Latitude),
+ Longitude: fmt.Sprintf("%f", res.Longitude),
+ }, nil
+}
+
+//goland:noinspection ALL
+func unzip(src, dest string) error {
+ r, err := zip.OpenReader(src)
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ for _, f := range r.File {
+ rc, err := f.Open()
+ if err != nil {
+ return err
+ }
+ defer rc.Close()
+
+ filePath := filepath.Join(dest, f.Name)
+ if f.FileInfo().IsDir() {
+ err = _osio.MkdirAll(filePath, f.Mode())
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ } else {
+ var fileDir string
+ if lastIndex := strings.LastIndex(filePath, string(os.PathSeparator)); lastIndex > -1 {
+ fileDir = filePath[:lastIndex]
+ }
+
+ err = _osio.MkdirAll(fileDir, f.Mode())
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ df, err := _osio.OpenFile(
+ filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
+ if err != nil {
+ return err
+ }
+ defer df.Close()
+
+ _, err = _osio.Copy(df, rc)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb/netdb.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb/netdb.go
new file mode 100644
index 000000000..f4b4f21d8
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb/netdb.go
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * > Note: this code is a revised and enhanced version of the netdb.go file
+ * > from https://github.com/dominikh/go-netdb/ (MIT License)
+ */
+
+package netdb
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+)
+
+var slog = logrus.WithField("component", "netdb.ServiceNames")
+
+type numKey struct {
+ port int
+ protocolNumber int
+}
+
+type nameKey struct {
+ port int
+ protocolName string
+}
+
+type ServiceNames struct {
+ protoNums map[int]struct{}
+ // key: protocol name, value: protocol number
+ protoNames map[string]int
+ byPort map[int]string
+ byProtoNum map[numKey]string
+ byProtoName map[nameKey]string
+}
+
+// LoadServicesDB receives readers to the /etc/protocols and /etc/services formatted content
+// and returns a database that allows querying service names from ports and protocol information.
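+// A minimal usage sketch (the file paths below are illustrative; any readers over
+// /etc/protocols and /etc/services formatted content work, and error handling is elided):
+//
+//	protos, _ := os.Open("/etc/protocols")
+//	svcs, _ := os.Open("/etc/services")
+//	db, _ := LoadServicesDB(protos, svcs)
+//	name := db.ByPortAndProtocolName(443, "tcp") // e.g. "https"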
+func LoadServicesDB(protocols, services io.Reader) (*ServiceNames, error) {
+ log := slog.WithField("method", "LoadServicesDB")
+ db := ServiceNames{
+ protoNums: map[int]struct{}{},
+ protoNames: map[string]int{},
+ byPort: map[int]string{},
+ byProtoNum: map[numKey]string{},
+ byProtoName: map[nameKey]string{},
+ }
+ // Load protocols
+ protoData, err := io.ReadAll(protocols)
+ if err != nil {
+ return nil, fmt.Errorf("reading protocols data: %w", err)
+ }
+
+ // key: proto name, value: aliases
+ protoAliases := map[string][]string{}
+
+ for i, line := range strings.Split(string(protoData), "\n") {
+ line = strings.TrimSpace(line)
+ split := strings.SplitN(line, "#", 2)
+ fields := strings.Fields(split[0])
+ if len(fields) < 2 {
+ continue
+ }
+
+ num, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ logrus.ErrorKey: err,
+ "lineNum": i,
+ "line": line,
+ }).Debug("wrong protocol number. Ignoring entry")
+ continue
+ }
+
+ db.protoNums[int(num)] = struct{}{}
+ db.protoNames[fields[0]] = int(num)
+ for _, alias := range fields[2:] {
+ db.protoNames[alias] = int(num)
+ }
+ protoAliases[fields[0]] = fields[2:]
+ }
+
+ // Load services
+ svcData, err := io.ReadAll(services)
+ if err != nil {
+ return nil, fmt.Errorf("reading services data: %w", err)
+ }
+
+ for i, line := range strings.Split(string(svcData), "\n") {
+ line = strings.TrimSpace(line)
+ split := strings.SplitN(line, "#", 2)
+ fields := strings.Fields(split[0])
+ if len(fields) < 2 {
+ continue
+ }
+
+ svcName := fields[0]
+ portproto := strings.SplitN(fields[1], "/", 2)
+ protoName := portproto[1]
+ port, err := strconv.ParseInt(portproto[0], 10, 32)
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ logrus.ErrorKey: err,
+ "lineNum": i,
+ "line": line,
+ }).Debug("wrong service port number. Ignoring entry")
+ continue
+ }
+ db.byPort[int(port)] = svcName
+ if protoNum, ok := db.protoNames[protoName]; ok {
+ db.byProtoNum[numKey{port: int(port), protocolNumber: protoNum}] = svcName
+ }
+ db.byProtoName[nameKey{port: int(port), protocolName: protoName}] = svcName
+ for _, alias := range protoAliases[protoName] {
+ db.byProtoName[nameKey{port: int(port), protocolName: alias}] = svcName
+ }
+ }
+ return &db, nil
+}
+
+// ByPortAndProtocolName returns the service name given a port and a protocol name (or
+// its alias). If the protocol does not exist, returns the name of any service matching
+// the port number.
+func (db *ServiceNames) ByPortAndProtocolName(port int, nameOrAlias string) string {
+ if _, ok := db.protoNames[nameOrAlias]; ok {
+ return db.byProtoName[nameKey{port: port, protocolName: nameOrAlias}]
+ }
+ return db.byPort[port]
+}
+
+// ByPortAndProtocolNumber returns the service name given a port and a protocol number.
+// If the protocol does not exist, returns the name of any service matching
+// the port number.
+func (db *ServiceNames) ByPortAndProtocolNumber(port, protoNum int) string {
+ if _, ok := db.protoNums[protoNum]; ok {
+ return db.byProtoNum[numKey{port: port, protocolNumber: protoNum}]
+ }
+ return db.byPort[port]
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform.go
new file mode 100644
index 000000000..5b51a41ae
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform.go
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transform
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/sirupsen/logrus"
+)
+
+type Transformer interface {
+ Transform(in config.GenericMap) (config.GenericMap, bool)
+}
+
+type transformNone struct {
+}
+
+// Transform transforms a flow before being stored
+func (t *transformNone) Transform(f config.GenericMap) (config.GenericMap, bool) {
+ return f, true
+}
+
+// NewTransformNone creates a new no-op transformer that passes flows through unchanged
+func NewTransformNone() (Transformer, error) {
+ logrus.Debugf("entering NewTransformNone")
+ return &transformNone{}, nil
+}
+
+type Definition struct {
+ Type string
+ Generic api.TransformGeneric
+ Network api.TransformNetwork
+}
+
+type Definitions []Definition
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go
new file mode 100644
index 000000000..0d8dd7189
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transform
+
+import (
+ "fmt"
+ "math/rand"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/Knetic/govaluate"
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils/filters"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ tlog = logrus.WithField("component", "transform.Filter")
+ rndgen = rand.New(rand.NewSource(time.Now().UnixNano()))
+)
+
+type Filter struct {
+ Rules []api.TransformFilterRule
+ KeepRules []predicatesRule
+}
+
+type predicatesRule struct {
+ predicates []filters.Predicate
+ sampling uint16
+}
+
+// Transform transforms a flow; if false is returned as a second argument, the entry is dropped
+func (f *Filter) Transform(entry config.GenericMap) (config.GenericMap, bool) {
+ tlog.Tracef("f = %v", f)
+ outputEntry := entry.Copy()
+ labels := make(map[string]string)
+ if len(f.KeepRules) > 0 {
+ keep := false
+ for _, r := range f.KeepRules {
+ if applyPredicates(outputEntry, r) {
+ keep = true
+ break
+ }
+ }
+ if !keep {
+ return nil, false
+ }
+ }
+ for i := range f.Rules {
+ tlog.Tracef("rule = %v", f.Rules[i])
+ if cont := applyRule(outputEntry, labels, &f.Rules[i]); !cont {
+ return nil, false
+ }
+ }
+ // process accumulated labels into comma separated string
+ if len(labels) > 0 {
+ var sb strings.Builder
+ for key, value := range labels {
+ sb.WriteString(key)
+ sb.WriteString("=")
+ sb.WriteString(value)
+ sb.WriteString(",")
+ }
+ // remove trailing comma
+ labelsString := sb.String()
+ labelsString = strings.TrimRight(labelsString, ",")
+ outputEntry["labels"] = labelsString
+ }
+ return outputEntry, true
+}
+
+// Apply a rule. Returns false if it must stop processing rules (e.g. if entry must be removed)
+// nolint:cyclop
+func applyRule(entry config.GenericMap, labels map[string]string, rule *api.TransformFilterRule) bool {
+ switch rule.Type {
+ case api.RemoveField:
+ delete(entry, rule.RemoveField.Input)
+ case api.RemoveEntryIfExists:
+ if _, ok := entry[rule.RemoveEntry.Input]; ok {
+ return false
+ }
+ case api.RemoveEntryIfDoesntExist:
+ if _, ok := entry[rule.RemoveEntry.Input]; !ok {
+ return false
+ }
+ case api.RemoveEntryIfEqual:
+ if val, ok := entry[rule.RemoveEntry.Input]; ok {
+ if val == rule.RemoveEntry.Value {
+ return false
+ }
+ }
+ case api.RemoveEntryIfNotEqual:
+ if val, ok := entry[rule.RemoveEntry.Input]; ok {
+ if val != rule.RemoveEntry.Value {
+ return false
+ }
+ }
+ case api.AddField:
+ entry[rule.AddField.Input] = rule.AddField.Value
+ case api.AddFieldIfDoesntExist:
+ if _, ok := entry[rule.AddFieldIfDoesntExist.Input]; !ok {
+ entry[rule.AddFieldIfDoesntExist.Input] = rule.AddFieldIfDoesntExist.Value
+ }
+ case api.AddRegExIf:
+ matched, err := regexp.MatchString(rule.AddRegExIf.Parameters, utils.ConvertToString(entry[rule.AddRegExIf.Input]))
+ if err != nil {
+ return true
+ }
+ if matched {
+ entry[rule.AddRegExIf.Output] = entry[rule.AddRegExIf.Input]
+ entry[rule.AddRegExIf.Output+"_Matched"] = true
+ }
+ case api.AddFieldIf:
+ expressionString := fmt.Sprintf("val %s", rule.AddFieldIf.Parameters)
+ expression, err := govaluate.NewEvaluableExpression(expressionString)
+ if err != nil {
+ log.Warningf("Can't evaluate AddIf rule: %+v expression: %v. err %v", rule, expressionString, err)
+ return true
+ }
+ result, evaluateErr := expression.Evaluate(map[string]interface{}{"val": entry[rule.AddFieldIf.Input]})
+ if evaluateErr == nil && result.(bool) {
+ if rule.AddFieldIf.Assignee != "" {
+ entry[rule.AddFieldIf.Output] = rule.AddFieldIf.Assignee
+ } else {
+ entry[rule.AddFieldIf.Output] = entry[rule.AddFieldIf.Input]
+ }
+ entry[rule.AddFieldIf.Output+"_Evaluate"] = true
+ }
+ case api.AddLabel:
+ labels[rule.AddLabel.Input] = utils.ConvertToString(rule.AddLabel.Value)
+ case api.AddLabelIf:
+ // TODO perhaps add a cache of previously evaluated expressions
+ expressionString := fmt.Sprintf("val %s", rule.AddLabelIf.Parameters)
+ expression, err := govaluate.NewEvaluableExpression(expressionString)
+ if err != nil {
+ log.Warningf("Can't evaluate AddLabelIf rule: %+v expression: %v. err %v", rule, expressionString, err)
+ return true
+ }
+ result, evaluateErr := expression.Evaluate(map[string]interface{}{"val": entry[rule.AddLabelIf.Input]})
+ if evaluateErr == nil && result.(bool) {
+ labels[rule.AddLabelIf.Output] = rule.AddLabelIf.Assignee
+ }
+ case api.RemoveEntryAllSatisfied:
+ return !isRemoveEntrySatisfied(entry, rule.RemoveEntryAllSatisfied)
+ case api.ConditionalSampling:
+ return sample(entry, rule.ConditionalSampling)
+ case api.KeepEntryAllSatisfied:
+ // This should be processed only in "applyPredicates". Failure to do so is a bug.
+ tlog.Panicf("unexpected KeepEntryAllSatisfied: %v", rule)
+ default:
+ tlog.Panicf("unknown type %s for transform.Filter rule: %v", rule.Type, rule)
+ }
+ return true
+}
+
+func isRemoveEntrySatisfied(entry config.GenericMap, rules []*api.RemoveEntryRule) bool {
+ for _, r := range rules {
+ // applyRule returns false if the entry must be removed
+ if dontRemove := applyRule(entry, nil, &api.TransformFilterRule{Type: api.TransformFilterEnum(r.Type), RemoveEntry: r.RemoveEntry}); dontRemove {
+ return false
+ }
+ }
+ return true
+}
+
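+// applyPredicates returns true when the entry passes the sampling roll and satisfies
+// every predicate of the rule, false otherwise.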
+func applyPredicates(entry config.GenericMap, rule predicatesRule) bool {
+ if !rollSampling(rule.sampling) {
+ return false
+ }
+ for _, p := range rule.predicates {
+ if !p(entry) {
+ return false
+ }
+ }
+ return true
+}
+
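+// sample applies the first conditional sampling rule whose conditions are satisfied by the
+// entry and rolls its sampling ratio; entries matching no condition are always kept.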
+func sample(entry config.GenericMap, rules []*api.SamplingCondition) bool {
+ for _, r := range rules {
+ if isRemoveEntrySatisfied(entry, r.Rules) {
+ return rollSampling(r.Value)
+ }
+ }
+ return true
+}
+
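+// rollSampling keeps the entry with a probability of 1/value; a value of 0 disables
+// sampling and keeps everything.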
+func rollSampling(value uint16) bool {
+ return value == 0 || (rndgen.Intn(int(value)) == 0)
+}
+
+// NewTransformFilter creates a new filter transform
+func NewTransformFilter(params config.StageParam) (Transformer, error) {
+ tlog.Debugf("entering NewTransformFilter")
+ keepRules := []predicatesRule{}
+ rules := []api.TransformFilterRule{}
+ if params.Transform != nil && params.Transform.Filter != nil {
+ params.Transform.Filter.Preprocess()
+ for i := range params.Transform.Filter.Rules {
+ baseRules := &params.Transform.Filter.Rules[i]
+ if baseRules.Type == api.KeepEntryAllSatisfied {
+ pr := predicatesRule{sampling: baseRules.KeepEntrySampling}
+ for _, keepRule := range baseRules.KeepEntryAllSatisfied {
+ pred, err := filters.FromKeepEntry(keepRule)
+ if err != nil {
+ return nil, err
+ }
+ pr.predicates = append(pr.predicates, pred)
+ }
+ keepRules = append(keepRules, pr)
+ } else {
+ rules = append(rules, *baseRules)
+ }
+ }
+ }
+ transformFilter := &Filter{
+ Rules: rules,
+ KeepRules: keepRules,
+ }
+ return transformFilter, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go
new file mode 100644
index 000000000..f30ca840d
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transform
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/sirupsen/logrus"
+)
+
+var glog = logrus.WithField("component", "transform.Generic")
+
+type Generic struct {
+ policy api.TransformGenericOperationEnum
+ rules []api.GenericTransformRule
+}
+
+// Transform transforms a flow to a new set of keys
+func (g *Generic) Transform(entry config.GenericMap) (config.GenericMap, bool) {
+ var outputEntry config.GenericMap
+ ok := true
+ glog.Tracef("Transform input = %v", entry)
+ if g.policy != "replace_keys" {
+ outputEntry = entry.Copy()
+ } else {
+ outputEntry = config.GenericMap{}
+ }
+ for _, transformRule := range g.rules {
+ if transformRule.Multiplier != 0 {
+ ok = g.performMultiplier(entry, transformRule, outputEntry)
+ } else {
+ outputEntry[transformRule.Output] = entry[transformRule.Input]
+ }
+ }
+ glog.Tracef("Transform output = %v", outputEntry)
+ return outputEntry, ok
+}
+
+func (g *Generic) performMultiplier(entry config.GenericMap, transformRule api.GenericTransformRule, outputEntry config.GenericMap) bool {
+ ok := true
+ switch val := entry[transformRule.Input].(type) {
+ case int:
+ outputEntry[transformRule.Output] = transformRule.Multiplier * val
+ case uint:
+ outputEntry[transformRule.Output] = uint(transformRule.Multiplier) * val
+ case int8:
+ outputEntry[transformRule.Output] = int8(transformRule.Multiplier) * val
+ case uint8:
+ outputEntry[transformRule.Output] = uint8(transformRule.Multiplier) * val
+ case int16:
+ outputEntry[transformRule.Output] = int16(transformRule.Multiplier) * val
+ case uint16:
+ outputEntry[transformRule.Output] = uint16(transformRule.Multiplier) * val
+ case int32:
+ outputEntry[transformRule.Output] = int32(transformRule.Multiplier) * val
+ case uint32:
+ outputEntry[transformRule.Output] = uint32(transformRule.Multiplier) * val
+ case int64:
+ outputEntry[transformRule.Output] = int64(transformRule.Multiplier) * val
+ case uint64:
+ outputEntry[transformRule.Output] = uint64(transformRule.Multiplier) * val
+ case float32:
+ outputEntry[transformRule.Output] = float32(transformRule.Multiplier) * val
+ case float64:
+ outputEntry[transformRule.Output] = float64(transformRule.Multiplier) * val
+ default:
+ ok = false
+ glog.Errorf("%s not of numerical type; cannot perform multiplication", transformRule.Output)
+ }
+ return ok
+}
+
+// NewTransformGeneric creates a new generic transform
+func NewTransformGeneric(params config.StageParam) (Transformer, error) {
+ glog.Debugf("entering NewTransformGeneric")
+ genConfig := api.TransformGeneric{}
+ if params.Transform != nil && params.Transform.Generic != nil {
+ genConfig = *params.Transform.Generic
+ }
+ glog.Debugf("params.Transform.Generic = %v", genConfig)
+ rules := genConfig.Rules
+ policy := genConfig.Policy
+ switch policy {
+ case api.ReplaceKeys, api.PreserveOriginalKeys, "":
+ // valid; nothing to do
+ glog.Infof("NewTransformGeneric, policy = %s", policy)
+ default:
+ glog.Panicf("unknown policy %s for transform.generic", policy)
+ }
+ transformGeneric := &Generic{
+ policy: policy,
+ rules: rules,
+ }
+ glog.Debugf("transformGeneric = %v", transformGeneric)
+ return transformGeneric, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go
new file mode 100644
index 000000000..09934ae7e
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transform
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ util "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+ "github.com/sirupsen/logrus"
+)
+
+var log = logrus.WithField("component", "transform.Network")
+
+type Network struct {
+ api.TransformNetwork
+ svcNames *netdb.ServiceNames
+ snLabels []subnetLabel
+ ipLabelCache *utils.TimedCache
+}
+
+type subnetLabel struct {
+ cidrs []*net.IPNet
+ name string
+}
+
+//nolint:cyclop
+func (n *Network) Transform(inputEntry config.GenericMap) (config.GenericMap, bool) {
+ // copy input entry before transform to avoid alteration on parallel stages
+ outputEntry := inputEntry.Copy()
+
+ for _, rule := range n.Rules {
+ switch rule.Type {
+ case api.NetworkAddSubnet:
+ if rule.AddSubnet == nil {
+ log.Errorf("Missing add subnet configuration")
+ continue
+ }
+ if v, ok := outputEntry.LookupString(rule.AddSubnet.Input); ok {
+ _, ipv4Net, err := net.ParseCIDR(v + rule.AddSubnet.SubnetMask)
+ if err != nil {
+ log.Warningf("Can't find subnet for IP %v and prefix length %s - err %v", v, rule.AddSubnet.SubnetMask, err)
+ continue
+ }
+ outputEntry[rule.AddSubnet.Output] = ipv4Net.String()
+ }
+ case api.NetworkAddLocation:
+ if rule.AddLocation == nil {
+ log.Errorf("Missing add location configuration")
+ continue
+ }
+ var locationInfo *location.Info
+ locationInfo, err := location.GetLocation(util.ConvertToString(outputEntry[rule.AddLocation.Input]))
+ if err != nil {
+ log.Warningf("Can't find location for IP %v err %v", outputEntry[rule.AddLocation.Input], err)
+ continue
+ }
+ outputEntry[rule.AddLocation.Output+"_CountryName"] = locationInfo.CountryName
+ outputEntry[rule.AddLocation.Output+"_CountryLongName"] = locationInfo.CountryLongName
+ outputEntry[rule.AddLocation.Output+"_RegionName"] = locationInfo.RegionName
+ outputEntry[rule.AddLocation.Output+"_CityName"] = locationInfo.CityName
+ outputEntry[rule.AddLocation.Output+"_Latitude"] = locationInfo.Latitude
+ outputEntry[rule.AddLocation.Output+"_Longitude"] = locationInfo.Longitude
+ case api.NetworkAddService:
+ if rule.AddService == nil {
+ log.Errorf("Missing add service configuration")
+ continue
+ }
+ // Should be optimized (unused in netobserv)
+ protocol := fmt.Sprintf("%v", outputEntry[rule.AddService.Protocol])
+ portNumber, err := strconv.Atoi(fmt.Sprintf("%v", outputEntry[rule.AddService.Input]))
+ if err != nil {
+ log.Errorf("Can't convert port to int: Port %v - err %v", outputEntry[rule.AddService.Input], err)
+ continue
+ }
+ var serviceName string
+ protocolAsNumber, err := strconv.Atoi(protocol)
+ if err == nil {
+ // protocol has been submitted as number
+ serviceName = n.svcNames.ByPortAndProtocolNumber(portNumber, protocolAsNumber)
+ } else {
+ // protocol has been submitted as any string
+ serviceName = n.svcNames.ByPortAndProtocolName(portNumber, protocol)
+ }
+ if serviceName == "" {
+ if err != nil {
+ log.Debugf("Can't find service name for Port %v and protocol %v - err %v", outputEntry[rule.AddService.Input], protocol, err)
+ continue
+ }
+ }
+ outputEntry[rule.AddService.Output] = serviceName
+ case api.NetworkAddKubernetes:
+ kubernetes.Enrich(outputEntry, rule.Kubernetes)
+ case api.NetworkAddKubernetesInfra:
+ if rule.KubernetesInfra == nil {
+ logrus.Error("transformation rule: Missing configuration ")
+ continue
+ }
+ kubernetes.EnrichLayer(outputEntry, rule.KubernetesInfra)
+ case api.NetworkReinterpretDirection:
+ reinterpretDirection(outputEntry, &n.DirectionInfo)
+ case api.NetworkAddSubnetLabel:
+ if rule.AddSubnetLabel == nil {
+ logrus.Error("AddSubnetLabel rule: Missing configuration ")
+ continue
+ }
+ if anyIP, ok := outputEntry[rule.AddSubnetLabel.Input]; ok {
+ if strIP, ok := anyIP.(string); ok {
+ lbl, ok := n.ipLabelCache.GetCacheEntry(strIP)
+ if !ok {
+ lbl = n.applySubnetLabel(strIP)
+ n.ipLabelCache.UpdateCacheEntry(strIP, lbl)
+ }
+ if lbl != "" {
+ outputEntry[rule.AddSubnetLabel.Output] = lbl
+ }
+ }
+ }
+ case api.NetworkDecodeTCPFlags:
+ if anyFlags, ok := outputEntry[rule.DecodeTCPFlags.Input]; ok && anyFlags != nil {
+ if flags, ok := anyFlags.(uint16); ok {
+ flags := util.DecodeTCPFlags(flags)
+ outputEntry[rule.DecodeTCPFlags.Output] = flags
+ }
+ }
+
+ default:
+ log.Panicf("unknown type %s for transform.Network rule: %v", rule.Type, rule)
+ }
+ }
+
+ return outputEntry, true
+}
+
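+// applySubnetLabel returns the name of the first configured subnet label whose CIDRs
+// contain the given IP, or an empty string when none matches or the IP cannot be parsed.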
+func (n *Network) applySubnetLabel(strIP string) string {
+ ip := net.ParseIP(strIP)
+ if ip != nil {
+ for _, subnetCat := range n.snLabels {
+ for _, cidr := range subnetCat.cidrs {
+ if cidr.Contains(ip) {
+ return subnetCat.name
+ }
+ }
+ }
+ }
+ return ""
+}
+
+// NewTransformNetwork creates a new network transform
+//
+//nolint:cyclop
+func NewTransformNetwork(params config.StageParam, opMetrics *operational.Metrics) (Transformer, error) {
+ var needToInitLocationDB = false
+ var needToInitKubeData = false
+ var needToInitNetworkServices = false
+
+ jsonNetworkTransform := api.TransformNetwork{}
+ if params.Transform != nil && params.Transform.Network != nil {
+ jsonNetworkTransform = *params.Transform.Network
+ }
+ for _, rule := range jsonNetworkTransform.Rules {
+ switch rule.Type {
+ case api.NetworkAddLocation:
+ needToInitLocationDB = true
+ case api.NetworkAddKubernetes:
+ needToInitKubeData = true
+ case api.NetworkAddKubernetesInfra:
+ needToInitKubeData = true
+ case api.NetworkAddService:
+ needToInitNetworkServices = true
+ case api.NetworkReinterpretDirection:
+ if err := validateReinterpretDirectionConfig(&jsonNetworkTransform.DirectionInfo); err != nil {
+ return nil, err
+ }
+ case api.NetworkAddSubnetLabel:
+ if len(jsonNetworkTransform.SubnetLabels) == 0 {
+ return nil, fmt.Errorf("a rule '%s' was found, but there are no subnet labels configured", api.NetworkAddSubnetLabel)
+ }
+ case api.NetworkAddSubnet, api.NetworkDecodeTCPFlags:
+ // nothing
+ }
+ }
+
+ if needToInitLocationDB {
+ err := location.InitLocationDB()
+ if err != nil {
+ log.Debugf("location.InitLocationDB error: %v", err)
+ }
+ }
+
+ if needToInitKubeData {
+ err := kubernetes.InitFromConfig(jsonNetworkTransform.KubeConfig, opMetrics)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var servicesDB *netdb.ServiceNames
+ if needToInitNetworkServices {
+ pFilename, sFilename := jsonNetworkTransform.GetServiceFiles()
+ var err error
+ protos, err := os.Open(pFilename)
+ if err != nil {
+ return nil, fmt.Errorf("opening protocols file %q: %w", pFilename, err)
+ }
+ defer protos.Close()
+ services, err := os.Open(sFilename)
+ if err != nil {
+ return nil, fmt.Errorf("opening services file %q: %w", sFilename, err)
+ }
+ defer services.Close()
+ servicesDB, err = netdb.LoadServicesDB(protos, services)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var subnetCats []subnetLabel
+ for _, category := range jsonNetworkTransform.SubnetLabels {
+ var cidrs []*net.IPNet
+ for _, cidr := range category.CIDRs {
+ _, parsed, err := net.ParseCIDR(cidr)
+ if err != nil {
+ return nil, fmt.Errorf("category %s: fail to parse CIDR, %w", category.Name, err)
+ }
+ cidrs = append(cidrs, parsed)
+ }
+ if len(cidrs) > 0 {
+ subnetCats = append(subnetCats, subnetLabel{name: category.Name, cidrs: cidrs})
+ }
+ }
+
+ return &Network{
+ TransformNetwork: api.TransformNetwork{
+ Rules: jsonNetworkTransform.Rules,
+ DirectionInfo: jsonNetworkTransform.DirectionInfo,
+ },
+ svcNames: servicesDB,
+ snLabels: subnetCats,
+ ipLabelCache: utils.NewQuietExpiringTimedCache(2 * time.Minute),
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go
new file mode 100644
index 000000000..5f466088f
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go
@@ -0,0 +1,64 @@
+package transform
+
+import (
+ "fmt"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+)
+
+const (
+ ingress = 0
+ egress = 1
+ inner = 2
+)
+
+func validateReinterpretDirectionConfig(info *api.NetworkTransformDirectionInfo) error {
+ if info.FlowDirectionField == "" {
+ return fmt.Errorf("invalid config for transform.Network rule %s: missing FlowDirectionField", api.NetworkReinterpretDirection)
+ }
+ if info.ReporterIPField == "" {
+ return fmt.Errorf("invalid config for transform.Network rule %s: missing ReporterIPField", api.NetworkReinterpretDirection)
+ }
+ if info.SrcHostField == "" {
+ return fmt.Errorf("invalid config for transform.Network rule %s: missing SrcHostField", api.NetworkReinterpretDirection)
+ }
+ if info.DstHostField == "" {
+ return fmt.Errorf("invalid config for transform.Network rule %s: missing DstHostField", api.NetworkReinterpretDirection)
+ }
+ return nil
+}
+
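+// reinterpretDirection rewrites the flow direction field from the reporter's point of view:
+// flows reported by the source node are marked egress, flows reported by the destination
+// node are marked ingress, and node-internal flows (same source and destination host) are
+// marked inner. The original interface-level direction is preserved in IfDirectionField
+// when that field is configured.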
+func reinterpretDirection(output config.GenericMap, info *api.NetworkTransformDirectionInfo) {
+ if fd, ok := output[info.FlowDirectionField]; ok && len(info.IfDirectionField) > 0 {
+ output[info.IfDirectionField] = fd
+ }
+ var srcNode, dstNode, reporter string
+ if gen, ok := output[info.ReporterIPField]; ok {
+ if str, ok := gen.(string); ok {
+ reporter = str
+ }
+ }
+ if len(reporter) == 0 {
+ return
+ }
+ if gen, ok := output[info.SrcHostField]; ok {
+ if str, ok := gen.(string); ok {
+ srcNode = str
+ }
+ }
+ if gen, ok := output[info.DstHostField]; ok {
+ if str, ok := gen.(string); ok {
+ dstNode = str
+ }
+ }
+ if srcNode != dstNode {
+ if srcNode == reporter {
+ output[info.FlowDirectionField] = egress
+ } else if dstNode == reporter {
+ output[info.FlowDirectionField] = ingress
+ }
+ } else if srcNode != "" {
+ output[info.FlowDirectionField] = inner
+ }
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/batcher.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/batcher.go
new file mode 100644
index 000000000..1b01816e0
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/batcher.go
@@ -0,0 +1,44 @@
+package utils
+
+import (
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/sirupsen/logrus"
+)
+
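+// Batcher accumulates entries read from inCh and invokes action with the accumulated batch,
+// either when maxBatchLength entries have been collected or when batchTimeout elapses,
+// whichever comes first. It returns when closeCh is closed.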
+func Batcher(
+ closeCh <-chan struct{},
+ maxBatchLength int,
+ batchTimeout time.Duration,
+ inCh <-chan config.GenericMap,
+ action func([]config.GenericMap),
+) {
+ log := logrus.WithField("component", "utils.Batcher")
+ invokeTicker := time.NewTicker(batchTimeout)
+ var entries []config.GenericMap
+ log.Debug("starting")
+ for {
+ select {
+ case <-closeCh:
+ log.Debug("exiting due to closeCh")
+ return
+ case <-invokeTicker.C:
+ if len(entries) == 0 {
+ continue
+ }
+ log.Debugf("ticker signal: invoking action with %d entries", len(entries))
+ es := entries
+ entries = nil
+ action(es)
+ case gm := <-inCh:
+ entries = append(entries, gm)
+ if len(entries) >= maxBatchLength {
+ log.Debugf("batch complete: invoking action with %d entries", len(entries))
+ es := entries
+ entries = nil
+ action(es)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/connections.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/connections.go
new file mode 100644
index 000000000..d2da7fa6e
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/connections.go
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package utils
+
+import (
+ "fmt"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+)
+
+const subnetBatchSize = 254
+
+// GenerateConnectionFlowEntries generates data with one entry for each of nConnections.
+// The entries are created in a predictable manner so that the first K entries in each call
+// to the function reproduce the same connections.
+// Addresses 0 and 255 are avoided since these sometimes have special meanings.
+func GenerateConnectionFlowEntries(nConnections int) []config.GenericMap {
+ entries := make([]config.GenericMap, 0)
+ n1 := subnetBatchSize
+ n2 := subnetBatchSize
+ n3 := subnetBatchSize
+ n4 := subnetBatchSize
+ count := 0
+ for l := 1; l <= n4; l++ {
+ for k := 1; k <= n3; k++ {
+ for j := 1; j <= n2; j++ {
+ for i := 1; i <= n1; i++ {
+ srcAddr := fmt.Sprintf("%d.%d.%d.%d", l, k, j, i)
+ count++
+ entry := config.GenericMap{
+ "SrcAddr": srcAddr,
+ "SrcPort": 1234,
+ "DstAddr": "11.1.1.1",
+ "DstPort": 8000,
+ "Bytes": 100,
+ "Packets": 1,
+ "Proto": 6,
+ "SrcAS": 0,
+ "DstAS": 0,
+ "TimeReceived": 0,
+ }
+ entries = append(entries, entry)
+ if count >= nConnections {
+ return entries
+ }
+ }
+ }
+ }
+ }
+ return entries
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/exit.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/exit.go
new file mode 100644
index 000000000..7bbfb7cc2
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/exit.go
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package utils
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ exitChannel chan struct{}
+)
+
+func ExitChannel() <-chan struct{} {
+ return exitChannel
+}
+
+// InitExitChannel and CloseExitChannel are needed for some tests
+func InitExitChannel() {
+ exitChannel = make(chan struct{})
+}
+
+func CloseExitChannel() {
+ close(exitChannel)
+}
+
+func SetupElegantExit() {
+ logrus.Debugf("entering SetupElegantExit")
+ // handle elegant exit: create a channel that goroutines can watch in order to exit cleanly
+ exitChannel = make(chan struct{})
+ exitSigChan := make(chan os.Signal, 1)
+ logrus.Debugf("registered exit signal channel")
+ signal.Notify(exitSigChan, syscall.SIGINT, syscall.SIGTERM)
+ go func() {
+ // wait for exit signal; then stop all the other go functions
+ sig := <-exitSigChan
+ logrus.Debugf("received exit signal = %v", sig)
+ close(exitChannel)
+ logrus.Debugf("exiting SetupElegantExit go function")
+ }()
+ logrus.Debugf("exiting SetupElegantExit")
+}
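A minimal sketch of a worker loop that honors the exit channel, assuming SetupElegantExit was called at startup; the tick interval is arbitrary:

// assumes imports: time, github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils
utils.SetupElegantExit()
go func() {
    ticker := time.NewTicker(10 * time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-utils.ExitChannel():
            return // SIGINT/SIGTERM received
        case <-ticker.C:
            // periodic work
        }
    }
}()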
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/multiorderedmap.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/multiorderedmap.go
new file mode 100644
index 000000000..17bebe36e
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/multiorderedmap.go
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file defines a multi-ordered map data structure. It supports insertion, deletion and retrieval in O(1) on
+// average, like a regular map. In addition, it allows iterating over the records by multiple orders.
+// New records are pushed to the back of each of the defined orders. Existing records can be moved to the back or
+// the front of a specific order with MoveToBack() and MoveToFront().
+// Note: MultiOrderedMap isn't responsible for keeping the records sorted. The user should take care of that.
+
+package utils
+
+import (
+ "container/list"
+ "fmt"
+)
+
+type Key uint64
+type Record interface{}
+type OrderID string
+type processRecordFunc func(Record) (delete, stop bool)
+
+type recordWrapper struct {
+ record Record
+ key Key
+ orderID2element map[OrderID]*list.Element
+}
+
+type MultiOrderedMap struct {
+ m map[Key]*recordWrapper
+ orders map[OrderID]*list.List
+}
+
+// NewMultiOrderedMap returns an initialized MultiOrderedMap.
+func NewMultiOrderedMap(orderIDs ...OrderID) *MultiOrderedMap {
+ mom := &MultiOrderedMap{
+ m: map[Key]*recordWrapper{},
+ orders: map[OrderID]*list.List{},
+ }
+ for _, id := range orderIDs {
+ mom.orders[id] = list.New()
+ }
+ return mom
+}
+
+// Len returns the number of records of the multi-ordered map mom.
+func (mom MultiOrderedMap) Len() int {
+ return len(mom.m)
+}
+
+// AddRecord adds a record to the multi-ordered map.
+func (mom MultiOrderedMap) AddRecord(key Key, record Record) error {
+ if _, found := mom.GetRecord(key); found {
+ return fmt.Errorf("record with key %x already exists", key)
+ }
+ rw := &recordWrapper{key: key, record: record, orderID2element: map[OrderID]*list.Element{}}
+ mom.m[key] = rw
+ for orderID, orderList := range mom.orders {
+ elem := orderList.PushBack(rw)
+ rw.orderID2element[orderID] = elem
+ }
+ return nil
+}
+
+// GetRecord returns the record of key `key` and true if the key exists. Otherwise, nil and false is returned.
+func (mom MultiOrderedMap) GetRecord(key Key) (Record, bool) {
+ rw, found := mom.m[key]
+ if !found {
+ return nil, false
+ }
+ return rw.record, true
+}
+
+// RemoveRecord removes the record of key `key`. If the key doesn't exist, RemoveRecord is a no-op.
+func (mom MultiOrderedMap) RemoveRecord(key Key) {
+ rw, found := mom.m[key]
+ if !found {
+ return
+ }
+ for orderID, elem := range rw.orderID2element {
+ mom.orders[orderID].Remove(elem)
+ }
+ delete(mom.m, key)
+}
+
+// MoveToBack moves the record of key `key` to the back of orderID. If the key or the orderID doesn't exist, an error
+// is returned.
+func (mom MultiOrderedMap) MoveToBack(key Key, orderID OrderID) error {
+ rw, found := mom.m[key]
+ if !found {
+ return fmt.Errorf("can't MoveToBack non-existing key %x (order id %q)", key, orderID)
+ }
+ elem, found := rw.orderID2element[orderID]
+ if !found {
+ return fmt.Errorf("can't MoveToBack non-existing order id %q (key %x)", orderID, key)
+ }
+ mom.orders[orderID].MoveToBack(elem)
+ return nil
+}
+
+// MoveToFront moves the record of key `key` to the front of orderID. If the key or the orderID doesn't exist, an error
+// is returned.
+func (mom MultiOrderedMap) MoveToFront(key Key, orderID OrderID) error {
+ rw, found := mom.m[key]
+ if !found {
+ return fmt.Errorf("can't MoveToFront non-existing key %x (order id %q)", key, orderID)
+ }
+ elem, found := rw.orderID2element[orderID]
+ if !found {
+ return fmt.Errorf("can't MoveToFront non-existing order id %q (key %x)", orderID, key)
+ }
+ mom.orders[orderID].MoveToFront(elem)
+ return nil
+}
+
+// IterateFrontToBack iterates over the records by orderID. It applies function f() on each record.
+// f() returns two booleans `delete` and `stop` that control whether to remove the record from the multi-ordered map
+// and whether to stop the iteration respectively.
+func (mom MultiOrderedMap) IterateFrontToBack(orderID OrderID, f processRecordFunc) {
+ if _, found := mom.orders[orderID]; !found {
+ panic(fmt.Sprintf("Unknown order id %q", orderID))
+ }
+ // Removing an element from a list while iterating over that same list, see
+ // https://stackoverflow.com/a/27662823/2749989
+ var next *list.Element
+ for e := mom.orders[orderID].Front(); e != nil; e = next {
+ rw := e.Value.(*recordWrapper)
+ next = e.Next()
+ shouldDelete, shouldStop := f(rw.record)
+ if shouldDelete {
+ mom.RemoveRecord(rw.key)
+ }
+ if shouldStop {
+ break
+ }
+ }
+}
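For reference, a sketch of one record set iterated by two independent orders; the order identifiers and keys are arbitrary:

// assumes imports: fmt, github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils
byCreation := utils.OrderID("create")
byUpdate := utils.OrderID("update")
mom := utils.NewMultiOrderedMap(byCreation, byUpdate)
_ = mom.AddRecord(utils.Key(1), "first")
_ = mom.AddRecord(utils.Key(2), "second")
_ = mom.MoveToBack(utils.Key(1), byUpdate) // key 1 was touched most recently
mom.IterateFrontToBack(byUpdate, func(r utils.Record) (bool, bool) {
    fmt.Println(r) // prints "second", then "first"
    return false, false // keep the record, keep iterating
})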
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/params_parse.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/params_parse.go
new file mode 100644
index 000000000..0406db8ce
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/params_parse.go
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package utils
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go
new file mode 100644
index 000000000..06d0c75d2
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go
@@ -0,0 +1,40 @@
+package utils
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/segmentio/kafka-go/sasl"
+ "github.com/segmentio/kafka-go/sasl/plain"
+ "github.com/segmentio/kafka-go/sasl/scram"
+)
+
+func SetupSASLMechanism(cfg *api.SASLConfig) (sasl.Mechanism, error) {
+ // Read client ID
+ id, err := os.ReadFile(cfg.ClientIDPath)
+ if err != nil {
+ return nil, err
+ }
+ strID := strings.TrimSpace(string(id))
+ // Read password
+ pwd, err := os.ReadFile(cfg.ClientSecretPath)
+ if err != nil {
+ return nil, err
+ }
+ strPwd := strings.TrimSpace(string(pwd))
+ var mechanism sasl.Mechanism
+ switch cfg.Type {
+ case api.SASLPlain:
+ mechanism = plain.Mechanism{Username: strID, Password: strPwd}
+ case api.SASLScramSHA512:
+ mechanism, err = scram.Mechanism(scram.SHA512, strID, strPwd)
+ default:
+ return nil, fmt.Errorf("unknown SASL type: %s", cfg.Type)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return mechanism, nil
+}
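A sketch of building a SCRAM-SHA-512 mechanism from files mounted by a Secret; the mount paths below are hypothetical:

// assumes imports: log, github.com/netobserv/flowlogs-pipeline/pkg/api,
//                  github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils
mech, err := utils.SetupSASLMechanism(&api.SASLConfig{
    Type:             api.SASLScramSHA512,
    ClientIDPath:     "/var/run/secrets/kafka/client-id",     // hypothetical mount path
    ClientSecretPath: "/var/run/secrets/kafka/client-secret", // hypothetical mount path
})
if err != nil {
    log.Fatal(err)
}
// mech can then be plugged into a kafka-go transport or dialer.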
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go
new file mode 100644
index 000000000..bedc38c4f
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package utils
+
+import (
+ "container/list"
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+var log = logrus.WithField("component", "utils.TimedCache")
+
+// Functions to manage an LRU cache with an expiry.
+// When an item expires, an optional callback lets the specific implementation perform its own cleanup.
+// The cache size can be limited by setting maxEntries; when the cache is full, new items are not added.
+
+type CacheCallback func(entry interface{})
+
+type cacheEntry struct {
+ key string
+ lastUpdatedTime time.Time
+ e *list.Element
+ SourceEntry interface{}
+}
+
+type TimedCacheMap map[string]*cacheEntry
+
+// If maxEntries is non-zero, this limits the number of entries in the cache to the number specified.
+// If maxEntries is zero, the cache has no size limit.
+type TimedCache struct {
+ mu sync.RWMutex
+ cacheList *list.List
+ cacheMap TimedCacheMap
+ maxEntries int
+ cacheLenMetric prometheus.Gauge
+}
+
+func (tc *TimedCache) GetCacheEntry(key string) (interface{}, bool) {
+ tc.mu.RLock()
+ defer tc.mu.RUnlock()
+ cEntry, ok := tc.cacheMap[key]
+ if ok {
+ return cEntry.SourceEntry, ok
+ }
+ return nil, ok
+}
+
+var uclog = log.WithField("method", "UpdateCacheEntry")
+
+// If the cache entry exists, update its timestamp and move it to the back of the list; if it does not exist, create it if there is room.
+// If the cache is already at its maximum size, do not allocate a new entry and return false.
+func (tc *TimedCache) UpdateCacheEntry(key string, entry interface{}) bool {
+ nowInSecs := time.Now()
+ tc.mu.Lock()
+ defer tc.mu.Unlock()
+ cEntry, ok := tc.cacheMap[key]
+ if ok {
+ // item already exists in cache; update the element and move to end of list
+ cEntry.lastUpdatedTime = nowInSecs
+ // move to end of list
+ tc.cacheList.MoveToBack(cEntry.e)
+ } else {
+ // create new entry for cache
+ if (tc.maxEntries > 0) && (tc.cacheList.Len() >= tc.maxEntries) {
+ return false
+ }
+ cEntry = &cacheEntry{
+ lastUpdatedTime: nowInSecs,
+ key: key,
+ SourceEntry: entry,
+ }
+ uclog.Tracef("adding entry: %#v", cEntry)
+ // place at end of list
+ cEntry.e = tc.cacheList.PushBack(cEntry)
+ tc.cacheMap[key] = cEntry
+ if tc.cacheLenMetric != nil {
+ tc.cacheLenMetric.Inc()
+ }
+ }
+ return true
+}
+
+func (tc *TimedCache) GetCacheLen() int {
+ tc.mu.RLock()
+ defer tc.mu.RUnlock()
+ return tc.cacheList.Len()
+}
+
+// Iterate holds the read lock while calling f, so f must not call methods such as UpdateCacheEntry()
+// that take the write lock, otherwise it will deadlock.
+// TODO: If the callback needs to update the cache, then we need a method to perform it without taking the lock again.
+func (tc *TimedCache) Iterate(f func(key string, value interface{})) {
+ tc.mu.RLock()
+ defer tc.mu.RUnlock()
+ for k, v := range tc.cacheMap {
+ f(k, v.SourceEntry)
+ }
+}
+
+// CleanupExpiredEntries removes items from the cache that were last touched longer than expiry ago
+func (tc *TimedCache) CleanupExpiredEntries(expiry time.Duration, callback CacheCallback) {
+ tc.mu.Lock()
+ defer tc.mu.Unlock()
+
+ clog := log.WithFields(logrus.Fields{
+ "mapLen": len(tc.cacheMap),
+ "listLen": tc.cacheList.Len(),
+ })
+ clog.Debugf("cleaning up expired entries")
+
+ expireTime := time.Now().Add(-expiry)
+ deleted := 0
+ // go through the list until we reach recently used entries
+ for {
+ listEntry := tc.cacheList.Front()
+ if listEntry == nil {
+ return
+ }
+ pCacheInfo := listEntry.Value.(*cacheEntry)
+ if pCacheInfo.lastUpdatedTime.After(expireTime) {
+ // no more expired items
+ clog.Debugf("deleted %d expired entries", deleted)
+ return
+ }
+ deleted++
+ if callback != nil {
+ callback(pCacheInfo.SourceEntry)
+ }
+ delete(tc.cacheMap, pCacheInfo.key)
+ tc.cacheList.Remove(listEntry)
+ if tc.cacheLenMetric != nil {
+ tc.cacheLenMetric.Dec()
+ }
+ }
+}
+
+func NewTimedCache(maxEntries int, cacheLenMetric prometheus.Gauge) *TimedCache {
+ l := &TimedCache{
+ cacheList: list.New(),
+ cacheMap: make(TimedCacheMap),
+ maxEntries: maxEntries,
+ cacheLenMetric: cacheLenMetric,
+ }
+ return l
+}
+
+func NewQuietExpiringTimedCache(expiry time.Duration) *TimedCache {
+ l := &TimedCache{
+ cacheList: list.New(),
+ cacheMap: make(TimedCacheMap),
+ }
+
+ ticker := time.NewTicker(expiry)
+ go func() {
+ for {
+ select {
+ case <-ExitChannel():
+ return
+ case <-ticker.C:
+ l.CleanupExpiredEntries(expiry, func(_ interface{}) {})
+ }
+ }
+ }()
+
+ return l
+}
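A sketch of an unbounded cache (no entry limit, no gauge metric) with manual cleanup; the key, stored value and expiry are arbitrary:

// assumes imports: time, github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils
tc := utils.NewTimedCache(0, nil)
tc.UpdateCacheEntry("flow-key-1", map[string]int{"Bytes": 100})
if v, ok := tc.GetCacheEntry("flow-key-1"); ok {
    _ = v // the stored value, returned unchanged
}
// typically called periodically:
tc.CleanupExpiredEntries(5*time.Minute, func(entry interface{}) {
    // release resources tied to the expired entry
})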
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/client.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/client.go
new file mode 100644
index 000000000..9af18a435
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/client.go
@@ -0,0 +1,41 @@
+package grpc
+
+import (
+ "flag"
+ "log"
+
+ pb "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/utils"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+)
+
+// ClientConnection wraps a gRPC+protobuf connection
+type ClientConnection struct {
+ client pb.CollectorClient
+ conn *grpc.ClientConn
+}
+
+func ConnectClient(hostIP string, hostPort int) (*ClientConnection, error) {
+ flag.Parse()
+ // Set up a connection to the server.
+ socket := utils.GetSocket(hostIP, hostPort)
+ conn, err := grpc.NewClient(socket, grpc.WithTransportCredentials(insecure.NewCredentials()))
+
+ if err != nil {
+ log.Fatalf("did not connect: %v", err)
+ }
+
+ return &ClientConnection{
+ client: pb.NewCollectorClient(conn),
+ conn: conn,
+ }, nil
+}
+
+func (cp *ClientConnection) Client() pb.CollectorClient {
+ return cp.client
+}
+
+func (cp *ClientConnection) Close() error {
+ return cp.conn.Close()
+}
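A sketch of sending one JSON-encoded record through this client, mirroring what the writeGRPC stage later in this patch does; the host, port and payload are made up:

// assumes imports: context, encoding/json, log,
//                  github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc,
//                  .../write/grpc/genericmap, google.golang.org/protobuf/types/known/anypb
cc, err := grpc.ConnectClient("127.0.0.1", 9999)
if err != nil {
    log.Fatal(err)
}
defer cc.Close()
payload, _ := json.Marshal(map[string]interface{}{"SrcAddr": "10.0.0.1"})
if _, err := cc.Client().Send(context.TODO(), &genericmap.Flow{
    GenericMap: &anypb.Any{Value: payload},
}); err != nil {
    log.Println("send failed:", err)
}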
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap.pb.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap.pb.go
new file mode 100644
index 000000000..a3ba1db8a
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap.pb.go
@@ -0,0 +1,209 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.0
+// protoc v3.19.6
+// source: proto/genericmap.proto
+
+package genericmap
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The request message containing the GenericMap
+type Flow struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GenericMap *anypb.Any `protobuf:"bytes,1,opt,name=genericMap,proto3" json:"genericMap,omitempty"`
+}
+
+func (x *Flow) Reset() {
+ *x = Flow{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_proto_genericmap_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Flow) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Flow) ProtoMessage() {}
+
+func (x *Flow) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_genericmap_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Flow.ProtoReflect.Descriptor instead.
+func (*Flow) Descriptor() ([]byte, []int) {
+ return file_proto_genericmap_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Flow) GetGenericMap() *anypb.Any {
+ if x != nil {
+ return x.GenericMap
+ }
+ return nil
+}
+
+// intentionally empty
+type CollectorReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CollectorReply) Reset() {
+ *x = CollectorReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_proto_genericmap_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectorReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectorReply) ProtoMessage() {}
+
+func (x *CollectorReply) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_genericmap_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectorReply.ProtoReflect.Descriptor instead.
+func (*CollectorReply) Descriptor() ([]byte, []int) {
+ return file_proto_genericmap_proto_rawDescGZIP(), []int{1}
+}
+
+var File_proto_genericmap_proto protoreflect.FileDescriptor
+
+var file_proto_genericmap_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x6d,
+ 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
+ 0x63, 0x6d, 0x61, 0x70, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x3c, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x34, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x69, 0x63, 0x4d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
+ 0x79, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4d, 0x61, 0x70, 0x22, 0x10, 0x0a,
+ 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32,
+ 0x43, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04,
+ 0x53, 0x65, 0x6e, 0x64, 0x12, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x6d, 0x61,
+ 0x70, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x1a, 0x1a, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63,
+ 0x6d, 0x61, 0x70, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70,
+ 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0e, 0x5a, 0x0c, 0x2e, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
+ 0x63, 0x6d, 0x61, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_proto_genericmap_proto_rawDescOnce sync.Once
+ file_proto_genericmap_proto_rawDescData = file_proto_genericmap_proto_rawDesc
+)
+
+func file_proto_genericmap_proto_rawDescGZIP() []byte {
+ file_proto_genericmap_proto_rawDescOnce.Do(func() {
+ file_proto_genericmap_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_genericmap_proto_rawDescData)
+ })
+ return file_proto_genericmap_proto_rawDescData
+}
+
+var file_proto_genericmap_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_proto_genericmap_proto_goTypes = []interface{}{
+ (*Flow)(nil), // 0: genericmap.Flow
+ (*CollectorReply)(nil), // 1: genericmap.CollectorReply
+ (*anypb.Any)(nil), // 2: google.protobuf.Any
+}
+var file_proto_genericmap_proto_depIdxs = []int32{
+ 2, // 0: genericmap.Flow.genericMap:type_name -> google.protobuf.Any
+ 0, // 1: genericmap.Collector.Send:input_type -> genericmap.Flow
+ 1, // 2: genericmap.Collector.Send:output_type -> genericmap.CollectorReply
+ 2, // [2:3] is the sub-list for method output_type
+ 1, // [1:2] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_proto_genericmap_proto_init() }
+func file_proto_genericmap_proto_init() {
+ if File_proto_genericmap_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_proto_genericmap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Flow); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_proto_genericmap_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectorReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_proto_genericmap_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_proto_genericmap_proto_goTypes,
+ DependencyIndexes: file_proto_genericmap_proto_depIdxs,
+ MessageInfos: file_proto_genericmap_proto_msgTypes,
+ }.Build()
+ File_proto_genericmap_proto = out.File
+ file_proto_genericmap_proto_rawDesc = nil
+ file_proto_genericmap_proto_goTypes = nil
+ file_proto_genericmap_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap_grpc.pb.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap_grpc.pb.go
new file mode 100644
index 000000000..12a8d8aaf
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap_grpc.pb.go
@@ -0,0 +1,105 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.19.6
+// source: proto/genericmap.proto
+
+package genericmap
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// CollectorClient is the client API for Collector service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type CollectorClient interface {
+ Send(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*CollectorReply, error)
+}
+
+type collectorClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewCollectorClient(cc grpc.ClientConnInterface) CollectorClient {
+ return &collectorClient{cc}
+}
+
+func (c *collectorClient) Send(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*CollectorReply, error) {
+ out := new(CollectorReply)
+ err := c.cc.Invoke(ctx, "/genericmap.Collector/Send", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// CollectorServer is the server API for Collector service.
+// All implementations must embed UnimplementedCollectorServer
+// for forward compatibility
+type CollectorServer interface {
+ Send(context.Context, *Flow) (*CollectorReply, error)
+ mustEmbedUnimplementedCollectorServer()
+}
+
+// UnimplementedCollectorServer must be embedded to have forward compatible implementations.
+type UnimplementedCollectorServer struct {
+}
+
+func (UnimplementedCollectorServer) Send(context.Context, *Flow) (*CollectorReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
+}
+func (UnimplementedCollectorServer) mustEmbedUnimplementedCollectorServer() {}
+
+// UnsafeCollectorServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to CollectorServer will
+// result in compilation errors.
+type UnsafeCollectorServer interface {
+ mustEmbedUnimplementedCollectorServer()
+}
+
+func RegisterCollectorServer(s grpc.ServiceRegistrar, srv CollectorServer) {
+ s.RegisterService(&Collector_ServiceDesc, srv)
+}
+
+func _Collector_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Flow)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(CollectorServer).Send(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/genericmap.Collector/Send",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(CollectorServer).Send(ctx, req.(*Flow))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Collector_ServiceDesc is the grpc.ServiceDesc for Collector service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Collector_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "genericmap.Collector",
+ HandlerType: (*CollectorServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Send",
+ Handler: _Collector_Send_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "proto/genericmap.proto",
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/server.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/server.go
new file mode 100644
index 000000000..0a085e924
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/server.go
@@ -0,0 +1,77 @@
+package grpc
+
+import (
+ "context"
+ "fmt"
+ "net"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/reflection"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap"
+)
+
+// CollectorServer wraps a Flow Collector connection & session
+type CollectorServer struct {
+ grpcServer *grpc.Server
+}
+
+type collectorOptions struct {
+ grpcServerOptions []grpc.ServerOption
+}
+
+// CollectorOption allows overriding the default configuration of the CollectorServer instance.
+// Use them in the StartCollector function.
+type CollectorOption func(options *collectorOptions)
+
+func WithGRPCServerOptions(options ...grpc.ServerOption) CollectorOption {
+ return func(copt *collectorOptions) {
+ copt.grpcServerOptions = options
+ }
+}
+
+// StartCollector listens in the background for gRPC+Protobuf flows on the given port, and forwards each
+// received *genericmap.Flow through the provided channel.
+func StartCollector(
+ port int, recordForwarder chan<- *genericmap.Flow, options ...CollectorOption,
+) (*CollectorServer, error) {
+ copts := collectorOptions{}
+ for _, opt := range options {
+ opt(&copts)
+ }
+
+ lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+ if err != nil {
+ return nil, err
+ }
+ grpcServer := grpc.NewServer(copts.grpcServerOptions...)
+ genericmap.RegisterCollectorServer(grpcServer, &collectorAPI{
+ recordForwarder: recordForwarder,
+ })
+ reflection.Register(grpcServer)
+ go func() {
+ if err := grpcServer.Serve(lis); err != nil {
+ panic("error connecting to server: " + err.Error())
+ }
+ }()
+ return &CollectorServer{
+ grpcServer: grpcServer,
+ }, nil
+}
+
+func (c *CollectorServer) Close() error {
+ c.grpcServer.Stop()
+ return nil
+}
+
+type collectorAPI struct {
+ genericmap.UnimplementedCollectorServer
+ recordForwarder chan<- *genericmap.Flow
+}
+
+var okReply = &genericmap.CollectorReply{}
+
+func (c *collectorAPI) Send(_ context.Context, records *genericmap.Flow) (*genericmap.CollectorReply, error) {
+ c.recordForwarder <- records
+ return okReply, nil
+}
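A sketch of the receiving side, assuming port 9999 and JSON payloads such as those produced by the writeGRPC stage later in this patch:

// assumes imports: encoding/json, fmt, log,
//                  github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc,
//                  .../write/grpc/genericmap
flows := make(chan *genericmap.Flow, 100)
srv, err := grpc.StartCollector(9999, flows)
if err != nil {
    log.Fatal(err)
}
defer srv.Close()
for f := range flows {
    var rec map[string]interface{}
    if err := json.Unmarshal(f.GetGenericMap().GetValue(), &rec); err == nil {
        fmt.Println(rec)
    }
}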
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/metrics.go
new file mode 100644
index 000000000..16e2511b5
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/metrics.go
@@ -0,0 +1,18 @@
+package write
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type metrics struct {
+ *operational.Metrics
+ recordsWritten prometheus.Counter
+}
+
+func newMetrics(opMetrics *operational.Metrics, stage string) *metrics {
+ return &metrics{
+ Metrics: opMetrics,
+ recordsWritten: opMetrics.CreateRecordsWrittenCounter(stage),
+ }
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go
new file mode 100644
index 000000000..2f6ed4775
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package write
+
+import (
+ "sync"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/sirupsen/logrus"
+)
+
+type Writer interface {
+ Write(in config.GenericMap)
+}
+type None struct {
+ // synchronized access to avoid race conditions
+ mt sync.Mutex
+ prevRecords []config.GenericMap
+}
+
+// Write appends the record to the in-memory list of previous records
+func (t *None) Write(in config.GenericMap) {
+ logrus.Debugf("entering Write none, in = %v", in)
+ t.mt.Lock()
+ t.prevRecords = append(t.prevRecords, in)
+ t.mt.Unlock()
+}
+
+func (t *None) PrevRecords() []config.GenericMap {
+ t.mt.Lock()
+ defer t.mt.Unlock()
+ var copies []config.GenericMap
+ for _, rec := range t.prevRecords {
+ copies = append(copies, rec.Copy())
+ }
+ return copies
+}
+
+// NewWriteNone creates a new None writer
+func NewWriteNone() (Writer, error) {
+ return &None{}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go
new file mode 100644
index 000000000..362415e7b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package write
+
+import (
+ "sync"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/sirupsen/logrus"
+)
+
+type Fake struct {
+ // access is locked and copied to avoid race condition errors during tests
+ mt sync.Mutex
+ allRecords []config.GenericMap
+}
+
+// Write stores all records in memory.
+func (w *Fake) Write(in config.GenericMap) {
+ logrus.Trace("entering writeFake Write")
+ w.mt.Lock()
+ w.allRecords = append(w.allRecords, in.Copy())
+ w.mt.Unlock()
+}
+
+func (w *Fake) AllRecords() []config.GenericMap {
+ w.mt.Lock()
+ defer w.mt.Unlock()
+ var copies []config.GenericMap
+ for _, r := range w.allRecords {
+ copies = append(copies, r.Copy())
+ }
+ return copies
+}
+
+// NewWriteFake creates a new Fake writer.
+func NewWriteFake(_ config.StageParam) (Writer, error) {
+ logrus.Debugf("entering NewWriteFake")
+ w := &Fake{}
+ return w, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_grpc.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_grpc.go
new file mode 100644
index 000000000..c89d5f361
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_grpc.go
@@ -0,0 +1,55 @@
+package write
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap"
+ "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+type writeGRPC struct {
+ hostIP string
+ hostPort int
+ clientConn *grpc.ClientConnection
+}
+
+// Write sends a flow to the remote gRPC collector
+func (t *writeGRPC) Write(v config.GenericMap) {
+ logrus.Tracef("entering writeGRPC Write %s", v)
+ value, _ := json.Marshal(v)
+ if _, err := t.clientConn.Client().Send(context.TODO(), &genericmap.Flow{
+ GenericMap: &anypb.Any{
+ Value: value,
+ },
+ }); err != nil {
+ logrus.Errorf("writeGRPC send error: %v", err)
+ }
+}
+
+// NewWriteGRPC creates a new gRPC writer
+func NewWriteGRPC(params config.StageParam) (Writer, error) {
+ logrus.Debugf("entering NewWriteGRPC")
+
+ writeGRPC := &writeGRPC{}
+ if params.Write != nil && params.Write.GRPC != nil {
+ if err := params.Write.GRPC.Validate(); err != nil {
+ return nil, fmt.Errorf("the provided config is not valid: %w", err)
+ }
+ writeGRPC.hostIP = params.Write.GRPC.TargetHost
+ writeGRPC.hostPort = params.Write.GRPC.TargetPort
+ } else {
+ return nil, fmt.Errorf("write.grpc param is mandatory: %v", params.Write)
+ }
+ logrus.Debugf("NewWriteGRPC ConnectClient %s:%d...", writeGRPC.hostIP, writeGRPC.hostPort)
+ clientConn, err := grpc.ConnectClient(writeGRPC.hostIP, writeGRPC.hostPort)
+ if err != nil {
+ return nil, err
+ }
+ writeGRPC.clientConn = clientConn
+ return writeGRPC, nil
+}
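A sketch of wiring this stage, assuming params is a config.StageParam whose Write.GRPC section carries TargetHost and TargetPort; the record content is arbitrary:

// assumes imports: log, github.com/netobserv/flowlogs-pipeline/pkg/config,
//                  github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write
w, err := write.NewWriteGRPC(params) // params.Write.GRPC must be populated
if err != nil {
    log.Fatal(err)
}
w.Write(config.GenericMap{"SrcAddr": "10.0.0.1", "DstAddr": "10.0.0.2", "Bytes": 42})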
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go
new file mode 100644
index 000000000..49f8d4882
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go
@@ -0,0 +1,579 @@
+/*
+ * Copyright (C) 2024 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package write
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/sirupsen/logrus"
+ "github.com/vmware/go-ipfix/pkg/entities"
+ ipfixExporter "github.com/vmware/go-ipfix/pkg/exporter"
+ "github.com/vmware/go-ipfix/pkg/registry"
+)
+
+type writeIpfix struct {
+ hostPort string
+ transport string
+ templateIDv4 uint16
+ templateIDv6 uint16
+ enrichEnterpriseID uint32
+ exporter *ipfixExporter.ExportingProcess
+ entitiesV4 []entities.InfoElementWithValue
+ entitiesV6 []entities.InfoElementWithValue
+}
+
+type FieldMap struct {
+ Key string
+ Getter func(entities.InfoElementWithValue) any
+ Setter func(entities.InfoElementWithValue, any)
+ Matcher func(entities.InfoElementWithValue, any) bool
+ Optional bool
+}
+
+// IPv6Type value as defined in IEEE 802: https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml
+const IPv6Type uint16 = 0x86DD
+
+var (
+ ilog = logrus.WithField("component", "write.Ipfix")
+ IANAFields = []string{
+ "ethernetType",
+ "flowDirection",
+ "sourceMacAddress",
+ "destinationMacAddress",
+ "protocolIdentifier",
+ "sourceTransportPort",
+ "destinationTransportPort",
+ "octetDeltaCount",
+ "flowStartMilliseconds",
+ "flowEndMilliseconds",
+ "packetDeltaCount",
+ "interfaceName",
+ }
+ IPv4IANAFields = append([]string{
+ "sourceIPv4Address",
+ "destinationIPv4Address",
+ }, IANAFields...)
+ IPv6IANAFields = append([]string{
+ "sourceIPv6Address",
+ "destinationIPv6Address",
+ "nextHeaderIPv6",
+ }, IANAFields...)
+ KubeFields = []string{
+ "sourcePodNamespace",
+ "sourcePodName",
+ "destinationPodNamespace",
+ "destinationPodName",
+ "sourceNodeName",
+ "destinationNodeName",
+ }
+ CustomNetworkFields = []string{
+ "timeFlowRttNs",
+ "interfaces",
+ "directions",
+ }
+
+ MapIPFIXKeys = map[string]FieldMap{
+ "sourceIPv4Address": {
+ Key: "SrcAddr",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) },
+ },
+ "destinationIPv4Address": {
+ Key: "DstAddr",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) },
+ },
+ "sourceIPv6Address": {
+ Key: "SrcAddr",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) },
+ },
+ "destinationIPv6Address": {
+ Key: "DstAddr",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) },
+ },
+ "nextHeaderIPv6": {
+ Key: "Proto",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned8Value() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned8Value(rec.(uint8)) },
+ },
+ "sourceMacAddress": {
+ Key: "SrcMac",
+ Setter: func(elt entities.InfoElementWithValue, rec any) {
+ elt.SetMacAddressValue(net.HardwareAddr(rec.(string)))
+ },
+ Matcher: func(_ entities.InfoElementWithValue, _ any) bool {
+ // Getting some discrepancies here, need to figure out why
+ return true
+ },
+ },
+ "destinationMacAddress": {
+ Key: "DstMac",
+ Setter: func(elt entities.InfoElementWithValue, rec any) {
+ elt.SetMacAddressValue(net.HardwareAddr(rec.(string)))
+ },
+ Matcher: func(_ entities.InfoElementWithValue, _ any) bool {
+ // Getting some discrepancies here, need to figure out why
+ return true
+ },
+ },
+ "ethernetType": {
+ Key: "Etype",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) },
+ },
+ "flowDirection": {
+ Key: "IfDirections",
+ Setter: func(elt entities.InfoElementWithValue, rec any) {
+ if dirs, ok := rec.([]int); ok && len(dirs) > 0 {
+ elt.SetUnsigned8Value(uint8(dirs[0]))
+ }
+ },
+ Matcher: func(elt entities.InfoElementWithValue, expected any) bool {
+ ifdirs := expected.([]int)
+ return int(elt.GetUnsigned8Value()) == ifdirs[0]
+ },
+ },
+ "directions": {
+ Key: "IfDirections",
+ Getter: func(elt entities.InfoElementWithValue) any {
+ var dirs []int
+ for _, dir := range strings.Split(elt.GetStringValue(), ",") {
+ d, _ := strconv.Atoi(dir)
+ dirs = append(dirs, d)
+ }
+ return dirs
+ },
+ Setter: func(elt entities.InfoElementWithValue, rec any) {
+ if dirs, ok := rec.([]int); ok && len(dirs) > 0 {
+ var asStr []string
+ for _, dir := range dirs {
+ asStr = append(asStr, strconv.Itoa(dir))
+ }
+ elt.SetStringValue(strings.Join(asStr, ","))
+ }
+ },
+ },
+ "protocolIdentifier": {
+ Key: "Proto",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned8Value() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned8Value(rec.(uint8)) },
+ },
+ "sourceTransportPort": {
+ Key: "SrcPort",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) },
+ },
+ "destinationTransportPort": {
+ Key: "DstPort",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) },
+ },
+ "octetDeltaCount": {
+ Key: "Bytes",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned64Value() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(rec.(uint64)) },
+ },
+ "flowStartMilliseconds": {
+ Key: "TimeFlowStartMs",
+ Getter: func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) },
+ },
+ "flowEndMilliseconds": {
+ Key: "TimeFlowEndMs",
+ Getter: func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) },
+ },
+ "packetDeltaCount": {
+ Key: "Packets",
+ Getter: func(elt entities.InfoElementWithValue) any { return uint32(elt.GetUnsigned64Value()) },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(uint32))) },
+ },
+ "interfaceName": {
+ Key: "Interfaces",
+ Setter: func(elt entities.InfoElementWithValue, rec any) {
+ if ifs, ok := rec.([]string); ok && len(ifs) > 0 {
+ elt.SetStringValue(ifs[0])
+ }
+ },
+ Matcher: func(elt entities.InfoElementWithValue, expected any) bool {
+ ifs := expected.([]string)
+ return elt.GetStringValue() == ifs[0]
+ },
+ },
+ "interfaces": {
+ Key: "Interfaces",
+ Getter: func(elt entities.InfoElementWithValue) any { return strings.Split(elt.GetStringValue(), ",") },
+ Setter: func(elt entities.InfoElementWithValue, rec any) {
+ if ifs, ok := rec.([]string); ok {
+ elt.SetStringValue(strings.Join(ifs, ","))
+ }
+ },
+ },
+ "sourcePodNamespace": {
+ Key: "SrcK8S_Namespace",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+ Optional: true,
+ },
+ "sourcePodName": {
+ Key: "SrcK8S_Name",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+ Optional: true,
+ },
+ "destinationPodNamespace": {
+ Key: "DstK8S_Namespace",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+ Optional: true,
+ },
+ "destinationPodName": {
+ Key: "DstK8S_Name",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+ Optional: true,
+ },
+ "sourceNodeName": {
+ Key: "SrcK8S_HostName",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+ Optional: true,
+ },
+ "destinationNodeName": {
+ Key: "DstK8S_HostName",
+ Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+ Optional: true,
+ },
+ "timeFlowRttNs": {
+ Key: "TimeFlowRttNs",
+ Getter: func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) },
+ Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) },
+ Optional: true,
+ },
+ }
+)
+
+func addElementToTemplate(elementName string, value []byte, elements *[]entities.InfoElementWithValue, registryID uint32) error {
+ element, err := registry.GetInfoElement(elementName, registryID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Did not find the element with name %s", elementName)
+ return err
+ }
+ ie, err := entities.DecodeAndCreateInfoElementWithValue(element, value)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to decode element %s", elementName)
+ return err
+ }
+ *elements = append(*elements, ie)
+ return nil
+}
+
+func addNetworkEnrichmentToTemplate(elements *[]entities.InfoElementWithValue, registryID uint32) error {
+ for _, field := range CustomNetworkFields {
+ if err := addElementToTemplate(field, nil, elements, registryID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addKubeContextToTemplate(elements *[]entities.InfoElementWithValue, registryID uint32) error {
+ for _, field := range KubeFields {
+ if err := addElementToTemplate(field, nil, elements, registryID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func loadCustomRegistry(enterpriseID uint32) error {
+ err := registry.InitNewRegistry(enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to initialize registry")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("sourcePodNamespace", 7733, entities.String, enterpriseID, 65535)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("sourcePodName", 7734, entities.String, enterpriseID, 65535)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("destinationPodNamespace", 7735, entities.String, enterpriseID, 65535)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("destinationPodName", 7736, entities.String, enterpriseID, 65535)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("sourceNodeName", 7737, entities.String, enterpriseID, 65535)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("destinationNodeName", 7738, entities.String, enterpriseID, 65535)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("timeFlowRttNs", 7740, entities.Unsigned64, enterpriseID, 8)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("interfaces", 7741, entities.String, enterpriseID, 65535)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ err = registry.PutInfoElement((*entities.NewInfoElement("directions", 7742, entities.String, enterpriseID, 65535)), enterpriseID)
+ if err != nil {
+ ilog.WithError(err).Errorf("Failed to register element")
+ return err
+ }
+ return nil
+}
+
+func SendTemplateRecordv4(exporter *ipfixExporter.ExportingProcess, enrichEnterpriseID uint32) (uint16, []entities.InfoElementWithValue, error) {
+ templateID := exporter.NewTemplateID()
+ templateSet := entities.NewSet(false)
+ err := templateSet.PrepareSet(entities.Template, templateID)
+ if err != nil {
+ ilog.WithError(err).Error("Failed in PrepareSet")
+ return 0, nil, err
+ }
+ elements := make([]entities.InfoElementWithValue, 0)
+
+ for _, field := range IPv4IANAFields {
+ err = addElementToTemplate(field, nil, &elements, registry.IANAEnterpriseID)
+ if err != nil {
+ return 0, nil, err
+ }
+ }
+ if enrichEnterpriseID != 0 {
+ err = addKubeContextToTemplate(&elements, enrichEnterpriseID)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addNetworkEnrichmentToTemplate(&elements, enrichEnterpriseID)
+ if err != nil {
+ return 0, nil, err
+ }
+ }
+ err = templateSet.AddRecord(elements, templateID)
+ if err != nil {
+ ilog.WithError(err).Error("Failed in Add Record")
+ return 0, nil, err
+ }
+ _, err = exporter.SendSet(templateSet)
+ if err != nil {
+ ilog.WithError(err).Error("Failed to send template record")
+ return 0, nil, err
+ }
+
+ return templateID, elements, nil
+}
+
+func SendTemplateRecordv6(exporter *ipfixExporter.ExportingProcess, enrichEnterpriseID uint32) (uint16, []entities.InfoElementWithValue, error) {
+ templateID := exporter.NewTemplateID()
+ templateSet := entities.NewSet(false)
+ err := templateSet.PrepareSet(entities.Template, templateID)
+ if err != nil {
+ return 0, nil, err
+ }
+ elements := make([]entities.InfoElementWithValue, 0)
+
+ for _, field := range IPv6IANAFields {
+ err = addElementToTemplate(field, nil, &elements, registry.IANAEnterpriseID)
+ if err != nil {
+ return 0, nil, err
+ }
+ }
+ if enrichEnterpriseID != 0 {
+ err = addKubeContextToTemplate(&elements, enrichEnterpriseID)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addNetworkEnrichmentToTemplate(&elements, enrichEnterpriseID)
+ if err != nil {
+ return 0, nil, err
+ }
+ }
+
+ err = templateSet.AddRecord(elements, templateID)
+ if err != nil {
+ return 0, nil, err
+ }
+ _, err = exporter.SendSet(templateSet)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return templateID, elements, nil
+}
+
+//nolint:cyclop
+func setElementValue(record config.GenericMap, ieValPtr *entities.InfoElementWithValue) error {
+ ieVal := *ieValPtr
+ name := ieVal.GetName()
+ mapping, ok := MapIPFIXKeys[name]
+ if !ok {
+ return nil
+ }
+ if value := record[mapping.Key]; value != nil {
+ mapping.Setter(ieVal, value)
+ } else if !mapping.Optional {
+ return fmt.Errorf("unable to find %s (%s) in record", name, mapping.Key)
+ }
+ return nil
+}
+
+func setEntities(record config.GenericMap, elements *[]entities.InfoElementWithValue) error {
+ for _, ieVal := range *elements {
+ err := setElementValue(record, &ieVal)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+func (t *writeIpfix) sendDataRecord(record config.GenericMap, v6 bool) error {
+ dataSet := entities.NewSet(false)
+ var templateID uint16
+ if v6 {
+ templateID = t.templateIDv6
+ err := setEntities(record, &t.entitiesV6)
+ if err != nil {
+ return err
+ }
+ } else {
+ templateID = t.templateIDv4
+ err := setEntities(record, &t.entitiesV4)
+ if err != nil {
+ return err
+ }
+ }
+ err := dataSet.PrepareSet(entities.Data, templateID)
+ if err != nil {
+ return err
+ }
+ if v6 {
+ err = dataSet.AddRecord(t.entitiesV6, templateID)
+ if err != nil {
+ return err
+ }
+ } else {
+ err = dataSet.AddRecord(t.entitiesV4, templateID)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = t.exporter.SendSet(dataSet)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Write sends a flow as an IPFIX data record, using the IPv4 or IPv6 template depending on Etype
+func (t *writeIpfix) Write(entry config.GenericMap) {
+ ilog.Tracef("entering writeIpfix Write")
+ if IPv6Type == entry["Etype"].(uint16) {
+ err := t.sendDataRecord(entry, true)
+ if err != nil {
+ ilog.WithError(err).Error("Failed in send v6 IPFIX record")
+ }
+ } else {
+ err := t.sendDataRecord(entry, false)
+ if err != nil {
+ ilog.WithError(err).Error("Failed in send v4 IPFIX record")
+ }
+ }
+}
+
+// NewWriteIpfix creates a new IPFIX writer
+func NewWriteIpfix(params config.StageParam) (Writer, error) {
+ ilog.Debugf("entering NewWriteIpfix")
+
+ ipfixConfigIn := api.WriteIpfix{}
+ if params.Write != nil && params.Write.Ipfix != nil {
+ ipfixConfigIn = *params.Write.Ipfix
+ }
+ // combine defaults with the parameters provided in the config YAML file
+ ipfixConfigIn.SetDefaults()
+
+ if err := ipfixConfigIn.Validate(); err != nil {
+ return nil, fmt.Errorf("the provided config is not valid: %w", err)
+ }
+ writeIpfix := &writeIpfix{}
+ if params.Write != nil && params.Write.Ipfix != nil {
+ writeIpfix.transport = params.Write.Ipfix.Transport
+ writeIpfix.hostPort = fmt.Sprintf("%s:%d", params.Write.Ipfix.TargetHost, params.Write.Ipfix.TargetPort)
+ writeIpfix.enrichEnterpriseID = uint32(params.Write.Ipfix.EnterpriseID)
+ }
+ // Initialize IPFIX registry and send templates
+ registry.LoadRegistry()
+ var err error
+ if params.Write != nil && params.Write.Ipfix != nil && params.Write.Ipfix.EnterpriseID != 0 {
+ err = loadCustomRegistry(writeIpfix.enrichEnterpriseID)
+ if err != nil {
+ ilog.Fatalf("Failed to load Custom(%d) Registry", writeIpfix.enrichEnterpriseID)
+ }
+ }
+
+ // Create exporter using local server info
+ input := ipfixExporter.ExporterInput{
+ CollectorAddress: writeIpfix.hostPort,
+ CollectorProtocol: writeIpfix.transport,
+ ObservationDomainID: 1,
+ TempRefTimeout: 1,
+ }
+ writeIpfix.exporter, err = ipfixExporter.InitExportingProcess(input)
+ if err != nil {
+ ilog.Fatalf("Got error when connecting to server %s: %v", writeIpfix.hostPort, err)
+ return nil, err
+ }
+ ilog.Infof("Created exporter connecting to server with address: %s", writeIpfix.hostPort)
+
+ writeIpfix.templateIDv4, writeIpfix.entitiesV4, err = SendTemplateRecordv4(writeIpfix.exporter, writeIpfix.enrichEnterpriseID)
+ if err != nil {
+ ilog.WithError(err).Error("Failed in send IPFIX template v4 record")
+ return nil, err
+ }
+
+ writeIpfix.templateIDv6, writeIpfix.entitiesV6, err = SendTemplateRecordv6(writeIpfix.exporter, writeIpfix.enrichEnterpriseID)
+ if err != nil {
+ ilog.WithError(err).Error("Failed in send IPFIX template v6 record")
+ return nil, err
+ }
+ return writeIpfix, nil
+}
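For reference, a sketch of what a record must look like for this writer when no custom EnterpriseID is configured: Etype must be a uint16 (0x86DD selects the IPv6 template), and every non-optional key listed in MapIPFIXKeys must be present with the exact Go type its setter asserts; all values below are arbitrary:

// assumes import: github.com/netobserv/flowlogs-pipeline/pkg/config
// w obtained from NewWriteIpfix(params)
entry := config.GenericMap{
    "Etype":           uint16(0x0800), // not 0x86DD, so the IPv4 template is used
    "SrcAddr":         "10.0.0.1",
    "DstAddr":         "10.0.0.2",
    "SrcMac":          "AA:BB:CC:DD:EE:FF",
    "DstMac":          "AA:BB:CC:DD:EE:00",
    "Proto":           uint8(6),
    "SrcPort":         uint16(1234),
    "DstPort":         uint16(80),
    "Bytes":           uint64(100),
    "Packets":         uint32(1),
    "TimeFlowStartMs": int64(1700000000000),
    "TimeFlowEndMs":   int64(1700000001000),
    "IfDirections":    []int{0},
    "Interfaces":      []string{"eth0"},
}
w.Write(entry)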
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go
new file mode 100644
index 000000000..ceea8450d
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package write
+
+import (
+ "fmt"
+ "math"
+ "strings"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+
+ logAdapter "github.com/go-kit/kit/log/logrus"
+ jsonIter "github.com/json-iterator/go"
+ "github.com/netobserv/loki-client-go/loki"
+ "github.com/netobserv/loki-client-go/pkg/backoff"
+ "github.com/netobserv/loki-client-go/pkg/urlutil"
+ "github.com/prometheus/common/model"
+ "github.com/sirupsen/logrus"
+)
+
+var jsonEncodingConfig = jsonIter.Config{}.Froze()
+
+var (
+ keyReplacer = strings.NewReplacer("/", "_", ".", "_", "-", "_")
+)
+
+var log = logrus.WithField("component", "write.Loki")
+
+type emitter interface {
+ Handle(labels model.LabelSet, timestamp time.Time, record string) error
+}
+
+// Loki record writer
+type Loki struct {
+ lokiConfig loki.Config
+ apiConfig api.WriteLoki
+ timestampScale float64
+ saneLabels map[string]model.LabelName
+ client emitter
+ timeNow func() time.Time
+ exitChan <-chan struct{}
+ metrics *metrics
+}
+
+func buildLokiConfig(c *api.WriteLoki) (loki.Config, error) {
+ batchWait, err := time.ParseDuration(c.BatchWait)
+ if err != nil {
+ return loki.Config{}, fmt.Errorf("failed in parsing BatchWait : %w", err)
+ }
+
+ timeout, err := time.ParseDuration(c.Timeout)
+ if err != nil {
+ return loki.Config{}, fmt.Errorf("failed in parsing Timeout : %w", err)
+ }
+
+ minBackoff, err := time.ParseDuration(c.MinBackoff)
+ if err != nil {
+ return loki.Config{}, fmt.Errorf("failed in parsing MinBackoff : %w", err)
+ }
+
+ maxBackoff, err := time.ParseDuration(c.MaxBackoff)
+ if err != nil {
+ return loki.Config{}, fmt.Errorf("failed in parsing MaxBackoff : %w", err)
+ }
+
+ cfg := loki.Config{
+ TenantID: c.TenantID,
+ BatchWait: batchWait,
+ BatchSize: c.BatchSize,
+ Timeout: timeout,
+ BackoffConfig: backoff.BackoffConfig{
+ MinBackoff: minBackoff,
+ MaxBackoff: maxBackoff,
+ MaxRetries: c.MaxRetries,
+ },
+ }
+ if c.ClientConfig != nil {
+ cfg.Client = *c.ClientConfig
+ }
+ var clientURL urlutil.URLValue
+ err = clientURL.Set(strings.TrimSuffix(c.URL, "/") + "/loki/api/v1/push")
+ if err != nil {
+ return cfg, fmt.Errorf("failed to parse client URL: %w", err)
+ }
+ cfg.URL = clientURL
+ return cfg, nil
+}
+
+func (l *Loki) ProcessRecord(in config.GenericMap) error {
+	// Copy the record before processing to avoid altering it in parallel stages
+ out := in.Copy()
+ labels := model.LabelSet{}
+
+ // Add static labels from config
+ for k, v := range l.apiConfig.StaticLabels {
+ labels[k] = v
+ }
+ l.addLabels(in, labels)
+
+ // Remove labels and configured ignore list from record
+ ignoreList := l.apiConfig.IgnoreList
+ ignoreList = append(ignoreList, l.apiConfig.Labels...)
+ for _, label := range ignoreList {
+ delete(out, label)
+ }
+
+ js, err := jsonEncodingConfig.Marshal(out)
+ if err != nil {
+ return err
+ }
+
+ timestamp := l.extractTimestamp(out)
+ err = l.client.Handle(labels, timestamp, string(js))
+ if err == nil {
+ l.metrics.recordsWritten.Inc()
+ }
+ return err
+}
+
+func (l *Loki) extractTimestamp(record map[string]interface{}) time.Time {
+ if l.apiConfig.TimestampLabel == "" {
+ return l.timeNow()
+ }
+ timestamp, ok := record[string(l.apiConfig.TimestampLabel)]
+ if !ok {
+ log.WithField("timestampLabel", l.apiConfig.TimestampLabel).
+ Warnf("Timestamp label not found in record. Using local time")
+ return l.timeNow()
+ }
+ ft, ok := getFloat64(timestamp)
+ if !ok {
+ log.WithField(string(l.apiConfig.TimestampLabel), timestamp).
+ Warnf("Invalid timestamp found: float64 expected but got %T. Using local time", timestamp)
+ return l.timeNow()
+ }
+ if ft == 0 {
+ log.WithField("timestampLabel", l.apiConfig.TimestampLabel).
+ Warnf("Empty timestamp in record. Using local time")
+ return l.timeNow()
+ }
+
+ tsNanos := int64(ft * l.timestampScale)
+ return time.Unix(tsNanos/int64(time.Second), tsNanos%int64(time.Second))
+}
+
+func (l *Loki) addLabels(record config.GenericMap, labels model.LabelSet) {
+ // Add non-static labels from record
+ for _, label := range l.apiConfig.Labels {
+ val, ok := record[label]
+ if !ok {
+ continue
+ }
+ sanitized, ok := l.saneLabels[label]
+ if !ok {
+ continue
+ }
+ lv := model.LabelValue(utils.ConvertToString(val))
+ if !lv.IsValid() {
+ log.WithFields(logrus.Fields{"key": label, "value": val}).
+ Debug("Invalid label value. Ignoring it")
+ continue
+ }
+ labels[sanitized] = lv
+ }
+}
+
+func getFloat64(timestamp interface{}) (ft float64, ok bool) {
+ switch i := timestamp.(type) {
+ case float64:
+ return i, true
+ case float32:
+ return float64(i), true
+ case int64:
+ return float64(i), true
+ case int32:
+ return float64(i), true
+ case uint64:
+ return float64(i), true
+ case uint32:
+ return float64(i), true
+ case int:
+ return float64(i), true
+ default:
+ log.Warnf("Type %T is not implemented for float64 conversion\n", i)
+ return math.NaN(), false
+ }
+}
+
+// Write sends a flow record to Loki
+func (l *Loki) Write(entry config.GenericMap) {
+ log.Tracef("writing entry: %#v", entry)
+ err := l.ProcessRecord(entry)
+ if err != nil {
+ log.WithError(err).Warn("can't write into loki")
+ }
+}
+
+// NewWriteLoki creates a Loki writer from configuration
+func NewWriteLoki(opMetrics *operational.Metrics, params config.StageParam) (*Loki, error) {
+ log.Debugf("entering NewWriteLoki")
+ lokiConfigIn := api.WriteLoki{}
+ if params.Write != nil && params.Write.Loki != nil {
+ lokiConfigIn = *params.Write.Loki
+ }
+	// Combine defaults with the parameters provided in the config YAML file
+ lokiConfigIn.SetDefaults()
+
+ if err := lokiConfigIn.Validate(); err != nil {
+ return nil, fmt.Errorf("the provided config is not valid: %w", err)
+ }
+
+ lokiConfig, buildconfigErr := buildLokiConfig(&lokiConfigIn)
+ if buildconfigErr != nil {
+ return nil, buildconfigErr
+ }
+ client, newWithLoggerErr := loki.NewWithLogger(lokiConfig, logAdapter.NewLogger(log.WithField("module", "export/loki")))
+ if newWithLoggerErr != nil {
+ return nil, newWithLoggerErr
+ }
+
+ timestampScale, err := time.ParseDuration(lokiConfigIn.TimestampScale)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse TimestampScale: %w", err)
+ }
+
+ // Sanitize label keys
+ saneLabels := make(map[string]model.LabelName, len(lokiConfigIn.Labels))
+ for _, label := range lokiConfigIn.Labels {
+ sanitized := model.LabelName(keyReplacer.Replace(label))
+ if sanitized.IsValid() {
+ saneLabels[label] = sanitized
+ } else {
+ log.WithFields(logrus.Fields{"key": label, "sanitized": sanitized}).
+ Debug("Invalid label. Ignoring it")
+ }
+ }
+
+ l := &Loki{
+ lokiConfig: lokiConfig,
+ apiConfig: lokiConfigIn,
+ timestampScale: float64(timestampScale),
+ saneLabels: saneLabels,
+ client: client,
+ timeNow: time.Now,
+ exitChan: pUtils.ExitChannel(),
+ metrics: newMetrics(opMetrics, params.Name),
+ }
+
+ return l, nil
+}
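
For reference, a hedged sketch of the configuration that `NewWriteLoki` consumes, using only the `api.WriteLoki` fields referenced in `buildLokiConfig` and `ProcessRecord` above; the URL, labels, and the `opMetrics` placeholder are illustrative assumptions:

```go
// Hedged sketch: a minimal Loki write stage. Field names are the ones referenced in
// buildLokiConfig and ProcessRecord above; URL, labels and opMetrics are illustrative.
cfg := api.WriteLoki{
	URL:            "http://loki.netobserv.svc:3100", // hypothetical Loki endpoint
	TenantID:       "netobserv",
	BatchWait:      "1s", // durations are strings, parsed with time.ParseDuration
	BatchSize:      100 * 1024,
	Timeout:        "10s",
	MinBackoff:     "1s",
	MaxBackoff:     "5m",
	MaxRetries:     10,
	StaticLabels:   model.LabelSet{"app": "netobserv-flowcollector"},
	Labels:         []string{"SrcK8S_Namespace", "DstK8S_Namespace"},
	TimestampLabel: "TimeFlowEndMs",
	TimestampScale: "1ms",
}
params := config.StageParam{Name: "loki", Write: &config.Write{Loki: &cfg}}
writer, err := write.NewWriteLoki(opMetrics, params) // opMetrics: a pre-built *operational.Metrics
```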
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_stdout.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_stdout.go
new file mode 100644
index 000000000..5c228305a
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_stdout.go
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package write
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "sort"
+ "text/tabwriter"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/sirupsen/logrus"
+)
+
+type writeStdout struct {
+ format string
+}
+
+// Write prints a flow record to standard output
+func (t *writeStdout) Write(v config.GenericMap) {
+ logrus.Tracef("entering writeStdout Write")
+ if t.format == "json" {
+ txt, _ := json.Marshal(v)
+ fmt.Println(string(txt))
+ } else if t.format == "fields" {
+ var order sort.StringSlice
+ for fieldName := range v {
+ order = append(order, fieldName)
+ }
+ order.Sort()
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
+ fmt.Fprintf(w, "\n\nFlow record at %s:\n", time.Now().Format(time.StampMilli))
+ for _, field := range order {
+ fmt.Fprintf(w, "%v\t=\t%v\n", field, v[field])
+ }
+ w.Flush()
+ } else {
+ fmt.Printf("%s: %v\n", time.Now().Format(time.StampMilli), v)
+ }
+}
+
+// NewWriteStdout creates a new stdout writer
+func NewWriteStdout(params config.StageParam) (Writer, error) {
+ logrus.Debugf("entering NewWriteStdout")
+ writeStdout := &writeStdout{}
+ if params.Write != nil && params.Write.Stdout != nil {
+ writeStdout.format = params.Write.Stdout.Format
+ }
+ return writeStdout, nil
+}
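
A minimal, hedged usage sketch, assuming the `Format` field read from `params.Write.Stdout` above belongs to an `api.WriteStdout` struct:

```go
// Hedged sketch: print each flow as one JSON line on stdout.
params := config.StageParam{
	Name:  "stdout",
	Write: &config.Write{Stdout: &api.WriteStdout{Format: "json"}},
}
w, _ := write.NewWriteStdout(params)
w.Write(config.GenericMap{"SrcAddr": "10.0.0.1", "DstPort": 443})
```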
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go
new file mode 100644
index 000000000..6353c1801
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package prometheus
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net/http"
+ "sync"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/server"
+ prom "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ plog = logrus.WithField("component", "prometheus")
+ maybePanic = plog.Fatalf
+ SharedServer = &PromServer{}
+)
+
+type PromServer struct {
+ httpServer *http.Server
+ namedRegistries sync.Map
+}
+
+func (ps *PromServer) Gather() ([]*dto.MetricFamily, error) {
+ all := prom.Gatherers{}
+ ps.namedRegistries.Range(func(_, value interface{}) bool {
+ r := value.(prom.Gatherer)
+ all = append(all, r)
+ return true
+ })
+ return all.Gather()
+}
+
+func (ps *PromServer) Shutdown(ctx context.Context) error {
+ return ps.httpServer.Shutdown(ctx)
+}
+
+func (ps *PromServer) SetRegistry(name string, registry prom.Gatherer) {
+ ps.namedRegistries.Store(name, registry)
+}
+
+// InitializePrometheus starts the global Prometheus server, used for operational metrics and prom-encode stages if they don't override the server settings
+func InitializePrometheus(settings *config.MetricsSettings) *PromServer {
+ if settings.NoPanic {
+ maybePanic = plog.Errorf
+ }
+ if settings.DisableGlobalServer {
+ plog.Info("Disabled global Prometheus server - no operational metrics will be available")
+ return nil
+ }
+ r := prom.DefaultGatherer
+ if settings.SuppressGoMetrics {
+ // set up private prometheus registry
+ r = prom.NewRegistry()
+ }
+ SharedServer = StartServerAsync(&settings.PromConnectionInfo, "", r)
+ return SharedServer
+}
+
+// StartServerAsync starts an HTTP server that exposes the Prometheus metrics of the given registry
+func StartServerAsync(conn *api.PromConnectionInfo, regName string, registry prom.Gatherer) *PromServer {
+ // create prometheus server for operational metrics
+	// if the address is empty, it defaults to 0.0.0.0
+ port := conn.Port
+ if port == 0 {
+ port = 9090
+ }
+ addr := fmt.Sprintf("%s:%v", conn.Address, port)
+ plog.Infof("StartServerAsync: addr = %s", addr)
+
+ httpServer := http.Server{
+ Addr: addr,
+ // TLS clients must use TLS 1.2 or higher
+ TLSConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ },
+ }
+ // The Handler function provides a default handler to expose metrics
+ // via an HTTP server. "/metrics" is the usual endpoint for that.
+ mux := http.NewServeMux()
+ httpServer.Handler = mux
+ server.Default(&httpServer)
+
+ go func() {
+ var err error
+ if conn.TLS != nil {
+ err = httpServer.ListenAndServeTLS(conn.TLS.CertPath, conn.TLS.KeyPath)
+ } else {
+ err = httpServer.ListenAndServe()
+ }
+ if err != nil && !errors.Is(err, http.ErrServerClosed) {
+ maybePanic("error in http.ListenAndServe: %v", err)
+ }
+ }()
+
+ p := PromServer{httpServer: &httpServer}
+ p.namedRegistries.Store(regName, registry)
+
+ mux.Handle("/metrics", promhttp.HandlerFor(&p, promhttp.HandlerOpts{}))
+
+ return &p
+}
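
A hedged sketch of running a dedicated metrics server for one stage and attaching a second registry to it later; the port and registry names are illustrative, and the package aliases (`prometheus` for this package, `prom` for client_golang) match the imports above:

```go
// Hedged sketch: expose a per-stage registry on its own port, then add another one.
reg := prom.NewRegistry()
srv := prometheus.StartServerAsync(&api.PromConnectionInfo{Port: 9401}, "stage-a", reg)

// Registries added later are gathered together with "stage-a" on the same /metrics endpoint.
srv.SetRegistry("stage-b", prom.NewRegistry())
```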
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go
new file mode 100644
index 000000000..92fdb7fb5
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go
@@ -0,0 +1,46 @@
+package server
+
+import (
+ "crypto/tls"
+ "net/http"
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+var slog = logrus.WithField("module", "server")
+
+func Default(srv *http.Server) *http.Server {
+	// defaults taken from https://bruinsslot.jp/post/go-secure-webserver/ ; they can be overridden by the caller
+ if srv.Handler != nil {
+ // No more than 2MB body
+ srv.Handler = http.MaxBytesHandler(srv.Handler, 2<<20)
+ } else {
+ slog.Warnf("Handler not yet set on server while securing defaults. Make sure a MaxByte middleware is used.")
+ }
+ if srv.ReadTimeout == 0 {
+ srv.ReadTimeout = 10 * time.Second
+ }
+ if srv.ReadHeaderTimeout == 0 {
+ srv.ReadHeaderTimeout = 5 * time.Second
+ }
+ if srv.WriteTimeout == 0 {
+ srv.WriteTimeout = 10 * time.Second
+ }
+ if srv.IdleTimeout == 0 {
+ srv.IdleTimeout = 120 * time.Second
+ }
+ if srv.MaxHeaderBytes == 0 {
+ srv.MaxHeaderBytes = 1 << 20 // 1MB
+ }
+ if srv.TLSConfig == nil {
+ srv.TLSConfig = &tls.Config{}
+ }
+ if srv.TLSConfig.MinVersion == 0 {
+ srv.TLSConfig.MinVersion = tls.VersionTLS13
+ }
+ // Disable http/2
+ srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0)
+
+ return srv
+}
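
`Default` only fills in fields that the caller left at their zero values, so it is typically applied right after the server is constructed. A small sketch (the address, route, and certificate paths are illustrative):

```go
// Hedged sketch: harden an HTTP server with the defaults above before serving it.
mux := http.NewServeMux()
mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)
})

srv := server.Default(&http.Server{Addr: ":8443", Handler: mux})
// Default wraps the handler in MaxBytesHandler, sets read/write/idle timeouts,
// caps header size, requires TLS 1.3 as a minimum, and disables HTTP/2.
err := srv.ListenAndServeTLS("tls.crt", "tls.key") // illustrative cert/key paths
_ = err
```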
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/utils/filters/filters.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/utils/filters/filters.go
new file mode 100644
index 000000000..8bb12fcb6
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/utils/filters/filters.go
@@ -0,0 +1,153 @@
+package filters
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/utils"
+)
+
+type Predicate func(config.GenericMap) bool
+
+var variableExtractor = regexp.MustCompile(`\$\(([^\)]+)\)`)
+
+func Presence(key string) Predicate {
+ return func(flow config.GenericMap) bool {
+ _, found := flow[key]
+ return found
+ }
+}
+
+func Absence(key string) Predicate {
+ return func(flow config.GenericMap) bool {
+ _, found := flow[key]
+ return !found
+ }
+}
+
+func Equal(key string, filterValue any, convertString bool) Predicate {
+ varLookups := extractVarLookups(filterValue)
+ if len(varLookups) > 0 {
+ return func(flow config.GenericMap) bool {
+ if val, found := flow[key]; found {
+ // Variable injection => convert to string
+ sVal, ok := val.(string)
+ if !ok {
+ sVal = utils.ConvertToString(val)
+ }
+ injected := injectVars(flow, filterValue.(string), varLookups)
+ return sVal == injected
+ }
+ return false
+ }
+ }
+ if convertString {
+ return func(flow config.GenericMap) bool {
+ if val, found := flow[key]; found {
+ sVal, ok := val.(string)
+ if !ok {
+ sVal = utils.ConvertToString(val)
+ }
+ return sVal == filterValue
+ }
+ return false
+ }
+ }
+ return func(flow config.GenericMap) bool {
+ if val, found := flow[key]; found {
+ return val == filterValue
+ }
+ return false
+ }
+}
+
+func NotEqual(key string, filterValue any, convertString bool) Predicate {
+ pred := Equal(key, filterValue, convertString)
+ return func(flow config.GenericMap) bool { return !pred(flow) }
+}
+
+func Regex(key string, filterRegex *regexp.Regexp) Predicate {
+ return func(flow config.GenericMap) bool {
+ if val, found := flow[key]; found {
+ sVal, ok := val.(string)
+ if !ok {
+ sVal = utils.ConvertToString(val)
+ }
+ return filterRegex.MatchString(sVal)
+ }
+ return false
+ }
+}
+
+func NotRegex(key string, filterRegex *regexp.Regexp) Predicate {
+ pred := Regex(key, filterRegex)
+ return func(flow config.GenericMap) bool { return !pred(flow) }
+}
+
+func extractVarLookups(value any) [][]string {
+	// Extract the list of variables to look up.
+	// E.g.: filter "$(SrcAddr):$(SrcPort)" would return [SrcAddr,SrcPort]
+ if sVal, isString := value.(string); isString {
+ if len(sVal) > 0 {
+ return variableExtractor.FindAllStringSubmatch(sVal, -1)
+ }
+ }
+ return nil
+}
+
+func injectVars(flow config.GenericMap, filterValue string, varLookups [][]string) string {
+ injected := filterValue
+ for _, matchGroup := range varLookups {
+ var value string
+ if rawVal, found := flow[matchGroup[1]]; found {
+ if sVal, ok := rawVal.(string); ok {
+ value = sVal
+ } else {
+ value = utils.ConvertToString(rawVal)
+ }
+ }
+ injected = strings.ReplaceAll(injected, matchGroup[0], value)
+ }
+ return injected
+}
+
+func FromKeepEntry(from *api.KeepEntryRule) (Predicate, error) {
+ switch from.Type {
+ case api.KeepEntryIfExists:
+ return Presence(from.KeepEntry.Input), nil
+ case api.KeepEntryIfDoesntExist:
+ return Absence(from.KeepEntry.Input), nil
+ case api.KeepEntryIfEqual:
+ return Equal(from.KeepEntry.Input, from.KeepEntry.Value, true), nil
+ case api.KeepEntryIfNotEqual:
+ return NotEqual(from.KeepEntry.Input, from.KeepEntry.Value, true), nil
+ case api.KeepEntryIfRegexMatch:
+ if r, err := compileRegex(from.KeepEntry); err != nil {
+ return nil, err
+ } else {
+ return Regex(from.KeepEntry.Input, r), nil
+ }
+ case api.KeepEntryIfNotRegexMatch:
+ if r, err := compileRegex(from.KeepEntry); err != nil {
+ return nil, err
+ } else {
+ return NotRegex(from.KeepEntry.Input, r), nil
+ }
+ }
+ return nil, fmt.Errorf("keep entry rule type not recognized: %s", from.Type)
+}
+
+func compileRegex(from *api.TransformFilterGenericRule) (*regexp.Regexp, error) {
+ s, ok := from.Value.(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid regex keep rule: rule value must be a string [%v]", from)
+ }
+ r, err := regexp.Compile(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid regex keep rule: cannot compile regex [%w]", err)
+ }
+ return r, nil
+}
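
A hedged sketch of evaluating these predicates directly, assuming `config.GenericMap` is the usual `map[string]interface{}`-style flow representation used elsewhere in the pipeline:

```go
// Hedged sketch: evaluate a few predicates against a single flow record.
flow := config.GenericMap{"SrcAddr": "10.0.0.1", "SrcPort": 443, "Proto": "TCP"}

hasPort := filters.Presence("SrcPort")
isTCP := filters.Equal("Proto", "TCP", true)
privateSrc := filters.Regex("SrcAddr", regexp.MustCompile(`^10\.`))

fmt.Println(hasPort(flow), isTCP(flow), privateSrc(flow)) // true true true
```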
diff --git a/vendor/github.com/netobserv/gopipes/LICENSE b/vendor/github.com/netobserv/gopipes/LICENSE
new file mode 100644
index 000000000..091e18352
--- /dev/null
+++ b/vendor/github.com/netobserv/gopipes/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022 Mario Macias Lloret
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/netobserv/gopipes/pkg/node/internal/connect/connectors.go b/vendor/github.com/netobserv/gopipes/pkg/node/internal/connect/connectors.go
new file mode 100644
index 000000000..b9d7c8c54
--- /dev/null
+++ b/vendor/github.com/netobserv/gopipes/pkg/node/internal/connect/connectors.go
@@ -0,0 +1,99 @@
+package connect
+
+import (
+ "sync/atomic"
+)
+
+// Joiner provides shared access to the input channel of a node of the type IN
+type Joiner[IN any] struct {
+ totalSenders int32
+ bufLen int
+ channel chan IN
+}
+
+// NewJoiner creates a joiner for a given channel type and buffer length
+func NewJoiner[IN any](bufferLength int) Joiner[IN] {
+ return Joiner[IN]{
+ bufLen: bufferLength,
+ channel: make(chan IN, bufferLength),
+ }
+}
+
+// Receiver gets access to the channel as a receiver
+func (j *Joiner[IN]) Receiver() chan IN {
+ return j.channel
+}
+
+// AcquireSender gets access to the channel as a sender. The acquirer must finally invoke
+// ReleaseSender to make sure that the channel is closed once all the senders have released it.
+func (j *Joiner[IN]) AcquireSender() chan IN {
+ atomic.AddInt32(&j.totalSenders, 1)
+ return j.channel
+}
+
+// ReleaseSender closes the channel once all the invokers of AcquireSender have invoked
+// this function.
+func (j *Joiner[IN]) ReleaseSender() {
+ // if no senders, we close the main channel
+ if atomic.AddInt32(&j.totalSenders, -1) == 0 {
+ close(j.channel)
+ }
+}
+
+// Releaser is a function that will allow releasing a forked channel.
+type Releaser func()
+
+// Forker manages the access to a Node's output (send) channel. When a node sends to only
+// one node, this will work as a single channel. When a node sends to N nodes,
+// it will spawn N channels that are cloned from the original channel in a goroutine.
+type Forker[OUT any] struct {
+ sendCh chan OUT
+ releaseChannel Releaser
+}
+
+// Fork provides connection to a group of output Nodes, accessible through their respective
+// Joiner instances.
+func Fork[T any](joiners ...*Joiner[T]) Forker[T] {
+ if len(joiners) == 0 {
+ panic("can't fork 0 joiners")
+ }
+ // if there is only one joiner, we directly send the data to the channel, without intermediation
+ if len(joiners) == 1 {
+ return Forker[T]{
+ sendCh: joiners[0].AcquireSender(),
+ releaseChannel: joiners[0].ReleaseSender,
+ }
+ }
+ // channel used as input from the source Node
+ sendCh := make(chan T, joiners[0].bufLen)
+
+ // channels that clone the contents of the sendCh
+ forwarders := make([]chan T, len(joiners))
+ for i := 0; i < len(joiners); i++ {
+ forwarders[i] = joiners[i].AcquireSender()
+ }
+ go func() {
+ for in := range sendCh {
+ for i := 0; i < len(joiners); i++ {
+ forwarders[i] <- in
+ }
+ }
+ for i := 0; i < len(joiners); i++ {
+ joiners[i].ReleaseSender()
+ }
+ }()
+ return Forker[T]{
+ sendCh: sendCh,
+ releaseChannel: func() { close(sendCh) },
+ }
+}
+
+// Sender acquires the channel that will receive the data from the source node
+func (f *Forker[OUT]) Sender() chan OUT {
+ return f.sendCh
+}
+
+// Close the input channel and, in cascade, all the forked channels
+func (f *Forker[OUT]) Close() {
+ f.releaseChannel()
+}
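
`connect` is an internal package, so the following hedged sketch should be read as code living inside it; it shows how a fork duplicates every value sent by one source to each joiner:

```go
// Hedged, package-internal sketch: one sender fanning out to two receivers.
left, right := NewJoiner[int](4), NewJoiner[int](4)
forker := Fork(&left, &right)

go func() {
	for i := 0; i < 3; i++ {
		forker.Sender() <- i
	}
	forker.Close() // releases the senders and, in cascade, closes both joiner channels
}()

for v := range left.Receiver() {
	_ = v // every value sent above is also delivered to right.Receiver()
}
```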
diff --git a/vendor/github.com/netobserv/gopipes/pkg/node/node.go b/vendor/github.com/netobserv/gopipes/pkg/node/node.go
new file mode 100644
index 000000000..72b99e792
--- /dev/null
+++ b/vendor/github.com/netobserv/gopipes/pkg/node/node.go
@@ -0,0 +1,239 @@
+// Package node provides functionalities to create nodes and interconnect them.
+// A Node is a function container that can be connected via channels to other nodes.
+// A node can send data to multiple nodes, and receive data from multiple nodes.
+package node
+
+import (
+ "context"
+ "reflect"
+
+ "github.com/netobserv/gopipes/pkg/node/internal/connect"
+)
+
+// StartFunc is a function that receives a writable channel as its unique argument, and sends
+// values to that channel for an indefinite amount of time.
+type StartFunc[OUT any] func(out chan<- OUT)
+
+// StartFuncCtx is a StartFunc that also receives a context as its first argument. If the passed
+// context is cancelled (signalled through ctx.Done()), the implementing function should return,
+// so the cancellation is propagated to the later nodes.
+type StartFuncCtx[OUT any] func(ctx context.Context, out chan<- OUT)
+
+// MiddleFunc is a function that receives a readable channel as first argument,
+// and a writable channel as second argument.
+// It must process the inputs from the input channel until it's closed.
+type MiddleFunc[IN, OUT any] func(in <-chan IN, out chan<- OUT)
+
+// TerminalFunc is a function that receives a readable channel as its unique argument.
+// It must process the inputs from the input channel until it's closed.
+type TerminalFunc[IN any] func(out <-chan IN)
+
+// TODO: OutType and InType methods are candidates for deprecation
+
+// Sender is any node that can send data to another node: node.Start and node.Middle
+type Sender[OUT any] interface {
+	// SendsTo connects a sender with a group of receivers
+ SendsTo(...Receiver[OUT])
+ // OutType returns the inner type of the Sender's output channel
+ OutType() reflect.Type
+}
+
+// Receiver is any node that can receive data from another node: node.Middle and node.Terminal
+type Receiver[IN any] interface {
+ isStarted() bool
+ start()
+ joiner() *connect.Joiner[IN]
+ // InType returns the inner type of the Receiver's input channel
+ InType() reflect.Type
+}
+
+// Start nodes are the starting points of a graph. That is, all the nodes that bring information
+// from outside the graph: for example, because they generate it or because they acquire it from an
+// external source such as a web service.
+// A graph must have at least one Start node.
+// A Start node must have at least one output node.
+type Start[OUT any] struct {
+ outs []Receiver[OUT]
+ fun StartFuncCtx[OUT]
+ outType reflect.Type
+}
+
+func (s *Start[OUT]) SendsTo(outputs ...Receiver[OUT]) {
+ s.outs = append(s.outs, outputs...)
+}
+
+// OutType is deprecated. It will be removed in future versions.
+func (s *Start[OUT]) OutType() reflect.Type {
+ return s.outType
+}
+
+// Middle is any intermediate node that receives data from another node, processes/filters it,
+// and forwards the data to another node.
+// A Middle node must have at least one output node.
+type Middle[IN, OUT any] struct {
+ outs []Receiver[OUT]
+ inputs connect.Joiner[IN]
+ started bool
+ fun MiddleFunc[IN, OUT]
+ outType reflect.Type
+ inType reflect.Type
+}
+
+func (i *Middle[IN, OUT]) joiner() *connect.Joiner[IN] {
+ return &i.inputs
+}
+
+func (i *Middle[IN, OUT]) isStarted() bool {
+ return i.started
+}
+
+func (s *Middle[IN, OUT]) SendsTo(outputs ...Receiver[OUT]) {
+ s.outs = append(s.outs, outputs...)
+}
+
+func (m *Middle[IN, OUT]) OutType() reflect.Type {
+ return m.outType
+}
+
+func (m *Middle[IN, OUT]) InType() reflect.Type {
+ return m.inType
+}
+
+// Terminal is any node that receives data from another node and does not forward it to another node,
+// but can process it and send the results to outside the graph (e.g. memory, storage, web...)
+type Terminal[IN any] struct {
+ inputs connect.Joiner[IN]
+ started bool
+ fun TerminalFunc[IN]
+ done chan struct{}
+ inType reflect.Type
+}
+
+func (i *Terminal[IN]) joiner() *connect.Joiner[IN] {
+ return &i.inputs
+}
+
+func (t *Terminal[IN]) isStarted() bool {
+ return t.started
+}
+
+// Done returns a channel that is closed when the Terminal node has ended its processing. That
+// is, when all its inputs have also been closed. Waiting for all the Terminal nodes to finish
+// allows blocking the execution until all the data in the graph has been processed and all the
+// previous stages have ended.
+func (t *Terminal[IN]) Done() <-chan struct{} {
+ return t.done
+}
+
+func (m *Terminal[IN]) InType() reflect.Type {
+ return m.inType
+}
+
+// AsInit wraps a StartFunc into a Start node.
+// Deprecated: use AsStart or AsStartCtx instead.
+func AsInit[OUT any](fun StartFunc[OUT]) *Start[OUT] {
+ return AsStart(fun)
+}
+
+// AsStart wraps a StartFunc into a Start node.
+func AsStart[OUT any](fun StartFunc[OUT]) *Start[OUT] {
+ return AsStartCtx(func(_ context.Context, out chan<- OUT) {
+ fun(out)
+ })
+}
+
+// AsStartCtx wraps a StartFuncCtx into a Start node.
+func AsStartCtx[OUT any](fun StartFuncCtx[OUT]) *Start[OUT] {
+ var out OUT
+ return &Start[OUT]{
+ fun: fun,
+ outType: reflect.TypeOf(out),
+ }
+}
+
+// AsMiddle wraps a MiddleFunc into a Middle node.
+func AsMiddle[IN, OUT any](fun MiddleFunc[IN, OUT], opts ...Option) *Middle[IN, OUT] {
+ var in IN
+ var out OUT
+ options := getOptions(opts...)
+ return &Middle[IN, OUT]{
+ inputs: connect.NewJoiner[IN](options.channelBufferLen),
+ fun: fun,
+ inType: reflect.TypeOf(in),
+ outType: reflect.TypeOf(out),
+ }
+}
+
+// AsTerminal wraps a TerminalFunc into a Terminal node.
+func AsTerminal[IN any](fun TerminalFunc[IN], opts ...Option) *Terminal[IN] {
+ var i IN
+ options := getOptions(opts...)
+ return &Terminal[IN]{
+ inputs: connect.NewJoiner[IN](options.channelBufferLen),
+ fun: fun,
+ done: make(chan struct{}),
+ inType: reflect.TypeOf(i),
+ }
+}
+
+// Start starts the function wrapped in the Start node. Either this method or StartCtx should be invoked
+// for all the start nodes of the same graph, so the graph can properly start and finish.
+func (i *Start[OUT]) Start() {
+ i.StartCtx(context.TODO())
+}
+
+// StartCtx starts the function wrapped in the Start node, allowing a context to be passed to
+// the wrapped function. Either this method or Start should be invoked
+// for all the start nodes of the same graph, so the graph can properly start and finish.
+func (i *Start[OUT]) StartCtx(ctx context.Context) {
+ if len(i.outs) == 0 {
+ panic("Start node should have outputs")
+ }
+ joiners := make([]*connect.Joiner[OUT], 0, len(i.outs))
+ for _, out := range i.outs {
+ joiners = append(joiners, out.joiner())
+ if !out.isStarted() {
+ out.start()
+ }
+ }
+ forker := connect.Fork(joiners...)
+ go func() {
+ i.fun(ctx, forker.Sender())
+ forker.Close()
+ }()
+}
+
+func (i *Middle[IN, OUT]) start() {
+ if len(i.outs) == 0 {
+ panic("Middle node should have outputs")
+ }
+ i.started = true
+ joiners := make([]*connect.Joiner[OUT], 0, len(i.outs))
+ for _, out := range i.outs {
+ joiners = append(joiners, out.joiner())
+ if !out.isStarted() {
+ out.start()
+ }
+ }
+ forker := connect.Fork(joiners...)
+ go func() {
+ i.fun(i.inputs.Receiver(), forker.Sender())
+ forker.Close()
+ }()
+}
+
+func (t *Terminal[IN]) start() {
+ t.started = true
+ go func() {
+ t.fun(t.inputs.Receiver())
+ close(t.done)
+ }()
+}
+
+func getOptions(opts ...Option) creationOptions {
+ options := defaultOptions
+ for _, opt := range opts {
+ opt(&options)
+ }
+ return options
+}
diff --git a/vendor/github.com/netobserv/gopipes/pkg/node/options.go b/vendor/github.com/netobserv/gopipes/pkg/node/options.go
new file mode 100644
index 000000000..f03027ce5
--- /dev/null
+++ b/vendor/github.com/netobserv/gopipes/pkg/node/options.go
@@ -0,0 +1,22 @@
+package node
+
+type creationOptions struct {
+ // if 0, channel is unbuffered
+ channelBufferLen int
+}
+
+var defaultOptions = creationOptions{
+ channelBufferLen: 0,
+}
+
+// Option allows overriding the default values of node instantiation
+type Option func(options *creationOptions)
+
+// ChannelBufferLen is a node.Option that allows specifying the length of the input
+// channels for a given node. The default value is 0, which means that the channels
+// are unbuffered.
+func ChannelBufferLen(length int) Option {
+ return func(options *creationOptions) {
+ options.channelBufferLen = length
+ }
+}
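
Putting the node API and options together, a short hedged sketch of a three-stage graph: a start node emits values, a middle node transforms them, a terminal node consumes them, and `Done()` is used to wait for completion:

```go
package main

import (
	"fmt"

	"github.com/netobserv/gopipes/pkg/node"
)

func main() {
	start := node.AsStart(func(out chan<- int) {
		for i := 1; i <= 3; i++ {
			out <- i
		}
	})
	double := node.AsMiddle(func(in <-chan int, out chan<- int) {
		for n := range in {
			out <- 2 * n
		}
	}, node.ChannelBufferLen(8))
	sink := node.AsTerminal(func(in <-chan int) {
		for n := range in {
			fmt.Println(n) // 2, 4, 6
		}
	})

	start.SendsTo(double)
	double.SendsTo(sink)

	start.Start()
	<-sink.Done() // blocks until every input channel has been closed and drained
}
```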
diff --git a/vendor/github.com/netobserv/loki-client-go/LICENSE b/vendor/github.com/netobserv/loki-client-go/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/netobserv/loki-client-go/loki/batch.go b/vendor/github.com/netobserv/loki-client-go/loki/batch.go
new file mode 100644
index 000000000..473e05c68
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/loki/batch.go
@@ -0,0 +1,107 @@
+package loki
+
+import (
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "github.com/golang/snappy"
+ json "github.com/json-iterator/go"
+
+ "github.com/netobserv/loki-client-go/pkg/logproto"
+)
+
+// batch holds pending log streams waiting to be sent to Loki. It is used
+// to reduce the number of push requests to Loki by aggregating multiple log streams
+// and entries into a single batch request. In the case of multi-tenant Promtail, log
+// streams for each tenant are stored in a dedicated batch.
+type batch struct {
+ streams map[string]*logproto.Stream
+ bytes int
+ createdAt time.Time
+}
+
+func newBatch(entries ...entry) *batch {
+ b := &batch{
+ streams: map[string]*logproto.Stream{},
+ bytes: 0,
+ createdAt: time.Now(),
+ }
+
+ // Add entries to the batch
+ for _, entry := range entries {
+ b.add(entry)
+ }
+
+ return b
+}
+
+// add an entry to the batch
+func (b *batch) add(entry entry) {
+ b.bytes += len(entry.Line)
+
+ // Append the entry to an already existing stream (if any)
+ labels := entry.labels.String()
+ if stream, ok := b.streams[labels]; ok {
+ stream.Entries = append(stream.Entries, entry.Entry)
+ return
+ }
+
+ // Add the entry as a new stream
+ b.streams[labels] = &logproto.Stream{
+ Labels: labels,
+ Entries: []logproto.Entry{entry.Entry},
+ }
+}
+
+// sizeBytes returns the current batch size in bytes
+func (b *batch) sizeBytes() int {
+ return b.bytes
+}
+
+// sizeBytesAfter returns the size that the batch would have after the input entry
+// is added to it
+func (b *batch) sizeBytesAfter(entry entry) int {
+ return b.bytes + len(entry.Line)
+}
+
+// age of the batch since its creation
+func (b *batch) age() time.Duration {
+ return time.Since(b.createdAt)
+}
+
+// encode the batch as snappy-compressed push request, and returns
+// the encoded bytes and the number of encoded entries
+func (b *batch) encode() ([]byte, int, error) {
+ req, entriesCount := b.createPushRequest()
+ buf, err := proto.Marshal(req)
+ if err != nil {
+ return nil, 0, err
+ }
+ buf = snappy.Encode(nil, buf)
+ return buf, entriesCount, nil
+}
+
+// encode the batch as json push request, and returns
+// the encoded bytes and the number of encoded entries
+func (b *batch) encodeJSON() ([]byte, int, error) {
+ req, entriesCount := b.createPushRequest()
+ buf, err := json.Marshal(req)
+ if err != nil {
+ return nil, 0, err
+ }
+ return buf, entriesCount, nil
+}
+
+// creates push request and returns it, together with number of entries
+func (b *batch) createPushRequest() (*logproto.PushRequest, int) {
+ req := logproto.PushRequest{
+ Streams: make([]logproto.Stream, 0, len(b.streams)),
+ }
+
+ entriesCount := 0
+ for _, stream := range b.streams {
+ req.Streams = append(req.Streams, *stream)
+ entriesCount += len(stream.Entries)
+ }
+ return &req, entriesCount
+}
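
A hedged, package-internal sketch of the batching flow (both `batch` and `entry` are unexported, and the `Timestamp`/`Line` fields of `logproto.Entry` are assumed from Loki's protobuf model); flushing thresholds are the caller's responsibility:

```go
// Hedged, package-internal sketch (batch and entry are unexported).
b := newBatch()
b.add(entry{
	labels: model.LabelSet{"app": "netobserv"},
	Entry: logproto.Entry{ // assumed Timestamp/Line layout from Loki's protobuf model
		Timestamp: time.Now(),
		Line:      `{"SrcAddr":"10.0.0.1","DstPort":443}`,
	},
})

// The client decides when to flush, typically on size or age thresholds.
if b.sizeBytes() >= 100*1024 || b.age() >= time.Second {
	buf, entries, err := b.encode() // snappy-compressed protobuf push request
	_, _, _ = buf, entries, err
}
```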
diff --git a/vendor/github.com/netobserv/loki-client-go/loki/client.go b/vendor/github.com/netobserv/loki-client-go/loki/client.go
new file mode 100644
index 000000000..bf3093b5e
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/loki/client.go
@@ -0,0 +1,404 @@
+package loki
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/netobserv/loki-client-go/pkg/backoff"
+ "github.com/prometheus/prometheus/promql/parser"
+
+ "github.com/netobserv/loki-client-go/pkg/metric"
+
+ "github.com/go-kit/kit/log"
+ "github.com/go-kit/kit/log/level"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/version"
+
+ "github.com/netobserv/loki-client-go/pkg/helpers"
+ "github.com/netobserv/loki-client-go/pkg/logproto"
+)
+
+const (
+ protoContentType = "application/x-protobuf"
+ JSONContentType = "application/json"
+ maxErrMsgLen = 1024
+
+ // Label reserved to override the tenant ID while processing
+ // pipeline stages
+ ReservedLabelTenantID = "__tenant_id__"
+
+ LatencyLabel = "filename"
+ HostLabel = "host"
+ MetricPrefix = "netobserv"
+)
+
+var (
+ encodedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: MetricPrefix,
+ Name: "loki_encoded_bytes_total",
+ Help: "Number of bytes encoded and ready to send.",
+ }, []string{HostLabel})
+ sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: MetricPrefix,
+ Name: "loki_sent_bytes_total",
+ Help: "Number of bytes sent.",
+ }, []string{HostLabel})
+ droppedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: MetricPrefix,
+ Name: "loki_dropped_bytes_total",
+ Help: "Number of bytes dropped because failed to be sent to the ingester after all retries.",
+ }, []string{HostLabel})
+ sentEntries = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: MetricPrefix,
+ Name: "loki_sent_entries_total",
+ Help: "Number of log entries sent to the ingester.",
+ }, []string{HostLabel})
+ droppedEntries = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: MetricPrefix,
+ Name: "loki_dropped_entries_total",
+ Help: "Number of log entries dropped because failed to be sent to the ingester after all retries.",
+ }, []string{HostLabel})
+ requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: MetricPrefix,
+ Name: "loki_request_duration_seconds",
+ Help: "Duration of send requests.",
+ }, []string{"status_code", HostLabel})
+ batchRetries = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: MetricPrefix,
+ Name: "loki_batch_retries_total",
+ Help: "Number of times batches has had to be retried.",
+ }, []string{HostLabel})
+ streamLag *metric.Gauges
+
+ countersWithHost = []*prometheus.CounterVec{
+ encodedBytes, sentBytes, droppedBytes, sentEntries, droppedEntries,
+ }
+
+ UserAgent = fmt.Sprintf("promtail/%s", version.Version)
+)
+
+func init() {
+ prometheus.MustRegister(encodedBytes)
+ prometheus.MustRegister(sentBytes)
+ prometheus.MustRegister(droppedBytes)
+ prometheus.MustRegister(sentEntries)
+ prometheus.MustRegister(droppedEntries)
+ prometheus.MustRegister(requestDuration)
+ prometheus.MustRegister(batchRetries)
+ var err error
+ streamLag, err = metric.NewGauges(MetricPrefix+"_loki_stream_lag_seconds",
+ "Difference between current time and last batch timestamp for successful sends",
+ metric.GaugeConfig{Action: "set"},
+ int64(1*time.Minute.Seconds()), // This strips out files which update slowly and reduces noise in this metric.
+ )
+ if err != nil {
+ panic(err)
+ }
+ prometheus.MustRegister(streamLag)
+}
+
+// Client for pushing logs in snappy-compressed protos over HTTP.
+type Client struct {
+ logger log.Logger
+ cfg Config
+ client *http.Client
+ quit chan struct{}
+ once sync.Once
+ entries chan entry
+ wg sync.WaitGroup
+
+ externalLabels model.LabelSet
+}
+
+type entry struct {
+ tenantID string
+ labels model.LabelSet
+ logproto.Entry
+}
+
+// New makes a new Client from config
+func New(cfg Config) (*Client, error) {
+ logger := level.NewFilter(log.NewLogfmtLogger(os.Stdout), level.AllowWarn())
+ return NewWithLogger(cfg, logger)
+}
+
+// NewWithDefault creates a new client with default configuration.
+func NewWithDefault(url string) (*Client, error) {
+ cfg, err := NewDefaultConfig(url)
+ if err != nil {
+ return nil, err
+ }
+ return New(cfg)
+}
+
+// NewWithLogger makes a new Client from a logger and a config
+func NewWithLogger(cfg Config, logger log.Logger) (*Client, error) {
+ if cfg.URL.URL == nil {
+ return nil, errors.New("client needs target URL")
+ }
+
+ c := &Client{
+ logger: log.With(logger, "component", "client", "host", cfg.URL.Host),
+ cfg: cfg,
+ quit: make(chan struct{}),
+ entries: make(chan entry),
+
+ externalLabels: cfg.ExternalLabels.LabelSet,
+ }
+
+ err := cfg.Client.Validate()
+ if err != nil {
+ return nil, err
+ }
+
+ c.client, err = config.NewClientFromConfig(cfg.Client, "promtail", config.WithKeepAlivesDisabled(), config.WithHTTP2Disabled())
+ if err != nil {
+ return nil, err
+ }
+
+ c.client.Timeout = cfg.Timeout
+
+ // Initialize counters to 0 so the metrics are exported before the first
+ // occurrence of incrementing to avoid missing metrics.
+ for _, counter := range countersWithHost {
+ counter.WithLabelValues(c.cfg.URL.Host).Add(0)
+ }
+
+ c.wg.Add(1)
+ go c.run()
+ return c, nil
+}
+
+func (c *Client) run() {
+ batches := map[string]*batch{}
+
+ // Given that the client handles multiple batches (one per tenant) and each
+ // batch can be created at a different point in time, we check ten times per
+ // BatchWait period for batches whose max wait time has been reached, so that
+ // the maximum extra delay before sending a batch is 10% of the max wait time.
+ // The ticker period is also floored at 10ms, to avoid overly frequent checks
+ // when BatchWait is very low.
+ minWaitCheckFrequency := 10 * time.Millisecond
+ maxWaitCheckFrequency := c.cfg.BatchWait / 10
+ if maxWaitCheckFrequency < minWaitCheckFrequency {
+ maxWaitCheckFrequency = minWaitCheckFrequency
+ }
+
+ maxWaitCheck := time.NewTicker(maxWaitCheckFrequency)
+
+ defer func() {
+ // Send all pending batches
+ for tenantID, batch := range batches {
+ c.sendBatch(tenantID, batch)
+ }
+
+ c.wg.Done()
+ }()
+
+ for {
+ select {
+ case <-c.quit:
+ return
+
+ case e := <-c.entries:
+ batch, ok := batches[e.tenantID]
+
+ // If the batch doesn't exist yet, we create a new one with the entry
+ if !ok {
+ batches[e.tenantID] = newBatch(e)
+ break
+ }
+
+ // If adding the entry to the batch would push its size over the maximum
+ // allowed, send the current batch and then create a new one with the entry
+ if batch.sizeBytesAfter(e) > c.cfg.BatchSize {
+ c.sendBatch(e.tenantID, batch)
+
+ batches[e.tenantID] = newBatch(e)
+ break
+ }
+
+ // The max size of the batch isn't reached, so we can add the entry
+ batch.add(e)
+
+ case <-maxWaitCheck.C:
+ // Send all batches whose max wait time has been reached
+ for tenantID, batch := range batches {
+ if batch.age() < c.cfg.BatchWait {
+ continue
+ }
+
+ c.sendBatch(tenantID, batch)
+ delete(batches, tenantID)
+ }
+ }
+ }
+}
+
+func (c *Client) sendBatch(tenantID string, batch *batch) {
+ var (
+ err error
+ buf []byte
+ entriesCount int
+ )
+ if c.cfg.EncodeJson {
+ buf, entriesCount, err = batch.encodeJSON()
+ } else {
+ buf, entriesCount, err = batch.encode()
+ }
+
+ if err != nil {
+ level.Error(c.logger).Log("msg", "error encoding batch", "error", err)
+ return
+ }
+ bufBytes := float64(len(buf))
+ encodedBytes.WithLabelValues(c.cfg.URL.Host).Add(bufBytes)
+
+ ctx := context.Background()
+ backoff := backoff.New(ctx, c.cfg.BackoffConfig)
+ var status int
+ for backoff.Ongoing() {
+ start := time.Now()
+ status, err = c.send(ctx, tenantID, buf)
+ requestDuration.WithLabelValues(strconv.Itoa(status), c.cfg.URL.Host).Observe(time.Since(start).Seconds())
+
+ if err == nil {
+ sentBytes.WithLabelValues(c.cfg.URL.Host).Add(bufBytes)
+ sentEntries.WithLabelValues(c.cfg.URL.Host).Add(float64(entriesCount))
+ for _, s := range batch.streams {
+ lbls, err := parser.ParseMetric(s.Labels)
+ if err != nil {
+ // is this possible?
+ level.Warn(c.logger).Log("msg", "error converting stream label string to label.Labels, cannot update lagging metric", "error", err)
+ return
+ }
+ var lblSet model.LabelSet
+ for i := range lbls {
+ if lbls[i].Name == LatencyLabel {
+ lblSet = model.LabelSet{
+ model.LabelName(HostLabel): model.LabelValue(c.cfg.URL.Host),
+ model.LabelName(LatencyLabel): model.LabelValue(lbls[i].Value),
+ }
+ }
+ }
+ if lblSet != nil {
+ streamLag.With(lblSet).Set(time.Since(s.Entries[len(s.Entries)-1].Timestamp).Seconds())
+ }
+ }
+ return
+ }
+
+ // Only retry 429s, 500s and connection-level errors.
+ if status > 0 && status != 429 && status/100 != 5 {
+ break
+ }
+
+ level.Warn(c.logger).Log("msg", "error sending batch, will retry", "status", status, "error", err)
+ batchRetries.WithLabelValues(c.cfg.URL.Host).Inc()
+ backoff.Wait()
+ }
+
+ if err != nil {
+ level.Error(c.logger).Log("msg", "final error sending batch", "status", status, "error", err)
+ droppedBytes.WithLabelValues(c.cfg.URL.Host).Add(bufBytes)
+ droppedEntries.WithLabelValues(c.cfg.URL.Host).Add(float64(entriesCount))
+ }
+}
+
+func (c *Client) send(ctx context.Context, tenantID string, buf []byte) (int, error) {
+ ctx, cancel := context.WithTimeout(ctx, c.cfg.Timeout)
+ defer cancel()
+ req, err := http.NewRequest("POST", c.cfg.URL.String(), bytes.NewReader(buf))
+ if err != nil {
+ return -1, err
+ }
+ req = req.WithContext(ctx)
+ req.Header.Set("Content-Type", protoContentType)
+ if c.cfg.EncodeJson {
+ req.Header.Set("Content-Type", JSONContentType)
+ }
+ req.Header.Set("User-Agent", UserAgent)
+
+ // If the tenant ID is not empty, promtail is running in multi-tenant mode,
+ // so we should send it to Loki
+ if tenantID != "" {
+ req.Header.Set("X-Scope-OrgID", tenantID)
+ }
+
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return -1, err
+ }
+ defer helpers.LogError(c.logger, "closing response body", resp.Body.Close)
+
+ if resp.StatusCode/100 != 2 {
+ scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen))
+ line := ""
+ if scanner.Scan() {
+ line = scanner.Text()
+ }
+ err = fmt.Errorf("server returned HTTP status %s (%d): %s", resp.Status, resp.StatusCode, line)
+ }
+ return resp.StatusCode, err
+}
+
+func (c *Client) getTenantID(labels model.LabelSet) string {
+ // Check if it has been overridden while processing the pipeline stages
+ if value, ok := labels[ReservedLabelTenantID]; ok {
+ return string(value)
+ }
+
+ // Check if it has been specified in the config
+ if c.cfg.TenantID != "" {
+ return c.cfg.TenantID
+ }
+
+ // Defaults to an empty string, which means the X-Scope-OrgID header
+ // will not be sent
+ return ""
+}
+
+// Stop the client.
+func (c *Client) Stop() {
+ c.once.Do(func() { close(c.quit) })
+ c.wg.Wait()
+}
+
+// Handle implements EntryHandler; it adds a new line to the next batch; sending is asynchronous.
+func (c *Client) Handle(ls model.LabelSet, t time.Time, s string) error {
+ if len(c.externalLabels) > 0 {
+ ls = c.externalLabels.Merge(ls)
+ }
+
+ // Get the tenant ID in case it has been overridden while processing
+ // the pipeline stages, then remove the special label
+ tenantID := c.getTenantID(ls)
+ if _, ok := ls[ReservedLabelTenantID]; ok {
+ // Clone the label set to not manipulate the input one
+ ls = ls.Clone()
+ delete(ls, ReservedLabelTenantID)
+ }
+
+ c.entries <- entry{tenantID, ls, logproto.Entry{
+ Timestamp: t,
+ Line: s,
+ }}
+ return nil
+}
+
+func (c *Client) UnregisterLatencyMetric(labels model.LabelSet) {
+ labels[HostLabel] = model.LabelValue(c.cfg.URL.Host)
+ streamLag.Delete(labels)
+}
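
A minimal usage sketch for this client (illustrative only, not part of the vendored code; the URL and label values are hypothetical and assume a reachable Loki push endpoint). NewWithDefault starts the background batching goroutine, Handle queues a line, and Stop flushes pending batches:

    c, err := loki.NewWithDefault("http://loki:3100/loki/api/v1/push")
    if err != nil {
        panic(err)
    }
    defer c.Stop()
    _ = c.Handle(model.LabelSet{"app": "netobserv"}, time.Now(), `{"msg":"hello"}`)
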
diff --git a/vendor/github.com/netobserv/loki-client-go/loki/config.go b/vendor/github.com/netobserv/loki-client-go/loki/config.go
new file mode 100644
index 000000000..0241f9c1e
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/loki/config.go
@@ -0,0 +1,109 @@
+package loki
+
+import (
+ "flag"
+ "time"
+
+ "github.com/netobserv/loki-client-go/pkg/backoff"
+ "github.com/netobserv/loki-client-go/pkg/labelutil"
+ "github.com/netobserv/loki-client-go/pkg/urlutil"
+ "github.com/prometheus/common/config"
+)
+
+// NOTE: the helm charts for promtail and fluent-bit also have defaults for these values; please update them to match if you make changes here.
+const (
+ BatchWait = 1 * time.Second
+ BatchSize int = 1024 * 1024
+ MinBackoff = 500 * time.Millisecond
+ MaxBackoff = 5 * time.Minute
+ MaxRetries int = 10
+ Timeout = 10 * time.Second
+)
+
+// Config describes configuration for an HTTP pusher client.
+type Config struct {
+ URL urlutil.URLValue
+ BatchWait time.Duration
+ BatchSize int
+
+ Client config.HTTPClientConfig `yaml:",inline"`
+
+ BackoffConfig backoff.BackoffConfig `yaml:"backoff_config"`
+ // The labels to add to any time series or alerts when communicating with Loki
+ ExternalLabels labelutil.LabelSet `yaml:"external_labels,omitempty"`
+ Timeout time.Duration `yaml:"timeout"`
+
+ // The tenant ID to use when pushing logs to Loki (empty string means
+ // single tenant mode)
+ TenantID string `yaml:"tenant_id"`
+
+ // Use the Loki JSON API as opposed to the snappy-compressed protobuf.
+ EncodeJson bool `yaml:"encode_json"`
+}
+
+// NewDefaultConfig creates a default configuration for a given target Loki URL.
+func NewDefaultConfig(url string) (Config, error) {
+ var cfg Config
+ var u urlutil.URLValue
+ f := &flag.FlagSet{}
+ cfg.RegisterFlags(f)
+ if err := f.Parse(nil); err != nil {
+ return cfg, err
+ }
+ if err := u.Set(url); err != nil {
+ return cfg, err
+ }
+ cfg.URL = u
+ return cfg, nil
+}
+
+// RegisterFlagsWithPrefix registers flags, prefixing every flag name with the
+// given prefix. If prefix is non-empty, it should end with a period.
+func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.Var(&c.URL, prefix+"client.url", "URL of log server")
+ f.DurationVar(&c.BatchWait, prefix+"client.batch-wait", BatchWait, "Maximum wait period before sending batch.")
+ f.IntVar(&c.BatchSize, prefix+"client.batch-size-bytes", BatchSize, "Maximum batch size to accrue before sending. ")
+ // Default backoff schedule: 0.5s, 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s (4.267m), for a total of 511.5s (8.5m) before logs are lost
+ f.IntVar(&c.BackoffConfig.MaxRetries, prefix+"client.max-retries", MaxRetries, "Maximum number of retries when sending batches.")
+ f.DurationVar(&c.BackoffConfig.MinBackoff, prefix+"client.min-backoff", MinBackoff, "Initial backoff time between retries.")
+ f.DurationVar(&c.BackoffConfig.MaxBackoff, prefix+"client.max-backoff", MaxBackoff, "Maximum backoff time between retries.")
+ f.DurationVar(&c.Timeout, prefix+"client.timeout", Timeout, "Maximum time to wait for server to respond to a request")
+ f.Var(&c.ExternalLabels, prefix+"client.external-labels", "list of external labels to add to each log (e.g: --client.external-labels=lb1=v1,lb2=v2)")
+
+ f.StringVar(&c.TenantID, prefix+"client.tenant-id", "", "Tenant ID to use when pushing logs to Loki.")
+ f.BoolVar(&c.EncodeJson, prefix+"client.encode-json", false, "Encode payload in JSON, default to snappy protobuf")
+}
+
+// RegisterFlags registers flags.
+func (c *Config) RegisterFlags(flags *flag.FlagSet) {
+ c.RegisterFlagsWithPrefix("", flags)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ type raw Config
+ var cfg raw
+ if c.URL.URL != nil {
+ // the value was set via flags, which already provide sane defaults.
+ cfg = raw(*c)
+ } else {
+ // force sane defaults.
+ cfg = raw{
+ BackoffConfig: backoff.BackoffConfig{
+ MaxBackoff: MaxBackoff,
+ MaxRetries: MaxRetries,
+ MinBackoff: MinBackoff,
+ },
+ BatchSize: BatchSize,
+ BatchWait: BatchWait,
+ Timeout: Timeout,
+ }
+ }
+
+ if err := unmarshal(&cfg); err != nil {
+ return err
+ }
+
+ *c = Config(cfg)
+ return nil
+}
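
For reference, a hedged sketch of how a caller might tune this configuration before building a client (URL, tenant, and sizes are hypothetical; only the exported fields and constructors defined above and in client.go are used):

    cfg, err := loki.NewDefaultConfig("http://loki:3100/loki/api/v1/push")
    if err != nil {
        return err
    }
    cfg.TenantID = "netobserv"      // sent as the X-Scope-OrgID header
    cfg.BatchWait = 2 * time.Second // flush at least every 2 seconds
    cfg.BatchSize = 512 * 1024      // or as soon as a batch reaches 512 KiB
    c, err := loki.NewWithLogger(cfg, logger) // logger: any go-kit log.Logger
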
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/backoff/backoff.go b/vendor/github.com/netobserv/loki-client-go/pkg/backoff/backoff.go
new file mode 100644
index 000000000..3c922c1c1
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/backoff/backoff.go
@@ -0,0 +1,117 @@
+package backoff
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "math/rand"
+ "time"
+)
+
+// BackoffConfig configures a Backoff
+type BackoffConfig struct {
+ MinBackoff time.Duration `yaml:"min_period"` // start backoff at this level
+ MaxBackoff time.Duration `yaml:"max_period"` // increase exponentially to this level
+ MaxRetries int `yaml:"max_retries"` // give up after this many; zero means infinite retries
+}
+
+// RegisterFlags for BackoffConfig.
+func (cfg *BackoffConfig) RegisterFlags(prefix string, f *flag.FlagSet) {
+ f.DurationVar(&cfg.MinBackoff, prefix+".backoff-min-period", 100*time.Millisecond, "Minimum delay when backing off.")
+ f.DurationVar(&cfg.MaxBackoff, prefix+".backoff-max-period", 10*time.Second, "Maximum delay when backing off.")
+ f.IntVar(&cfg.MaxRetries, prefix+".backoff-retries", 10, "Number of times to backoff and retry before failing.")
+}
+
+// Backoff implements exponential backoff with randomized wait times
+type Backoff struct {
+ cfg BackoffConfig
+ ctx context.Context
+ numRetries int
+ nextDelayMin time.Duration
+ nextDelayMax time.Duration
+}
+
+// New creates a Backoff object. Pass a Context that can also terminate the operation.
+func New(ctx context.Context, cfg BackoffConfig) *Backoff {
+ return &Backoff{
+ cfg: cfg,
+ ctx: ctx,
+ nextDelayMin: cfg.MinBackoff,
+ nextDelayMax: doubleDuration(cfg.MinBackoff, cfg.MaxBackoff),
+ }
+}
+
+// Reset the Backoff back to its initial condition
+func (b *Backoff) Reset() {
+ b.numRetries = 0
+ b.nextDelayMin = b.cfg.MinBackoff
+ b.nextDelayMax = doubleDuration(b.cfg.MinBackoff, b.cfg.MaxBackoff)
+}
+
+// Ongoing returns true if caller should keep going
+func (b *Backoff) Ongoing() bool {
+ // Stop if Context has errored or max retry count is exceeded
+ return b.ctx.Err() == nil && (b.cfg.MaxRetries == 0 || b.numRetries < b.cfg.MaxRetries)
+}
+
+// Err returns the reason for terminating the backoff, or nil if it didn't terminate
+func (b *Backoff) Err() error {
+ if b.ctx.Err() != nil {
+ return b.ctx.Err()
+ }
+ if b.cfg.MaxRetries != 0 && b.numRetries >= b.cfg.MaxRetries {
+ return fmt.Errorf("terminated after %d retries", b.numRetries)
+ }
+ return nil
+}
+
+// NumRetries returns the number of retries so far
+func (b *Backoff) NumRetries() int {
+ return b.numRetries
+}
+
+// Wait increments the retry count and then sleeps for the next backoff duration.
+// It returns immediately if the Context is terminated.
+func (b *Backoff) Wait() {
+ // Increase the number of retries and get the next delay
+ sleepTime := b.NextDelay()
+
+ if b.Ongoing() {
+ select {
+ case <-b.ctx.Done():
+ case <-time.After(sleepTime):
+ }
+ }
+}
+
+func (b *Backoff) NextDelay() time.Duration {
+ b.numRetries++
+
+ // Handle the edge case where min and max have the same value
+ // (or, due to some misconfiguration, max is less than min)
+ if b.nextDelayMin >= b.nextDelayMax {
+ return b.nextDelayMin
+ }
+
+ // Add a jitter within the next exponential backoff range
+ sleepTime := b.nextDelayMin + time.Duration(rand.Int63n(int64(b.nextDelayMax-b.nextDelayMin)))
+
+ // Apply the exponential backoff to calculate the next jitter
+ // range, unless we've already reached the max
+ if b.nextDelayMax < b.cfg.MaxBackoff {
+ b.nextDelayMin = doubleDuration(b.nextDelayMin, b.cfg.MaxBackoff)
+ b.nextDelayMax = doubleDuration(b.nextDelayMax, b.cfg.MaxBackoff)
+ }
+
+ return sleepTime
+}
+
+func doubleDuration(value time.Duration, max time.Duration) time.Duration {
+ value = value * 2
+
+ if value <= max {
+ return value
+ }
+
+ return max
+}
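
The Loki client above drives this package in a loop; here is a standalone sketch of that pattern (doRequest is a hypothetical fallible operation, not part of this package):

    b := backoff.New(ctx, backoff.BackoffConfig{
        MinBackoff: 500 * time.Millisecond,
        MaxBackoff: 5 * time.Second,
        MaxRetries: 5,
    })
    for b.Ongoing() {
        if err := doRequest(); err == nil {
            return nil
        }
        b.Wait() // jittered, exponentially growing delay, capped at MaxBackoff
    }
    return b.Err() // context error, or "terminated after N retries"
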
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/helpers/config.go b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/config.go
new file mode 100644
index 000000000..7dce2260f
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/config.go
@@ -0,0 +1,18 @@
+package helpers
+
+import (
+ "io/ioutil"
+
+ "github.com/pkg/errors"
+ yaml "gopkg.in/yaml.v2"
+)
+
+// LoadConfig reads YAML-formatted config from filename into cfg.
+func LoadConfig(filename string, cfg interface{}) error {
+ buf, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return errors.Wrap(err, "Error reading config file")
+ }
+
+ return yaml.UnmarshalStrict(buf, cfg)
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/helpers/logerror.go b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/logerror.go
new file mode 100644
index 000000000..dd75ff5e9
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/logerror.go
@@ -0,0 +1,13 @@
+package helpers
+
+import (
+ "github.com/go-kit/kit/log"
+ "github.com/go-kit/kit/log/level"
+)
+
+// LogError logs any error returned by f; useful when deferring Close etc.
+func LogError(logger log.Logger, message string, f func() error) {
+ if err := f(); err != nil {
+ level.Error(logger).Log("message", message, "error", err)
+ }
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/helpers/math.go b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/math.go
new file mode 100644
index 000000000..2902683fa
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/math.go
@@ -0,0 +1,9 @@
+package helpers
+
+// MinUint32 returns the min of a and b.
+func MinUint32(a, b uint32) uint32 {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/labelutil/label.go b/vendor/github.com/netobserv/loki-client-go/pkg/labelutil/label.go
new file mode 100644
index 000000000..6df0b04d6
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/labelutil/label.go
@@ -0,0 +1,99 @@
+package labelutil
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/common/model"
+ "gopkg.in/yaml.v2"
+)
+
+// LabelSet wraps a model.LabelSet so it can be used as a flag.
+type LabelSet struct {
+ model.LabelSet `yaml:",inline"`
+}
+
+// String implements flag.Value
+// Format: a=1,b=2
+func (v LabelSet) String() string {
+ if v.LabelSet == nil {
+ return ""
+ }
+ records := make([]string, 0, len(v.LabelSet)>>1)
+ for k, v := range v.LabelSet {
+ records = append(records, string(k)+"="+string(v))
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+// Set implements flag.Value
+func (v *LabelSet) Set(s string) error {
+ var ss []string
+ n := strings.Count(s, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", s)
+ case 1:
+ ss = append(ss, strings.Trim(s, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(s))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := model.LabelSet{}
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[model.LabelName(kv[0])] = model.LabelValue(kv[1])
+ }
+
+ if err := out.Validate(); err != nil {
+ return err
+ }
+ v.LabelSet = out
+ return nil
+}
+
+// UnmarshalYAML implements the Unmarshaler interface of the yaml pkg.
+func (v *LabelSet) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ lbSet := model.LabelSet{}
+ err := unmarshal(&lbSet)
+ if err != nil {
+ return err
+ }
+ v.LabelSet = lbSet
+ return nil
+}
+
+// MarshalYAML implements yaml.Marshaler.
+func (v LabelSet) MarshalYAML() (interface{}, error) {
+ out, err := yaml.Marshal(ModelLabelSetToMap(v.LabelSet))
+ if err != nil {
+ return nil, err
+ }
+ return string(out), nil
+}
+
+// ModelLabelSetToMap converts a model.LabelSet to a map[string]string
+func ModelLabelSetToMap(m model.LabelSet) map[string]string {
+ result := map[string]string{}
+ for k, v := range m {
+ result[string(k)] = string(v)
+ }
+ return result
+}
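
A short sketch of the flag value format this type accepts (illustrative, not part of the vendored code):

    var ls labelutil.LabelSet
    if err := ls.Set("cluster=prod,app=netobserv"); err != nil {
        return err
    }
    // ls.LabelSet is now model.LabelSet{"cluster": "prod", "app": "netobserv"}
    fmt.Println(ls.String()) // e.g. [cluster=prod,app=netobserv] (map order not guaranteed)
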
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go
new file mode 100644
index 000000000..3a68d2614
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go
@@ -0,0 +1,23 @@
+package logproto
+
+import "github.com/prometheus/prometheus/pkg/labels"
+
+// Note: this is not very efficient, and its use should be minimized, as it requires label construction on each comparison
+type SeriesIdentifiers []SeriesIdentifier
+
+func (ids SeriesIdentifiers) Len() int { return len(ids) }
+func (ids SeriesIdentifiers) Swap(i, j int) { ids[i], ids[j] = ids[j], ids[i] }
+func (ids SeriesIdentifiers) Less(i, j int) bool {
+ a, b := labels.FromMap(ids[i].Labels), labels.FromMap(ids[j].Labels)
+ return labels.Compare(a, b) <= 0
+}
+
+type Streams []Stream
+
+func (xs Streams) Len() int { return len(xs) }
+func (xs Streams) Swap(i, j int) { xs[i], xs[j] = xs[j], xs[i] }
+func (xs Streams) Less(i, j int) bool { return xs[i].Labels <= xs[j].Labels }
+
+func (s Series) Len() int { return len(s.Samples) }
+func (s Series) Swap(i, j int) { s.Samples[i], s.Samples[j] = s.Samples[j], s.Samples[i] }
+func (s Series) Less(i, j int) bool { return s.Samples[i].Timestamp < s.Samples[j].Timestamp }
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.pb.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.pb.go
new file mode 100644
index 000000000..b1acaa0d5
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.pb.go
@@ -0,0 +1,8031 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: pkg/logproto/logproto.proto
+
+package logproto
+
+import (
+ bytes "bytes"
+ context "context"
+ encoding_binary "encoding/binary"
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ _ "github.com/gogo/protobuf/types"
+ github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+ grpc "google.golang.org/grpc"
+ io "io"
+ math "math"
+ reflect "reflect"
+ strconv "strconv"
+ strings "strings"
+ time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Direction int32
+
+const (
+ FORWARD Direction = 0
+ BACKWARD Direction = 1
+)
+
+var Direction_name = map[int32]string{
+ 0: "FORWARD",
+ 1: "BACKWARD",
+}
+
+var Direction_value = map[string]int32{
+ "FORWARD": 0,
+ "BACKWARD": 1,
+}
+
+func (Direction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{0}
+}
+
+type PushRequest struct {
+ Streams []Stream `protobuf:"bytes,1,rep,name=streams,proto3,customtype=Stream" json:"streams"`
+}
+
+func (m *PushRequest) Reset() { *m = PushRequest{} }
+func (*PushRequest) ProtoMessage() {}
+func (*PushRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{0}
+}
+func (m *PushRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PushRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PushRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PushRequest.Merge(m, src)
+}
+func (m *PushRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *PushRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PushRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PushRequest proto.InternalMessageInfo
+
+type PushResponse struct {
+}
+
+func (m *PushResponse) Reset() { *m = PushResponse{} }
+func (*PushResponse) ProtoMessage() {}
+func (*PushResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{1}
+}
+func (m *PushResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PushResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PushResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PushResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PushResponse.Merge(m, src)
+}
+func (m *PushResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *PushResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_PushResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PushResponse proto.InternalMessageInfo
+
+type QueryRequest struct {
+ Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+ Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+ Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"`
+ End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"`
+ Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"`
+ Shards []string `protobuf:"bytes,7,rep,name=shards,proto3" json:"shards,omitempty"`
+}
+
+func (m *QueryRequest) Reset() { *m = QueryRequest{} }
+func (*QueryRequest) ProtoMessage() {}
+func (*QueryRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{2}
+}
+func (m *QueryRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryRequest.Merge(m, src)
+}
+func (m *QueryRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryRequest proto.InternalMessageInfo
+
+func (m *QueryRequest) GetSelector() string {
+ if m != nil {
+ return m.Selector
+ }
+ return ""
+}
+
+func (m *QueryRequest) GetLimit() uint32 {
+ if m != nil {
+ return m.Limit
+ }
+ return 0
+}
+
+func (m *QueryRequest) GetStart() time.Time {
+ if m != nil {
+ return m.Start
+ }
+ return time.Time{}
+}
+
+func (m *QueryRequest) GetEnd() time.Time {
+ if m != nil {
+ return m.End
+ }
+ return time.Time{}
+}
+
+func (m *QueryRequest) GetDirection() Direction {
+ if m != nil {
+ return m.Direction
+ }
+ return FORWARD
+}
+
+func (m *QueryRequest) GetShards() []string {
+ if m != nil {
+ return m.Shards
+ }
+ return nil
+}
+
+type SampleQueryRequest struct {
+ Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+ Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"`
+ End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"`
+ Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"`
+}
+
+func (m *SampleQueryRequest) Reset() { *m = SampleQueryRequest{} }
+func (*SampleQueryRequest) ProtoMessage() {}
+func (*SampleQueryRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{3}
+}
+func (m *SampleQueryRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SampleQueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SampleQueryRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SampleQueryRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SampleQueryRequest.Merge(m, src)
+}
+func (m *SampleQueryRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *SampleQueryRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SampleQueryRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SampleQueryRequest proto.InternalMessageInfo
+
+func (m *SampleQueryRequest) GetSelector() string {
+ if m != nil {
+ return m.Selector
+ }
+ return ""
+}
+
+func (m *SampleQueryRequest) GetStart() time.Time {
+ if m != nil {
+ return m.Start
+ }
+ return time.Time{}
+}
+
+func (m *SampleQueryRequest) GetEnd() time.Time {
+ if m != nil {
+ return m.End
+ }
+ return time.Time{}
+}
+
+func (m *SampleQueryRequest) GetShards() []string {
+ if m != nil {
+ return m.Shards
+ }
+ return nil
+}
+
+type SampleQueryResponse struct {
+ Series []Series `protobuf:"bytes,1,rep,name=series,proto3,customtype=Series" json:"series,omitempty"`
+}
+
+func (m *SampleQueryResponse) Reset() { *m = SampleQueryResponse{} }
+func (*SampleQueryResponse) ProtoMessage() {}
+func (*SampleQueryResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{4}
+}
+func (m *SampleQueryResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SampleQueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SampleQueryResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SampleQueryResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SampleQueryResponse.Merge(m, src)
+}
+func (m *SampleQueryResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *SampleQueryResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_SampleQueryResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SampleQueryResponse proto.InternalMessageInfo
+
+type QueryResponse struct {
+ Streams []Stream `protobuf:"bytes,1,rep,name=streams,proto3,customtype=Stream" json:"streams,omitempty"`
+}
+
+func (m *QueryResponse) Reset() { *m = QueryResponse{} }
+func (*QueryResponse) ProtoMessage() {}
+func (*QueryResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{5}
+}
+func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryResponse.Merge(m, src)
+}
+func (m *QueryResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryResponse proto.InternalMessageInfo
+
+type LabelRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Values bool `protobuf:"varint,2,opt,name=values,proto3" json:"values,omitempty"`
+ Start *time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start,omitempty"`
+ End *time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end,omitempty"`
+}
+
+func (m *LabelRequest) Reset() { *m = LabelRequest{} }
+func (*LabelRequest) ProtoMessage() {}
+func (*LabelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{6}
+}
+func (m *LabelRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LabelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LabelRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LabelRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelRequest.Merge(m, src)
+}
+func (m *LabelRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LabelRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelRequest proto.InternalMessageInfo
+
+func (m *LabelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *LabelRequest) GetValues() bool {
+ if m != nil {
+ return m.Values
+ }
+ return false
+}
+
+func (m *LabelRequest) GetStart() *time.Time {
+ if m != nil {
+ return m.Start
+ }
+ return nil
+}
+
+func (m *LabelRequest) GetEnd() *time.Time {
+ if m != nil {
+ return m.End
+ }
+ return nil
+}
+
+type LabelResponse struct {
+ Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (m *LabelResponse) Reset() { *m = LabelResponse{} }
+func (*LabelResponse) ProtoMessage() {}
+func (*LabelResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{7}
+}
+func (m *LabelResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LabelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LabelResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LabelResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelResponse.Merge(m, src)
+}
+func (m *LabelResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LabelResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelResponse proto.InternalMessageInfo
+
+func (m *LabelResponse) GetValues() []string {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type StreamAdapter struct {
+ Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"`
+ Entries []EntryAdapter `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries"`
+}
+
+func (m *StreamAdapter) Reset() { *m = StreamAdapter{} }
+func (*StreamAdapter) ProtoMessage() {}
+func (*StreamAdapter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{8}
+}
+func (m *StreamAdapter) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StreamAdapter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_StreamAdapter.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *StreamAdapter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StreamAdapter.Merge(m, src)
+}
+func (m *StreamAdapter) XXX_Size() int {
+ return m.Size()
+}
+func (m *StreamAdapter) XXX_DiscardUnknown() {
+ xxx_messageInfo_StreamAdapter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StreamAdapter proto.InternalMessageInfo
+
+func (m *StreamAdapter) GetLabels() string {
+ if m != nil {
+ return m.Labels
+ }
+ return ""
+}
+
+func (m *StreamAdapter) GetEntries() []EntryAdapter {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+type EntryAdapter struct {
+ Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
+ Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
+}
+
+func (m *EntryAdapter) Reset() { *m = EntryAdapter{} }
+func (*EntryAdapter) ProtoMessage() {}
+func (*EntryAdapter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{9}
+}
+func (m *EntryAdapter) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EntryAdapter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_EntryAdapter.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *EntryAdapter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EntryAdapter.Merge(m, src)
+}
+func (m *EntryAdapter) XXX_Size() int {
+ return m.Size()
+}
+func (m *EntryAdapter) XXX_DiscardUnknown() {
+ xxx_messageInfo_EntryAdapter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EntryAdapter proto.InternalMessageInfo
+
+func (m *EntryAdapter) GetTimestamp() time.Time {
+ if m != nil {
+ return m.Timestamp
+ }
+ return time.Time{}
+}
+
+func (m *EntryAdapter) GetLine() string {
+ if m != nil {
+ return m.Line
+ }
+ return ""
+}
+
+type Sample struct {
+ Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"ts"`
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value"`
+ Hash uint64 `protobuf:"varint,3,opt,name=hash,proto3" json:"hash"`
+}
+
+func (m *Sample) Reset() { *m = Sample{} }
+func (*Sample) ProtoMessage() {}
+func (*Sample) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{10}
+}
+func (m *Sample) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Sample) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Sample.Merge(m, src)
+}
+func (m *Sample) XXX_Size() int {
+ return m.Size()
+}
+func (m *Sample) XXX_DiscardUnknown() {
+ xxx_messageInfo_Sample.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Sample proto.InternalMessageInfo
+
+func (m *Sample) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+func (m *Sample) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func (m *Sample) GetHash() uint64 {
+ if m != nil {
+ return m.Hash
+ }
+ return 0
+}
+
+type Series struct {
+ Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"`
+ Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
+}
+
+func (m *Series) Reset() { *m = Series{} }
+func (*Series) ProtoMessage() {}
+func (*Series) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{11}
+}
+func (m *Series) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Series) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Series.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Series) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Series.Merge(m, src)
+}
+func (m *Series) XXX_Size() int {
+ return m.Size()
+}
+func (m *Series) XXX_DiscardUnknown() {
+ xxx_messageInfo_Series.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Series proto.InternalMessageInfo
+
+func (m *Series) GetLabels() string {
+ if m != nil {
+ return m.Labels
+ }
+ return ""
+}
+
+func (m *Series) GetSamples() []Sample {
+ if m != nil {
+ return m.Samples
+ }
+ return nil
+}
+
+type TailRequest struct {
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"`
+ Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
+ Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"`
+}
+
+func (m *TailRequest) Reset() { *m = TailRequest{} }
+func (*TailRequest) ProtoMessage() {}
+func (*TailRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{12}
+}
+func (m *TailRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TailRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TailRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TailRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TailRequest.Merge(m, src)
+}
+func (m *TailRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *TailRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_TailRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TailRequest proto.InternalMessageInfo
+
+func (m *TailRequest) GetQuery() string {
+ if m != nil {
+ return m.Query
+ }
+ return ""
+}
+
+func (m *TailRequest) GetDelayFor() uint32 {
+ if m != nil {
+ return m.DelayFor
+ }
+ return 0
+}
+
+func (m *TailRequest) GetLimit() uint32 {
+ if m != nil {
+ return m.Limit
+ }
+ return 0
+}
+
+func (m *TailRequest) GetStart() time.Time {
+ if m != nil {
+ return m.Start
+ }
+ return time.Time{}
+}
+
+type TailResponse struct {
+ Stream *Stream `protobuf:"bytes,1,opt,name=stream,proto3,customtype=Stream" json:"stream,omitempty"`
+ DroppedStreams []*DroppedStream `protobuf:"bytes,2,rep,name=droppedStreams,proto3" json:"droppedStreams,omitempty"`
+}
+
+func (m *TailResponse) Reset() { *m = TailResponse{} }
+func (*TailResponse) ProtoMessage() {}
+func (*TailResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{13}
+}
+func (m *TailResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TailResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TailResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TailResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TailResponse.Merge(m, src)
+}
+func (m *TailResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *TailResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_TailResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TailResponse proto.InternalMessageInfo
+
+func (m *TailResponse) GetDroppedStreams() []*DroppedStream {
+ if m != nil {
+ return m.DroppedStreams
+ }
+ return nil
+}
+
+type SeriesRequest struct {
+ Start time.Time `protobuf:"bytes,1,opt,name=start,proto3,stdtime" json:"start"`
+ End time.Time `protobuf:"bytes,2,opt,name=end,proto3,stdtime" json:"end"`
+ Groups []string `protobuf:"bytes,3,rep,name=groups,proto3" json:"groups,omitempty"`
+}
+
+func (m *SeriesRequest) Reset() { *m = SeriesRequest{} }
+func (*SeriesRequest) ProtoMessage() {}
+func (*SeriesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{14}
+}
+func (m *SeriesRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SeriesRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SeriesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeriesRequest.Merge(m, src)
+}
+func (m *SeriesRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeriesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeriesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeriesRequest proto.InternalMessageInfo
+
+func (m *SeriesRequest) GetStart() time.Time {
+ if m != nil {
+ return m.Start
+ }
+ return time.Time{}
+}
+
+func (m *SeriesRequest) GetEnd() time.Time {
+ if m != nil {
+ return m.End
+ }
+ return time.Time{}
+}
+
+func (m *SeriesRequest) GetGroups() []string {
+ if m != nil {
+ return m.Groups
+ }
+ return nil
+}
+
+type SeriesResponse struct {
+ Series []SeriesIdentifier `protobuf:"bytes,1,rep,name=series,proto3" json:"series"`
+}
+
+func (m *SeriesResponse) Reset() { *m = SeriesResponse{} }
+func (*SeriesResponse) ProtoMessage() {}
+func (*SeriesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{15}
+}
+func (m *SeriesResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SeriesResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SeriesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeriesResponse.Merge(m, src)
+}
+func (m *SeriesResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeriesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeriesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeriesResponse proto.InternalMessageInfo
+
+func (m *SeriesResponse) GetSeries() []SeriesIdentifier {
+ if m != nil {
+ return m.Series
+ }
+ return nil
+}
+
+type SeriesIdentifier struct {
+ Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *SeriesIdentifier) Reset() { *m = SeriesIdentifier{} }
+func (*SeriesIdentifier) ProtoMessage() {}
+func (*SeriesIdentifier) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{16}
+}
+func (m *SeriesIdentifier) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeriesIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SeriesIdentifier.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SeriesIdentifier) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeriesIdentifier.Merge(m, src)
+}
+func (m *SeriesIdentifier) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeriesIdentifier) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeriesIdentifier.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeriesIdentifier proto.InternalMessageInfo
+
+func (m *SeriesIdentifier) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+type DroppedStream struct {
+ From time.Time `protobuf:"bytes,1,opt,name=from,proto3,stdtime" json:"from"`
+ To time.Time `protobuf:"bytes,2,opt,name=to,proto3,stdtime" json:"to"`
+ Labels string `protobuf:"bytes,3,opt,name=labels,proto3" json:"labels,omitempty"`
+}
+
+func (m *DroppedStream) Reset() { *m = DroppedStream{} }
+func (*DroppedStream) ProtoMessage() {}
+func (*DroppedStream) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{17}
+}
+func (m *DroppedStream) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DroppedStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DroppedStream.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DroppedStream) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DroppedStream.Merge(m, src)
+}
+func (m *DroppedStream) XXX_Size() int {
+ return m.Size()
+}
+func (m *DroppedStream) XXX_DiscardUnknown() {
+ xxx_messageInfo_DroppedStream.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DroppedStream proto.InternalMessageInfo
+
+func (m *DroppedStream) GetFrom() time.Time {
+ if m != nil {
+ return m.From
+ }
+ return time.Time{}
+}
+
+func (m *DroppedStream) GetTo() time.Time {
+ if m != nil {
+ return m.To
+ }
+ return time.Time{}
+}
+
+func (m *DroppedStream) GetLabels() string {
+ if m != nil {
+ return m.Labels
+ }
+ return ""
+}
+
+type TimeSeriesChunk struct {
+ FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"`
+ UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
+ Labels []*LabelPair `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty"`
+ Chunks []*Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks,omitempty"`
+}
+
+func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} }
+func (*TimeSeriesChunk) ProtoMessage() {}
+func (*TimeSeriesChunk) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{18}
+}
+func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeriesChunk.Merge(m, src)
+}
+func (m *TimeSeriesChunk) XXX_Size() int {
+ return m.Size()
+}
+func (m *TimeSeriesChunk) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeSeriesChunk.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeSeriesChunk proto.InternalMessageInfo
+
+func (m *TimeSeriesChunk) GetFromIngesterId() string {
+ if m != nil {
+ return m.FromIngesterId
+ }
+ return ""
+}
+
+func (m *TimeSeriesChunk) GetUserId() string {
+ if m != nil {
+ return m.UserId
+ }
+ return ""
+}
+
+func (m *TimeSeriesChunk) GetLabels() []*LabelPair {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *TimeSeriesChunk) GetChunks() []*Chunk {
+ if m != nil {
+ return m.Chunks
+ }
+ return nil
+}
+
+type LabelPair struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (*LabelPair) ProtoMessage() {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{19}
+}
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LabelPair) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelPair.Merge(m, src)
+}
+func (m *LabelPair) XXX_Size() int {
+ return m.Size()
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+
+func (m *LabelPair) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+type Chunk struct {
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *Chunk) Reset() { *m = Chunk{} }
+func (*Chunk) ProtoMessage() {}
+func (*Chunk) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{20}
+}
+func (m *Chunk) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Chunk.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Chunk) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Chunk.Merge(m, src)
+}
+func (m *Chunk) XXX_Size() int {
+ return m.Size()
+}
+func (m *Chunk) XXX_DiscardUnknown() {
+ xxx_messageInfo_Chunk.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Chunk proto.InternalMessageInfo
+
+func (m *Chunk) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type TransferChunksResponse struct {
+}
+
+func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} }
+func (*TransferChunksResponse) ProtoMessage() {}
+func (*TransferChunksResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{21}
+}
+func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TransferChunksResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TransferChunksResponse.Merge(m, src)
+}
+func (m *TransferChunksResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *TransferChunksResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_TransferChunksResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo
+
+type TailersCountRequest struct {
+}
+
+func (m *TailersCountRequest) Reset() { *m = TailersCountRequest{} }
+func (*TailersCountRequest) ProtoMessage() {}
+func (*TailersCountRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{22}
+}
+func (m *TailersCountRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TailersCountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TailersCountRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TailersCountRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TailersCountRequest.Merge(m, src)
+}
+func (m *TailersCountRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *TailersCountRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_TailersCountRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TailersCountRequest proto.InternalMessageInfo
+
+type TailersCountResponse struct {
+ Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (m *TailersCountResponse) Reset() { *m = TailersCountResponse{} }
+func (*TailersCountResponse) ProtoMessage() {}
+func (*TailersCountResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{23}
+}
+func (m *TailersCountResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TailersCountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TailersCountResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TailersCountResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TailersCountResponse.Merge(m, src)
+}
+func (m *TailersCountResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *TailersCountResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_TailersCountResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TailersCountResponse proto.InternalMessageInfo
+
+func (m *TailersCountResponse) GetCount() uint32 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+type GetChunkIDsRequest struct {
+ Matchers string `protobuf:"bytes,1,opt,name=matchers,proto3" json:"matchers,omitempty"`
+ Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"`
+ End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"`
+}
+
+func (m *GetChunkIDsRequest) Reset() { *m = GetChunkIDsRequest{} }
+func (*GetChunkIDsRequest) ProtoMessage() {}
+func (*GetChunkIDsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{24}
+}
+func (m *GetChunkIDsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GetChunkIDsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GetChunkIDsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GetChunkIDsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetChunkIDsRequest.Merge(m, src)
+}
+func (m *GetChunkIDsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *GetChunkIDsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetChunkIDsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetChunkIDsRequest proto.InternalMessageInfo
+
+func (m *GetChunkIDsRequest) GetMatchers() string {
+ if m != nil {
+ return m.Matchers
+ }
+ return ""
+}
+
+func (m *GetChunkIDsRequest) GetStart() time.Time {
+ if m != nil {
+ return m.Start
+ }
+ return time.Time{}
+}
+
+func (m *GetChunkIDsRequest) GetEnd() time.Time {
+ if m != nil {
+ return m.End
+ }
+ return time.Time{}
+}
+
+type GetChunkIDsResponse struct {
+ ChunkIDs []string `protobuf:"bytes,1,rep,name=chunkIDs,proto3" json:"chunkIDs,omitempty"`
+}
+
+func (m *GetChunkIDsResponse) Reset() { *m = GetChunkIDsResponse{} }
+func (*GetChunkIDsResponse) ProtoMessage() {}
+func (*GetChunkIDsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{25}
+}
+func (m *GetChunkIDsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GetChunkIDsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GetChunkIDsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GetChunkIDsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetChunkIDsResponse.Merge(m, src)
+}
+func (m *GetChunkIDsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *GetChunkIDsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetChunkIDsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetChunkIDsResponse proto.InternalMessageInfo
+
+func (m *GetChunkIDsResponse) GetChunkIDs() []string {
+ if m != nil {
+ return m.ChunkIDs
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("logproto.Direction", Direction_name, Direction_value)
+ proto.RegisterType((*PushRequest)(nil), "logproto.PushRequest")
+ proto.RegisterType((*PushResponse)(nil), "logproto.PushResponse")
+ proto.RegisterType((*QueryRequest)(nil), "logproto.QueryRequest")
+ proto.RegisterType((*SampleQueryRequest)(nil), "logproto.SampleQueryRequest")
+ proto.RegisterType((*SampleQueryResponse)(nil), "logproto.SampleQueryResponse")
+ proto.RegisterType((*QueryResponse)(nil), "logproto.QueryResponse")
+ proto.RegisterType((*LabelRequest)(nil), "logproto.LabelRequest")
+ proto.RegisterType((*LabelResponse)(nil), "logproto.LabelResponse")
+ proto.RegisterType((*StreamAdapter)(nil), "logproto.StreamAdapter")
+ proto.RegisterType((*EntryAdapter)(nil), "logproto.EntryAdapter")
+ proto.RegisterType((*Sample)(nil), "logproto.Sample")
+ proto.RegisterType((*Series)(nil), "logproto.Series")
+ proto.RegisterType((*TailRequest)(nil), "logproto.TailRequest")
+ proto.RegisterType((*TailResponse)(nil), "logproto.TailResponse")
+ proto.RegisterType((*SeriesRequest)(nil), "logproto.SeriesRequest")
+ proto.RegisterType((*SeriesResponse)(nil), "logproto.SeriesResponse")
+ proto.RegisterType((*SeriesIdentifier)(nil), "logproto.SeriesIdentifier")
+ proto.RegisterMapType((map[string]string)(nil), "logproto.SeriesIdentifier.LabelsEntry")
+ proto.RegisterType((*DroppedStream)(nil), "logproto.DroppedStream")
+ proto.RegisterType((*TimeSeriesChunk)(nil), "logproto.TimeSeriesChunk")
+ proto.RegisterType((*LabelPair)(nil), "logproto.LabelPair")
+ proto.RegisterType((*Chunk)(nil), "logproto.Chunk")
+ proto.RegisterType((*TransferChunksResponse)(nil), "logproto.TransferChunksResponse")
+ proto.RegisterType((*TailersCountRequest)(nil), "logproto.TailersCountRequest")
+ proto.RegisterType((*TailersCountResponse)(nil), "logproto.TailersCountResponse")
+ proto.RegisterType((*GetChunkIDsRequest)(nil), "logproto.GetChunkIDsRequest")
+ proto.RegisterType((*GetChunkIDsResponse)(nil), "logproto.GetChunkIDsResponse")
+}
+
+func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) }
+
+var fileDescriptor_c28a5f14f1f4c79a = []byte{
+ // 1366 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4b, 0x8f, 0x13, 0xc7,
+ 0x13, 0x77, 0xfb, 0x31, 0xb6, 0xcb, 0x0f, 0xac, 0xde, 0x65, 0xd7, 0x7f, 0x03, 0x63, 0xab, 0x85,
+ 0xc0, 0xfa, 0x87, 0x78, 0x83, 0xf3, 0xe2, 0x91, 0x87, 0xd6, 0x6c, 0x08, 0x4b, 0x50, 0x80, 0x01,
+ 0x09, 0x09, 0x29, 0x42, 0xb3, 0x76, 0xaf, 0x3d, 0x5a, 0x7b, 0xc6, 0xcc, 0xb4, 0x91, 0xf6, 0x96,
+ 0x0f, 0x90, 0x48, 0xdc, 0x72, 0xe0, 0x9a, 0x43, 0x94, 0x43, 0x3e, 0x07, 0x47, 0x94, 0x13, 0xca,
+ 0xc1, 0x09, 0xe6, 0x12, 0xad, 0x72, 0xe0, 0x23, 0x44, 0xfd, 0x98, 0x99, 0xb6, 0xd9, 0x4d, 0x30,
+ 0x97, 0x5c, 0x3c, 0x5d, 0xd5, 0xd5, 0xd5, 0x55, 0xbf, 0xfe, 0x55, 0x75, 0x1b, 0x4e, 0x8c, 0xf7,
+ 0xfa, 0x1b, 0x43, 0xaf, 0x3f, 0xf6, 0x3d, 0xe6, 0x45, 0x83, 0x96, 0xf8, 0xc5, 0xb9, 0x50, 0xae,
+ 0xd5, 0xfb, 0x9e, 0xd7, 0x1f, 0xd2, 0x0d, 0x21, 0xed, 0x4c, 0x76, 0x37, 0x98, 0x33, 0xa2, 0x01,
+ 0xb3, 0x47, 0x63, 0x69, 0x5a, 0x7b, 0xb7, 0xef, 0xb0, 0xc1, 0x64, 0xa7, 0xd5, 0xf5, 0x46, 0x1b,
+ 0x7d, 0xaf, 0xef, 0xc5, 0x96, 0x5c, 0x92, 0xde, 0xf9, 0x48, 0x9a, 0x93, 0x7b, 0x50, 0xb8, 0x35,
+ 0x09, 0x06, 0x16, 0x7d, 0x38, 0xa1, 0x01, 0xc3, 0xd7, 0x20, 0x1b, 0x30, 0x9f, 0xda, 0xa3, 0xa0,
+ 0x8a, 0x1a, 0xa9, 0x66, 0xa1, 0xbd, 0xde, 0x8a, 0x42, 0xb9, 0x23, 0x26, 0x36, 0x7b, 0xf6, 0x98,
+ 0x51, 0xbf, 0x73, 0xfc, 0xb7, 0x69, 0xdd, 0x90, 0xaa, 0x83, 0x69, 0x3d, 0x5c, 0x65, 0x85, 0x03,
+ 0x52, 0x86, 0xa2, 0x74, 0x1c, 0x8c, 0x3d, 0x37, 0xa0, 0xe4, 0x49, 0x12, 0x8a, 0xb7, 0x27, 0xd4,
+ 0xdf, 0x0f, 0xb7, 0xaa, 0x41, 0x2e, 0xa0, 0x43, 0xda, 0x65, 0x9e, 0x5f, 0x45, 0x0d, 0xd4, 0xcc,
+ 0x5b, 0x91, 0x8c, 0x57, 0x21, 0x33, 0x74, 0x46, 0x0e, 0xab, 0x26, 0x1b, 0xa8, 0x59, 0xb2, 0xa4,
+ 0x80, 0x2f, 0x41, 0x26, 0x60, 0xb6, 0xcf, 0xaa, 0xa9, 0x06, 0x6a, 0x16, 0xda, 0xb5, 0x96, 0xc4,
+ 0xa2, 0x15, 0x66, 0xd8, 0xba, 0x1b, 0x62, 0xd1, 0xc9, 0x3d, 0x9d, 0xd6, 0x13, 0x8f, 0x7f, 0xaf,
+ 0x23, 0x4b, 0x2e, 0xc1, 0x1f, 0x41, 0x8a, 0xba, 0xbd, 0x6a, 0x7a, 0x89, 0x95, 0x7c, 0x01, 0x3e,
+ 0x0f, 0xf9, 0x9e, 0xe3, 0xd3, 0x2e, 0x73, 0x3c, 0xb7, 0x9a, 0x69, 0xa0, 0x66, 0xb9, 0xbd, 0x12,
+ 0x43, 0xb2, 0x15, 0x4e, 0x59, 0xb1, 0x15, 0x3e, 0x07, 0x46, 0x30, 0xb0, 0xfd, 0x5e, 0x50, 0xcd,
+ 0x36, 0x52, 0xcd, 0x7c, 0x67, 0xf5, 0x60, 0x5a, 0xaf, 0x48, 0xcd, 0x39, 0x6f, 0xe4, 0x30, 0x3a,
+ 0x1a, 0xb3, 0x7d, 0x4b, 0xd9, 0x5c, 0x4f, 0xe7, 0x8c, 0x4a, 0x96, 0xfc, 0x8a, 0x00, 0xdf, 0xb1,
+ 0x47, 0xe3, 0x21, 0x7d, 0x63, 0x8c, 0x22, 0x34, 0x92, 0x6f, 0x8d, 0x46, 0x6a, 0x59, 0x34, 0xe2,
+ 0xd4, 0xd2, 0xff, 0x9e, 0x1a, 0xb9, 0x09, 0x2b, 0x73, 0x39, 0x49, 0x26, 0xe0, 0x0b, 0x60, 0x04,
+ 0xd4, 0x77, 0x68, 0x48, 0xb1, 0x8a, 0x46, 0x31, 0xa1, 0xef, 0x94, 0x9f, 0x4e, 0xeb, 0x48, 0xf0,
+ 0x4b, 0xc8, 0x96, 0xb2, 0x27, 0x16, 0x94, 0xe6, 0x5d, 0x6d, 0xbe, 0x31, 0x5d, 0x63, 0x97, 0x42,
+ 0x1d, 0xf3, 0xf4, 0x17, 0x04, 0xc5, 0x1b, 0xf6, 0x0e, 0x1d, 0x86, 0x98, 0x63, 0x48, 0xbb, 0xf6,
+ 0x88, 0x2a, 0xbc, 0xc5, 0x18, 0xaf, 0x81, 0xf1, 0xc8, 0x1e, 0x4e, 0x68, 0x20, 0xc0, 0xce, 0x59,
+ 0x4a, 0x5a, 0x96, 0x91, 0xe8, 0xad, 0x19, 0x89, 0xa2, 0x33, 0x20, 0x67, 0xa1, 0xa4, 0xe2, 0x55,
+ 0x20, 0xc4, 0xc1, 0x71, 0x0c, 0xf2, 0x61, 0x70, 0xe4, 0x11, 0x94, 0xe6, 0x30, 0xc0, 0x04, 0x8c,
+ 0x21, 0x5f, 0x19, 0xc8, 0xdc, 0x3a, 0x70, 0x30, 0xad, 0x2b, 0x8d, 0xa5, 0xbe, 0x1c, 0x51, 0xea,
+ 0x32, 0x71, 0x3a, 0x49, 0x81, 0xe8, 0x5a, 0x8c, 0xe8, 0x17, 0x2e, 0xf3, 0xf7, 0x43, 0x40, 0x8f,
+ 0x71, 0x66, 0xf0, 0xca, 0x57, 0xe6, 0x56, 0x38, 0x20, 0x8f, 0xa0, 0xa8, 0x5b, 0xe2, 0x6b, 0x90,
+ 0x8f, 0x9a, 0x94, 0xd8, 0xf9, 0x9f, 0xd3, 0x2d, 0x2b, 0xc7, 0x49, 0x16, 0x88, 0xa4, 0xe3, 0xc5,
+ 0xf8, 0x24, 0xa4, 0x87, 0x8e, 0x4b, 0xc5, 0x21, 0xe4, 0x3b, 0xb9, 0x83, 0x69, 0x5d, 0xc8, 0x96,
+ 0xf8, 0x25, 0x23, 0x30, 0x24, 0xdd, 0xf0, 0xe9, 0xc5, 0x1d, 0x53, 0x1d, 0x43, 0x7a, 0xd4, 0xbd,
+ 0xd5, 0x21, 0x23, 0x90, 0x12, 0xee, 0x50, 0x27, 0x7f, 0x30, 0xad, 0x4b, 0x85, 0x25, 0x3f, 0x7c,
+ 0xbb, 0x81, 0x1d, 0x0c, 0xc4, 0xe1, 0xa6, 0xe5, 0x76, 0x5c, 0xb6, 0xc4, 0x2f, 0x71, 0x40, 0xd1,
+ 0xf3, 0x8d, 0x70, 0xbd, 0x0c, 0xd9, 0x40, 0x04, 0x17, 0xe2, 0xaa, 0xb3, 0x5e, 0x4c, 0xc4, 0x88,
+ 0x2a, 0x43, 0x2b, 0x1c, 0x90, 0x1f, 0x10, 0x14, 0xee, 0xda, 0x4e, 0x44, 0xd1, 0x55, 0xc8, 0x3c,
+ 0xe4, 0x75, 0xa0, 0x38, 0x2a, 0x05, 0xde, 0x2c, 0x7a, 0x74, 0x68, 0xef, 0x5f, 0xf5, 0x7c, 0x11,
+ 0x72, 0xc9, 0x8a, 0xe4, 0xb8, 0xa1, 0xa6, 0x0f, 0x6d, 0xa8, 0x99, 0xa5, 0x5b, 0xc8, 0xf5, 0x74,
+ 0x2e, 0x59, 0x49, 0x91, 0xef, 0x10, 0x14, 0x65, 0x64, 0x8a, 0x8c, 0x97, 0xc1, 0x90, 0x95, 0xa5,
+ 0x4e, 0xfa, 0xc8, 0x82, 0x04, 0xad, 0x18, 0xd5, 0x12, 0xfc, 0x39, 0x94, 0x7b, 0xbe, 0x37, 0x1e,
+ 0xd3, 0xde, 0x1d, 0x55, 0xd5, 0xc9, 0xc5, 0xaa, 0xde, 0xd2, 0xe7, 0xad, 0x05, 0x73, 0xf2, 0x04,
+ 0x41, 0x49, 0xf5, 0x0c, 0x05, 0x55, 0x94, 0x22, 0x7a, 0xeb, 0x2e, 0x99, 0x5c, 0xb6, 0x4b, 0xae,
+ 0x81, 0xd1, 0xf7, 0xbd, 0xc9, 0x38, 0xa8, 0xa6, 0x64, 0x41, 0x4a, 0x89, 0x5c, 0x87, 0x72, 0x18,
+ 0xdc, 0x11, 0xad, 0xb0, 0xb6, 0xd8, 0x0a, 0xb7, 0x7b, 0xd4, 0x65, 0xce, 0xae, 0x43, 0xfd, 0x4e,
+ 0x9a, 0x6f, 0x12, 0xb5, 0xc2, 0xef, 0x11, 0x54, 0x16, 0x4d, 0xf0, 0x67, 0x1a, 0x11, 0xb9, 0xbb,
+ 0x33, 0x47, 0xbb, 0x6b, 0x89, 0x1e, 0x12, 0x88, 0x42, 0x0d, 0x49, 0x5a, 0xbb, 0x08, 0x05, 0x4d,
+ 0x8d, 0x2b, 0x90, 0xda, 0xa3, 0x21, 0xc9, 0xf8, 0x90, 0xd3, 0x28, 0x2e, 0x99, 0xbc, 0xaa, 0x93,
+ 0x4b, 0xc9, 0x0b, 0x88, 0x53, 0xb4, 0x34, 0x77, 0x36, 0xf8, 0x02, 0xa4, 0x77, 0x7d, 0x6f, 0xb4,
+ 0x14, 0xf0, 0x62, 0x05, 0xfe, 0x00, 0x92, 0xcc, 0x5b, 0x0a, 0xf6, 0x24, 0xf3, 0x38, 0xea, 0x2a,
+ 0xf9, 0x94, 0x08, 0x4e, 0x49, 0xe4, 0x67, 0x04, 0xc7, 0xf8, 0x1a, 0x89, 0xc0, 0x95, 0xc1, 0xc4,
+ 0xdd, 0xc3, 0x4d, 0xa8, 0xf0, 0x9d, 0x1e, 0x38, 0x6e, 0x9f, 0x06, 0x8c, 0xfa, 0x0f, 0x9c, 0x9e,
+ 0x4a, 0xb3, 0xcc, 0xf5, 0xdb, 0x4a, 0xbd, 0xdd, 0xc3, 0xeb, 0x90, 0x9d, 0x04, 0xd2, 0x40, 0xe6,
+ 0x6c, 0x70, 0x71, 0xbb, 0x87, 0xdf, 0xd1, 0xb6, 0xe3, 0x58, 0x6b, 0xaf, 0x02, 0x81, 0xe1, 0x2d,
+ 0xdb, 0xf1, 0xa3, 0xea, 0x3f, 0x0b, 0x46, 0x97, 0x6f, 0x2c, 0xef, 0xcd, 0x42, 0xfb, 0x58, 0x6c,
+ 0x2c, 0x02, 0xb2, 0xd4, 0x34, 0xf9, 0x10, 0xf2, 0xd1, 0xea, 0x43, 0x6f, 0xa2, 0x43, 0x4f, 0x80,
+ 0x9c, 0x80, 0x8c, 0x4c, 0x0c, 0x43, 0xba, 0x67, 0x33, 0x5b, 0x2c, 0x29, 0x5a, 0x62, 0x4c, 0xaa,
+ 0xb0, 0x76, 0xd7, 0xb7, 0xdd, 0x60, 0x97, 0xfa, 0xc2, 0x28, 0xa2, 0x1f, 0x39, 0x0e, 0x2b, 0xbc,
+ 0x78, 0xa9, 0x1f, 0x5c, 0xf1, 0x26, 0x2e, 0x53, 0x35, 0x43, 0xce, 0xc1, 0xea, 0xbc, 0x5a, 0xb1,
+ 0x75, 0x15, 0x32, 0x5d, 0xae, 0x10, 0xde, 0x4b, 0x96, 0x14, 0xc8, 0x8f, 0x08, 0xf0, 0x97, 0x94,
+ 0x09, 0xd7, 0xdb, 0x5b, 0x81, 0xf6, 0x74, 0x19, 0xd9, 0xac, 0x3b, 0xa0, 0x7e, 0x10, 0x3e, 0x5d,
+ 0x42, 0xf9, 0xbf, 0x78, 0xba, 0x90, 0xf3, 0xb0, 0x32, 0x17, 0xa5, 0xca, 0xa9, 0x06, 0xb9, 0xae,
+ 0xd2, 0xa9, 0xeb, 0x33, 0x92, 0xff, 0x7f, 0x06, 0xf2, 0xd1, 0x03, 0x0f, 0x17, 0x20, 0x7b, 0xf5,
+ 0xa6, 0x75, 0x6f, 0xd3, 0xda, 0xaa, 0x24, 0x70, 0x11, 0x72, 0x9d, 0xcd, 0x2b, 0x5f, 0x09, 0x09,
+ 0xb5, 0x37, 0xc1, 0xe0, 0x4f, 0x5d, 0xea, 0xe3, 0x8f, 0x21, 0xcd, 0x47, 0xf8, 0x78, 0x7c, 0xbe,
+ 0xda, 0xeb, 0xba, 0xb6, 0xb6, 0xa8, 0x56, 0xe7, 0x90, 0x68, 0xff, 0x95, 0x82, 0x2c, 0x7f, 0xda,
+ 0xf0, 0x2a, 0xfe, 0x04, 0x32, 0xe2, 0x95, 0x83, 0x35, 0x73, 0xfd, 0x55, 0x58, 0x5b, 0x7f, 0x4d,
+ 0x1f, 0xfa, 0x79, 0x0f, 0xe1, 0xaf, 0xa1, 0x20, 0x94, 0xea, 0x2a, 0x3c, 0xb9, 0x78, 0xcd, 0xcc,
+ 0x79, 0x3a, 0x75, 0xc4, 0xac, 0xe6, 0xef, 0x12, 0x64, 0x04, 0x23, 0xf5, 0x68, 0xf4, 0xf7, 0x92,
+ 0x1e, 0xcd, 0xdc, 0xbb, 0x84, 0x24, 0xf0, 0x45, 0x48, 0x73, 0x22, 0xe9, 0x70, 0x68, 0xd7, 0x98,
+ 0x0e, 0x87, 0x7e, 0x87, 0x88, 0x6d, 0x3f, 0x8d, 0x6e, 0xd7, 0xf5, 0xc5, 0x26, 0x16, 0x2e, 0xaf,
+ 0xbe, 0x3e, 0x11, 0xed, 0x7c, 0x53, 0x5e, 0x4b, 0x21, 0x85, 0xf1, 0xa9, 0xf9, 0xad, 0x16, 0x18,
+ 0x5f, 0x33, 0x8f, 0x9a, 0x8e, 0x1c, 0xde, 0x80, 0x82, 0x46, 0x1f, 0x1d, 0xd6, 0xd7, 0xb9, 0xaf,
+ 0xc3, 0x7a, 0x08, 0xe7, 0x48, 0xa2, 0xfd, 0x0d, 0xe4, 0xc2, 0x1e, 0x83, 0x6f, 0x43, 0x79, 0xbe,
+ 0x3c, 0xf1, 0xff, 0xb4, 0x68, 0xe6, 0x1b, 0x57, 0xad, 0xa1, 0x4d, 0x1d, 0x5e, 0xd3, 0x89, 0x26,
+ 0xea, 0xdc, 0x7f, 0xf6, 0xc2, 0x4c, 0x3c, 0x7f, 0x61, 0x26, 0x5e, 0xbd, 0x30, 0xd1, 0xb7, 0x33,
+ 0x13, 0xfd, 0x34, 0x33, 0xd1, 0xd3, 0x99, 0x89, 0x9e, 0xcd, 0x4c, 0xf4, 0xc7, 0xcc, 0x44, 0x7f,
+ 0xce, 0xcc, 0xc4, 0xab, 0x99, 0x89, 0x1e, 0xbf, 0x34, 0x13, 0xcf, 0x5e, 0x9a, 0x89, 0xe7, 0x2f,
+ 0xcd, 0xc4, 0xfd, 0xd3, 0xfa, 0x3f, 0x47, 0xdf, 0xde, 0xb5, 0x5d, 0x7b, 0x63, 0xe8, 0xed, 0x39,
+ 0x1b, 0xfa, 0x3f, 0xd3, 0x1d, 0x43, 0x7c, 0xde, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x21,
+ 0xc8, 0x3d, 0xb0, 0x0e, 0x00, 0x00,
+}
+
+func (x Direction) String() string {
+ s, ok := Direction_name[int32(x)]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(x))
+}
+func (this *PushRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*PushRequest)
+ if !ok {
+ that2, ok := that.(PushRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.Streams) != len(that1.Streams) {
+ return false
+ }
+ for i := range this.Streams {
+ if !this.Streams[i].Equal(that1.Streams[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *PushResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*PushResponse)
+ if !ok {
+ that2, ok := that.(PushResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ return true
+}
+func (this *QueryRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*QueryRequest)
+ if !ok {
+ that2, ok := that.(QueryRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Selector != that1.Selector {
+ return false
+ }
+ if this.Limit != that1.Limit {
+ return false
+ }
+ if !this.Start.Equal(that1.Start) {
+ return false
+ }
+ if !this.End.Equal(that1.End) {
+ return false
+ }
+ if this.Direction != that1.Direction {
+ return false
+ }
+ if len(this.Shards) != len(that1.Shards) {
+ return false
+ }
+ for i := range this.Shards {
+ if this.Shards[i] != that1.Shards[i] {
+ return false
+ }
+ }
+ return true
+}
+func (this *SampleQueryRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*SampleQueryRequest)
+ if !ok {
+ that2, ok := that.(SampleQueryRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Selector != that1.Selector {
+ return false
+ }
+ if !this.Start.Equal(that1.Start) {
+ return false
+ }
+ if !this.End.Equal(that1.End) {
+ return false
+ }
+ if len(this.Shards) != len(that1.Shards) {
+ return false
+ }
+ for i := range this.Shards {
+ if this.Shards[i] != that1.Shards[i] {
+ return false
+ }
+ }
+ return true
+}
+func (this *SampleQueryResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*SampleQueryResponse)
+ if !ok {
+ that2, ok := that.(SampleQueryResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.Series) != len(that1.Series) {
+ return false
+ }
+ for i := range this.Series {
+ if !this.Series[i].Equal(that1.Series[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *QueryResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*QueryResponse)
+ if !ok {
+ that2, ok := that.(QueryResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.Streams) != len(that1.Streams) {
+ return false
+ }
+ for i := range this.Streams {
+ if !this.Streams[i].Equal(that1.Streams[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *LabelRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*LabelRequest)
+ if !ok {
+ that2, ok := that.(LabelRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Name != that1.Name {
+ return false
+ }
+ if this.Values != that1.Values {
+ return false
+ }
+ if that1.Start == nil {
+ if this.Start != nil {
+ return false
+ }
+ } else if !this.Start.Equal(*that1.Start) {
+ return false
+ }
+ if that1.End == nil {
+ if this.End != nil {
+ return false
+ }
+ } else if !this.End.Equal(*that1.End) {
+ return false
+ }
+ return true
+}
+func (this *LabelResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*LabelResponse)
+ if !ok {
+ that2, ok := that.(LabelResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.Values) != len(that1.Values) {
+ return false
+ }
+ for i := range this.Values {
+ if this.Values[i] != that1.Values[i] {
+ return false
+ }
+ }
+ return true
+}
+func (this *StreamAdapter) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*StreamAdapter)
+ if !ok {
+ that2, ok := that.(StreamAdapter)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Labels != that1.Labels {
+ return false
+ }
+ if len(this.Entries) != len(that1.Entries) {
+ return false
+ }
+ for i := range this.Entries {
+ if !this.Entries[i].Equal(&that1.Entries[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *EntryAdapter) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*EntryAdapter)
+ if !ok {
+ that2, ok := that.(EntryAdapter)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if !this.Timestamp.Equal(that1.Timestamp) {
+ return false
+ }
+ if this.Line != that1.Line {
+ return false
+ }
+ return true
+}
+func (this *Sample) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*Sample)
+ if !ok {
+ that2, ok := that.(Sample)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Timestamp != that1.Timestamp {
+ return false
+ }
+ if this.Value != that1.Value {
+ return false
+ }
+ if this.Hash != that1.Hash {
+ return false
+ }
+ return true
+}
+func (this *Series) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*Series)
+ if !ok {
+ that2, ok := that.(Series)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Labels != that1.Labels {
+ return false
+ }
+ if len(this.Samples) != len(that1.Samples) {
+ return false
+ }
+ for i := range this.Samples {
+ if !this.Samples[i].Equal(&that1.Samples[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *TailRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*TailRequest)
+ if !ok {
+ that2, ok := that.(TailRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Query != that1.Query {
+ return false
+ }
+ if this.DelayFor != that1.DelayFor {
+ return false
+ }
+ if this.Limit != that1.Limit {
+ return false
+ }
+ if !this.Start.Equal(that1.Start) {
+ return false
+ }
+ return true
+}
+func (this *TailResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*TailResponse)
+ if !ok {
+ that2, ok := that.(TailResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if that1.Stream == nil {
+ if this.Stream != nil {
+ return false
+ }
+ } else if !this.Stream.Equal(*that1.Stream) {
+ return false
+ }
+ if len(this.DroppedStreams) != len(that1.DroppedStreams) {
+ return false
+ }
+ for i := range this.DroppedStreams {
+ if !this.DroppedStreams[i].Equal(that1.DroppedStreams[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *SeriesRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*SeriesRequest)
+ if !ok {
+ that2, ok := that.(SeriesRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if !this.Start.Equal(that1.Start) {
+ return false
+ }
+ if !this.End.Equal(that1.End) {
+ return false
+ }
+ if len(this.Groups) != len(that1.Groups) {
+ return false
+ }
+ for i := range this.Groups {
+ if this.Groups[i] != that1.Groups[i] {
+ return false
+ }
+ }
+ return true
+}
+func (this *SeriesResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*SeriesResponse)
+ if !ok {
+ that2, ok := that.(SeriesResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.Series) != len(that1.Series) {
+ return false
+ }
+ for i := range this.Series {
+ if !this.Series[i].Equal(&that1.Series[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *SeriesIdentifier) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*SeriesIdentifier)
+ if !ok {
+ that2, ok := that.(SeriesIdentifier)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.Labels) != len(that1.Labels) {
+ return false
+ }
+ for i := range this.Labels {
+ if this.Labels[i] != that1.Labels[i] {
+ return false
+ }
+ }
+ return true
+}
+func (this *DroppedStream) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*DroppedStream)
+ if !ok {
+ that2, ok := that.(DroppedStream)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if !this.From.Equal(that1.From) {
+ return false
+ }
+ if !this.To.Equal(that1.To) {
+ return false
+ }
+ if this.Labels != that1.Labels {
+ return false
+ }
+ return true
+}
+func (this *TimeSeriesChunk) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*TimeSeriesChunk)
+ if !ok {
+ that2, ok := that.(TimeSeriesChunk)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.FromIngesterId != that1.FromIngesterId {
+ return false
+ }
+ if this.UserId != that1.UserId {
+ return false
+ }
+ if len(this.Labels) != len(that1.Labels) {
+ return false
+ }
+ for i := range this.Labels {
+ if !this.Labels[i].Equal(that1.Labels[i]) {
+ return false
+ }
+ }
+ if len(this.Chunks) != len(that1.Chunks) {
+ return false
+ }
+ for i := range this.Chunks {
+ if !this.Chunks[i].Equal(that1.Chunks[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *LabelPair) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*LabelPair)
+ if !ok {
+ that2, ok := that.(LabelPair)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Name != that1.Name {
+ return false
+ }
+ if this.Value != that1.Value {
+ return false
+ }
+ return true
+}
+func (this *Chunk) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*Chunk)
+ if !ok {
+ that2, ok := that.(Chunk)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if !bytes.Equal(this.Data, that1.Data) {
+ return false
+ }
+ return true
+}
+func (this *TransferChunksResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*TransferChunksResponse)
+ if !ok {
+ that2, ok := that.(TransferChunksResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ return true
+}
+func (this *TailersCountRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*TailersCountRequest)
+ if !ok {
+ that2, ok := that.(TailersCountRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ return true
+}
+func (this *TailersCountResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*TailersCountResponse)
+ if !ok {
+ that2, ok := that.(TailersCountResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Count != that1.Count {
+ return false
+ }
+ return true
+}
+func (this *GetChunkIDsRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*GetChunkIDsRequest)
+ if !ok {
+ that2, ok := that.(GetChunkIDsRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Matchers != that1.Matchers {
+ return false
+ }
+ if !this.Start.Equal(that1.Start) {
+ return false
+ }
+ if !this.End.Equal(that1.End) {
+ return false
+ }
+ return true
+}
+func (this *GetChunkIDsResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*GetChunkIDsResponse)
+ if !ok {
+ that2, ok := that.(GetChunkIDsResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.ChunkIDs) != len(that1.ChunkIDs) {
+ return false
+ }
+ for i := range this.ChunkIDs {
+ if this.ChunkIDs[i] != that1.ChunkIDs[i] {
+ return false
+ }
+ }
+ return true
+}
+func (this *PushRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.PushRequest{")
+ s = append(s, "Streams: "+fmt.Sprintf("%#v", this.Streams)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *PushResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&logproto.PushResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *QueryRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&logproto.QueryRequest{")
+ s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n")
+ s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "Direction: "+fmt.Sprintf("%#v", this.Direction)+",\n")
+ s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SampleQueryRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&logproto.SampleQueryRequest{")
+ s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SampleQueryResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.SampleQueryResponse{")
+ s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *QueryResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.QueryResponse{")
+ s = append(s, "Streams: "+fmt.Sprintf("%#v", this.Streams)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *LabelRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&logproto.LabelRequest{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *LabelResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.LabelResponse{")
+ s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *StreamAdapter) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&logproto.StreamAdapter{")
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+ if this.Entries != nil {
+ vs := make([]*EntryAdapter, len(this.Entries))
+ for i := range vs {
+ vs[i] = &this.Entries[i]
+ }
+ s = append(s, "Entries: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EntryAdapter) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&logproto.EntryAdapter{")
+ s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
+ s = append(s, "Line: "+fmt.Sprintf("%#v", this.Line)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Sample) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&logproto.Sample{")
+ s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "Hash: "+fmt.Sprintf("%#v", this.Hash)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Series) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&logproto.Series{")
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+ if this.Samples != nil {
+ vs := make([]*Sample, len(this.Samples))
+ for i := range vs {
+ vs[i] = &this.Samples[i]
+ }
+ s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TailRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&logproto.TailRequest{")
+ s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n")
+ s = append(s, "DelayFor: "+fmt.Sprintf("%#v", this.DelayFor)+",\n")
+ s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TailResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&logproto.TailResponse{")
+ s = append(s, "Stream: "+fmt.Sprintf("%#v", this.Stream)+",\n")
+ if this.DroppedStreams != nil {
+ s = append(s, "DroppedStreams: "+fmt.Sprintf("%#v", this.DroppedStreams)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SeriesRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&logproto.SeriesRequest{")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "Groups: "+fmt.Sprintf("%#v", this.Groups)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SeriesResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.SeriesResponse{")
+ if this.Series != nil {
+ vs := make([]*SeriesIdentifier, len(this.Series))
+ for i := range vs {
+ vs[i] = &this.Series[i]
+ }
+ s = append(s, "Series: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SeriesIdentifier) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.SeriesIdentifier{")
+ keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ if this.Labels != nil {
+ s = append(s, "Labels: "+mapStringForLabels+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *DroppedStream) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&logproto.DroppedStream{")
+ s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n")
+ s = append(s, "To: "+fmt.Sprintf("%#v", this.To)+",\n")
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TimeSeriesChunk) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&logproto.TimeSeriesChunk{")
+ s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n")
+ s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n")
+ if this.Labels != nil {
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+ }
+ if this.Chunks != nil {
+ s = append(s, "Chunks: "+fmt.Sprintf("%#v", this.Chunks)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *LabelPair) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&logproto.LabelPair{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Chunk) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.Chunk{")
+ s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TransferChunksResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&logproto.TransferChunksResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TailersCountRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&logproto.TailersCountRequest{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TailersCountResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.TailersCountResponse{")
+ s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetChunkIDsRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&logproto.GetChunkIDsRequest{")
+ s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetChunkIDsResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.GetChunkIDsResponse{")
+ s = append(s, "ChunkIDs: "+fmt.Sprintf("%#v", this.ChunkIDs)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringLogproto(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// PusherClient is the client API for Pusher service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type PusherClient interface {
+ Push(ctx context.Context, in *PushRequest, opts ...grpc.CallOption) (*PushResponse, error)
+}
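+
+// A minimal client-side usage sketch (illustrative only; assumes a pre-dialed
+// *grpc.ClientConn named conn and a context.Context named ctx):
+//
+//	client := NewPusherClient(conn)
+//	resp, err := client.Push(ctx, &PushRequest{ /* streams elided */ })
+//	if err != nil {
+//		// handle the RPC error
+//	}
+//	_ = resp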
+
+type pusherClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewPusherClient(cc *grpc.ClientConn) PusherClient {
+ return &pusherClient{cc}
+}
+
+func (c *pusherClient) Push(ctx context.Context, in *PushRequest, opts ...grpc.CallOption) (*PushResponse, error) {
+ out := new(PushResponse)
+ err := c.cc.Invoke(ctx, "/logproto.Pusher/Push", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// PusherServer is the server API for Pusher service.
+type PusherServer interface {
+ Push(context.Context, *PushRequest) (*PushResponse, error)
+}
+
+func RegisterPusherServer(s *grpc.Server, srv PusherServer) {
+ s.RegisterService(&_Pusher_serviceDesc, srv)
+}
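+
+// A minimal server-side sketch (illustrative only; pusherImpl is a
+// hypothetical implementation of PusherServer, shown together with its
+// registration for brevity):
+//
+//	type pusherImpl struct{}
+//
+//	func (pusherImpl) Push(ctx context.Context, req *PushRequest) (*PushResponse, error) {
+//		return &PushResponse{}, nil
+//	}
+//
+//	grpcServer := grpc.NewServer()
+//	RegisterPusherServer(grpcServer, pusherImpl{})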
+
+func _Pusher_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PushRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PusherServer).Push(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/logproto.Pusher/Push",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PusherServer).Push(ctx, req.(*PushRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Pusher_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "logproto.Pusher",
+ HandlerType: (*PusherServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Push",
+ Handler: _Pusher_Push_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "pkg/logproto/logproto.proto",
+}
+
+// QuerierClient is the client API for Querier service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QuerierClient interface {
+ Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Querier_QueryClient, error)
+ QuerySample(ctx context.Context, in *SampleQueryRequest, opts ...grpc.CallOption) (Querier_QuerySampleClient, error)
+ Label(ctx context.Context, in *LabelRequest, opts ...grpc.CallOption) (*LabelResponse, error)
+ Tail(ctx context.Context, in *TailRequest, opts ...grpc.CallOption) (Querier_TailClient, error)
+ Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (*SeriesResponse, error)
+ TailersCount(ctx context.Context, in *TailersCountRequest, opts ...grpc.CallOption) (*TailersCountResponse, error)
+ GetChunkIDs(ctx context.Context, in *GetChunkIDsRequest, opts ...grpc.CallOption) (*GetChunkIDsResponse, error)
+}
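+
+// A minimal client-side sketch for the server-streaming Query RPC
+// (illustrative only; conn, ctx, start and end are assumed to be defined, and
+// the Direction value is one of the enum constants declared earlier in this
+// file, for example BACKWARD):
+//
+//	client := NewQuerierClient(conn)
+//	stream, err := client.Query(ctx, &QueryRequest{
+//		Selector:  `{job="example"}`,
+//		Limit:     100,
+//		Start:     start,
+//		End:       end,
+//		Direction: BACKWARD,
+//	})
+//	if err != nil {
+//		// handle the RPC error
+//	}
+//	for {
+//		resp, err := stream.Recv()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			// handle the stream error
+//		}
+//		_ = resp.Streams
+//	}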
+
+type querierClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewQuerierClient(cc *grpc.ClientConn) QuerierClient {
+ return &querierClient{cc}
+}
+
+func (c *querierClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Querier_QueryClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[0], "/logproto.Querier/Query", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &querierQueryClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Querier_QueryClient interface {
+ Recv() (*QueryResponse, error)
+ grpc.ClientStream
+}
+
+type querierQueryClient struct {
+ grpc.ClientStream
+}
+
+func (x *querierQueryClient) Recv() (*QueryResponse, error) {
+ m := new(QueryResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *querierClient) QuerySample(ctx context.Context, in *SampleQueryRequest, opts ...grpc.CallOption) (Querier_QuerySampleClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[1], "/logproto.Querier/QuerySample", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &querierQuerySampleClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Querier_QuerySampleClient interface {
+ Recv() (*SampleQueryResponse, error)
+ grpc.ClientStream
+}
+
+type querierQuerySampleClient struct {
+ grpc.ClientStream
+}
+
+func (x *querierQuerySampleClient) Recv() (*SampleQueryResponse, error) {
+ m := new(SampleQueryResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *querierClient) Label(ctx context.Context, in *LabelRequest, opts ...grpc.CallOption) (*LabelResponse, error) {
+ out := new(LabelResponse)
+ err := c.cc.Invoke(ctx, "/logproto.Querier/Label", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *querierClient) Tail(ctx context.Context, in *TailRequest, opts ...grpc.CallOption) (Querier_TailClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[2], "/logproto.Querier/Tail", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &querierTailClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Querier_TailClient interface {
+ Recv() (*TailResponse, error)
+ grpc.ClientStream
+}
+
+type querierTailClient struct {
+ grpc.ClientStream
+}
+
+func (x *querierTailClient) Recv() (*TailResponse, error) {
+ m := new(TailResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *querierClient) Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (*SeriesResponse, error) {
+ out := new(SeriesResponse)
+ err := c.cc.Invoke(ctx, "/logproto.Querier/Series", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *querierClient) TailersCount(ctx context.Context, in *TailersCountRequest, opts ...grpc.CallOption) (*TailersCountResponse, error) {
+ out := new(TailersCountResponse)
+ err := c.cc.Invoke(ctx, "/logproto.Querier/TailersCount", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *querierClient) GetChunkIDs(ctx context.Context, in *GetChunkIDsRequest, opts ...grpc.CallOption) (*GetChunkIDsResponse, error) {
+ out := new(GetChunkIDsResponse)
+ err := c.cc.Invoke(ctx, "/logproto.Querier/GetChunkIDs", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// QuerierServer is the server API for Querier service.
+type QuerierServer interface {
+ Query(*QueryRequest, Querier_QueryServer) error
+ QuerySample(*SampleQueryRequest, Querier_QuerySampleServer) error
+ Label(context.Context, *LabelRequest) (*LabelResponse, error)
+ Tail(*TailRequest, Querier_TailServer) error
+ Series(context.Context, *SeriesRequest) (*SeriesResponse, error)
+ TailersCount(context.Context, *TailersCountRequest) (*TailersCountResponse, error)
+ GetChunkIDs(context.Context, *GetChunkIDsRequest) (*GetChunkIDsResponse, error)
+}
+
+func RegisterQuerierServer(s *grpc.Server, srv QuerierServer) {
+ s.RegisterService(&_Querier_serviceDesc, srv)
+}
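+
+// A minimal sketch of a server-side streaming handler (illustrative only;
+// querierImpl and its batchesFor helper are hypothetical, with batchesFor
+// returning a []*QueryResponse):
+//
+//	func (q *querierImpl) Query(req *QueryRequest, srv Querier_QueryServer) error {
+//		for _, batch := range q.batchesFor(req) {
+//			if err := srv.Send(batch); err != nil {
+//				return err
+//			}
+//		}
+//		return nil
+//	}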
+
+func _Querier_Query_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(QueryRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(QuerierServer).Query(m, &querierQueryServer{stream})
+}
+
+type Querier_QueryServer interface {
+ Send(*QueryResponse) error
+ grpc.ServerStream
+}
+
+type querierQueryServer struct {
+ grpc.ServerStream
+}
+
+func (x *querierQueryServer) Send(m *QueryResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Querier_QuerySample_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SampleQueryRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(QuerierServer).QuerySample(m, &querierQuerySampleServer{stream})
+}
+
+type Querier_QuerySampleServer interface {
+ Send(*SampleQueryResponse) error
+ grpc.ServerStream
+}
+
+type querierQuerySampleServer struct {
+ grpc.ServerStream
+}
+
+func (x *querierQuerySampleServer) Send(m *SampleQueryResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Querier_Label_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LabelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QuerierServer).Label(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/logproto.Querier/Label",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QuerierServer).Label(ctx, req.(*LabelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Querier_Tail_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(TailRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(QuerierServer).Tail(m, &querierTailServer{stream})
+}
+
+type Querier_TailServer interface {
+ Send(*TailResponse) error
+ grpc.ServerStream
+}
+
+type querierTailServer struct {
+ grpc.ServerStream
+}
+
+func (x *querierTailServer) Send(m *TailResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Querier_Series_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QuerierServer).Series(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/logproto.Querier/Series",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QuerierServer).Series(ctx, req.(*SeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Querier_TailersCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TailersCountRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QuerierServer).TailersCount(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/logproto.Querier/TailersCount",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QuerierServer).TailersCount(ctx, req.(*TailersCountRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Querier_GetChunkIDs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetChunkIDsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QuerierServer).GetChunkIDs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/logproto.Querier/GetChunkIDs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QuerierServer).GetChunkIDs(ctx, req.(*GetChunkIDsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Querier_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "logproto.Querier",
+ HandlerType: (*QuerierServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Label",
+ Handler: _Querier_Label_Handler,
+ },
+ {
+ MethodName: "Series",
+ Handler: _Querier_Series_Handler,
+ },
+ {
+ MethodName: "TailersCount",
+ Handler: _Querier_TailersCount_Handler,
+ },
+ {
+ MethodName: "GetChunkIDs",
+ Handler: _Querier_GetChunkIDs_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Query",
+ Handler: _Querier_Query_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "QuerySample",
+ Handler: _Querier_QuerySample_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "Tail",
+ Handler: _Querier_Tail_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "pkg/logproto/logproto.proto",
+}
+
+// IngesterClient is the client API for Ingester service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type IngesterClient interface {
+ TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error)
+}
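+
+// A minimal client-side sketch for the client-streaming TransferChunks RPC
+// (illustrative only; conn, ctx and a []*TimeSeriesChunk named chunks are
+// assumed to be defined):
+//
+//	client := NewIngesterClient(conn)
+//	stream, err := client.TransferChunks(ctx)
+//	if err != nil {
+//		// handle the RPC error
+//	}
+//	for _, ts := range chunks {
+//		if err := stream.Send(ts); err != nil {
+//			// handle the send error
+//		}
+//	}
+//	if _, err := stream.CloseAndRecv(); err != nil {
+//		// handle the close error
+//	}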
+
+type ingesterClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewIngesterClient(cc *grpc.ClientConn) IngesterClient {
+ return &ingesterClient{cc}
+}
+
+func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/logproto.Ingester/TransferChunks", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &ingesterTransferChunksClient{stream}
+ return x, nil
+}
+
+type Ingester_TransferChunksClient interface {
+ Send(*TimeSeriesChunk) error
+ CloseAndRecv() (*TransferChunksResponse, error)
+ grpc.ClientStream
+}
+
+type ingesterTransferChunksClient struct {
+ grpc.ClientStream
+}
+
+func (x *ingesterTransferChunksClient) Send(m *TimeSeriesChunk) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, error) {
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ m := new(TransferChunksResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// IngesterServer is the server API for Ingester service.
+type IngesterServer interface {
+ TransferChunks(Ingester_TransferChunksServer) error
+}
+
+func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) {
+ s.RegisterService(&_Ingester_serviceDesc, srv)
+}
+
+func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream})
+}
+
+type Ingester_TransferChunksServer interface {
+ SendAndClose(*TransferChunksResponse) error
+ Recv() (*TimeSeriesChunk, error)
+ grpc.ServerStream
+}
+
+type ingesterTransferChunksServer struct {
+ grpc.ServerStream
+}
+
+func (x *ingesterTransferChunksServer) SendAndClose(m *TransferChunksResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) {
+ m := new(TimeSeriesChunk)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _Ingester_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "logproto.Ingester",
+ HandlerType: (*IngesterServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "TransferChunks",
+ Handler: _Ingester_TransferChunks_Handler,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "pkg/logproto/logproto.proto",
+}
+
+func (m *PushRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PushRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Streams) > 0 {
+ for _, msg := range m.Streams {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PushResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PushResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *QueryRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Selector) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Selector)))
+ i += copy(dAtA[i:], m.Selector)
+ }
+ if m.Limit != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Limit))
+ }
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)))
+ n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End)))
+ n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ if m.Direction != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Direction))
+ }
+ if len(m.Shards) > 0 {
+ for _, s := range m.Shards {
+ dAtA[i] = 0x3a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *SampleQueryRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SampleQueryRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Selector) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Selector)))
+ i += copy(dAtA[i:], m.Selector)
+ }
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)))
+ n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End)))
+ n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ if len(m.Shards) > 0 {
+ for _, s := range m.Shards {
+ dAtA[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *SampleQueryResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SampleQueryResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Series) > 0 {
+ for _, msg := range m.Series {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *QueryResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Streams) > 0 {
+ for _, msg := range m.Streams {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LabelRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LabelRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if m.Values {
+ dAtA[i] = 0x10
+ i++
+ if m.Values {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.Start != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start)))
+ n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ if m.End != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.End)))
+ n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ return i, nil
+}
+
+func (m *LabelResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LabelResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ dAtA[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *StreamAdapter) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StreamAdapter) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels)))
+ i += copy(dAtA[i:], m.Labels)
+ }
+ if len(m.Entries) > 0 {
+ for _, msg := range m.Entries {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *EntryAdapter) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EntryAdapter) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
+ n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ if len(m.Line) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Line)))
+ i += copy(dAtA[i:], m.Line)
+ }
+ return i, nil
+}
+
+func (m *Sample) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Timestamp))
+ }
+ if m.Value != 0 {
+ dAtA[i] = 0x11
+ i++
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i += 8
+ }
+ if m.Hash != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Hash))
+ }
+ return i, nil
+}
+
+func (m *Series) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Series) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels)))
+ i += copy(dAtA[i:], m.Labels)
+ }
+ if len(m.Samples) > 0 {
+ for _, msg := range m.Samples {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *TailRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TailRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Query) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Query)))
+ i += copy(dAtA[i:], m.Query)
+ }
+ if m.DelayFor != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(m.DelayFor))
+ }
+ if m.Limit != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Limit))
+ }
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)))
+ n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ return i, nil
+}
+
+func (m *TailResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TailResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Stream != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Stream.Size()))
+ n9, err := m.Stream.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ if len(m.DroppedStreams) > 0 {
+ for _, msg := range m.DroppedStreams {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *SeriesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeriesRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)))
+ n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End)))
+ n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *SeriesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeriesResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Series) > 0 {
+ for _, msg := range m.Series {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *SeriesIdentifier) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
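+// Protobuf map fields are encoded as repeated nested messages: each Labels entry below is
+// written as a length-prefixed submessage holding the key (field 1) and the value (field 2).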
+func (m *SeriesIdentifier) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+		for k := range m.Labels {
+ dAtA[i] = 0xa
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovLogproto(uint64(len(k))) + 1 + len(v) + sovLogproto(uint64(len(v)))
+ i = encodeVarintLogproto(dAtA, i, uint64(mapSize))
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(k)))
+ i += copy(dAtA[i:], k)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(v)))
+ i += copy(dAtA[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *DroppedStream) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DroppedStream) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.From)))
+ n12, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.To)))
+ n13, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ if len(m.Labels) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels)))
+ i += copy(dAtA[i:], m.Labels)
+ }
+ return i, nil
+}
+
+func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.FromIngesterId) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.FromIngesterId)))
+ i += copy(dAtA[i:], m.FromIngesterId)
+ }
+ if len(m.UserId) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.UserId)))
+ i += copy(dAtA[i:], m.UserId)
+ }
+ if len(m.Labels) > 0 {
+ for _, msg := range m.Labels {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Chunks) > 0 {
+ for _, msg := range m.Chunks {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LabelPair) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ return i, nil
+}
+
+func (m *Chunk) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Chunk) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Data) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Data)))
+ i += copy(dAtA[i:], m.Data)
+ }
+ return i, nil
+}
+
+func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *TailersCountRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TailersCountRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *TailersCountResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TailersCountResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Count != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Count))
+ }
+ return i, nil
+}
+
+func (m *GetChunkIDsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetChunkIDsRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Matchers) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Matchers)))
+ i += copy(dAtA[i:], m.Matchers)
+ }
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)))
+ n14, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End)))
+ n15, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ return i, nil
+}
+
+func (m *GetChunkIDsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetChunkIDsResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ChunkIDs) > 0 {
+ for _, s := range m.ChunkIDs {
+ dAtA[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
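+// encodeVarintLogproto writes v into dAtA starting at offset using protobuf base-128 varint
+// encoding (7 payload bits per byte, high bit set on every byte except the last) and returns
+// the offset just past the written bytes.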
+func encodeVarintLogproto(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
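+
+// The Size methods below compute the exact encoded length of a message so Marshal can
+// allocate its buffer in one shot; a length-delimited field with payload length l costs
+// 1 tag byte + sovLogproto(l) length bytes + l payload bytes.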
+func (m *PushRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Streams) > 0 {
+ for _, e := range m.Streams {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PushResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *QueryRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Selector)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if m.Limit != 0 {
+ n += 1 + sovLogproto(uint64(m.Limit))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
+ n += 1 + l + sovLogproto(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End)
+ n += 1 + l + sovLogproto(uint64(l))
+ if m.Direction != 0 {
+ n += 1 + sovLogproto(uint64(m.Direction))
+ }
+ if len(m.Shards) > 0 {
+ for _, s := range m.Shards {
+ l = len(s)
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SampleQueryRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Selector)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
+ n += 1 + l + sovLogproto(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End)
+ n += 1 + l + sovLogproto(uint64(l))
+ if len(m.Shards) > 0 {
+ for _, s := range m.Shards {
+ l = len(s)
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SampleQueryResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Series) > 0 {
+ for _, e := range m.Series {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *QueryResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Streams) > 0 {
+ for _, e := range m.Streams {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LabelRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if m.Values {
+ n += 2
+ }
+ if m.Start != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start)
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if m.End != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.End)
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ return n
+}
+
+func (m *LabelResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StreamAdapter) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Labels)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if len(m.Entries) > 0 {
+ for _, e := range m.Entries {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EntryAdapter) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
+ n += 1 + l + sovLogproto(uint64(l))
+ l = len(m.Line)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ return n
+}
+
+func (m *Sample) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ n += 1 + sovLogproto(uint64(m.Timestamp))
+ }
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.Hash != 0 {
+ n += 1 + sovLogproto(uint64(m.Hash))
+ }
+ return n
+}
+
+func (m *Series) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Labels)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if len(m.Samples) > 0 {
+ for _, e := range m.Samples {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TailRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Query)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if m.DelayFor != 0 {
+ n += 1 + sovLogproto(uint64(m.DelayFor))
+ }
+ if m.Limit != 0 {
+ n += 1 + sovLogproto(uint64(m.Limit))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
+ n += 1 + l + sovLogproto(uint64(l))
+ return n
+}
+
+func (m *TailResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Stream != nil {
+ l = m.Stream.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if len(m.DroppedStreams) > 0 {
+ for _, e := range m.DroppedStreams {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeriesRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
+ n += 1 + l + sovLogproto(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End)
+ n += 1 + l + sovLogproto(uint64(l))
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ l = len(s)
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeriesResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Series) > 0 {
+ for _, e := range m.Series {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeriesIdentifier) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovLogproto(uint64(len(k))) + 1 + len(v) + sovLogproto(uint64(len(v)))
+ n += mapEntrySize + 1 + sovLogproto(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *DroppedStream) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.From)
+ n += 1 + l + sovLogproto(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.To)
+ n += 1 + l + sovLogproto(uint64(l))
+ l = len(m.Labels)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ return n
+}
+
+func (m *TimeSeriesChunk) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.FromIngesterId)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ l = len(m.UserId)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if len(m.Labels) > 0 {
+ for _, e := range m.Labels {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ if len(m.Chunks) > 0 {
+ for _, e := range m.Chunks {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LabelPair) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ return n
+}
+
+func (m *Chunk) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ return n
+}
+
+func (m *TransferChunksResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *TailersCountRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *TailersCountResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Count != 0 {
+ n += 1 + sovLogproto(uint64(m.Count))
+ }
+ return n
+}
+
+func (m *GetChunkIDsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Matchers)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
+ n += 1 + l + sovLogproto(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End)
+ n += 1 + l + sovLogproto(uint64(l))
+ return n
+}
+
+func (m *GetChunkIDsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ChunkIDs) > 0 {
+ for _, s := range m.ChunkIDs {
+ l = len(s)
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
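+// sovLogproto returns the number of bytes needed to varint-encode x; sozLogproto is the
+// zig-zag variant used for signed (sint) fields.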
+func sovLogproto(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozLogproto(x uint64) (n int) {
+ return sovLogproto(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
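+
+// The String methods below are generated human-readable formatters for debugging and log
+// output; they have no effect on the wire encoding.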
+func (this *PushRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PushRequest{`,
+ `Streams:` + fmt.Sprintf("%v", this.Streams) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PushResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PushResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *QueryRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&QueryRequest{`,
+ `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`,
+ `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+ `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Direction:` + fmt.Sprintf("%v", this.Direction) + `,`,
+ `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SampleQueryRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SampleQueryRequest{`,
+ `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`,
+ `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SampleQueryResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SampleQueryResponse{`,
+ `Series:` + fmt.Sprintf("%v", this.Series) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *QueryResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&QueryResponse{`,
+ `Streams:` + fmt.Sprintf("%v", this.Streams) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LabelRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LabelRequest{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+ `Start:` + strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1) + `,`,
+ `End:` + strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LabelResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LabelResponse{`,
+ `Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StreamAdapter) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StreamAdapter{`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+ `Entries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Entries), "EntryAdapter", "EntryAdapter", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EntryAdapter) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EntryAdapter{`,
+ `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Line:` + fmt.Sprintf("%v", this.Line) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Sample) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Sample{`,
+ `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `Hash:` + fmt.Sprintf("%v", this.Hash) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Series) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Series{`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+ `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TailRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TailRequest{`,
+ `Query:` + fmt.Sprintf("%v", this.Query) + `,`,
+ `DelayFor:` + fmt.Sprintf("%v", this.DelayFor) + `,`,
+ `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+ `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TailResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TailResponse{`,
+ `Stream:` + fmt.Sprintf("%v", this.Stream) + `,`,
+ `DroppedStreams:` + strings.Replace(fmt.Sprintf("%v", this.DroppedStreams), "DroppedStream", "DroppedStream", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeriesRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeriesRequest{`,
+ `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeriesResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeriesResponse{`,
+ `Series:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Series), "SeriesIdentifier", "SeriesIdentifier", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeriesIdentifier) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&SeriesIdentifier{`,
+ `Labels:` + mapStringForLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DroppedStream) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DroppedStream{`,
+ `From:` + strings.Replace(strings.Replace(this.From.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `To:` + strings.Replace(strings.Replace(this.To.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TimeSeriesChunk) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TimeSeriesChunk{`,
+ `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`,
+ `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`,
+ `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1) + `,`,
+ `Chunks:` + strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LabelPair) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LabelPair{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Chunk) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Chunk{`,
+ `Data:` + fmt.Sprintf("%v", this.Data) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TransferChunksResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TransferChunksResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TailersCountRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TailersCountRequest{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TailersCountResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TailersCountResponse{`,
+ `Count:` + fmt.Sprintf("%v", this.Count) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetChunkIDsRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetChunkIDsRequest{`,
+ `Matchers:` + fmt.Sprintf("%v", this.Matchers) + `,`,
+ `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetChunkIDsResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetChunkIDsResponse{`,
+ `ChunkIDs:` + fmt.Sprintf("%v", this.ChunkIDs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringLogproto(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
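+
+// The Unmarshal methods below decode the wire format in a single pass: each iteration reads
+// a varint tag, checks the wire type against the field number it names, parses the payload
+// into the corresponding struct field, and forwards unknown fields to skipLogproto.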
+func (m *PushRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PushRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PushRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Streams = append(m.Streams, Stream{})
+ if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PushResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PushResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PushResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Selector = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ }
+ m.Limit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Limit |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Direction", wireType)
+ }
+ m.Direction = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Direction |= Direction(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SampleQueryRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SampleQueryRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SampleQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Selector = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SampleQueryResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SampleQueryResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SampleQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Series = append(m.Series, Series{})
+ if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Streams = append(m.Streams, Stream{})
+ if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Values = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Start == nil {
+ m.Start = new(time.Time)
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Start, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.End == nil {
+ m.End = new(time.Time)
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.End, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StreamAdapter) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StreamAdapter: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StreamAdapter: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Labels = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Entries = append(m.Entries, EntryAdapter{})
+ if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EntryAdapter) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EntryAdapter: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EntryAdapter: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Line = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Sample) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Sample: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Value = float64(math.Float64frombits(v))
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ m.Hash = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Hash |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Series) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Series: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Series: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Labels = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Samples = append(m.Samples, Sample{})
+ if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TailRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TailRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TailRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Query = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DelayFor", wireType)
+ }
+ m.DelayFor = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DelayFor |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ }
+ m.Limit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Limit |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TailResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TailResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TailResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Stream == nil {
+ m.Stream = &Stream{}
+ }
+ if err := m.Stream.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedStreams", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DroppedStreams = append(m.DroppedStreams, &DroppedStream{})
+ if err := m.DroppedStreams[len(m.DroppedStreams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeriesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeriesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeriesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeriesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeriesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeriesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Series = append(m.Series, SeriesIdentifier{})
+ if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeriesIdentifier) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeriesIdentifier: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeriesIdentifier: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DroppedStream) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DroppedStream: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DroppedStream: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.From, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.To, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Labels = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TimeSeriesChunk: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TimeSeriesChunk: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FromIngesterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Labels = append(m.Labels, &LabelPair{})
+ if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Chunks = append(m.Chunks, &Chunk{})
+ if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelPair) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelPair: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Chunk) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Chunk: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TransferChunksResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TransferChunksResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TailersCountRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TailersCountRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TailersCountRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TailersCountResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TailersCountResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TailersCountResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ m.Count = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Count |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetChunkIDsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetChunkIDsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetChunkIDsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Matchers = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetChunkIDsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetChunkIDsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetChunkIDsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChunkIDs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChunkIDs = append(m.ChunkIDs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipLogproto(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthLogproto
+ }
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthLogproto
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipLogproto(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthLogproto
+ }
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthLogproto = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowLogproto = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.proto b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.proto
new file mode 100644
index 000000000..deac4f94c
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.proto
@@ -0,0 +1,166 @@
+syntax = "proto3";
+
+package logproto;
+
+option go_package = "github.com/netobserv/loki-client-go/pkg/logproto";
+
+import "google/protobuf/timestamp.proto";
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+service Pusher {
+ rpc Push(PushRequest) returns (PushResponse) {};
+}
+
+service Querier {
+ rpc Query(QueryRequest) returns (stream QueryResponse) {};
+ rpc QuerySample(SampleQueryRequest) returns (stream SampleQueryResponse) {};
+ rpc Label(LabelRequest) returns (LabelResponse) {};
+ rpc Tail(TailRequest) returns (stream TailResponse) {};
+ rpc Series(SeriesRequest) returns (SeriesResponse) {};
+ rpc TailersCount(TailersCountRequest) returns (TailersCountResponse) {};
+ rpc GetChunkIDs(GetChunkIDsRequest) returns (GetChunkIDsResponse) {}; // GetChunkIDs returns ChunkIDs from the index store holding logs for given selectors and time-range.
+}
+
+service Ingester {
+ rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {};
+}
+
+message PushRequest {
+ repeated StreamAdapter streams = 1 [(gogoproto.jsontag) = "streams", (gogoproto.customtype) = "Stream"];
+}
+
+message PushResponse {
+}
+
+message QueryRequest {
+ string selector = 1;
+ uint32 limit = 2;
+ google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ Direction direction = 5;
+ reserved 6;
+ repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"];
+}
+
+message SampleQueryRequest {
+ string selector = 1;
+ google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"];
+}
+
+message SampleQueryResponse {
+ repeated Series series = 1 [(gogoproto.customtype) = "Series", (gogoproto.nullable) = true];
+}
+
+enum Direction {
+ FORWARD = 0;
+ BACKWARD = 1;
+}
+
+message QueryResponse {
+ repeated StreamAdapter streams = 1 [(gogoproto.customtype) = "Stream", (gogoproto.nullable) = true];
+}
+
+message LabelRequest {
+ string name = 1;
+  bool values = 2; // True to fetch label values, false to fetch label names.
+ google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true];
+ google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true];
+}
+
+message LabelResponse {
+ repeated string values = 1;
+}
+
+message StreamAdapter {
+ string labels = 1 [(gogoproto.jsontag) = "labels"];
+ repeated EntryAdapter entries = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "entries"];
+}
+
+message EntryAdapter {
+ google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false, (gogoproto.jsontag) = "ts"];
+ string line = 2 [(gogoproto.jsontag) = "line"];
+}
+
+message Sample {
+ int64 timestamp = 1 [(gogoproto.jsontag) = "ts"];
+ double value = 2 [(gogoproto.jsontag) = "value"];
+ uint64 hash = 3 [(gogoproto.jsontag) = "hash"];
+}
+
+message Series {
+ string labels = 1 [(gogoproto.jsontag) = "labels"];
+ repeated Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "samples"];
+}
+
+message TailRequest {
+ string query = 1;
+ reserved 2;
+ uint32 delayFor = 3;
+ uint32 limit = 4;
+ google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message TailResponse {
+ StreamAdapter stream = 1 [(gogoproto.customtype) = "Stream"];
+ repeated DroppedStream droppedStreams = 2;
+}
+
+message SeriesRequest {
+ google.protobuf.Timestamp start = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ google.protobuf.Timestamp end = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ repeated string groups = 3;
+}
+
+message SeriesResponse {
+ repeated SeriesIdentifier series = 1 [(gogoproto.nullable) = false];
+}
+
+message SeriesIdentifier {
+  map<string, string> labels = 1;
+}
+
+message DroppedStream {
+ google.protobuf.Timestamp from = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ google.protobuf.Timestamp to = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ string labels = 3;
+}
+
+message TimeSeriesChunk {
+ string from_ingester_id = 1;
+ string user_id = 2;
+ repeated LabelPair labels = 3;
+ repeated Chunk chunks = 4;
+}
+
+message LabelPair {
+ string name = 1;
+ string value = 2;
+}
+
+message Chunk {
+ bytes data = 1;
+}
+
+message TransferChunksResponse {
+
+}
+
+message TailersCountRequest {
+
+}
+
+message TailersCountResponse {
+ uint32 count = 1;
+}
+
+message GetChunkIDsRequest {
+ string matchers = 1;
+ google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+ google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message GetChunkIDsResponse {
+ repeated string chunkIDs = 1;
+}
\ No newline at end of file
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/timestamp.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/timestamp.go
new file mode 100644
index 000000000..f2fb323f9
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/timestamp.go
@@ -0,0 +1,106 @@
+package logproto
+
+import (
+ "errors"
+ strconv "strconv"
+ time "time"
+
+ "github.com/gogo/protobuf/types"
+)
+
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *types.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return errors.New("timestamp: " + formatTimestamp(ts) + " before 0001-01-01")
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return errors.New("timestamp: " + formatTimestamp(ts) + " after 10000-01-01")
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return errors.New("timestamp: " + formatTimestamp(ts) + ": nanos not in range [0, 1e9)")
+ }
+ return nil
+}
+
+// formatTimestamp is equivalent to fmt.Sprintf("%#v", ts)
+// but avoids the escape incurred by using fmt.Sprintf, eliminating
+// unnecessary heap allocations.
+func formatTimestamp(ts *types.Timestamp) string {
+ if ts == nil {
+ return "nil"
+ }
+
+ seconds := strconv.FormatInt(ts.Seconds, 10)
+ nanos := strconv.FormatInt(int64(ts.Nanos), 10)
+ return "&types.Timestamp{Seconds: " + seconds + ",\nNanos: " + nanos + ",\n}"
+}
+
+func SizeOfStdTime(t time.Time) int {
+ ts, err := timestampProto(t)
+ if err != nil {
+ return 0
+ }
+ return ts.Size()
+}
+
+func StdTimeMarshalTo(t time.Time, data []byte) (int, error) {
+ ts, err := timestampProto(t)
+ if err != nil {
+ return 0, err
+ }
+ return ts.MarshalTo(data)
+}
+
+func StdTimeUnmarshal(t *time.Time, data []byte) error {
+ ts := &types.Timestamp{}
+ if err := ts.Unmarshal(data); err != nil {
+ return err
+ }
+ tt, err := timestampFromProto(ts)
+ if err != nil {
+ return err
+ }
+ *t = tt
+ return nil
+}
+
+func timestampFromProto(ts *types.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+func timestampProto(t time.Time) (types.Timestamp, error) {
+ ts := types.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+ return ts, validateTimestamp(&ts)
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/types.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/types.go
new file mode 100644
index 000000000..f2a2ac064
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/types.go
@@ -0,0 +1,475 @@
+package logproto
+
+import (
+ "fmt"
+ "io"
+ "time"
+
+ jsoniter "github.com/json-iterator/go"
+ "github.com/prometheus/prometheus/promql/parser"
+)
+
+// Stream contains a unique labels set as a string and a set of entries for it.
+// We are not using the proto generated version but this custom one so that we
+// can improve serialization; see the benchmarks.
+type Stream struct {
+ Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"`
+ Entries []Entry `protobuf:"bytes,2,rep,name=entries,proto3,customtype=EntryAdapter" json:"entries"`
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (r *PushRequest) MarshalJSON() ([]byte, error) {
+ stream := jsoniter.ConfigDefault.BorrowStream(nil)
+ defer jsoniter.ConfigDefault.ReturnStream(stream)
+
+ stream.WriteObjectStart()
+ stream.WriteObjectField("streams")
+ stream.WriteArrayStart()
+ for i, s := range r.Streams {
+ stream.WriteObjectStart()
+ stream.WriteObjectField("stream")
+ stream.WriteObjectStart()
+ lbs, err := parser.ParseMetric(s.Labels)
+ if err != nil {
+ continue
+ }
+ for i, lb := range lbs {
+ stream.WriteObjectField(lb.Name)
+ stream.WriteStringWithHTMLEscaped(lb.Value)
+ if i != len(lbs)-1 {
+ stream.WriteMore()
+ }
+ }
+ stream.WriteObjectEnd()
+ stream.WriteMore()
+ stream.WriteObjectField("values")
+ stream.WriteArrayStart()
+ for i, entry := range s.Entries {
+ stream.WriteArrayStart()
+ stream.WriteRaw(fmt.Sprintf(`"%d"`, entry.Timestamp.UnixNano()))
+ stream.WriteMore()
+ stream.WriteStringWithHTMLEscaped(entry.Line)
+ stream.WriteArrayEnd()
+ if i != len(s.Entries)-1 {
+ stream.WriteMore()
+ }
+ }
+ stream.WriteArrayEnd()
+ stream.WriteObjectEnd()
+ if i != len(r.Streams)-1 {
+ stream.WriteMore()
+ }
+ }
+ stream.WriteArrayEnd()
+ stream.WriteObjectEnd()
+
+ return stream.Buffer(), nil
+}
+
+// Entry is a log entry with a timestamp.
+type Entry struct {
+ Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
+ Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
+}
+
+func (m *Stream) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Stream) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels)))
+ i += copy(dAtA[i:], m.Labels)
+ }
+ if len(m.Entries) > 0 {
+ for _, msg := range m.Entries {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Entry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(SizeOfStdTime(m.Timestamp)))
+ n5, err := StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ if len(m.Line) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Line)))
+ i += copy(dAtA[i:], m.Line)
+ }
+ return i, nil
+}
+
+func (m *Stream) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Stream: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Stream: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Labels = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Entries = append(m.Entries, Entry{})
+ if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+func (m *Entry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Line = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+func (m *Stream) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Labels)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ if len(m.Entries) > 0 {
+ for _, e := range m.Entries {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Entry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = SizeOfStdTime(m.Timestamp)
+ n += 1 + l + sovLogproto(uint64(l))
+ l = len(m.Line)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ return n
+}
+
+func (m *Stream) Equal(that interface{}) bool {
+ if that == nil {
+ return m == nil
+ }
+
+ that1, ok := that.(*Stream)
+ if !ok {
+ that2, ok := that.(Stream)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return m == nil
+ } else if m == nil {
+ return false
+ }
+ if m.Labels != that1.Labels {
+ return false
+ }
+ if len(m.Entries) != len(that1.Entries) {
+ return false
+ }
+ for i := range m.Entries {
+ if !m.Entries[i].Equal(that1.Entries[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *Entry) Equal(that interface{}) bool {
+ if that == nil {
+ return m == nil
+ }
+
+ that1, ok := that.(*Entry)
+ if !ok {
+ that2, ok := that.(Entry)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return m == nil
+ } else if m == nil {
+ return false
+ }
+ if !m.Timestamp.Equal(that1.Timestamp) {
+ return false
+ }
+ if m.Line != that1.Line {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/metric/counters.go b/vendor/github.com/netobserv/loki-client-go/pkg/metric/counters.go
new file mode 100644
index 000000000..bfab117d4
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/metric/counters.go
@@ -0,0 +1,117 @@
+package metric
+
+import (
+ "strings"
+ "time"
+
+ "github.com/mitchellh/mapstructure"
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+)
+
+const (
+ CounterInc = "inc"
+ CounterAdd = "add"
+
+ ErrCounterActionRequired = "counter action must be defined as either `inc` or `add`"
+ ErrCounterInvalidAction = "action %s is not valid, action must be either `inc` or `add`"
+ ErrCounterInvalidMatchAll = "`match_all: true` cannot be combined with `value`, please remove `match_all` or `value`"
+ ErrCounterInvalidCountBytes = "`count_entry_bytes: true` can only be set with `match_all: true`"
+ ErrCounterInvalidCountBytesAction = "`count_entry_bytes: true` can only be used with `action: add`"
+)
+
+type CounterConfig struct {
+ MatchAll *bool `mapstructure:"match_all"`
+ CountBytes *bool `mapstructure:"count_entry_bytes"`
+ Value *string `mapstructure:"value"`
+ Action string `mapstructure:"action"`
+}
+
+func validateCounterConfig(config *CounterConfig) error {
+ if config.Action == "" {
+ return errors.New(ErrCounterActionRequired)
+ }
+ config.Action = strings.ToLower(config.Action)
+ if config.Action != CounterInc && config.Action != CounterAdd {
+ return errors.Errorf(ErrCounterInvalidAction, config.Action)
+ }
+ if config.MatchAll != nil && *config.MatchAll && config.Value != nil {
+ return errors.Errorf(ErrCounterInvalidMatchAll)
+ }
+ if config.CountBytes != nil && *config.CountBytes && (config.MatchAll == nil || !*config.MatchAll) {
+ return errors.New(ErrCounterInvalidCountBytes)
+ }
+ if config.CountBytes != nil && *config.CountBytes && config.Action != CounterAdd {
+ return errors.New(ErrCounterInvalidCountBytesAction)
+ }
+ return nil
+}
+
+func parseCounterConfig(config interface{}) (*CounterConfig, error) {
+ cfg := &CounterConfig{}
+ err := mapstructure.Decode(config, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return cfg, nil
+}
+
+// Counters is a vector of counters, one for each log stream.
+type Counters struct {
+ *metricVec
+ Cfg *CounterConfig
+}
+
+// NewCounters creates a new counter vec.
+func NewCounters(name, help string, config interface{}, maxIdleSec int64) (*Counters, error) {
+ cfg, err := parseCounterConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ err = validateCounterConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+ return &Counters{
+ metricVec: newMetricVec(func(labels map[string]string) prometheus.Metric {
+ return &expiringCounter{prometheus.NewCounter(prometheus.CounterOpts{
+ Help: help,
+ Name: name,
+ ConstLabels: labels,
+ }),
+ 0,
+ }
+ }, maxIdleSec),
+ Cfg: cfg,
+ }, nil
+}
+
+// With returns the counter associated with a stream labelset.
+func (c *Counters) With(labels model.LabelSet) prometheus.Counter {
+ return c.metricVec.With(labels).(prometheus.Counter)
+}
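+
+// Usage sketch (illustrative only; the metric name, config map and label
+// values below are assumptions, not part of this package):
+//
+//	c, err := NewCounters("loki_lines_total", "Total lines sent.",
+//		map[string]interface{}{"action": "inc"}, 300)
+//	if err != nil {
+//		// handle the configuration error
+//	}
+//	c.With(model.LabelSet{"job": "netobserv"}).Inc()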
+
+type expiringCounter struct {
+ prometheus.Counter
+ lastModSec int64
+}
+
+// Inc increments the counter by 1. Use Add to increment it by arbitrary
+// non-negative values.
+func (e *expiringCounter) Inc() {
+ e.Counter.Inc()
+ e.lastModSec = time.Now().Unix()
+}
+
+// Add adds the given value to the counter. It panics if the value is <
+// 0.
+func (e *expiringCounter) Add(val float64) {
+ e.Counter.Add(val)
+ e.lastModSec = time.Now().Unix()
+}
+
+// HasExpired implements Expirable
+func (e *expiringCounter) HasExpired(currentTimeSec int64, maxAgeSec int64) bool {
+ return currentTimeSec-e.lastModSec >= maxAgeSec
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/metric/gauges.go b/vendor/github.com/netobserv/loki-client-go/pkg/metric/gauges.go
new file mode 100644
index 000000000..2b5b8b84b
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/metric/gauges.go
@@ -0,0 +1,136 @@
+package metric
+
+import (
+ "strings"
+ "time"
+
+ "github.com/mitchellh/mapstructure"
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+)
+
+const (
+ GaugeSet = "set"
+ GaugeInc = "inc"
+ GaugeDec = "dec"
+ GaugeAdd = "add"
+ GaugeSub = "sub"
+
+ ErrGaugeActionRequired = "gauge action must be defined as `set`, `inc`, `dec`, `add`, or `sub`"
+ ErrGaugeInvalidAction = "action %s is not valid, action must be `set`, `inc`, `dec`, `add`, or `sub`"
+)
+
+type GaugeConfig struct {
+ Value *string `mapstructure:"value"`
+ Action string `mapstructure:"action"`
+}
+
+func validateGaugeConfig(config *GaugeConfig) error {
+ if config.Action == "" {
+ return errors.New(ErrGaugeActionRequired)
+ }
+ config.Action = strings.ToLower(config.Action)
+ if config.Action != GaugeSet &&
+ config.Action != GaugeInc &&
+ config.Action != GaugeDec &&
+ config.Action != GaugeAdd &&
+ config.Action != GaugeSub {
+ return errors.Errorf(ErrGaugeInvalidAction, config.Action)
+ }
+ return nil
+}
+
+func parseGaugeConfig(config interface{}) (*GaugeConfig, error) {
+ cfg := &GaugeConfig{}
+ err := mapstructure.Decode(config, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return cfg, nil
+}
+
+// Gauges is a vector of gauges, one for each log stream.
+type Gauges struct {
+ *metricVec
+ Cfg *GaugeConfig
+}
+
+// NewGauges creates a new gauge vec.
+func NewGauges(name, help string, config interface{}, maxIdleSec int64) (*Gauges, error) {
+ cfg, err := parseGaugeConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ err = validateGaugeConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+ return &Gauges{
+ metricVec: newMetricVec(func(labels map[string]string) prometheus.Metric {
+ return &expiringGauge{prometheus.NewGauge(prometheus.GaugeOpts{
+ Help: help,
+ Name: name,
+ ConstLabels: labels,
+ }),
+ 0,
+ }
+ }, maxIdleSec),
+ Cfg: cfg,
+ }, nil
+}
+
+// With returns the gauge associated with a stream labelset.
+func (g *Gauges) With(labels model.LabelSet) prometheus.Gauge {
+ return g.metricVec.With(labels).(prometheus.Gauge)
+}
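+
+// Usage sketch (illustrative only; names and label values are assumptions):
+//
+//	g, err := NewGauges("loki_queue_size", "Current queue size.",
+//		map[string]interface{}{"action": "set"}, 300)
+//	if err != nil {
+//		// handle the configuration error
+//	}
+//	g.With(model.LabelSet{"job": "netobserv"}).Set(42)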
+
+type expiringGauge struct {
+ prometheus.Gauge
+ lastModSec int64
+}
+
+// Set sets the Gauge to an arbitrary value.
+func (g *expiringGauge) Set(val float64) {
+ g.Gauge.Set(val)
+ g.lastModSec = time.Now().Unix()
+}
+
+// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+// values.
+func (g *expiringGauge) Inc() {
+ g.Gauge.Inc()
+ g.lastModSec = time.Now().Unix()
+}
+
+// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+// values.
+func (g *expiringGauge) Dec() {
+ g.Gauge.Dec()
+ g.lastModSec = time.Now().Unix()
+}
+
+// Add adds the given value to the Gauge. (The value can be negative,
+// resulting in a decrease of the Gauge.)
+func (g *expiringGauge) Add(val float64) {
+ g.Gauge.Add(val)
+ g.lastModSec = time.Now().Unix()
+}
+
+// Sub subtracts the given value from the Gauge. (The value can be
+// negative, resulting in an increase of the Gauge.)
+func (g *expiringGauge) Sub(val float64) {
+ g.Gauge.Sub(val)
+ g.lastModSec = time.Now().Unix()
+}
+
+// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+func (g *expiringGauge) SetToCurrentTime() {
+ g.Gauge.SetToCurrentTime()
+ g.lastModSec = time.Now().Unix()
+}
+
+// HasExpired implements Expirable
+func (g *expiringGauge) HasExpired(currentTimeSec int64, maxAgeSec int64) bool {
+ return currentTimeSec-g.lastModSec >= maxAgeSec
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/metric/histograms.go b/vendor/github.com/netobserv/loki-client-go/pkg/metric/histograms.go
new file mode 100644
index 000000000..c920e0a0c
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/metric/histograms.go
@@ -0,0 +1,79 @@
+package metric
+
+import (
+ "time"
+
+ "github.com/mitchellh/mapstructure"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+)
+
+type HistogramConfig struct {
+ Value *string `mapstructure:"value"`
+ Buckets []float64 `mapstructure:"buckets"`
+}
+
+func validateHistogramConfig(config *HistogramConfig) error {
+ return nil
+}
+
+func parseHistogramConfig(config interface{}) (*HistogramConfig, error) {
+ cfg := &HistogramConfig{}
+ err := mapstructure.Decode(config, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return cfg, nil
+}
+
+// Histograms is a vector of histograms, one for each log stream.
+type Histograms struct {
+ *metricVec
+ Cfg *HistogramConfig
+}
+
+// NewHistograms creates a new histogram vec.
+func NewHistograms(name, help string, config interface{}, maxIdleSec int64) (*Histograms, error) {
+ cfg, err := parseHistogramConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ err = validateHistogramConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+ return &Histograms{
+ metricVec: newMetricVec(func(labels map[string]string) prometheus.Metric {
+ return &expiringHistogram{prometheus.NewHistogram(prometheus.HistogramOpts{
+ Help: help,
+ Name: name,
+ ConstLabels: labels,
+ Buckets: cfg.Buckets,
+ }),
+ 0,
+ }
+ }, maxIdleSec),
+ Cfg: cfg,
+ }, nil
+}
+
+// With returns the histogram associated with a stream labelset.
+func (h *Histograms) With(labels model.LabelSet) prometheus.Histogram {
+ return h.metricVec.With(labels).(prometheus.Histogram)
+}
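+
+// Usage sketch (illustrative only; names, buckets and labels are assumptions):
+//
+//	h, err := NewHistograms("loki_request_seconds", "Request duration.",
+//		map[string]interface{}{"buckets": []float64{0.1, 0.5, 1, 5}}, 300)
+//	if err != nil {
+//		// handle the configuration error
+//	}
+//	h.With(model.LabelSet{"job": "netobserv"}).Observe(0.25)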
+
+type expiringHistogram struct {
+ prometheus.Histogram
+ lastModSec int64
+}
+
+// Observe adds a single observation to the histogram.
+func (h *expiringHistogram) Observe(val float64) {
+ h.Histogram.Observe(val)
+ h.lastModSec = time.Now().Unix()
+}
+
+// HasExpired implements Expirable
+func (h *expiringHistogram) HasExpired(currentTimeSec int64, maxAgeSec int64) bool {
+ return currentTimeSec-h.lastModSec >= maxAgeSec
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/metric/metricvec.go b/vendor/github.com/netobserv/loki-client-go/pkg/metric/metricvec.go
new file mode 100644
index 000000000..e56a6ccc3
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/metric/metricvec.go
@@ -0,0 +1,82 @@
+package metric
+
+import (
+ "sync"
+ "time"
+
+ "github.com/netobserv/loki-client-go/pkg/labelutil"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+)
+
+// Expirable allows checking if something has exceeded the provided maxAge based on the provided currentTime
+type Expirable interface {
+ HasExpired(currentTimeSec int64, maxAgeSec int64) bool
+}
+
+type metricVec struct {
+ factory func(labels map[string]string) prometheus.Metric
+ mtx sync.Mutex
+ metrics map[model.Fingerprint]prometheus.Metric
+ maxAgeSec int64
+}
+
+func newMetricVec(factory func(labels map[string]string) prometheus.Metric, maxAgeSec int64) *metricVec {
+ return &metricVec{
+ metrics: map[model.Fingerprint]prometheus.Metric{},
+ factory: factory,
+ maxAgeSec: maxAgeSec,
+ }
+}
+
+// Describe implements prometheus.Collector and doesn't declare any metrics on purpose to bypass prometheus validation.
+// see https://godoc.org/github.com/prometheus/client_golang/prometheus#hdr-Custom_Collectors_and_constant_Metrics search for "unchecked"
+func (c *metricVec) Describe(ch chan<- *prometheus.Desc) {}
+
+// Collect implements prometheus.Collector
+func (c *metricVec) Collect(ch chan<- prometheus.Metric) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ for _, m := range c.metrics {
+ ch <- m
+ }
+ c.prune()
+}
+
+// With returns the metric associated with the labelset.
+func (c *metricVec) With(labels model.LabelSet) prometheus.Metric {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ fp := labels.Fingerprint()
+ var ok bool
+ var metric prometheus.Metric
+ if metric, ok = c.metrics[fp]; !ok {
+ metric = c.factory(labelutil.ModelLabelSetToMap(labels))
+ c.metrics[fp] = metric
+ }
+ return metric
+}
+
+func (c *metricVec) Delete(labels model.LabelSet) bool {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ fp := labels.Fingerprint()
+ _, ok := c.metrics[fp]
+ if ok {
+ delete(c.metrics, fp)
+ }
+ return ok
+}
+
+// prune removes all metrics that implement the Expirable interface and have expired.
+// It does not take the lock on the metrics map, so callers must hold it themselves.
+func (c *metricVec) prune() {
+ currentTimeSec := time.Now().Unix()
+ for fp, m := range c.metrics {
+ if em, ok := m.(Expirable); ok {
+ if em.HasExpired(currentTimeSec, c.maxAgeSec) {
+ delete(c.metrics, fp)
+ }
+ }
+ }
+}
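+
+// Lifecycle note (descriptive): With lazily creates one metric per label
+// fingerprint, Collect exports all live metrics and then calls prune, and
+// prune drops any Expirable member whose last modification is older than
+// maxAgeSec. A series therefore disappears from scrapes once it has been
+// idle for at least maxAgeSec at Collect time.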
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/urlutil/url.go b/vendor/github.com/netobserv/loki-client-go/pkg/urlutil/url.go
new file mode 100644
index 000000000..a403f0059
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/urlutil/url.go
@@ -0,0 +1,59 @@
+package urlutil
+
+import "net/url"
+
+// URLValue is a url.URL that can be used as a flag.
+type URLValue struct {
+ *url.URL
+}
+
+// String implements flag.Value
+func (v URLValue) String() string {
+ if v.URL == nil {
+ return ""
+ }
+ return v.URL.String()
+}
+
+// Set implements flag.Value
+func (v *URLValue) Set(s string) error {
+ u, err := url.Parse(s)
+ if err != nil {
+ return err
+ }
+ v.URL = u
+ return nil
+}
+
+// UnmarshalYAML implements yaml.Unmarshaler.
+func (v *URLValue) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+
+ // An empty string means no URL has been configured.
+ if s == "" {
+ v.URL = nil
+ return nil
+ }
+
+ return v.Set(s)
+}
+
+// MarshalYAML implements yaml.Marshaler.
+func (v URLValue) MarshalYAML() (interface{}, error) {
+ if v.URL == nil {
+ return "", nil
+ }
+
+ // Mask out passwords when marshalling URLs back to YAML.
+ u := *v.URL
+ if u.User != nil {
+ if _, set := u.User.Password(); set {
+ u.User = url.UserPassword(u.User.Username(), "********")
+ }
+ }
+
+ return u.String(), nil
+}
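+
+// Round-trip sketch (illustrative YAML, not taken from this repository): a
+// value such as `url: http://user:secret@example.com` is parsed by
+// Set/UnmarshalYAML into the embedded *url.URL, and MarshalYAML writes it
+// back with the password masked as "********".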
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/LICENSE b/vendor/github.com/netobserv/netobserv-ebpf-agent/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/agent.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/agent.go
new file mode 100644
index 000000000..47fab9897
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/agent.go
@@ -0,0 +1,587 @@
+package agent
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/netobserv/gopipes/pkg/node"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/exporter"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/flow"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/kernel"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ promo "github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/tracer"
+
+ "github.com/cilium/ebpf/ringbuf"
+ "github.com/gavv/monotime"
+ ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder"
+ kafkago "github.com/segmentio/kafka-go"
+ "github.com/segmentio/kafka-go/compress"
+ "github.com/sirupsen/logrus"
+)
+
+var alog = logrus.WithField("component", "agent.Flows")
+var plog = logrus.WithField("component", "agent.Packets")
+
+// Status of the agent service. It is used for health reporting, as well as by
+// asynchronous tests that wait for the agent to accept flows.
+type Status int
+
+const (
+ StatusNotStarted Status = iota
+ StatusStarting
+ StatusStarted
+ StatusStopping
+ StatusStopped
+)
+
+const (
+ networkEventsDBPath = "/var/run/ovn/ovnnb_db.sock"
+ networkEventsOwnerName = "netobservAgent"
+)
+
+func (s Status) String() string {
+ switch s {
+ case StatusNotStarted:
+ return "StatusNotStarted"
+ case StatusStarting:
+ return "StatusStarting"
+ case StatusStarted:
+ return "StatusStarted"
+ case StatusStopping:
+ return "StatusStopping"
+ case StatusStopped:
+ return "StatusStopped"
+ default:
+ return "invalid"
+ }
+}
+
+func configureInformer(cfg *Config, log *logrus.Entry) ifaces.Informer {
+ var informer ifaces.Informer
+ switch cfg.ListenInterfaces {
+ case ListenPoll:
+ log.WithField("period", cfg.ListenPollPeriod).
+ Debug("listening for new interfaces: use polling")
+ informer = ifaces.NewPoller(cfg.ListenPollPeriod, cfg.BuffersLength)
+ case ListenWatch:
+ log.Debug("listening for new interfaces: use watching")
+ informer = ifaces.NewWatcher(cfg.BuffersLength)
+ default:
+ log.WithField("providedValue", cfg.ListenInterfaces).
+ Warn("wrong interface listen method. Using file watcher as default")
+ informer = ifaces.NewWatcher(cfg.BuffersLength)
+ }
+ return informer
+
+}
+
+func interfaceListener(ctx context.Context, ifaceEvents <-chan ifaces.Event, slog *logrus.Entry, processEvent func(iface ifaces.Interface, add bool)) {
+ for {
+ select {
+ case <-ctx.Done():
+ slog.Debug("stopping interfaces' listener")
+ return
+ case event := <-ifaceEvents:
+ slog.WithField("event", event).Debug("received event")
+ switch event.Type {
+ case ifaces.EventAdded:
+ processEvent(event.Interface, true)
+ case ifaces.EventDeleted:
+ processEvent(event.Interface, false)
+ default:
+ slog.WithField("event", event).Warn("unknown event type")
+ }
+ }
+ }
+}
+
+// Flows reporting agent
+type Flows struct {
+ cfg *Config
+
+ // input data providers
+ interfaces ifaces.Informer
+ filter InterfaceFilter
+ ebpf ebpfFlowFetcher
+
+ // processing nodes to be wired in the buildAndStartPipeline method
+ mapTracer *flow.MapTracer
+ rbTracer *flow.RingBufTracer
+ accounter *flow.Accounter
+ limiter *flow.CapacityLimiter
+ deduper node.MiddleFunc[[]*model.Record, []*model.Record]
+ exporter node.TerminalFunc[[]*model.Record]
+
+ // elements used to decorate flows with extra information
+ interfaceNamer flow.InterfaceNamer
+ agentIP net.IP
+
+ status Status
+ promoServer *http.Server
+ sampleDecoder *ovnobserv.SampleDecoder
+}
+
+// ebpfFlowFetcher abstracts the interface of ebpf.FlowFetcher to allow dependency injection in tests
+type ebpfFlowFetcher interface {
+ io.Closer
+ Register(iface ifaces.Interface) error
+ UnRegister(iface ifaces.Interface) error
+ AttachTCX(iface ifaces.Interface) error
+ DetachTCX(iface ifaces.Interface) error
+
+ LookupAndDeleteMap(*metrics.Metrics) map[ebpf.BpfFlowId]model.BpfFlowContent
+ DeleteMapsStaleEntries(timeOut time.Duration)
+ ReadRingBuf() (ringbuf.Record, error)
+}
+
+// FlowsAgent instantiates a new agent, given a configuration.
+func FlowsAgent(cfg *Config) (*Flows, error) {
+ alog.Info("initializing Flows agent")
+
+ // manage deprecated configs
+ manageDeprecatedConfigs(cfg)
+
+ // configure informer for new interfaces
+ var informer = configureInformer(cfg, alog)
+
+ alog.Debug("acquiring Agent IP")
+ agentIP, err := fetchAgentIP(cfg)
+ if err != nil {
+ return nil, fmt.Errorf("acquiring Agent IP: %w", err)
+ }
+ alog.Debug("agent IP: " + agentIP.String())
+
+ // initialize metrics
+ metricsSettings := &metrics.Settings{
+ PromConnectionInfo: metrics.PromConnectionInfo{
+ Address: cfg.MetricsServerAddress,
+ Port: cfg.MetricsPort,
+ },
+ Prefix: cfg.MetricsPrefix,
+ }
+ if cfg.MetricsTLSCertPath != "" && cfg.MetricsTLSKeyPath != "" {
+ metricsSettings.PromConnectionInfo.TLS = &metrics.PromTLS{
+ CertPath: cfg.MetricsTLSCertPath,
+ KeyPath: cfg.MetricsTLSKeyPath,
+ }
+ }
+ m := metrics.NewMetrics(metricsSettings)
+
+ var s *ovnobserv.SampleDecoder
+ if cfg.EnableNetworkEventsMonitoring {
+ if !kernel.IsKernelOlderThan("5.14.0") {
+ if s, err = ovnobserv.NewSampleDecoderWithDefaultCollector(context.Background(), networkEventsDBPath,
+ networkEventsOwnerName, cfg.NetworkEventsMonitoringGroupID); err != nil {
+ alog.Warnf("failed to create Network Events sample decoder: %v for id: %d", err, cfg.NetworkEventsMonitoringGroupID)
+ }
+ } else {
+ alog.Warn("old kernel doesn't support network events monitoring skip")
+ }
+ }
+
+ // configure selected exporter
+ exportFunc, err := buildFlowExporter(cfg, m, s)
+ if err != nil {
+ return nil, err
+ }
+
+ ingress, egress := flowDirections(cfg)
+ debug := false
+ if cfg.LogLevel == logrus.TraceLevel.String() || cfg.LogLevel == logrus.DebugLevel.String() {
+ debug = true
+ }
+ filterRules := make([]*tracer.FilterConfig, 0)
+ if cfg.EnableFlowFilter {
+ var flowFilters []*FlowFilter
+ if err := json.Unmarshal([]byte(cfg.FlowFilterRules), &flowFilters); err != nil {
+ return nil, err
+ }
+
+ for _, r := range flowFilters {
+ filterRules = append(filterRules, &tracer.FilterConfig{
+ FilterAction: r.FilterAction,
+ FilterDirection: r.FilterDirection,
+ FilterIPCIDR: r.FilterIPCIDR,
+ FilterProtocol: r.FilterProtocol,
+ FilterPeerIP: r.FilterPeerIP,
+ FilterDestinationPort: tracer.ConvertFilterPortsToInstr(r.FilterDestinationPort, r.FilterDestinationPortRange, r.FilterDestinationPorts),
+ FilterSourcePort: tracer.ConvertFilterPortsToInstr(r.FilterSourcePort, r.FilterSourcePortRange, r.FilterSourcePorts),
+ FilterPort: tracer.ConvertFilterPortsToInstr(r.FilterPort, r.FilterPortRange, r.FilterPorts),
+ FilterTCPFlags: r.FilterTCPFlags,
+ FilterDrops: r.FilterDrops,
+ FilterSample: r.FilterSample,
+ })
+ }
+ }
+ ebpfConfig := &tracer.FlowFetcherConfig{
+ EnableIngress: ingress,
+ EnableEgress: egress,
+ Debug: debug,
+ Sampling: cfg.Sampling,
+ CacheMaxSize: cfg.CacheMaxFlows,
+ EnablePktDrops: cfg.EnablePktDrops,
+ EnableDNSTracker: cfg.EnableDNSTracking,
+ DNSTrackerPort: cfg.DNSTrackingPort,
+ EnableRTT: cfg.EnableRTT,
+ EnableNetworkEventsMonitoring: cfg.EnableNetworkEventsMonitoring,
+ NetworkEventsMonitoringGroupID: cfg.NetworkEventsMonitoringGroupID,
+ EnableFlowFilter: cfg.EnableFlowFilter,
+ EnablePktTranslation: cfg.EnablePktTranslationTracking,
+ UseEbpfManager: cfg.EbpfProgramManagerMode,
+ BpfManBpfFSPath: cfg.BpfManBpfFSPath,
+ FilterConfig: filterRules,
+ }
+
+ fetcher, err := tracer.NewFlowFetcher(ebpfConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ return flowsAgent(cfg, m, informer, fetcher, exportFunc, agentIP, s)
+}
+
+// flowsAgent is a private constructor with injectable dependencies, usable for tests
+func flowsAgent(cfg *Config, m *metrics.Metrics,
+ informer ifaces.Informer,
+ fetcher ebpfFlowFetcher,
+ exporter node.TerminalFunc[[]*model.Record],
+ agentIP net.IP,
+ s *ovnobserv.SampleDecoder,
+) (*Flows, error) {
+ var filter InterfaceFilter
+
+ switch {
+ case len(cfg.InterfaceIPs) > 0 && (len(cfg.Interfaces) > 0 || len(cfg.ExcludeInterfaces) > 0):
+ return nil, fmt.Errorf("INTERFACES/EXCLUDE_INTERFACES and INTERFACE_IPS are mutually exclusive")
+
+ case len(cfg.InterfaceIPs) > 0:
+ // configure ip interface filter
+ f, err := initIPInterfaceFilter(cfg.InterfaceIPs, IPsFromInterface)
+ if err != nil {
+ return nil, fmt.Errorf("configuring interface ip filter: %w", err)
+ }
+ filter = &f
+
+ default:
+ // configure allow/deny regexp interfaces filter
+ f, err := initRegexpInterfaceFilter(cfg.Interfaces, cfg.ExcludeInterfaces)
+ if err != nil {
+ return nil, fmt.Errorf("configuring interface filters: %w", err)
+ }
+ filter = &f
+ }
+
+ registerer := ifaces.NewRegisterer(informer, cfg.BuffersLength)
+
+ interfaceNamer := func(ifIndex int) string {
+ iface, ok := registerer.IfaceNameForIndex(ifIndex)
+ if !ok {
+ return "unknown"
+ }
+ return iface
+ }
+ var promoServer *http.Server
+ if cfg.MetricsEnable {
+ promoServer = promo.InitializePrometheus(m.Settings)
+ }
+
+ samplingGauge := m.CreateSamplingRate()
+ samplingGauge.Set(float64(cfg.Sampling))
+
+ mapTracer := flow.NewMapTracer(fetcher, cfg.CacheActiveTimeout, cfg.StaleEntriesEvictTimeout, m)
+ rbTracer := flow.NewRingBufTracer(fetcher, mapTracer, cfg.CacheActiveTimeout, m)
+ accounter := flow.NewAccounter(cfg.CacheMaxFlows, cfg.CacheActiveTimeout, time.Now, monotime.Now, m)
+ limiter := flow.NewCapacityLimiter(m)
+ var deduper node.MiddleFunc[[]*model.Record, []*model.Record]
+ if cfg.Deduper == DeduperFirstCome {
+ deduper = flow.Dedupe(cfg.DeduperFCExpiry, cfg.DeduperJustMark, cfg.DeduperMerge, interfaceNamer, m)
+ }
+
+ return &Flows{
+ ebpf: fetcher,
+ exporter: exporter,
+ interfaces: registerer,
+ filter: filter,
+ cfg: cfg,
+ mapTracer: mapTracer,
+ rbTracer: rbTracer,
+ accounter: accounter,
+ limiter: limiter,
+ deduper: deduper,
+ agentIP: agentIP,
+ interfaceNamer: interfaceNamer,
+ promoServer: promoServer,
+ sampleDecoder: s,
+ }, nil
+}
+
+func flowDirections(cfg *Config) (ingress, egress bool) {
+ switch cfg.Direction {
+ case DirectionIngress:
+ return true, false
+ case DirectionEgress:
+ return false, true
+ case DirectionBoth:
+ return true, true
+ default:
+ alog.Warnf("unknown DIRECTION %q. Tracing both ingress and egress traffic", cfg.Direction)
+ return true, true
+ }
+}
+
+func buildFlowExporter(cfg *Config, m *metrics.Metrics, s *ovnobserv.SampleDecoder) (node.TerminalFunc[[]*model.Record], error) {
+ switch cfg.Export {
+ case "grpc":
+ return buildGRPCExporter(cfg, m, s)
+ case "kafka":
+ return buildKafkaExporter(cfg, m, s)
+ case "ipfix+udp":
+ return buildIPFIXExporter(cfg, "udp")
+ case "ipfix+tcp":
+ return buildIPFIXExporter(cfg, "tcp")
+ case "direct-flp":
+ return buildFlowDirectFLPExporter(cfg)
+ default:
+ return nil, fmt.Errorf("wrong flow export type %s", cfg.Export)
+ }
+}
+
+func buildGRPCExporter(cfg *Config, m *metrics.Metrics, s *ovnobserv.SampleDecoder) (node.TerminalFunc[[]*model.Record], error) {
+ if cfg.TargetHost == "" || cfg.TargetPort == 0 {
+ return nil, fmt.Errorf("missing target host or port: %s:%d",
+ cfg.TargetHost, cfg.TargetPort)
+ }
+ grpcExporter, err := exporter.StartGRPCProto(cfg.TargetHost, cfg.TargetPort, cfg.GRPCMessageMaxFlows, m, s)
+ if err != nil {
+ return nil, err
+ }
+ return grpcExporter.ExportFlows, nil
+}
+
+func buildFlowDirectFLPExporter(cfg *Config) (node.TerminalFunc[[]*model.Record], error) {
+ flpExporter, err := exporter.StartDirectFLP(cfg.FLPConfig, cfg.BuffersLength)
+ if err != nil {
+ return nil, err
+ }
+ return flpExporter.ExportFlows, nil
+}
+
+func buildKafkaExporter(cfg *Config, m *metrics.Metrics, s *ovnobserv.SampleDecoder) (node.TerminalFunc[[]*model.Record], error) {
+ if len(cfg.KafkaBrokers) == 0 {
+ return nil, errors.New("at least one Kafka broker is needed")
+ }
+ var compression compress.Compression
+ if err := compression.UnmarshalText([]byte(cfg.KafkaCompression)); err != nil {
+ return nil, fmt.Errorf("wrong Kafka compression value %s. Admitted values are "+
+ "none, gzip, snappy, lz4, zstd: %w", cfg.KafkaCompression, err)
+ }
+ transport := kafkago.Transport{}
+ if cfg.KafkaEnableTLS {
+ tlsConfig, err := buildTLSConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+ transport.TLS = tlsConfig
+ }
+ if cfg.KafkaEnableSASL {
+ mechanism, err := buildSASLConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+ transport.SASL = mechanism
+ }
+ return (&exporter.KafkaProto{
+ Writer: &kafkago.Writer{
+ Addr: kafkago.TCP(cfg.KafkaBrokers...),
+ Topic: cfg.KafkaTopic,
+ BatchSize: cfg.KafkaBatchMessages,
+ // Assigning KafkaBatchSize to BatchBytes instead of BatchSize might be confusing here.
+ // The reason is that the "standard" Kafka name for this variable is "batch.size",
+ // which specifies the size of messages in terms of bytes, and not in terms of entries.
+ // We have decided to hide this library implementation detail and expose to the
+ // customer the common, standard name and meaning for batch.size
+ BatchBytes: int64(cfg.KafkaBatchSize),
+ // Segmentio's Kafka-go does not behave as standard Kafka library, and would
+ // throttle any Write invocation until reaching the timeout.
+ // Since we invoke write once each CacheActiveTimeout, we can safely disable this
+ // timeout throttling
+ // https://github.com/netobserv/flowlogs-pipeline/pull/233#discussion_r897830057
+ BatchTimeout: time.Nanosecond,
+ Async: cfg.KafkaAsync,
+ Compression: compression,
+ Transport: &transport,
+ Balancer: &kafkago.Hash{},
+ },
+ Metrics: m,
+ SampleDecoder: s,
+ }).ExportFlows, nil
+}
+
+func buildIPFIXExporter(cfg *Config, proto string) (node.TerminalFunc[[]*model.Record], error) {
+ if cfg.TargetHost == "" || cfg.TargetPort == 0 {
+ return nil, fmt.Errorf("missing target host or port: %s:%d",
+ cfg.TargetHost, cfg.TargetPort)
+ }
+ ipfix, err := exporter.StartIPFIXExporter(cfg.TargetHost, cfg.TargetPort, proto)
+ if err != nil {
+ return nil, err
+ }
+ return ipfix.ExportFlows, nil
+}
+
+// Run a Flows agent. The function will keep running in the same thread
+// until the passed context is canceled
+func (f *Flows) Run(ctx context.Context) error {
+ f.status = StatusStarting
+ alog.Info("starting Flows agent")
+ graph, err := f.buildAndStartPipeline(ctx)
+ if err != nil {
+ return fmt.Errorf("starting processing graph: %w", err)
+ }
+
+ f.status = StatusStarted
+ alog.Info("Flows agent successfully started")
+ <-ctx.Done()
+
+ f.status = StatusStopping
+ alog.Info("stopping Flows agent")
+ if err := f.ebpf.Close(); err != nil {
+ alog.WithError(err).Warn("eBPF resources not correctly closed")
+ }
+
+ alog.Debug("waiting for all nodes to finish their pending work")
+ <-graph.Done()
+ if f.promoServer != nil {
+ alog.Debug("closing prometheus server")
+ if err := f.promoServer.Close(); err != nil {
+ alog.WithError(err).Warn("error when closing prometheus server")
+ }
+ }
+ if f.sampleDecoder != nil {
+ f.sampleDecoder.Shutdown()
+ }
+ f.status = StatusStopped
+ alog.Info("Flows agent stopped")
+ return nil
+}
+
+func (f *Flows) Status() Status {
+ return f.status
+}
+
+// interfacesManager uses an informer to check new/deleted network interfaces. For each running
+// interface, it registers a flow ebpfFetcher that will forward new flows to the returned channel.
+// TODO: consider moving this method and "onInterfaceEvent" to another type
+func (f *Flows) interfacesManager(ctx context.Context) error {
+ slog := alog.WithField("function", "interfacesManager")
+
+ slog.Debug("subscribing for network interface events")
+ ifaceEvents, err := f.interfaces.Subscribe(ctx)
+ if err != nil {
+ return fmt.Errorf("instantiating interfaces' informer: %w", err)
+ }
+
+ go interfaceListener(ctx, ifaceEvents, slog, f.onInterfaceEvent)
+
+ return nil
+}
+
+// buildAndStartPipeline creates the ETL flow processing graph.
+// For a more visual view, check the docs/architecture.md document.
+func (f *Flows) buildAndStartPipeline(ctx context.Context) (*node.Terminal[[]*model.Record], error) {
+
+ if !f.cfg.EbpfProgramManagerMode {
+ alog.Debug("registering interfaces' listener in background")
+ err := f.interfacesManager(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+ alog.Debug("connecting flows' processing graph")
+ mapTracer := node.AsStart(f.mapTracer.TraceLoop(ctx, f.cfg.ForceGC))
+ rbTracer := node.AsStart(f.rbTracer.TraceLoop(ctx))
+
+ accounter := node.AsMiddle(f.accounter.Account,
+ node.ChannelBufferLen(f.cfg.BuffersLength))
+
+ limiter := node.AsMiddle(f.limiter.Limit,
+ node.ChannelBufferLen(f.cfg.BuffersLength))
+
+ decorator := node.AsMiddle(flow.Decorate(f.agentIP, f.interfaceNamer),
+ node.ChannelBufferLen(f.cfg.BuffersLength))
+
+ ebl := f.cfg.ExporterBufferLength
+ if ebl == 0 {
+ ebl = f.cfg.BuffersLength
+ }
+
+ export := node.AsTerminal(f.exporter,
+ node.ChannelBufferLen(ebl))
+
+ rbTracer.SendsTo(accounter)
+
+ if f.deduper != nil {
+ deduper := node.AsMiddle(f.deduper, node.ChannelBufferLen(f.cfg.BuffersLength))
+ mapTracer.SendsTo(deduper)
+ accounter.SendsTo(deduper)
+ deduper.SendsTo(limiter)
+ } else {
+ mapTracer.SendsTo(limiter)
+ accounter.SendsTo(limiter)
+ }
+ limiter.SendsTo(decorator)
+ decorator.SendsTo(export)
+
+ alog.Debug("starting graph")
+ mapTracer.Start()
+ rbTracer.Start()
+ return export, nil
+}
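+
+// Resulting topology (descriptive): rbTracer feeds the accounter; the
+// accounter and the mapTracer feed either the optional deduper or the
+// limiter directly; from there flows pass through the decorator and finally
+// reach the exporter terminal node.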
+
+func (f *Flows) onInterfaceEvent(iface ifaces.Interface, add bool) {
+ // ignore interfaces that do not match the user configuration acceptance/exclusion lists
+ allowed, err := f.filter.Allowed(iface.Name)
+ if err != nil {
+ alog.WithField("interface", iface).Errorf("encountered error determining if interface is allowed: %v", err)
+ return
+ }
+ if !allowed {
+ alog.WithField("interface", iface).
+ Debug("interface does not match the allow/exclusion filters. Ignoring")
+ return
+ }
+ if add {
+ alog.WithField("interface", iface).Info("interface detected. trying to attach TCX hook")
+ if err := f.ebpf.AttachTCX(iface); err != nil {
+ alog.WithField("interface", iface).WithError(err).
+ Info("can't attach to TCx hook flow ebpfFetcher. fall back to use legacy TC hook")
+ if err := f.ebpf.Register(iface); err != nil {
+ alog.WithField("interface", iface).WithError(err).
+ Warn("can't register flow ebpfFetcher. Ignoring")
+ return
+ }
+ }
+ } else {
+ alog.WithField("interface", iface).Info("interface deleted. trying to detach TCX hook")
+ if err := f.ebpf.DetachTCX(iface); err != nil {
+ alog.WithField("interface", iface).WithError(err).
+ Info("can't detach from TCx hook flow ebpfFetcher. fall back to use legacy TC hook")
+ if err := f.ebpf.UnRegister(iface); err != nil {
+ alog.WithField("interface", iface).WithError(err).
+ Warn("can't unregister flow ebpfFetcher. Ignoring")
+ return
+ }
+ }
+
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/config.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/config.go
new file mode 100644
index 000000000..9968b01a5
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/config.go
@@ -0,0 +1,277 @@
+package agent
+
+import (
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+var clog = logrus.WithField("component", "config")
+
+const (
+ ListenPoll = "poll"
+ ListenWatch = "watch"
+ DeduperNone = "none"
+ DeduperFirstCome = "firstCome"
+ DirectionIngress = "ingress"
+ DirectionEgress = "egress"
+ DirectionBoth = "both"
+
+ IPTypeAny = "any"
+ IPTypeIPV4 = "ipv4"
+ IPTypeIPV6 = "ipv6"
+
+ IPIfaceExternal = "external"
+ IPIfaceLocal = "local"
+ IPIfaceNamedPrefix = "name:"
+)
+
+type FlowFilter struct {
+ // FilterDirection is the direction of the flow filter.
+ // Possible values are "Ingress" or "Egress".
+ FilterDirection string `json:"direction,omitempty"`
+ // FilterIPCIDR is the IP CIDR to filter flows.
+ // Example: 10.10.10.0/24 or 100:100:100:100::/64, default is 0.0.0.0/0
+ FilterIPCIDR string `json:"ip_cidr,omitempty"`
+ // FilterProtocol is the protocol to filter flows.
+ // supported protocols: TCP, UDP, SCTP, ICMP, ICMPv6
+ FilterProtocol string `json:"protocol,omitempty"`
+ // FilterSourcePort is the source port to filter flows.
+ FilterSourcePort int32 `json:"source_port,omitempty"`
+ // FilterDestinationPort is the destination port to filter flows.
+ FilterDestinationPort int32 `json:"destination_port,omitempty"`
+ // FilterPort is the port to filter flows by; it can be used for either the source or destination port.
+ FilterPort int32 `json:"port,omitempty"`
+ // FilterSourcePortRange is the source port range to filter flows.
+ // Example: 8000-8010
+ FilterSourcePortRange string `json:"source_port_range,omitempty"`
+ // FilterSourcePorts is two source ports to filter flows.
+ // Example: 8000,8010
+ FilterSourcePorts string `json:"source_ports,omitempty"`
+ // FilterDestinationPortRange is the destination port range to filter flows.
+ // Example: 8000-8010
+ FilterDestinationPortRange string `json:"destination_port_range,omitempty"`
+ // FilterDestinationPorts is two destination ports to filter flows.
+ // Example: 8000,8010
+ FilterDestinationPorts string `json:"destination_ports,omitempty"`
+ // FilterPortRange is the port range to filter flows, can be used for either source or destination port.
+ // Example: 8000-8010
+ FilterPortRange string `json:"port_range,omitempty"`
+ // FilterPorts is a pair of ports to filter flows by; it can be used for either the source or destination port.
+ // Example: 8000,8010
+ FilterPorts string `json:"ports,omitempty"`
+ // FilterICMPType is the ICMP type to filter flows.
+ FilterICMPType int `json:"icmp_type,omitempty"`
+ // FilterICMPCode is the ICMP code to filter flows.
+ FilterICMPCode int `json:"icmp_code,omitempty"`
+ // FilterPeerIP is the IP to filter flows.
+ // Example: 10.10.10.10
+ FilterPeerIP string `json:"peer_ip,omitempty"`
+ // FilterAction is the action to filter flows.
+ // Possible values are "Accept" or "Reject".
+ FilterAction string `json:"action,omitempty"`
+ // FilterTCPFlags is the TCP flags to filter flows.
+ // possible values are: SYN, SYN-ACK, ACK, FIN, RST, PSH, URG, ECE, CWR, FIN-ACK, RST-ACK
+ FilterTCPFlags string `json:"tcp_flags,omitempty"`
+ // FilterDrops allows filtering flows with packet drops; default is false.
+ FilterDrops bool `json:"drops,omitempty"`
+ // FilterSample is the sample rate that matching flows will use.
+ FilterSample uint32 `json:"sample,omitempty"`
+}
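+
+// Illustrative value for the flow filter rules consumed by FlowsAgent through
+// cfg.FlowFilterRules when EnableFlowFilter is set (the concrete CIDRs, ports
+// and flags below are assumptions, not defaults):
+//
+//	[
+//	  {"direction": "Ingress", "ip_cidr": "10.0.0.0/8", "protocol": "TCP",
+//	   "destination_port": 80, "action": "Accept"},
+//	  {"protocol": "TCP", "tcp_flags": "SYN", "action": "Reject"}
+//	]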
+
+type Config struct {
+ // AgentIP allows overriding the reported Agent IP address on each flow.
+ AgentIP string `env:"AGENT_IP"`
+ // AgentIPIface specifies which interface the agent should pick the IP address from in order to
+ // report it in the AgentIP field on each flow. Accepted values are: external (default), local,
+ // or name: (e.g. name:eth0).
+ // If the AgentIP configuration property is set, this property has no effect.
+ AgentIPIface string `env:"AGENT_IP_IFACE" envDefault:"external"`
+ // AgentIPType specifies which type of IP address (IPv4, IPv6 or any) the agent should report
+ // in the AgentID field of each flow. Accepted values are: any (default), ipv4, ipv6.
+ // If the AgentIP configuration property is set, this property has no effect.
+ AgentIPType string `env:"AGENT_IP_TYPE" envDefault:"any"`
+ // Export selects the exporter protocol.
+ // Accepted values for Flows are: grpc (default), kafka, ipfix+udp, ipfix+tcp or direct-flp.
+ // Accepted values for Packets are: grpc (default) or direct-flp
+ Export string `env:"EXPORT" envDefault:"grpc"`
+ // TargetHost is the host name or IP of the flow or packet collector, when the EXPORT variable
+ // is set to "grpc"
+ TargetHost string `env:"TARGET_HOST"`
+ // TargetPort is the port of the flow or packet collector, when the EXPORT variable is set to "grpc"
+ TargetPort int `env:"TARGET_PORT"`
+ // GRPCMessageMaxFlows specifies the limit, in number of flows, of each GRPC message. Messages
+ // larger than that number will be split and submitted sequentially.
+ GRPCMessageMaxFlows int `env:"GRPC_MESSAGE_MAX_FLOWS" envDefault:"10000"`
+ // Interfaces contains the interface names from where flows will be collected. If empty, the agent
+ // will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces.
+ // If an entry is enclosed by slashes (e.g. `/br-/`), it will match as regular expression,
+ // otherwise it will be matched as a case-sensitive string.
+ Interfaces []string `env:"INTERFACES" envSeparator:","`
+ // ExcludeInterfaces contains the interface names that will be excluded from flow tracing. Default:
+ // "lo" (loopback).
+ // If an entry is enclosed by slashes (e.g. `/br-/`), it will match as regular expression,
+ // otherwise it will be matched as a case-sensitive string.
+ ExcludeInterfaces []string `env:"EXCLUDE_INTERFACES" envSeparator:"," envDefault:"lo"`
+ // BuffersLength establishes the length of communication channels between the different processing
+ // stages
+ BuffersLength int `env:"BUFFERS_LENGTH" envDefault:"50"`
+ // InterfaceIPs is a list of CIDR-notation IPs/Subnets where any interface containing an IP in the given ranges
+ // should be listened on. This allows users to specify interfaces without knowing the OS-assigned interface names.
+ // Exclusive with Interfaces/ExcludeInterfaces.
+ InterfaceIPs []string `env:"INTERFACE_IPS" envSeparator:","`
+ // ExporterBufferLength establishes the length of the buffer of flow batches (not individual flows)
+ // that can be accumulated before the Kafka or GRPC exporter. When this buffer is full (e.g.
+ // because the Kafka or GRPC endpoint is slow), incoming flow batches will be dropped. If unset,
+ // its value is the same as the BUFFERS_LENGTH property.
+ ExporterBufferLength int `env:"EXPORTER_BUFFER_LENGTH"`
+ // CacheMaxFlows specifies how many flows can be accumulated in the accounting cache before
+ // being flushed for their later export
+ CacheMaxFlows int `env:"CACHE_MAX_FLOWS" envDefault:"5000"`
+ // CacheActiveTimeout specifies the maximum duration that flows are kept in the accounting
+ // cache before being flushed for their later export
+ CacheActiveTimeout time.Duration `env:"CACHE_ACTIVE_TIMEOUT" envDefault:"5s"`
+ // Deduper specifies the deduper type. Accepted values are "none" (disabled) and "firstCome".
+ // When enabled, it will detect duplicate flows (flows that have been detected e.g. through
+ // both the physical and a virtual interface).
+ // "firstCome" will forward only flows from the first interface the flows are received from.
+ Deduper string `env:"DEDUPER" envDefault:"none"`
+ // DeduperFCExpiry specifies the expiry duration of the flows "firstCome" deduplicator. After
+ // a flow hasn't been received for that expiry time, the deduplicator forgets it. That means
+ // that a flow from a connection that has been inactive during that period could be forwarded
+ // again from a different interface.
+ // If the value is not set, it will default to 2 * CacheActiveTimeout
+ DeduperFCExpiry time.Duration `env:"DEDUPER_FC_EXPIRY"`
+ // DeduperJustMark will just mark duplicates (boolean field) instead of dropping them.
+ DeduperJustMark bool `env:"DEDUPER_JUST_MARK" envDefault:"false"`
+ // DeduperMerge will merge duplicated flows and generate a list of interface and direction pairs.
+ DeduperMerge bool `env:"DEDUPER_MERGE" envDefault:"true"`
+ // Direction allows selecting which flows to trace according to its direction. Accepted values
+ // are "ingress", "egress" or "both" (default).
+ Direction string `env:"DIRECTION" envDefault:"both"`
+ // Logger level. From more to less verbose: trace, debug, info, warn, error, fatal, panic.
+ LogLevel string `env:"LOG_LEVEL" envDefault:"info"`
+ // Sampling holds the rate at which packets should be sampled and sent to the target collector.
+ // E.g. if set to 100, one out of 100 packets, on average, will be sent to the target collector.
+ Sampling int `env:"SAMPLING" envDefault:"0"`
+ // ListenInterfaces specifies the mechanism used by the agent to listen for added or removed
+ // network interfaces. Accepted values are "watch" (default) or "poll".
+ // If the value is "watch", interfaces are traced immediately after they are created. This is
+ // the recommended setting for most configurations. "poll" value is a fallback mechanism that
+ // periodically queries the current network interfaces (frequency specified by ListenPollPeriod).
+ ListenInterfaces string `env:"LISTEN_INTERFACES" envDefault:"watch"`
+ // ListenPollPeriod specifies the periodicity to query the network interfaces when the
+ // ListenInterfaces value is set to "poll".
+ ListenPollPeriod time.Duration `env:"LISTEN_POLL_PERIOD" envDefault:"10s"`
+ // KafkaBrokers is a comma-separated list of the addresses of the brokers of the Kafka cluster
+ // that this agent is configured to send messages to.
+ KafkaBrokers []string `env:"KAFKA_BROKERS" envSeparator:","`
+ // KafkaTopic is the name of the topic that the flows processor will receive the flows from.
+ KafkaTopic string `env:"KAFKA_TOPIC" envDefault:"network-flows"`
+ // KafkaBatchMessages sets the limit on how many messages will be buffered before being sent to a
+ // partition.
+ KafkaBatchMessages int `env:"KAFKA_BATCH_MESSAGES" envDefault:"1000"`
+ // KafkaBatchSize sets the limit, in bytes, of the maximum size of a request before being sent
+ // to a partition.
+ KafkaBatchSize int `env:"KAFKA_BATCH_SIZE" envDefault:"1048576"`
+ // KafkaAsync, when true, makes the message writing process non-blocking. It also means that
+ // errors are ignored since the caller will not receive the returned value.
+ KafkaAsync bool `env:"KAFKA_ASYNC" envDefault:"true"`
+ // KafkaCompression sets the compression codec to be used to compress messages. The accepted
+ // values are: none (default), gzip, snappy, lz4, zstd.
+ KafkaCompression string `env:"KAFKA_COMPRESSION" envDefault:"none"`
+ // KafkaEnableTLS, when set to true, enables TLS.
+ KafkaEnableTLS bool `env:"KAFKA_ENABLE_TLS" envDefault:"false"`
+ // KafkaTLSInsecureSkipVerify skips server certificate verification in TLS connections
+ KafkaTLSInsecureSkipVerify bool `env:"KAFKA_TLS_INSECURE_SKIP_VERIFY" envDefault:"false"`
+ // KafkaTLSCACertPath is the path to the Kafka CA certificate for TLS connections
+ KafkaTLSCACertPath string `env:"KAFKA_TLS_CA_CERT_PATH"`
+ // KafkaTLSUserCertPath is the path to the user (client) certificate for mTLS connections
+ KafkaTLSUserCertPath string `env:"KAFKA_TLS_USER_CERT_PATH"`
+ // KafkaTLSUserKeyPath is the path to the user (client) private key for mTLS connections
+ KafkaTLSUserKeyPath string `env:"KAFKA_TLS_USER_KEY_PATH"`
+ // KafkaEnableSASL, when set to true, enables SASL authentication.
+ KafkaEnableSASL bool `env:"KAFKA_ENABLE_SASL" envDefault:"false"`
+ // KafkaSASLType is the type of SASL mechanism: plain or scramSHA512.
+ KafkaSASLType string `env:"KAFKA_SASL_TYPE" envDefault:"plain"`
+ // KafkaSASLClientIDPath is the path to the client ID (username) for SASL auth
+ KafkaSASLClientIDPath string `env:"KAFKA_SASL_CLIENT_ID_PATH"`
+ // KafkaSASLClientSecretPath is the path to the client secret (password) for SASL auth
+ KafkaSASLClientSecretPath string `env:"KAFKA_SASL_CLIENT_SECRET_PATH"`
+ // ProfilePort sets the listening port for Go's pprof tool. If it is not set, profiling is disabled.
+ ProfilePort int `env:"PROFILE_PORT"`
+ // FLPConfig is the Flowlogs-pipeline configuration as YAML or JSON, used when export is "direct-flp". See https://github.com/netobserv/flowlogs-pipeline
+ // The "ingest" stage must be omitted from this configuration, since it is handled internally by the agent. The first stage should follow "preset-ingester".
+ // For example: {"pipeline":[{"name": "writer","follows": "preset-ingester"}],"parameters":[{"name": "writer","write": {"type": "stdout"}}]}.
+ FLPConfig string `env:"FLP_CONFIG"`
+ // EnableRTT enables RTT calculations for the flows. Default is false (disabled); set to true to enable.
+ // This feature requires the flows agent to attach at both the Ingress and Egress hook points.
+ // If Ingress and Egress are not both enabled, this feature will not be enabled even if set to true via env.
+ EnableRTT bool `env:"ENABLE_RTT" envDefault:"false"`
+ // ForceGC forces a Go garbage collection run at the end of every map eviction; default is true.
+ ForceGC bool `env:"FORCE_GARBAGE_COLLECTION" envDefault:"true"`
+ // EnablePktDrops enables the packet-drops eBPF hook to account for dropped flows.
+ EnablePktDrops bool `env:"ENABLE_PKT_DROPS" envDefault:"false"`
+ // EnableDNSTracking enables the DNS tracking eBPF hook to track DNS query/response flows.
+ EnableDNSTracking bool `env:"ENABLE_DNS_TRACKING" envDefault:"false"`
+ // DNSTrackingPort defines which port the DNS service is mapped to at the pod level,
+ // so that DNS can be tracked at the pod level.
+ DNSTrackingPort uint16 `env:"DNS_TRACKING_PORT" envDefault:"53"`
+ // StaleEntriesEvictTimeout specifies the maximum duration that stale entries are kept
+ // before being deleted, default is 5 seconds.
+ StaleEntriesEvictTimeout time.Duration `env:"STALE_ENTRIES_EVICT_TIMEOUT" envDefault:"5s"`
+ // EnablePCA enables Packet Capture Agent (PCA). By default, PCA is off.
+ EnablePCA bool `env:"ENABLE_PCA" envDefault:"false"`
+ // MetricsEnable enables an HTTP server to collect eBPF agent metrics; default is false.
+ MetricsEnable bool `env:"METRICS_ENABLE" envDefault:"false"`
+ // MetricsServerAddress is the address of the server that collects ebpf agent metrics.
+ MetricsServerAddress string `env:"METRICS_SERVER_ADDRESS"`
+ // MetricsPort is the port of the server that collects ebpf agent metrics.
+ MetricsPort int `env:"METRICS_SERVER_PORT" envDefault:"9090"`
+ // MetricsTLSCertPath is the path to the server certificate for TLS connections
+ MetricsTLSCertPath string `env:"METRICS_TLS_CERT_PATH"`
+ // MetricsTLSKeyPath is the path to the server private key for TLS connections
+ MetricsTLSKeyPath string `env:"METRICS_TLS_KEY_PATH"`
+ // MetricsPrefix is the prefix of the metrics that are sent to the server.
+ MetricsPrefix string `env:"METRICS_PREFIX" envDefault:"ebpf_agent_"`
+ // EnableFlowFilter enables the flow filter; default is false.
+ EnableFlowFilter bool `env:"ENABLE_FLOW_FILTER" envDefault:"false"`
+ // FlowFilterRules is the list of flow filter rules.
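+ // The value is expected to be a JSON-encoded array of filter rules, which the agent unmarshals into FlowFilter objects.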
+ FlowFilterRules string `env:"FLOW_FILTER_RULES"`
+ // EnableNetworkEventsMonitoring enables monitoring network plugin events, default is false.
+ EnableNetworkEventsMonitoring bool `env:"ENABLE_NETWORK_EVENTS_MONITORING" envDefault:"false"`
+ // NetworkEventsMonitoringGroupID allows the eBPF hook to process samples only for the given group ID and ignore the rest.
+ NetworkEventsMonitoringGroupID int `env:"NETWORK_EVENTS_MONITORING_GROUP_ID" envDefault:"10"`
+ // EnablePktTranslationTracking allows tracking packets after translation, for example NAT; default is false.
+ EnablePktTranslationTracking bool `env:"ENABLE_PKT_TRANSLATION" envDefault:"false"`
+ // EbpfProgramManagerMode is enabled when the eBPF manager handles the life cycle of the NetObserv eBPF programs; default is false.
+ EbpfProgramManagerMode bool `env:"EBPF_PROGRAM_MANAGER_MODE" envDefault:"false"`
+ // BpfManBpfFSPath is the user-configurable eBPF manager mount path.
+ BpfManBpfFSPath string `env:"BPFMAN_BPF_FS_PATH" envDefault:"/run/netobserv/maps"`
+
+ /* Deprecated configs are listed below this line
+ * See manageDeprecatedConfigs function for details
+ */
+
+ // Deprecated: FlowsTargetHost is replaced by TargetHost.
+ FlowsTargetHost string `env:"FLOWS_TARGET_HOST"`
+ // Deprecated: FlowsTargetPort is replaced by TargetPort.
+ FlowsTargetPort int `env:"FLOWS_TARGET_PORT"`
+ // Deprecated: PCAServerPort is replaced by TargetPort.
+ PCAServerPort int `env:"PCA_SERVER_PORT"`
+}
+
+func manageDeprecatedConfigs(cfg *Config) {
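+ // Map deprecated variables onto their replacements when they are set.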
+ if len(cfg.FlowsTargetHost) != 0 {
+ clog.Infof("Using deprecated FlowsTargetHost %s", cfg.FlowsTargetHost)
+ cfg.TargetHost = cfg.FlowsTargetHost
+ }
+
+ if cfg.FlowsTargetPort != 0 {
+ clog.Infof("Using deprecated FlowsTargetPort %d", cfg.FlowsTargetPort)
+ cfg.TargetPort = cfg.FlowsTargetPort
+ } else if cfg.PCAServerPort != 0 {
+ clog.Infof("Using deprecated PCAServerPort %d", cfg.PCAServerPort)
+ cfg.TargetPort = cfg.PCAServerPort
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/filter.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/filter.go
new file mode 100644
index 000000000..366bcd25d
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/filter.go
@@ -0,0 +1,152 @@
+package agent
+
+import (
+ "fmt"
+ "net"
+ "net/netip"
+ "regexp"
+ "strings"
+)
+
+type InterfaceFilter interface {
+ Allowed(iface string) (bool, error)
+}
+
+type ipInterfaceFilter struct {
+ allowedIPs []netip.Prefix
+ // Almost always going to be a wrapper around getting
+ // the interface from net.InterfaceByName and then calling
+ // .Addrs() on the interface
+ ipsFromIface func(ifaceName string) ([]netip.Addr, error)
+}
+
+// Default function for getting the list of IPs configured
+// for a specific network interface
+func IPsFromInterface(ifaceName string) ([]netip.Addr, error) {
+ iface, err := net.InterfaceByName(ifaceName)
+ if err != nil {
+ return []netip.Addr{}, fmt.Errorf("error retrieving interface by name: %w", err)
+ }
+ addrs, err := iface.Addrs()
+ if err != nil {
+ return []netip.Addr{}, fmt.Errorf("error retrieving addresses from interface: %w", err)
+ }
+
+ interfaceAddrs := []netip.Addr{}
+ for _, addr := range addrs {
+ prefix, err := netip.ParsePrefix(addr.String())
+ if err != nil {
+ return []netip.Addr{}, fmt.Errorf("parsing given ip to netip.Addr: %w", err)
+ }
+ interfaceAddrs = append(interfaceAddrs, prefix.Addr())
+ }
+ return interfaceAddrs, nil
+}
+
+// initIPInterfaceFilter allows filtering network interfaces that are accepted/excluded by the user,
+// according to the provided INTERFACE_IPS from the configuration. It allows an interface when at least
+// one of its IP addresses falls within one of the provided CIDRs.
+func initIPInterfaceFilter(ips []string, ipsFromIface func(ifaceName string) ([]netip.Addr, error)) (ipInterfaceFilter, error) {
+ ipIfaceFilter := ipInterfaceFilter{}
+ ipIfaceFilter.ipsFromIface = ipsFromIface
+
+ for _, ip := range ips {
+ prefix, err := netip.ParsePrefix(ip)
+ if err != nil {
+ return ipInterfaceFilter{}, fmt.Errorf("error parsing given ip: %s: %w", ip, err)
+ }
+ ipIfaceFilter.allowedIPs = append(ipIfaceFilter.allowedIPs, prefix)
+ }
+
+ return ipIfaceFilter, nil
+}
+
+func (f *ipInterfaceFilter) Allowed(iface string) (bool, error) {
+ ifaceAddrs, err := f.ipsFromIface(iface)
+ if err != nil {
+ return false, fmt.Errorf("error calling ipsFromIface(): %w", err)
+ }
+
+ for _, ifaceAddr := range ifaceAddrs {
+ for _, allowedPrefix := range f.allowedIPs {
+ if allowedPrefix.Contains(ifaceAddr) {
+ return true, nil
+ }
+ }
+ }
+ return false, nil
+}
+
+type regexpInterfaceFilter struct {
+ allowedRegexpes []*regexp.Regexp
+ allowedMatches []string
+ excludedRegexpes []*regexp.Regexp
+ excludedMatches []string
+}
+
+// initRegexpInterfaceFilter allows filtering network interfaces that are accepted/excluded by the user,
+// according to the provided allowed and excluded interfaces from the configuration. It allows
+// matching by exact string or by regular expression
+func initRegexpInterfaceFilter(allowed, excluded []string) (regexpInterfaceFilter, error) {
+ var isRegexp = regexp.MustCompile("^/(.*)/$")
+
+ itf := regexpInterfaceFilter{}
+ for _, definition := range allowed {
+ definition = strings.Trim(definition, " ")
+ // the user defined a /regexp/ between slashes: compile and store it as regular expression
+ if sm := isRegexp.FindStringSubmatch(definition); len(sm) > 1 {
+ re, err := regexp.Compile(sm[1])
+ if err != nil {
+ return itf, fmt.Errorf("wrong interface regexp %q: %w", definition, err)
+ }
+ itf.allowedRegexpes = append(itf.allowedRegexpes, re)
+ } else {
+ // otherwise, store it as exact match definition
+ itf.allowedMatches = append(itf.allowedMatches, definition)
+ }
+ }
+
+ for _, definition := range excluded {
+ definition = strings.Trim(definition, " ")
+ // the user defined a /regexp/ between slashes: compile and store it as regexp
+ if sm := isRegexp.FindStringSubmatch(definition); len(sm) > 1 {
+ re, err := regexp.Compile(sm[1])
+ if err != nil {
+ return itf, fmt.Errorf("wrong excluded interface regexp %q: %w", definition, err)
+ }
+ itf.excludedRegexpes = append(itf.excludedRegexpes, re)
+ } else {
+ // otherwise, store it as exact match definition
+ itf.excludedMatches = append(itf.excludedMatches, definition)
+ }
+ }
+
+ return itf, nil
+}
+
+func (itf *regexpInterfaceFilter) Allowed(name string) (bool, error) {
+ // if the allowed list is empty, any interface is allowed except if it matches the exclusion list
+ allowed := len(itf.allowedRegexpes)+len(itf.allowedMatches) == 0
+ // otherwise, we check if it appears in the allowed lists (both exact match and regexp)
+ for i := 0; !allowed && i < len(itf.allowedMatches); i++ {
+ allowed = allowed || name == itf.allowedMatches[i]
+ }
+ for i := 0; !allowed && i < len(itf.allowedRegexpes); i++ {
+ allowed = allowed || itf.allowedRegexpes[i].MatchString(string(name))
+ }
+ if !allowed {
+ return false, nil
+ }
+ // if the interface matches the allow lists, we still need to check that it is not excluded
+ for _, match := range itf.excludedMatches {
+ if name == match {
+ return false, nil
+ }
+ }
+ for _, re := range itf.excludedRegexpes {
+ if re.MatchString(string(name)) {
+ return false, nil
+ }
+ }
+ return true, nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/ip.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/ip.go
new file mode 100644
index 000000000..ef52d01d9
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/ip.go
@@ -0,0 +1,139 @@
+package agent
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// dependencies that can be injected from testing
+var (
+ interfaceByName = net.InterfaceByName
+ interfaceAddrs = net.InterfaceAddrs
+ dial = net.Dial
+ ifaceAddrs = func(iface *net.Interface) ([]net.Addr, error) {
+ return iface.Addrs()
+ }
+)
+
+// fetchAgentIP guesses the non-loopback IP address of the Agent host, according to the
+// user-provided configuration:
+// - If AgentIP is provided, this value is used regardless of the real IP of the Agent.
+// - AgentIPIface specifies which interface this function should look into in order to pick up an address.
+// - AgentIPType specifies which type of IP address the agent should pick up ("any" to pick up whichever
+// IPv4 or IPv6 address is found first).
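+// For example, with AgentIPIface set to IPIfaceNamedPrefix followed by an interface name, and AgentIPType set to "ipv4",
+// the first non-loopback IPv4 address found on that interface is returned.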
+func fetchAgentIP(cfg *Config) (net.IP, error) {
+ if cfg.AgentIP != "" {
+ if ip := net.ParseIP(cfg.AgentIP); ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("can't parse provided IP %v", cfg.AgentIP)
+ }
+
+ if cfg.AgentIPType != IPTypeAny &&
+ cfg.AgentIPType != IPTypeIPV6 &&
+ cfg.AgentIPType != IPTypeIPV4 {
+ return nil, fmt.Errorf("invalid IP type %q. Valid values are: %s, %s or %s",
+ cfg.AgentIPType, IPTypeIPV4, IPTypeIPV6, IPTypeAny)
+ }
+
+ switch cfg.AgentIPIface {
+ case IPIfaceLocal:
+ return fromLocal(cfg.AgentIPType)
+ case IPIfaceExternal:
+ return fromExternal(cfg.AgentIPType)
+ default:
+ if !strings.HasPrefix(cfg.AgentIPIface, IPIfaceNamedPrefix) {
+ return nil, fmt.Errorf(
+ "invalid IP interface %q. Valid values are: %s, %s or %s",
+ cfg.AgentIPIface, IPIfaceLocal, IPIfaceExternal, IPIfaceNamedPrefix)
+ }
+ return fromInterface(cfg.AgentIPIface[len(IPIfaceNamedPrefix):], cfg.AgentIPType)
+ }
+}
+
+func fromInterface(ifName, ipType string) (net.IP, error) {
+ iface, err := interfaceByName(ifName)
+ if err != nil {
+ return nil, err
+ }
+ addrs, err := ifaceAddrs(iface)
+ if err != nil {
+ return nil, err
+ }
+ if ip, ok := findAddress(addrs, ipType); ok {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("no matching %q addresses found at interface %v", ipType, ifName)
+}
+
+func fromLocal(ipType string) (net.IP, error) {
+ addrs, err := interfaceAddrs()
+ if err != nil {
+ return nil, err
+ }
+ if ip, ok := findAddress(addrs, ipType); ok {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("no matching local %q addresses found", ipType)
+}
+
+func fromExternal(ipType string) (net.IP, error) {
+ // We don't really care about the existence or nonexistence of the addresses.
+ // This will just establish an external dialer from which we can pick up the external
+ // host address.
+ addrStr := "8.8.8.8:80"
+ // When IPType is "any" and the host only has interfaces with IPv6 addresses, use the IPv6 DNS address.
+ ip, _ := fromLocal(IPTypeIPV4)
+ if ipType == IPTypeIPV6 || (ipType == IPTypeAny && ip == nil) {
+ addrStr = "[2001:4860:4860::8888]:80"
+ }
+ conn, err := dial("udp", addrStr)
+ if err != nil {
+ return nil, fmt.Errorf("can't establish an external connection %w", err)
+ }
+ if addr, ok := conn.LocalAddr().(*net.UDPAddr); !ok {
+ return nil, fmt.Errorf("unexpected local address type %T for external connection",
+ conn.LocalAddr())
+ } else if ip, ok := getIP(addr.IP, ipType); ok {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("no matching %q external addresses found", ipType)
+}
+
+func findAddress(addrs []net.Addr, ipType string) (net.IP, bool) {
+ for _, addr := range addrs {
+ if ipnet, ok := addr.(*net.IPNet); ok && ipnet != nil {
+ if ip, ok := getIP(ipnet.IP, ipType); ok {
+ return ip, true
+ }
+ }
+ }
+ return nil, false
+}
+
+func getIP(pip net.IP, ipType string) (net.IP, bool) {
+ if pip == nil || pip.IsLoopback() || pip.IsLinkLocalUnicast() {
+ return nil, false
+ }
+ switch ipType {
+ case IPTypeIPV4:
+ if ip := pip.To4(); ip != nil {
+ return ip, true
+ }
+ case IPTypeIPV6:
+ // as any IPv4 address can be converted to IPv6, we only return
+ // addresses that can be converted to IPv6 but not to IPv4
+ if ip := pip.To16(); ip != nil && pip.To4() == nil {
+ return ip, true
+ }
+ default: // Any
+ if ip := pip.To4(); ip != nil {
+ return ip, true
+ }
+ if ip := pip.To16(); ip != nil {
+ return ip, true
+ }
+ }
+ return nil, false
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/packets_agent.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/packets_agent.go
new file mode 100644
index 000000000..1157901c3
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/packets_agent.go
@@ -0,0 +1,326 @@
+package agent
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+
+ "github.com/netobserv/gopipes/pkg/node"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/exporter"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/flow"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/tracer"
+
+ "github.com/cilium/ebpf/perf"
+ "github.com/sirupsen/logrus"
+)
+
+// Packets reporting agent
+type Packets struct {
+ cfg *Config
+
+ // input data providers
+ interfaces ifaces.Informer
+ filter InterfaceFilter
+ ebpf ebpfPacketFetcher
+
+ // processing nodes to be wired in the buildAndStartPipeline method
+ perfTracer *flow.PerfTracer
+ packetbuffer *flow.PerfBuffer
+ exporter node.TerminalFunc[[]*model.PacketRecord]
+
+ // elements used to decorate flows with extra information
+ interfaceNamer flow.InterfaceNamer
+ agentIP net.IP
+
+ status Status
+}
+
+type ebpfPacketFetcher interface {
+ io.Closer
+ Register(iface ifaces.Interface) error
+ UnRegister(iface ifaces.Interface) error
+ AttachTCX(iface ifaces.Interface) error
+ DetachTCX(iface ifaces.Interface) error
+ LookupAndDeleteMap(*metrics.Metrics) map[int][]*byte
+ ReadPerf() (perf.Record, error)
+}
+
+// PacketsAgent instantiates a new agent, given a configuration.
+func PacketsAgent(cfg *Config) (*Packets, error) {
+ plog.Info("initializing Packets agent")
+
+ // manage deprecated configs
+ manageDeprecatedConfigs(cfg)
+
+ // configure informer for new interfaces
+ informer := configureInformer(cfg, plog)
+
+ plog.Info("[PCA]acquiring Agent IP")
+ agentIP, err := fetchAgentIP(cfg)
+ if err != nil {
+ return nil, fmt.Errorf("acquiring Agent IP: %w", err)
+ }
+
+ // configure selected exporter
+ packetexportFunc, err := buildPacketExporter(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ ingress, egress := flowDirections(cfg)
+ debug := false
+ if cfg.LogLevel == logrus.TraceLevel.String() || cfg.LogLevel == logrus.DebugLevel.String() {
+ debug = true
+ }
+ filterRules := make([]*tracer.FilterConfig, 0)
+ if cfg.EnableFlowFilter {
+ var flowFilters []*FlowFilter
+ if err := json.Unmarshal([]byte(cfg.FlowFilterRules), &flowFilters); err != nil {
+ return nil, err
+ }
+
+ for _, r := range flowFilters {
+ filterRules = append(filterRules, &tracer.FilterConfig{
+ FilterAction: r.FilterAction,
+ FilterDirection: r.FilterDirection,
+ FilterIPCIDR: r.FilterIPCIDR,
+ FilterProtocol: r.FilterProtocol,
+ FilterPeerIP: r.FilterPeerIP,
+ FilterDestinationPort: tracer.ConvertFilterPortsToInstr(r.FilterDestinationPort, r.FilterDestinationPortRange, r.FilterDestinationPorts),
+ FilterSourcePort: tracer.ConvertFilterPortsToInstr(r.FilterSourcePort, r.FilterSourcePortRange, r.FilterSourcePorts),
+ FilterPort: tracer.ConvertFilterPortsToInstr(r.FilterPort, r.FilterPortRange, r.FilterPorts),
+ FilterTCPFlags: r.FilterTCPFlags,
+ FilterDrops: r.FilterDrops,
+ FilterSample: r.FilterSample,
+ })
+ }
+ }
+ ebpfConfig := &tracer.FlowFetcherConfig{
+ EnableIngress: ingress,
+ EnableEgress: egress,
+ Debug: debug,
+ Sampling: cfg.Sampling,
+ CacheMaxSize: cfg.CacheMaxFlows,
+ EnablePCA: cfg.EnablePCA,
+ UseEbpfManager: cfg.EbpfProgramManagerMode,
+ FilterConfig: filterRules,
+ }
+
+ fetcher, err := tracer.NewPacketFetcher(ebpfConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ return packetsAgent(cfg, informer, fetcher, packetexportFunc, agentIP)
+}
+
+// packetsAgent is a private constructor with injectable dependencies, usable for tests
+func packetsAgent(cfg *Config,
+ informer ifaces.Informer,
+ fetcher ebpfPacketFetcher,
+ packetexporter node.TerminalFunc[[]*model.PacketRecord],
+ agentIP net.IP,
+) (*Packets, error) {
+ var filter InterfaceFilter
+
+ switch {
+ case len(cfg.InterfaceIPs) > 0 && (len(cfg.Interfaces) > 0 || len(cfg.ExcludeInterfaces) > 0):
+ return nil, fmt.Errorf("INTERFACES/EXCLUDE_INTERFACES and INTERFACE_IPS are mutually exclusive")
+
+ case len(cfg.InterfaceIPs) > 0:
+ // configure ip interface filter
+ f, err := initIPInterfaceFilter(cfg.InterfaceIPs, IPsFromInterface)
+ if err != nil {
+ return nil, fmt.Errorf("configuring interface ip filter: %w", err)
+ }
+ filter = &f
+
+ default:
+ // configure allow/deny regexp interfaces filter
+ f, err := initRegexpInterfaceFilter(cfg.Interfaces, cfg.ExcludeInterfaces)
+ if err != nil {
+ return nil, fmt.Errorf("configuring interface filters: %w", err)
+ }
+ filter = &f
+ }
+
+ registerer := ifaces.NewRegisterer(informer, cfg.BuffersLength)
+
+ interfaceNamer := func(ifIndex int) string {
+ iface, ok := registerer.IfaceNameForIndex(ifIndex)
+ if !ok {
+ return "unknown"
+ }
+ return iface
+ }
+
+ perfTracer := flow.NewPerfTracer(fetcher, cfg.CacheActiveTimeout)
+
+ packetbuffer := flow.NewPerfBuffer(cfg.CacheMaxFlows, cfg.CacheActiveTimeout)
+
+ return &Packets{
+ ebpf: fetcher,
+ interfaces: registerer,
+ filter: filter,
+ cfg: cfg,
+ packetbuffer: packetbuffer,
+ perfTracer: perfTracer,
+ agentIP: agentIP,
+ interfaceNamer: interfaceNamer,
+ exporter: packetexporter,
+ }, nil
+}
+
+func buildGRPCPacketExporter(cfg *Config) (node.TerminalFunc[[]*model.PacketRecord], error) {
+ if cfg.TargetHost == "" || cfg.TargetPort == 0 {
+ return nil, fmt.Errorf("missing target host or port for PCA: %s:%d",
+ cfg.TargetHost, cfg.TargetPort)
+ }
+ plog.Info("starting gRPC Packet send")
+ pcapStreamer, err := exporter.StartGRPCPacketSend(cfg.TargetHost, cfg.TargetPort)
+ if err != nil {
+ return nil, err
+ }
+
+ return pcapStreamer.ExportGRPCPackets, nil
+}
+
+func buildPacketExporter(cfg *Config) (node.TerminalFunc[[]*model.PacketRecord], error) {
+ switch cfg.Export {
+ case "grpc":
+ return buildGRPCPacketExporter(cfg)
+ case "direct-flp":
+ return buildPacketDirectFLPExporter(cfg)
+ default:
+ return nil, fmt.Errorf("unsupported packet export type %s", cfg.Export)
+ }
+}
+
+func buildPacketDirectFLPExporter(cfg *Config) (node.TerminalFunc[[]*model.PacketRecord], error) {
+ flpExporter, err := exporter.StartDirectFLP(cfg.FLPConfig, cfg.BuffersLength)
+ if err != nil {
+ return nil, err
+ }
+ return flpExporter.ExportPackets, nil
+}
+
+// Run a Packets agent. The function will keep running in the same thread
+// until the passed context is canceled
+func (p *Packets) Run(ctx context.Context) error {
+ p.status = StatusStarting
+ plog.Info("Starting Packets agent")
+ graph, err := p.buildAndStartPipeline(ctx)
+ if err != nil {
+ return fmt.Errorf("error starting processing graph: %w", err)
+ }
+
+ p.status = StatusStarted
+ plog.Info("Packets agent successfully started")
+ <-ctx.Done()
+
+ p.status = StatusStopping
+ plog.Info("stopping Packets agent")
+ if err := p.ebpf.Close(); err != nil {
+ plog.WithError(err).Warn("eBPF resources not correctly closed")
+ }
+
+ plog.Debug("waiting for all nodes to finish their pending work")
+ <-graph.Done()
+
+ p.status = StatusStopped
+ plog.Info("Packets agent stopped")
+ return nil
+}
+
+func (p *Packets) Status() Status {
+ return p.status
+}
+
+func (p *Packets) interfacesManager(ctx context.Context) error {
+ slog := plog.WithField("function", "interfacesManager")
+
+ slog.Debug("subscribing for network interface events")
+ ifaceEvents, err := p.interfaces.Subscribe(ctx)
+ if err != nil {
+ return fmt.Errorf("instantiating interfaces' informer: %w", err)
+ }
+
+ go interfaceListener(ctx, ifaceEvents, slog, p.onInterfaceAdded)
+
+ return nil
+}
+
+func (p *Packets) buildAndStartPipeline(ctx context.Context) (*node.Terminal[[]*model.PacketRecord], error) {
+
+ if !p.cfg.EbpfProgramManagerMode {
+ plog.Debug("registering interfaces' listener in background")
+ err := p.interfacesManager(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+ plog.Debug("connecting packets' processing graph")
+
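+ // Wire the processing graph: perf tracer -> packet buffer -> exporter.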
+ perfTracer := node.AsStart(p.perfTracer.TraceLoop(ctx))
+
+ ebl := p.cfg.ExporterBufferLength
+ if ebl == 0 {
+ ebl = p.cfg.BuffersLength
+ }
+
+ packetbuffer := node.AsMiddle(p.packetbuffer.PBuffer,
+ node.ChannelBufferLen(p.cfg.BuffersLength))
+
+ perfTracer.SendsTo(packetbuffer)
+
+ export := node.AsTerminal(p.exporter,
+ node.ChannelBufferLen(ebl))
+
+ packetbuffer.SendsTo(export)
+ perfTracer.Start()
+
+ return export, nil
+}
+
+func (p *Packets) onInterfaceAdded(iface ifaces.Interface, add bool) {
+ // ignore interfaces that do not match the user configuration acceptance/exclusion lists
+ allowed, err := p.filter.Allowed(iface.Name)
+ if err != nil {
+ plog.WithField("[PCA]interface", iface).WithError(err).
+ Warn("couldn't determine if interface is allowed. Ignoring")
+ }
+ if !allowed {
+ plog.WithField("interface", iface).
+ Debug("[PCA]interface does not match the allow/exclusion filters. Ignoring")
+ return
+ }
+ if add {
+ plog.WithField("interface", iface).Info("interface detected. trying to attach TCX hook")
+ if err := p.ebpf.AttachTCX(iface); err != nil {
+ plog.WithField("[PCA]interface", iface).WithError(err).
+ Info("can't attach to TCx hook packet ebpfFetcher. fall back to use legacy TC hook")
+ if err := p.ebpf.Register(iface); err != nil {
+ plog.WithField("[PCA]interface", iface).WithError(err).
+ Warn("can't register packet ebpfFetcher. Ignoring")
+ return
+ }
+ }
+ } else {
+ plog.WithField("interface", iface).Info("interface deleted. trying to detach TCX hook")
+ if err := p.ebpf.DetachTCX(iface); err != nil {
+ plog.WithField("[PCA]interface", iface).WithError(err).
+ Info("can't detach from TCx hook packet ebpfFetcher. check if there is any legacy TC hook")
+ if err := p.ebpf.UnRegister(iface); err != nil {
+ plog.WithField("[PCA]interface", iface).WithError(err).
+ Warn("can't unregister packet ebpfFetcher. Ignoring")
+ return
+ }
+ }
+
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/sasl.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/sasl.go
new file mode 100644
index 000000000..a8b228d5b
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/sasl.go
@@ -0,0 +1,39 @@
+package agent
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/segmentio/kafka-go/sasl"
+ "github.com/segmentio/kafka-go/sasl/plain"
+ "github.com/segmentio/kafka-go/sasl/scram"
+)
+
+func buildSASLConfig(cfg *Config) (sasl.Mechanism, error) {
+ // Read client ID
+ id, err := os.ReadFile(cfg.KafkaSASLClientIDPath)
+ if err != nil {
+ return nil, err
+ }
+ strID := strings.TrimSpace(string(id))
+ // Read password
+ pwd, err := os.ReadFile(cfg.KafkaSASLClientSecretPath)
+ if err != nil {
+ return nil, err
+ }
+ strPwd := strings.TrimSpace(string(pwd))
+ var mechanism sasl.Mechanism
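+ // Select the SASL mechanism implementation according to the configured type.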
+ switch cfg.KafkaSASLType {
+ case "plain":
+ mechanism = plain.Mechanism{Username: strID, Password: strPwd}
+ case "scramSHA512":
+ mechanism, err = scram.Mechanism(scram.SHA512, strID, strPwd)
+ default:
+ err = fmt.Errorf("unknown SASL type: %s", cfg.KafkaSASLType)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return mechanism, nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/tls.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/tls.go
new file mode 100644
index 000000000..c25b26a9b
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/tls.go
@@ -0,0 +1,38 @@
+package agent
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "os"
+)
+
+func buildTLSConfig(cfg *Config) (*tls.Config, error) {
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: cfg.KafkaTLSInsecureSkipVerify,
+ }
+ if cfg.KafkaTLSCACertPath != "" {
+ caCert, err := os.ReadFile(cfg.KafkaTLSCACertPath)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.RootCAs = x509.NewCertPool()
+ tlsConfig.RootCAs.AppendCertsFromPEM(caCert)
+
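+ // When both a client certificate and key are provided, configure them for mutual TLS (mTLS).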
+ if cfg.KafkaTLSUserCertPath != "" && cfg.KafkaTLSUserKeyPath != "" {
+ userCert, err := os.ReadFile(cfg.KafkaTLSUserCertPath)
+ if err != nil {
+ return nil, err
+ }
+ userKey, err := os.ReadFile(cfg.KafkaTLSUserKeyPath)
+ if err != nil {
+ return nil, err
+ }
+ pair, err := tls.X509KeyPair([]byte(userCert), []byte(userKey))
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = []tls.Certificate{pair}
+ }
+ }
+ return tlsConfig, nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/decode_protobuf.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/decode_protobuf.go
new file mode 100644
index 000000000..f98e6b340
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/decode_protobuf.go
@@ -0,0 +1,421 @@
+package decode
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
+
+ "github.com/mdlayher/ethernet"
+ log "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/proto"
+)
+
+const (
+ skbDropReasonSubsystemShift = 16
+ skbDropReasonSubSysCore = (0 << skbDropReasonSubsystemShift)
+ skbDropReasonSubSysOpenVSwitch = (3 << skbDropReasonSubsystemShift)
+)
+
+// Protobuf decodes protobuf flow record definitions, as forwarded by
+// ingest.NetObservAgent, into a GenericMap that follows the same naming conventions
+// as the IPFIX flows from ingest.IngestCollector
+type Protobuf struct {
+}
+
+func NewProtobuf() (*Protobuf, error) {
+ log.Debugf("entering NewProtobuf")
+ return &Protobuf{}, nil
+}
+
+// Decode decodes a protobuf raw flow and returns a GenericMap
+// representing it
+func (p *Protobuf) Decode(rawFlow []byte) (config.GenericMap, error) {
+ record := pbflow.Record{}
+ if err := proto.Unmarshal(rawFlow, &record); err != nil {
+ return nil, fmt.Errorf("unmarshaling ProtoBuf record: %w", err)
+ }
+ return PBFlowToMap(&record), nil
+}
+
+func PBFlowToMap(pb *pbflow.Record) config.GenericMap {
+ flow := pbflow.PBToFlow(pb)
+ if flow == nil {
+ return config.GenericMap{}
+ }
+ return RecordToMap(flow)
+}
+
+// RecordToMap converts the flow from Agent inner model into FLP GenericMap model
+// nolint:golint,cyclop
+func RecordToMap(fr *model.Record) config.GenericMap {
+ if fr == nil {
+ return config.GenericMap{}
+ }
+ srcMAC := model.MacAddr(fr.Metrics.SrcMac)
+ dstMAC := model.MacAddr(fr.Metrics.DstMac)
+ out := config.GenericMap{
+ "SrcMac": srcMAC.String(),
+ "DstMac": dstMAC.String(),
+ "Etype": fr.Metrics.EthProtocol,
+ "TimeFlowStartMs": fr.TimeFlowStart.UnixMilli(),
+ "TimeFlowEndMs": fr.TimeFlowEnd.UnixMilli(),
+ "TimeReceived": time.Now().Unix(),
+ "AgentIP": fr.AgentIP.String(),
+ }
+
+ if fr.Duplicate {
+ out["Duplicate"] = true
+ }
+
+ if fr.Metrics.Bytes != 0 {
+ out["Bytes"] = fr.Metrics.Bytes
+ }
+
+ if fr.Metrics.Packets != 0 {
+ out["Packets"] = fr.Metrics.Packets
+ }
+
+ if fr.Metrics.Sampling != 0 {
+ out["Sampling"] = fr.Metrics.Sampling
+ }
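+ // Report every (interface, direction) pair observed for this flow: either the merged duplicates list,
+ // or the single interface and direction the flow was captured on.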
+ var interfaces []string
+ var directions []int
+ if len(fr.DupList) != 0 {
+ for _, m := range fr.DupList {
+ for key, value := range m {
+ interfaces = append(interfaces, key)
+ directions = append(directions, int(model.Direction(value)))
+ }
+ }
+ } else {
+ interfaces = append(interfaces, fr.Interface)
+ directions = append(directions, int(fr.ID.Direction))
+ }
+ out["Interfaces"] = interfaces
+ out["IfDirections"] = directions
+
+ if fr.Metrics.EthProtocol == uint16(ethernet.EtherTypeIPv4) || fr.Metrics.EthProtocol == uint16(ethernet.EtherTypeIPv6) {
+ out["SrcAddr"] = model.IP(fr.ID.SrcIp).String()
+ out["DstAddr"] = model.IP(fr.ID.DstIp).String()
+ out["Proto"] = fr.ID.TransportProtocol
+ out["Dscp"] = fr.Metrics.Dscp
+
+ if fr.ID.TransportProtocol == syscall.IPPROTO_ICMP || fr.ID.TransportProtocol == syscall.IPPROTO_ICMPV6 {
+ out["IcmpType"] = fr.ID.IcmpType
+ out["IcmpCode"] = fr.ID.IcmpCode
+ } else if fr.ID.TransportProtocol == syscall.IPPROTO_TCP || fr.ID.TransportProtocol == syscall.IPPROTO_UDP || fr.ID.TransportProtocol == syscall.IPPROTO_SCTP {
+ out["SrcPort"] = fr.ID.SrcPort
+ out["DstPort"] = fr.ID.DstPort
+ if fr.ID.TransportProtocol == syscall.IPPROTO_TCP {
+ out["Flags"] = fr.Metrics.Flags
+ }
+ }
+ }
+
+ if fr.Metrics.AdditionalMetrics != nil {
+ if fr.Metrics.AdditionalMetrics.DnsRecord.Errno != 0 {
+ out["DnsErrno"] = fr.Metrics.AdditionalMetrics.DnsRecord.Errno
+ }
+ if fr.Metrics.AdditionalMetrics.DnsRecord.Id != 0 {
+ out["DnsId"] = fr.Metrics.AdditionalMetrics.DnsRecord.Id
+ out["DnsFlags"] = fr.Metrics.AdditionalMetrics.DnsRecord.Flags
+ out["DnsFlagsResponseCode"] = DNSRcodeToStr(uint32(fr.Metrics.AdditionalMetrics.DnsRecord.Flags) & 0xF)
+ out["DnsLatencyMs"] = fr.DNSLatency.Milliseconds()
+ }
+
+ if fr.Metrics.AdditionalMetrics.PktDrops.LatestDropCause != 0 {
+ out["PktDropBytes"] = fr.Metrics.AdditionalMetrics.PktDrops.Bytes
+ out["PktDropPackets"] = fr.Metrics.AdditionalMetrics.PktDrops.Packets
+ out["PktDropLatestFlags"] = fr.Metrics.AdditionalMetrics.PktDrops.LatestFlags
+ out["PktDropLatestState"] = TCPStateToStr(uint32(fr.Metrics.AdditionalMetrics.PktDrops.LatestState))
+ out["PktDropLatestDropCause"] = PktDropCauseToStr(fr.Metrics.AdditionalMetrics.PktDrops.LatestDropCause)
+ }
+ if !model.AllZeroIP(model.IP(fr.Metrics.AdditionalMetrics.TranslatedFlow.Daddr)) &&
+ !model.AllZeroIP(model.IP(fr.Metrics.AdditionalMetrics.TranslatedFlow.Saddr)) {
+ out["ZoneId"] = fr.Metrics.AdditionalMetrics.TranslatedFlow.ZoneId
+ out["XlatSrcPort"] = fr.Metrics.AdditionalMetrics.TranslatedFlow.Sport
+ out["XlatDstPort"] = fr.Metrics.AdditionalMetrics.TranslatedFlow.Dport
+ out["XlatSrcAddr"] = model.IP(fr.Metrics.AdditionalMetrics.TranslatedFlow.Saddr).String()
+ out["XlatDstAddr"] = model.IP(fr.Metrics.AdditionalMetrics.TranslatedFlow.Daddr).String()
+ out["XlatIcmpId"] = fr.Metrics.AdditionalMetrics.TranslatedFlow.IcmpId
+ }
+ }
+
+ if fr.TimeFlowRtt != 0 {
+ out["TimeFlowRttNs"] = fr.TimeFlowRtt.Nanoseconds()
+ }
+
+ if len(fr.NetworkMonitorEventsMD) != 0 {
+ out["NetworkEvents"] = fr.NetworkMonitorEventsMD
+ }
+
+ return out
+}
+
+// TCPStateToStr is based on kernel TCP state definition
+// https://elixir.bootlin.com/linux/v6.3/source/include/net/tcp_states.h#L12
+func TCPStateToStr(state uint32) string {
+ switch state {
+ case 1:
+ return "TCP_ESTABLISHED"
+ case 2:
+ return "TCP_SYN_SENT"
+ case 3:
+ return "TCP_SYN_RECV"
+ case 4:
+ return "TCP_FIN_WAIT1"
+ case 5:
+ return "TCP_FIN_WAIT2"
+ case 6:
+ return "TCP_CLOSE"
+ case 7:
+ return "TCP_CLOSE_WAIT"
+ case 8:
+ return "TCP_LAST_ACK"
+ case 9:
+ return "TCP_LISTEN"
+ case 10:
+ return "TCP_CLOSING"
+ case 11:
+ return "TCP_NEW_SYN_RECV"
+ }
+ return "TCP_INVALID_STATE"
+}
+
+// PktDropCauseToStr is based on kernel drop cause definition
+// https://elixir.bootlin.com/linux/latest/source/include/net/dropreason.h#L88
+// nolint:cyclop
+func PktDropCauseToStr(dropCause uint32) string {
+ switch dropCause {
+ case skbDropReasonSubSysCore + 2:
+ return "SKB_DROP_REASON_NOT_SPECIFIED"
+ case skbDropReasonSubSysCore + 3:
+ return "SKB_DROP_REASON_NO_SOCKET"
+ case skbDropReasonSubSysCore + 4:
+ return "SKB_DROP_REASON_PKT_TOO_SMALL"
+ case skbDropReasonSubSysCore + 5:
+ return "SKB_DROP_REASON_TCP_CSUM"
+ case skbDropReasonSubSysCore + 6:
+ return "SKB_DROP_REASON_SOCKET_FILTER"
+ case skbDropReasonSubSysCore + 7:
+ return "SKB_DROP_REASON_UDP_CSUM"
+ case skbDropReasonSubSysCore + 8:
+ return "SKB_DROP_REASON_NETFILTER_DROP"
+ case skbDropReasonSubSysCore + 9:
+ return "SKB_DROP_REASON_OTHERHOST"
+ case skbDropReasonSubSysCore + 10:
+ return "SKB_DROP_REASON_IP_CSUM"
+ case skbDropReasonSubSysCore + 11:
+ return "SKB_DROP_REASON_IP_INHDR"
+ case skbDropReasonSubSysCore + 12:
+ return "SKB_DROP_REASON_IP_RPFILTER"
+ case skbDropReasonSubSysCore + 13:
+ return "SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST"
+ case skbDropReasonSubSysCore + 14:
+ return "SKB_DROP_REASON_XFRM_POLICY"
+ case skbDropReasonSubSysCore + 15:
+ return "SKB_DROP_REASON_IP_NOPROTO"
+ case skbDropReasonSubSysCore + 16:
+ return "SKB_DROP_REASON_SOCKET_RCVBUFF"
+ case skbDropReasonSubSysCore + 17:
+ return "SKB_DROP_REASON_PROTO_MEM"
+ case skbDropReasonSubSysCore + 18:
+ return "SKB_DROP_REASON_TCP_MD5NOTFOUND"
+ case skbDropReasonSubSysCore + 19:
+ return "SKB_DROP_REASON_TCP_MD5UNEXPECTED"
+ case skbDropReasonSubSysCore + 20:
+ return "SKB_DROP_REASON_TCP_MD5FAILURE"
+ case skbDropReasonSubSysCore + 21:
+ return "SKB_DROP_REASON_SOCKET_BACKLOG"
+ case skbDropReasonSubSysCore + 22:
+ return "SKB_DROP_REASON_TCP_FLAGS"
+ case skbDropReasonSubSysCore + 23:
+ return "SKB_DROP_REASON_TCP_ZEROWINDOW"
+ case skbDropReasonSubSysCore + 24:
+ return "SKB_DROP_REASON_TCP_OLD_DATA"
+ case skbDropReasonSubSysCore + 25:
+ return "SKB_DROP_REASON_TCP_OVERWINDOW"
+ case skbDropReasonSubSysCore + 26:
+ return "SKB_DROP_REASON_TCP_OFOMERGE"
+ case skbDropReasonSubSysCore + 27:
+ return "SKB_DROP_REASON_TCP_RFC7323_PAWS"
+ case skbDropReasonSubSysCore + 28:
+ return "SKB_DROP_REASON_TCP_INVALID_SEQUENCE"
+ case skbDropReasonSubSysCore + 29:
+ return "SKB_DROP_REASON_TCP_RESET"
+ case skbDropReasonSubSysCore + 30:
+ return "SKB_DROP_REASON_TCP_INVALID_SYN"
+ case skbDropReasonSubSysCore + 31:
+ return "SKB_DROP_REASON_TCP_CLOSE"
+ case skbDropReasonSubSysCore + 32:
+ return "SKB_DROP_REASON_TCP_FASTOPEN"
+ case skbDropReasonSubSysCore + 33:
+ return "SKB_DROP_REASON_TCP_OLD_ACK"
+ case skbDropReasonSubSysCore + 34:
+ return "SKB_DROP_REASON_TCP_TOO_OLD_ACK"
+ case skbDropReasonSubSysCore + 35:
+ return "SKB_DROP_REASON_TCP_ACK_UNSENT_DATA"
+ case skbDropReasonSubSysCore + 36:
+ return "SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE"
+ case skbDropReasonSubSysCore + 37:
+ return "SKB_DROP_REASON_TCP_OFO_DROP"
+ case skbDropReasonSubSysCore + 38:
+ return "SKB_DROP_REASON_IP_OUTNOROUTES"
+ case skbDropReasonSubSysCore + 39:
+ return "SKB_DROP_REASON_BPF_CGROUP_EGRESS"
+ case skbDropReasonSubSysCore + 40:
+ return "SKB_DROP_REASON_IPV6DISABLED"
+ case skbDropReasonSubSysCore + 41:
+ return "SKB_DROP_REASON_NEIGH_CREATEFAIL"
+ case skbDropReasonSubSysCore + 42:
+ return "SKB_DROP_REASON_NEIGH_FAILED"
+ case skbDropReasonSubSysCore + 43:
+ return "SKB_DROP_REASON_NEIGH_QUEUEFULL"
+ case skbDropReasonSubSysCore + 44:
+ return "SKB_DROP_REASON_NEIGH_DEAD"
+ case skbDropReasonSubSysCore + 45:
+ return "SKB_DROP_REASON_TC_EGRESS"
+ case skbDropReasonSubSysCore + 46:
+ return "SKB_DROP_REASON_QDISC_DROP"
+ case skbDropReasonSubSysCore + 47:
+ return "SKB_DROP_REASON_CPU_BACKLOG"
+ case skbDropReasonSubSysCore + 48:
+ return "SKB_DROP_REASON_XDP"
+ case skbDropReasonSubSysCore + 49:
+ return "SKB_DROP_REASON_TC_INGRESS"
+ case skbDropReasonSubSysCore + 50:
+ return "SKB_DROP_REASON_UNHANDLED_PROTO"
+ case skbDropReasonSubSysCore + 51:
+ return "SKB_DROP_REASON_SKB_CSUM"
+ case skbDropReasonSubSysCore + 52:
+ return "SKB_DROP_REASON_SKB_GSO_SEG"
+ case skbDropReasonSubSysCore + 53:
+ return "SKB_DROP_REASON_SKB_UCOPY_FAULT"
+ case skbDropReasonSubSysCore + 54:
+ return "SKB_DROP_REASON_DEV_HDR"
+ case skbDropReasonSubSysCore + 55:
+ return "SKB_DROP_REASON_DEV_READY"
+ case skbDropReasonSubSysCore + 56:
+ return "SKB_DROP_REASON_FULL_RING"
+ case skbDropReasonSubSysCore + 57:
+ return "SKB_DROP_REASON_NOMEM"
+ case skbDropReasonSubSysCore + 58:
+ return "SKB_DROP_REASON_HDR_TRUNC"
+ case skbDropReasonSubSysCore + 59:
+ return "SKB_DROP_REASON_TAP_FILTER"
+ case skbDropReasonSubSysCore + 60:
+ return "SKB_DROP_REASON_TAP_TXFILTER"
+ case skbDropReasonSubSysCore + 61:
+ return "SKB_DROP_REASON_ICMP_CSUM"
+ case skbDropReasonSubSysCore + 62:
+ return "SKB_DROP_REASON_INVALID_PROTO"
+ case skbDropReasonSubSysCore + 63:
+ return "SKB_DROP_REASON_IP_INADDRERRORS"
+ case skbDropReasonSubSysCore + 64:
+ return "SKB_DROP_REASON_IP_INNOROUTES"
+ case skbDropReasonSubSysCore + 65:
+ return "SKB_DROP_REASON_PKT_TOO_BIG"
+ case skbDropReasonSubSysCore + 66:
+ return "SKB_DROP_REASON_DUP_FRAG"
+ case skbDropReasonSubSysCore + 67:
+ return "SKB_DROP_REASON_FRAG_REASM_TIMEOUT"
+ case skbDropReasonSubSysCore + 68:
+ return "SKB_DROP_REASON_FRAG_TOO_FAR"
+ case skbDropReasonSubSysCore + 69:
+ return "SKB_DROP_REASON_TCP_MINTTL"
+ case skbDropReasonSubSysCore + 70:
+ return "SKB_DROP_REASON_IPV6_BAD_EXTHDR"
+ case skbDropReasonSubSysCore + 71:
+ return "SKB_DROP_REASON_IPV6_NDISC_FRAG"
+ case skbDropReasonSubSysCore + 72:
+ return "SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT"
+ case skbDropReasonSubSysCore + 73:
+ return "SKB_DROP_REASON_IPV6_NDISC_BAD_CODE"
+ case skbDropReasonSubSysCore + 74:
+ return "SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS"
+ case skbDropReasonSubSysCore + 75:
+ return "SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST"
+ case skbDropReasonSubSysCore + 76:
+ return "SKB_DROP_REASON_QUEUE_PURGE"
+ case skbDropReasonSubSysCore + 77:
+ return "SKB_DROP_REASON_TC_COOKIE_ERROR"
+ case skbDropReasonSubSysCore + 78:
+ return "SKB_DROP_REASON_PACKET_SOCK_ERROR"
+ case skbDropReasonSubSysCore + 79:
+ return "SKB_DROP_REASON_TC_CHAIN_NOTFOUND"
+ case skbDropReasonSubSysCore + 80:
+ return "SKB_DROP_REASON_TC_RECLASSIFY_LOOP"
+
+ // ovs drop causes
+ // https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git/tree/net/openvswitch/drop.h
+ case skbDropReasonSubSysOpenVSwitch + 1:
+ return "OVS_DROP_LAST_ACTION"
+ case skbDropReasonSubSysOpenVSwitch + 2:
+ return "OVS_DROP_ACTION_ERROR"
+ case skbDropReasonSubSysOpenVSwitch + 3:
+ return "OVS_DROP_EXPLICIT"
+ case skbDropReasonSubSysOpenVSwitch + 4:
+ return "OVS_DROP_EXPLICIT_WITH_ERROR"
+ case skbDropReasonSubSysOpenVSwitch + 5:
+ return "OVS_DROP_METER"
+ case skbDropReasonSubSysOpenVSwitch + 6:
+ return "OVS_DROP_RECURSION_LIMIT"
+ case skbDropReasonSubSysOpenVSwitch + 7:
+ return "OVS_DROP_DEFERRED_LIMIT"
+ case skbDropReasonSubSysOpenVSwitch + 8:
+ return "OVS_DROP_FRAG_L2_TOO_LONG"
+ case skbDropReasonSubSysOpenVSwitch + 9:
+ return "OVS_DROP_FRAG_INVALID_PROTO"
+ case skbDropReasonSubSysOpenVSwitch + 10:
+ return "OVS_DROP_CONNTRACK"
+ case skbDropReasonSubSysOpenVSwitch + 11:
+ return "OVS_DROP_IP_TTL"
+ }
+ return "SKB_DROP_UNKNOWN_CAUSE"
+}
+
+// DNSRcodeToStr decodes the DNS flags response code bits and returns a string
+// https://datatracker.ietf.org/doc/html/rfc2929#section-2.3
+func DNSRcodeToStr(rcode uint32) string {
+ switch rcode {
+ case 0:
+ return "NoError"
+ case 1:
+ return "FormErr"
+ case 2:
+ return "ServFail"
+ case 3:
+ return "NXDomain"
+ case 4:
+ return "NotImp"
+ case 5:
+ return "Refused"
+ case 6:
+ return "YXDomain"
+ case 7:
+ return "YXRRSet"
+ case 8:
+ return "NXRRSet"
+ case 9:
+ return "NotAuth"
+ case 10:
+ return "NotZone"
+ case 16:
+ return "BADVERS"
+ case 17:
+ return "BADKEY"
+ case 18:
+ return "BADTIME"
+ case 19:
+ return "BADMODE"
+ case 20:
+ return "BADNAME"
+ case 21:
+ return "BADALG"
+ }
+ return "UnDefined"
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets/decode_protobuf_packets.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets/decode_protobuf_packets.go
new file mode 100644
index 000000000..ee8be8cdf
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets/decode_protobuf_packets.go
@@ -0,0 +1,76 @@
+package packets
+
+import (
+ "encoding/base64"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+
+ "github.com/gopacket/gopacket"
+ "github.com/gopacket/gopacket/layers"
+)
+
+func PacketToMap(pr *model.PacketRecord) config.GenericMap {
+ out := config.GenericMap{}
+
+ if pr == nil {
+ return out
+ }
+
+ packet := gopacket.NewPacket(pr.Stream, layers.LayerTypeEthernet, gopacket.Lazy)
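+ // Extract L2/L3/L4 and DNS fields from the decoded layers when they are present.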
+ if ethLayer := packet.Layer(layers.LayerTypeEthernet); ethLayer != nil {
+ eth, _ := ethLayer.(*layers.Ethernet)
+ out["SrcMac"] = eth.SrcMAC.String()
+ out["DstMac"] = eth.DstMAC.String()
+ }
+
+ if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+ tcp, _ := tcpLayer.(*layers.TCP)
+ out["SrcPort"] = tcp.SrcPort.String()
+ out["DstPort"] = tcp.DstPort.String()
+ } else if udpLayer := packet.Layer(layers.LayerTypeUDP); udpLayer != nil {
+ udp, _ := udpLayer.(*layers.UDP)
+ out["SrcPort"] = udp.SrcPort.String()
+ out["DstPort"] = udp.DstPort.String()
+ } else if sctpLayer := packet.Layer(layers.LayerTypeSCTP); sctpLayer != nil {
+ sctp, _ := sctpLayer.(*layers.SCTP)
+ out["SrcPort"] = sctp.SrcPort.String()
+ out["DstPort"] = sctp.DstPort.String()
+ }
+
+ if ipv4Layer := packet.Layer(layers.LayerTypeIPv4); ipv4Layer != nil {
+ ipv4, _ := ipv4Layer.(*layers.IPv4)
+ out["SrcAddr"] = ipv4.SrcIP.String()
+ out["DstAddr"] = ipv4.DstIP.String()
+ out["Proto"] = ipv4.Protocol
+ } else if ipv6Layer := packet.Layer(layers.LayerTypeIPv6); ipv6Layer != nil {
+ ipv6, _ := ipv6Layer.(*layers.IPv6)
+ out["SrcAddr"] = ipv6.SrcIP.String()
+ out["DstAddr"] = ipv6.DstIP.String()
+ out["Proto"] = ipv6.NextHeader
+ }
+
+ if icmpv4Layer := packet.Layer(layers.LayerTypeICMPv4); icmpv4Layer != nil {
+ icmpv4, _ := icmpv4Layer.(*layers.ICMPv4)
+ out["IcmpType"] = icmpv4.TypeCode.Type()
+ out["IcmpCode"] = icmpv4.TypeCode.Code()
+ } else if icmpv6Layer := packet.Layer(layers.LayerTypeICMPv6); icmpv6Layer != nil {
+ icmpv6, _ := icmpv6Layer.(*layers.ICMPv6)
+ out["IcmpType"] = icmpv6.TypeCode.Type()
+ out["IcmpCode"] = icmpv6.TypeCode.Code()
+ }
+
+ if dnsLayer := packet.Layer(layers.LayerTypeDNS); dnsLayer != nil {
+ dns, _ := dnsLayer.(*layers.DNS)
+ out["DnsId"] = dns.ID
+ out["DnsFlagsResponseCode"] = dns.ResponseCode.String()
+ //TODO: add DNS questions / answers / authorities
+ }
+
+ out["Bytes"] = len(pr.Stream)
+ // Data is base64 encoded to avoid marshal / unmarshal issues
+ out["Data"] = base64.StdEncoding.EncodeToString(packet.Data())
+ out["Time"] = pr.Time.Unix()
+
+ return out
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.go
new file mode 100644
index 000000000..f12dda23f
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.go
@@ -0,0 +1,338 @@
+// Code generated by bpf2go; DO NOT EDIT.
+//go:build arm64
+
+package ebpf
+
+import (
+ "bytes"
+ _ "embed"
+ "fmt"
+ "io"
+
+ "github.com/cilium/ebpf"
+)
+
+type BpfAdditionalMetrics struct {
+ DnsRecord BpfDnsRecordT
+ PktDrops BpfPktDropsT
+ FlowRtt uint64
+ NetworkEventsIdx uint8
+ NetworkEvents [4][8]uint8
+ _ [1]byte
+ TranslatedFlow BpfTranslatedFlowT
+ _ [6]byte
+}
+
+type BpfDirectionT uint32
+
+const (
+ BpfDirectionTINGRESS BpfDirectionT = 0
+ BpfDirectionTEGRESS BpfDirectionT = 1
+ BpfDirectionTMAX_DIRECTION BpfDirectionT = 2
+)
+
+type BpfDnsFlowId struct {
+ SrcPort uint16
+ DstPort uint16
+ SrcIp [16]uint8
+ DstIp [16]uint8
+ Id uint16
+ Protocol uint8
+ _ [1]byte
+}
+
+type BpfDnsRecordT struct {
+ Id uint16
+ Flags uint16
+ _ [4]byte
+ Latency uint64
+ Errno uint8
+ _ [7]byte
+}
+
+type BpfFilterActionT uint32
+
+const (
+ BpfFilterActionTACCEPT BpfFilterActionT = 0
+ BpfFilterActionTREJECT BpfFilterActionT = 1
+ BpfFilterActionTMAX_FILTER_ACTIONS BpfFilterActionT = 2
+)
+
+type BpfFilterKeyT struct {
+ PrefixLen uint32
+ IpData [16]uint8
+}
+
+type BpfFilterValueT struct {
+ Protocol uint8
+ DstPortStart uint16
+ DstPortEnd uint16
+ DstPort1 uint16
+ DstPort2 uint16
+ SrcPortStart uint16
+ SrcPortEnd uint16
+ SrcPort1 uint16
+ SrcPort2 uint16
+ PortStart uint16
+ PortEnd uint16
+ Port1 uint16
+ Port2 uint16
+ IcmpType uint8
+ IcmpCode uint8
+ Direction BpfDirectionT
+ Action BpfFilterActionT
+ TcpFlags BpfTcpFlagsT
+ FilterDrops uint8
+ Sample uint32
+ Ip [16]uint8
+}
+
+type BpfFlowId BpfFlowIdT
+
+type BpfFlowIdT struct {
+ Direction uint8
+ SrcIp [16]uint8
+ DstIp [16]uint8
+ _ [1]byte
+ SrcPort uint16
+ DstPort uint16
+ TransportProtocol uint8
+ IcmpType uint8
+ IcmpCode uint8
+ _ [3]byte
+ IfIndex uint32
+}
+
+type BpfFlowMetrics BpfFlowMetricsT
+
+type BpfFlowMetricsT struct {
+ Lock struct{ Val uint32 }
+ EthProtocol uint16
+ SrcMac [6]uint8
+ DstMac [6]uint8
+ _ [2]byte
+ Packets uint32
+ Bytes uint64
+ StartMonoTimeTs uint64
+ EndMonoTimeTs uint64
+ Flags uint16
+ Errno uint8
+ Dscp uint8
+ Sampling uint32
+}
+
+type BpfFlowRecordT struct {
+ Id BpfFlowId
+ Metrics BpfFlowMetrics
+}
+
+type BpfGlobalCountersKeyT uint32
+
+const (
+ BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED BpfGlobalCountersKeyT = 0
+ BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS BpfGlobalCountersKeyT = 1
+ BpfGlobalCountersKeyTFILTER_REJECT BpfGlobalCountersKeyT = 2
+ BpfGlobalCountersKeyTFILTER_ACCEPT BpfGlobalCountersKeyT = 3
+ BpfGlobalCountersKeyTFILTER_NOMATCH BpfGlobalCountersKeyT = 4
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR BpfGlobalCountersKeyT = 5
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH BpfGlobalCountersKeyT = 6
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS BpfGlobalCountersKeyT = 7
+ BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD BpfGlobalCountersKeyT = 8
+ BpfGlobalCountersKeyTMAX_COUNTERS BpfGlobalCountersKeyT = 9
+)
+
+type BpfPktDropsT struct {
+ Packets uint32
+ _ [4]byte
+ Bytes uint64
+ LatestFlags uint16
+ LatestState uint8
+ _ [1]byte
+ LatestDropCause uint32
+}
+
+type BpfTcpFlagsT uint32
+
+const (
+ BpfTcpFlagsTFIN_FLAG BpfTcpFlagsT = 1
+ BpfTcpFlagsTSYN_FLAG BpfTcpFlagsT = 2
+ BpfTcpFlagsTRST_FLAG BpfTcpFlagsT = 4
+ BpfTcpFlagsTPSH_FLAG BpfTcpFlagsT = 8
+ BpfTcpFlagsTACK_FLAG BpfTcpFlagsT = 16
+ BpfTcpFlagsTURG_FLAG BpfTcpFlagsT = 32
+ BpfTcpFlagsTECE_FLAG BpfTcpFlagsT = 64
+ BpfTcpFlagsTCWR_FLAG BpfTcpFlagsT = 128
+ BpfTcpFlagsTSYN_ACK_FLAG BpfTcpFlagsT = 256
+ BpfTcpFlagsTFIN_ACK_FLAG BpfTcpFlagsT = 512
+ BpfTcpFlagsTRST_ACK_FLAG BpfTcpFlagsT = 1024
+)
+
+type BpfTranslatedFlowT struct {
+ Saddr [16]uint8
+ Daddr [16]uint8
+ Sport uint16
+ Dport uint16
+ ZoneId uint16
+ IcmpId uint8
+ _ [1]byte
+}
+
+// LoadBpf returns the embedded CollectionSpec for Bpf.
+func LoadBpf() (*ebpf.CollectionSpec, error) {
+ reader := bytes.NewReader(_BpfBytes)
+ spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+ if err != nil {
+ return nil, fmt.Errorf("can't load Bpf: %w", err)
+ }
+
+ return spec, err
+}
+
+// LoadBpfObjects loads Bpf and converts it into a struct.
+//
+// The following types are suitable as obj argument:
+//
+// *BpfObjects
+// *BpfPrograms
+// *BpfMaps
+//
+// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
+func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
+ spec, err := LoadBpf()
+ if err != nil {
+ return err
+ }
+
+ return spec.LoadAndAssign(obj, opts)
+}
+
+// BpfSpecs contains maps and programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfSpecs struct {
+ BpfProgramSpecs
+ BpfMapSpecs
+}
+
+// BpfSpecs contains programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfProgramSpecs struct {
+ KfreeSkb *ebpf.ProgramSpec `ebpf:"kfree_skb"`
+ RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"`
+ TcEgressFlowParse *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"`
+ TcEgressPcaParse *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"`
+ TcIngressFlowParse *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"`
+ TcIngressPcaParse *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"`
+ TcpRcvFentry *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"`
+ TcpRcvKprobe *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"`
+ TcxEgressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"`
+ TcxEgressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"`
+ TcxIngressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *ebpf.ProgramSpec `ebpf:"track_nat_manip_pkt"`
+}
+
+// BpfMapSpecs contains maps before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfMapSpecs struct {
+ AdditionalFlowMetrics *ebpf.MapSpec `ebpf:"additional_flow_metrics"`
+ AggregatedFlows *ebpf.MapSpec `ebpf:"aggregated_flows"`
+ DirectFlows *ebpf.MapSpec `ebpf:"direct_flows"`
+ DnsFlows *ebpf.MapSpec `ebpf:"dns_flows"`
+ FilterMap *ebpf.MapSpec `ebpf:"filter_map"`
+ GlobalCounters *ebpf.MapSpec `ebpf:"global_counters"`
+ PacketRecord *ebpf.MapSpec `ebpf:"packet_record"`
+}
+
+// BpfObjects contains all objects after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfObjects struct {
+ BpfPrograms
+ BpfMaps
+}
+
+func (o *BpfObjects) Close() error {
+ return _BpfClose(
+ &o.BpfPrograms,
+ &o.BpfMaps,
+ )
+}
+
+// BpfMaps contains all maps after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfMaps struct {
+ AdditionalFlowMetrics *ebpf.Map `ebpf:"additional_flow_metrics"`
+ AggregatedFlows *ebpf.Map `ebpf:"aggregated_flows"`
+ DirectFlows *ebpf.Map `ebpf:"direct_flows"`
+ DnsFlows *ebpf.Map `ebpf:"dns_flows"`
+ FilterMap *ebpf.Map `ebpf:"filter_map"`
+ GlobalCounters *ebpf.Map `ebpf:"global_counters"`
+ PacketRecord *ebpf.Map `ebpf:"packet_record"`
+}
+
+func (m *BpfMaps) Close() error {
+ return _BpfClose(
+ m.AdditionalFlowMetrics,
+ m.AggregatedFlows,
+ m.DirectFlows,
+ m.DnsFlows,
+ m.FilterMap,
+ m.GlobalCounters,
+ m.PacketRecord,
+ )
+}
+
+// BpfPrograms contains all programs after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfPrograms struct {
+ KfreeSkb *ebpf.Program `ebpf:"kfree_skb"`
+ RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"`
+ TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"`
+ TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"`
+ TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"`
+ TcpRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"`
+ TcpRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"`
+ TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"`
+ TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"`
+ TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *ebpf.Program `ebpf:"track_nat_manip_pkt"`
+}
+
+func (p *BpfPrograms) Close() error {
+ return _BpfClose(
+ p.KfreeSkb,
+ p.RhNetworkEventsMonitoring,
+ p.TcEgressFlowParse,
+ p.TcEgressPcaParse,
+ p.TcIngressFlowParse,
+ p.TcIngressPcaParse,
+ p.TcpRcvFentry,
+ p.TcpRcvKprobe,
+ p.TcxEgressFlowParse,
+ p.TcxEgressPcaParse,
+ p.TcxIngressFlowParse,
+ p.TcxIngressPcaParse,
+ p.TrackNatManipPkt,
+ )
+}
+
+func _BpfClose(closers ...io.Closer) error {
+ for _, closer := range closers {
+ if err := closer.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Do not access this directly.
+//
+//go:embed bpf_arm64_bpfel.o
+var _BpfBytes []byte
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.o
new file mode 100644
index 000000000..240486c52
Binary files /dev/null and b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.o differ
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.go
new file mode 100644
index 000000000..a8b825750
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.go
@@ -0,0 +1,338 @@
+// Code generated by bpf2go; DO NOT EDIT.
+//go:build ppc64le
+
+package ebpf
+
+import (
+ "bytes"
+ _ "embed"
+ "fmt"
+ "io"
+
+ "github.com/cilium/ebpf"
+)
+
+type BpfAdditionalMetrics struct {
+ DnsRecord BpfDnsRecordT
+ PktDrops BpfPktDropsT
+ FlowRtt uint64
+ NetworkEventsIdx uint8
+ NetworkEvents [4][8]uint8
+ _ [1]byte
+ TranslatedFlow BpfTranslatedFlowT
+ _ [6]byte
+}
+
+type BpfDirectionT uint32
+
+const (
+ BpfDirectionTINGRESS BpfDirectionT = 0
+ BpfDirectionTEGRESS BpfDirectionT = 1
+ BpfDirectionTMAX_DIRECTION BpfDirectionT = 2
+)
+
+type BpfDnsFlowId struct {
+ SrcPort uint16
+ DstPort uint16
+ SrcIp [16]uint8
+ DstIp [16]uint8
+ Id uint16
+ Protocol uint8
+ _ [1]byte
+}
+
+type BpfDnsRecordT struct {
+ Id uint16
+ Flags uint16
+ _ [4]byte
+ Latency uint64
+ Errno uint8
+ _ [7]byte
+}
+
+type BpfFilterActionT uint32
+
+const (
+ BpfFilterActionTACCEPT BpfFilterActionT = 0
+ BpfFilterActionTREJECT BpfFilterActionT = 1
+ BpfFilterActionTMAX_FILTER_ACTIONS BpfFilterActionT = 2
+)
+
+type BpfFilterKeyT struct {
+ PrefixLen uint32
+ IpData [16]uint8
+}
+
+type BpfFilterValueT struct {
+ Protocol uint8
+ DstPortStart uint16
+ DstPortEnd uint16
+ DstPort1 uint16
+ DstPort2 uint16
+ SrcPortStart uint16
+ SrcPortEnd uint16
+ SrcPort1 uint16
+ SrcPort2 uint16
+ PortStart uint16
+ PortEnd uint16
+ Port1 uint16
+ Port2 uint16
+ IcmpType uint8
+ IcmpCode uint8
+ Direction BpfDirectionT
+ Action BpfFilterActionT
+ TcpFlags BpfTcpFlagsT
+ FilterDrops uint8
+ Sample uint32
+ Ip [16]uint8
+}
+
+type BpfFlowId BpfFlowIdT
+
+type BpfFlowIdT struct {
+ Direction uint8
+ SrcIp [16]uint8
+ DstIp [16]uint8
+ _ [1]byte
+ SrcPort uint16
+ DstPort uint16
+ TransportProtocol uint8
+ IcmpType uint8
+ IcmpCode uint8
+ _ [3]byte
+ IfIndex uint32
+}
+
+type BpfFlowMetrics BpfFlowMetricsT
+
+type BpfFlowMetricsT struct {
+ Lock struct{ Val uint32 }
+ EthProtocol uint16
+ SrcMac [6]uint8
+ DstMac [6]uint8
+ _ [2]byte
+ Packets uint32
+ Bytes uint64
+ StartMonoTimeTs uint64
+ EndMonoTimeTs uint64
+ Flags uint16
+ Errno uint8
+ Dscp uint8
+ Sampling uint32
+}
+
+type BpfFlowRecordT struct {
+ Id BpfFlowId
+ Metrics BpfFlowMetrics
+}
+
+type BpfGlobalCountersKeyT uint32
+
+const (
+ BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED BpfGlobalCountersKeyT = 0
+ BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS BpfGlobalCountersKeyT = 1
+ BpfGlobalCountersKeyTFILTER_REJECT BpfGlobalCountersKeyT = 2
+ BpfGlobalCountersKeyTFILTER_ACCEPT BpfGlobalCountersKeyT = 3
+ BpfGlobalCountersKeyTFILTER_NOMATCH BpfGlobalCountersKeyT = 4
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR BpfGlobalCountersKeyT = 5
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH BpfGlobalCountersKeyT = 6
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS BpfGlobalCountersKeyT = 7
+ BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD BpfGlobalCountersKeyT = 8
+ BpfGlobalCountersKeyTMAX_COUNTERS BpfGlobalCountersKeyT = 9
+)
+
+type BpfPktDropsT struct {
+ Packets uint32
+ _ [4]byte
+ Bytes uint64
+ LatestFlags uint16
+ LatestState uint8
+ _ [1]byte
+ LatestDropCause uint32
+}
+
+type BpfTcpFlagsT uint32
+
+const (
+ BpfTcpFlagsTFIN_FLAG BpfTcpFlagsT = 1
+ BpfTcpFlagsTSYN_FLAG BpfTcpFlagsT = 2
+ BpfTcpFlagsTRST_FLAG BpfTcpFlagsT = 4
+ BpfTcpFlagsTPSH_FLAG BpfTcpFlagsT = 8
+ BpfTcpFlagsTACK_FLAG BpfTcpFlagsT = 16
+ BpfTcpFlagsTURG_FLAG BpfTcpFlagsT = 32
+ BpfTcpFlagsTECE_FLAG BpfTcpFlagsT = 64
+ BpfTcpFlagsTCWR_FLAG BpfTcpFlagsT = 128
+ BpfTcpFlagsTSYN_ACK_FLAG BpfTcpFlagsT = 256
+ BpfTcpFlagsTFIN_ACK_FLAG BpfTcpFlagsT = 512
+ BpfTcpFlagsTRST_ACK_FLAG BpfTcpFlagsT = 1024
+)
+
+type BpfTranslatedFlowT struct {
+ Saddr [16]uint8
+ Daddr [16]uint8
+ Sport uint16
+ Dport uint16
+ ZoneId uint16
+ IcmpId uint8
+ _ [1]byte
+}
+
+// LoadBpf returns the embedded CollectionSpec for Bpf.
+func LoadBpf() (*ebpf.CollectionSpec, error) {
+ reader := bytes.NewReader(_BpfBytes)
+ spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+ if err != nil {
+ return nil, fmt.Errorf("can't load Bpf: %w", err)
+ }
+
+ return spec, err
+}
+
+// LoadBpfObjects loads Bpf and converts it into a struct.
+//
+// The following types are suitable as obj argument:
+//
+// *BpfObjects
+// *BpfPrograms
+// *BpfMaps
+//
+// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
+func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
+ spec, err := LoadBpf()
+ if err != nil {
+ return err
+ }
+
+ return spec.LoadAndAssign(obj, opts)
+}
+
+// BpfSpecs contains maps and programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfSpecs struct {
+ BpfProgramSpecs
+ BpfMapSpecs
+}
+
+// BpfProgramSpecs contains programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfProgramSpecs struct {
+ KfreeSkb *ebpf.ProgramSpec `ebpf:"kfree_skb"`
+ RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"`
+ TcEgressFlowParse *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"`
+ TcEgressPcaParse *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"`
+ TcIngressFlowParse *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"`
+ TcIngressPcaParse *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"`
+ TcpRcvFentry *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"`
+ TcpRcvKprobe *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"`
+ TcxEgressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"`
+ TcxEgressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"`
+ TcxIngressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *ebpf.ProgramSpec `ebpf:"track_nat_manip_pkt"`
+}
+
+// BpfMapSpecs contains maps before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfMapSpecs struct {
+ AdditionalFlowMetrics *ebpf.MapSpec `ebpf:"additional_flow_metrics"`
+ AggregatedFlows *ebpf.MapSpec `ebpf:"aggregated_flows"`
+ DirectFlows *ebpf.MapSpec `ebpf:"direct_flows"`
+ DnsFlows *ebpf.MapSpec `ebpf:"dns_flows"`
+ FilterMap *ebpf.MapSpec `ebpf:"filter_map"`
+ GlobalCounters *ebpf.MapSpec `ebpf:"global_counters"`
+ PacketRecord *ebpf.MapSpec `ebpf:"packet_record"`
+}
+
+// BpfObjects contains all objects after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfObjects struct {
+ BpfPrograms
+ BpfMaps
+}
+
+func (o *BpfObjects) Close() error {
+ return _BpfClose(
+ &o.BpfPrograms,
+ &o.BpfMaps,
+ )
+}
+
+// BpfMaps contains all maps after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfMaps struct {
+ AdditionalFlowMetrics *ebpf.Map `ebpf:"additional_flow_metrics"`
+ AggregatedFlows *ebpf.Map `ebpf:"aggregated_flows"`
+ DirectFlows *ebpf.Map `ebpf:"direct_flows"`
+ DnsFlows *ebpf.Map `ebpf:"dns_flows"`
+ FilterMap *ebpf.Map `ebpf:"filter_map"`
+ GlobalCounters *ebpf.Map `ebpf:"global_counters"`
+ PacketRecord *ebpf.Map `ebpf:"packet_record"`
+}
+
+func (m *BpfMaps) Close() error {
+ return _BpfClose(
+ m.AdditionalFlowMetrics,
+ m.AggregatedFlows,
+ m.DirectFlows,
+ m.DnsFlows,
+ m.FilterMap,
+ m.GlobalCounters,
+ m.PacketRecord,
+ )
+}
+
+// BpfPrograms contains all programs after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfPrograms struct {
+ KfreeSkb *ebpf.Program `ebpf:"kfree_skb"`
+ RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"`
+ TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"`
+ TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"`
+ TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"`
+ TcpRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"`
+ TcpRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"`
+ TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"`
+ TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"`
+ TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *ebpf.Program `ebpf:"track_nat_manip_pkt"`
+}
+
+func (p *BpfPrograms) Close() error {
+ return _BpfClose(
+ p.KfreeSkb,
+ p.RhNetworkEventsMonitoring,
+ p.TcEgressFlowParse,
+ p.TcEgressPcaParse,
+ p.TcIngressFlowParse,
+ p.TcIngressPcaParse,
+ p.TcpRcvFentry,
+ p.TcpRcvKprobe,
+ p.TcxEgressFlowParse,
+ p.TcxEgressPcaParse,
+ p.TcxIngressFlowParse,
+ p.TcxIngressPcaParse,
+ p.TrackNatManipPkt,
+ )
+}
+
+func _BpfClose(closers ...io.Closer) error {
+ for _, closer := range closers {
+ if err := closer.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Do not access this directly.
+//
+//go:embed bpf_powerpc_bpfel.o
+var _BpfBytes []byte
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.o
new file mode 100644
index 000000000..fae0677f0
Binary files /dev/null and b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.o differ
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.go
new file mode 100644
index 000000000..3372ed20b
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.go
@@ -0,0 +1,338 @@
+// Code generated by bpf2go; DO NOT EDIT.
+//go:build s390x
+
+package ebpf
+
+import (
+ "bytes"
+ _ "embed"
+ "fmt"
+ "io"
+
+ "github.com/cilium/ebpf"
+)
+
+type BpfAdditionalMetrics struct {
+ DnsRecord BpfDnsRecordT
+ PktDrops BpfPktDropsT
+ FlowRtt uint64
+ NetworkEventsIdx uint8
+ NetworkEvents [4][8]uint8
+ _ [1]byte
+ TranslatedFlow BpfTranslatedFlowT
+ _ [6]byte
+}
+
+type BpfDirectionT uint32
+
+const (
+ BpfDirectionTINGRESS BpfDirectionT = 0
+ BpfDirectionTEGRESS BpfDirectionT = 1
+ BpfDirectionTMAX_DIRECTION BpfDirectionT = 2
+)
+
+type BpfDnsFlowId struct {
+ SrcPort uint16
+ DstPort uint16
+ SrcIp [16]uint8
+ DstIp [16]uint8
+ Id uint16
+ Protocol uint8
+ _ [1]byte
+}
+
+type BpfDnsRecordT struct {
+ Id uint16
+ Flags uint16
+ _ [4]byte
+ Latency uint64
+ Errno uint8
+ _ [7]byte
+}
+
+type BpfFilterActionT uint32
+
+const (
+ BpfFilterActionTACCEPT BpfFilterActionT = 0
+ BpfFilterActionTREJECT BpfFilterActionT = 1
+ BpfFilterActionTMAX_FILTER_ACTIONS BpfFilterActionT = 2
+)
+
+type BpfFilterKeyT struct {
+ PrefixLen uint32
+ IpData [16]uint8
+}
+
+type BpfFilterValueT struct {
+ Protocol uint8
+ DstPortStart uint16
+ DstPortEnd uint16
+ DstPort1 uint16
+ DstPort2 uint16
+ SrcPortStart uint16
+ SrcPortEnd uint16
+ SrcPort1 uint16
+ SrcPort2 uint16
+ PortStart uint16
+ PortEnd uint16
+ Port1 uint16
+ Port2 uint16
+ IcmpType uint8
+ IcmpCode uint8
+ Direction BpfDirectionT
+ Action BpfFilterActionT
+ TcpFlags BpfTcpFlagsT
+ FilterDrops uint8
+ Sample uint32
+ Ip [16]uint8
+}
+
+type BpfFlowId BpfFlowIdT
+
+type BpfFlowIdT struct {
+ Direction uint8
+ SrcIp [16]uint8
+ DstIp [16]uint8
+ _ [1]byte
+ SrcPort uint16
+ DstPort uint16
+ TransportProtocol uint8
+ IcmpType uint8
+ IcmpCode uint8
+ _ [3]byte
+ IfIndex uint32
+}
+
+type BpfFlowMetrics BpfFlowMetricsT
+
+type BpfFlowMetricsT struct {
+ Lock struct{ Val uint32 }
+ EthProtocol uint16
+ SrcMac [6]uint8
+ DstMac [6]uint8
+ _ [2]byte
+ Packets uint32
+ Bytes uint64
+ StartMonoTimeTs uint64
+ EndMonoTimeTs uint64
+ Flags uint16
+ Errno uint8
+ Dscp uint8
+ Sampling uint32
+}
+
+type BpfFlowRecordT struct {
+ Id BpfFlowId
+ Metrics BpfFlowMetrics
+}
+
+type BpfGlobalCountersKeyT uint32
+
+const (
+ BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED BpfGlobalCountersKeyT = 0
+ BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS BpfGlobalCountersKeyT = 1
+ BpfGlobalCountersKeyTFILTER_REJECT BpfGlobalCountersKeyT = 2
+ BpfGlobalCountersKeyTFILTER_ACCEPT BpfGlobalCountersKeyT = 3
+ BpfGlobalCountersKeyTFILTER_NOMATCH BpfGlobalCountersKeyT = 4
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR BpfGlobalCountersKeyT = 5
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH BpfGlobalCountersKeyT = 6
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS BpfGlobalCountersKeyT = 7
+ BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD BpfGlobalCountersKeyT = 8
+ BpfGlobalCountersKeyTMAX_COUNTERS BpfGlobalCountersKeyT = 9
+)
+
+type BpfPktDropsT struct {
+ Packets uint32
+ _ [4]byte
+ Bytes uint64
+ LatestFlags uint16
+ LatestState uint8
+ _ [1]byte
+ LatestDropCause uint32
+}
+
+type BpfTcpFlagsT uint32
+
+const (
+ BpfTcpFlagsTFIN_FLAG BpfTcpFlagsT = 1
+ BpfTcpFlagsTSYN_FLAG BpfTcpFlagsT = 2
+ BpfTcpFlagsTRST_FLAG BpfTcpFlagsT = 4
+ BpfTcpFlagsTPSH_FLAG BpfTcpFlagsT = 8
+ BpfTcpFlagsTACK_FLAG BpfTcpFlagsT = 16
+ BpfTcpFlagsTURG_FLAG BpfTcpFlagsT = 32
+ BpfTcpFlagsTECE_FLAG BpfTcpFlagsT = 64
+ BpfTcpFlagsTCWR_FLAG BpfTcpFlagsT = 128
+ BpfTcpFlagsTSYN_ACK_FLAG BpfTcpFlagsT = 256
+ BpfTcpFlagsTFIN_ACK_FLAG BpfTcpFlagsT = 512
+ BpfTcpFlagsTRST_ACK_FLAG BpfTcpFlagsT = 1024
+)
+
+type BpfTranslatedFlowT struct {
+ Saddr [16]uint8
+ Daddr [16]uint8
+ Sport uint16
+ Dport uint16
+ ZoneId uint16
+ IcmpId uint8
+ _ [1]byte
+}
+
+// LoadBpf returns the embedded CollectionSpec for Bpf.
+func LoadBpf() (*ebpf.CollectionSpec, error) {
+ reader := bytes.NewReader(_BpfBytes)
+ spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+ if err != nil {
+ return nil, fmt.Errorf("can't load Bpf: %w", err)
+ }
+
+ return spec, err
+}
+
+// LoadBpfObjects loads Bpf and converts it into a struct.
+//
+// The following types are suitable as obj argument:
+//
+// *BpfObjects
+// *BpfPrograms
+// *BpfMaps
+//
+// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
+func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
+ spec, err := LoadBpf()
+ if err != nil {
+ return err
+ }
+
+ return spec.LoadAndAssign(obj, opts)
+}
+
+// BpfSpecs contains maps and programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfSpecs struct {
+ BpfProgramSpecs
+ BpfMapSpecs
+}
+
+// BpfProgramSpecs contains programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfProgramSpecs struct {
+ KfreeSkb *ebpf.ProgramSpec `ebpf:"kfree_skb"`
+ RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"`
+ TcEgressFlowParse *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"`
+ TcEgressPcaParse *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"`
+ TcIngressFlowParse *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"`
+ TcIngressPcaParse *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"`
+ TcpRcvFentry *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"`
+ TcpRcvKprobe *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"`
+ TcxEgressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"`
+ TcxEgressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"`
+ TcxIngressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *ebpf.ProgramSpec `ebpf:"track_nat_manip_pkt"`
+}
+
+// BpfMapSpecs contains maps before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfMapSpecs struct {
+ AdditionalFlowMetrics *ebpf.MapSpec `ebpf:"additional_flow_metrics"`
+ AggregatedFlows *ebpf.MapSpec `ebpf:"aggregated_flows"`
+ DirectFlows *ebpf.MapSpec `ebpf:"direct_flows"`
+ DnsFlows *ebpf.MapSpec `ebpf:"dns_flows"`
+ FilterMap *ebpf.MapSpec `ebpf:"filter_map"`
+ GlobalCounters *ebpf.MapSpec `ebpf:"global_counters"`
+ PacketRecord *ebpf.MapSpec `ebpf:"packet_record"`
+}
+
+// BpfObjects contains all objects after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfObjects struct {
+ BpfPrograms
+ BpfMaps
+}
+
+func (o *BpfObjects) Close() error {
+ return _BpfClose(
+ &o.BpfPrograms,
+ &o.BpfMaps,
+ )
+}
+
+// BpfMaps contains all maps after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfMaps struct {
+ AdditionalFlowMetrics *ebpf.Map `ebpf:"additional_flow_metrics"`
+ AggregatedFlows *ebpf.Map `ebpf:"aggregated_flows"`
+ DirectFlows *ebpf.Map `ebpf:"direct_flows"`
+ DnsFlows *ebpf.Map `ebpf:"dns_flows"`
+ FilterMap *ebpf.Map `ebpf:"filter_map"`
+ GlobalCounters *ebpf.Map `ebpf:"global_counters"`
+ PacketRecord *ebpf.Map `ebpf:"packet_record"`
+}
+
+func (m *BpfMaps) Close() error {
+ return _BpfClose(
+ m.AdditionalFlowMetrics,
+ m.AggregatedFlows,
+ m.DirectFlows,
+ m.DnsFlows,
+ m.FilterMap,
+ m.GlobalCounters,
+ m.PacketRecord,
+ )
+}
+
+// BpfPrograms contains all programs after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfPrograms struct {
+ KfreeSkb *ebpf.Program `ebpf:"kfree_skb"`
+ RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"`
+ TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"`
+ TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"`
+ TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"`
+ TcpRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"`
+ TcpRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"`
+ TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"`
+ TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"`
+ TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *ebpf.Program `ebpf:"track_nat_manip_pkt"`
+}
+
+func (p *BpfPrograms) Close() error {
+ return _BpfClose(
+ p.KfreeSkb,
+ p.RhNetworkEventsMonitoring,
+ p.TcEgressFlowParse,
+ p.TcEgressPcaParse,
+ p.TcIngressFlowParse,
+ p.TcIngressPcaParse,
+ p.TcpRcvFentry,
+ p.TcpRcvKprobe,
+ p.TcxEgressFlowParse,
+ p.TcxEgressPcaParse,
+ p.TcxIngressFlowParse,
+ p.TcxIngressPcaParse,
+ p.TrackNatManipPkt,
+ )
+}
+
+func _BpfClose(closers ...io.Closer) error {
+ for _, closer := range closers {
+ if err := closer.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Do not access this directly.
+//
+//go:embed bpf_s390_bpfeb.o
+var _BpfBytes []byte
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.o
new file mode 100644
index 000000000..aae1b0003
Binary files /dev/null and b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.o differ
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go
new file mode 100644
index 000000000..a5839acdd
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go
@@ -0,0 +1,338 @@
+// Code generated by bpf2go; DO NOT EDIT.
+//go:build 386 || amd64
+
+package ebpf
+
+import (
+ "bytes"
+ _ "embed"
+ "fmt"
+ "io"
+
+ "github.com/cilium/ebpf"
+)
+
+type BpfAdditionalMetrics struct {
+ DnsRecord BpfDnsRecordT
+ PktDrops BpfPktDropsT
+ FlowRtt uint64
+ NetworkEventsIdx uint8
+ NetworkEvents [4][8]uint8
+ _ [1]byte
+ TranslatedFlow BpfTranslatedFlowT
+ _ [6]byte
+}
+
+type BpfDirectionT uint32
+
+const (
+ BpfDirectionTINGRESS BpfDirectionT = 0
+ BpfDirectionTEGRESS BpfDirectionT = 1
+ BpfDirectionTMAX_DIRECTION BpfDirectionT = 2
+)
+
+type BpfDnsFlowId struct {
+ SrcPort uint16
+ DstPort uint16
+ SrcIp [16]uint8
+ DstIp [16]uint8
+ Id uint16
+ Protocol uint8
+ _ [1]byte
+}
+
+type BpfDnsRecordT struct {
+ Id uint16
+ Flags uint16
+ _ [4]byte
+ Latency uint64
+ Errno uint8
+ _ [7]byte
+}
+
+type BpfFilterActionT uint32
+
+const (
+ BpfFilterActionTACCEPT BpfFilterActionT = 0
+ BpfFilterActionTREJECT BpfFilterActionT = 1
+ BpfFilterActionTMAX_FILTER_ACTIONS BpfFilterActionT = 2
+)
+
+type BpfFilterKeyT struct {
+ PrefixLen uint32
+ IpData [16]uint8
+}
+
+type BpfFilterValueT struct {
+ Protocol uint8
+ DstPortStart uint16
+ DstPortEnd uint16
+ DstPort1 uint16
+ DstPort2 uint16
+ SrcPortStart uint16
+ SrcPortEnd uint16
+ SrcPort1 uint16
+ SrcPort2 uint16
+ PortStart uint16
+ PortEnd uint16
+ Port1 uint16
+ Port2 uint16
+ IcmpType uint8
+ IcmpCode uint8
+ Direction BpfDirectionT
+ Action BpfFilterActionT
+ TcpFlags BpfTcpFlagsT
+ FilterDrops uint8
+ Sample uint32
+ Ip [16]uint8
+}
+
+type BpfFlowId BpfFlowIdT
+
+type BpfFlowIdT struct {
+ Direction uint8
+ SrcIp [16]uint8
+ DstIp [16]uint8
+ _ [1]byte
+ SrcPort uint16
+ DstPort uint16
+ TransportProtocol uint8
+ IcmpType uint8
+ IcmpCode uint8
+ _ [3]byte
+ IfIndex uint32
+}
+
+type BpfFlowMetrics BpfFlowMetricsT
+
+type BpfFlowMetricsT struct {
+ Lock struct{ Val uint32 }
+ EthProtocol uint16
+ SrcMac [6]uint8
+ DstMac [6]uint8
+ _ [2]byte
+ Packets uint32
+ Bytes uint64
+ StartMonoTimeTs uint64
+ EndMonoTimeTs uint64
+ Flags uint16
+ Errno uint8
+ Dscp uint8
+ Sampling uint32
+}
+
+type BpfFlowRecordT struct {
+ Id BpfFlowId
+ Metrics BpfFlowMetrics
+}
+
+type BpfGlobalCountersKeyT uint32
+
+const (
+ BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED BpfGlobalCountersKeyT = 0
+ BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS BpfGlobalCountersKeyT = 1
+ BpfGlobalCountersKeyTFILTER_REJECT BpfGlobalCountersKeyT = 2
+ BpfGlobalCountersKeyTFILTER_ACCEPT BpfGlobalCountersKeyT = 3
+ BpfGlobalCountersKeyTFILTER_NOMATCH BpfGlobalCountersKeyT = 4
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR BpfGlobalCountersKeyT = 5
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH BpfGlobalCountersKeyT = 6
+ BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS BpfGlobalCountersKeyT = 7
+ BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD BpfGlobalCountersKeyT = 8
+ BpfGlobalCountersKeyTMAX_COUNTERS BpfGlobalCountersKeyT = 9
+)
+
+type BpfPktDropsT struct {
+ Packets uint32
+ _ [4]byte
+ Bytes uint64
+ LatestFlags uint16
+ LatestState uint8
+ _ [1]byte
+ LatestDropCause uint32
+}
+
+type BpfTcpFlagsT uint32
+
+const (
+ BpfTcpFlagsTFIN_FLAG BpfTcpFlagsT = 1
+ BpfTcpFlagsTSYN_FLAG BpfTcpFlagsT = 2
+ BpfTcpFlagsTRST_FLAG BpfTcpFlagsT = 4
+ BpfTcpFlagsTPSH_FLAG BpfTcpFlagsT = 8
+ BpfTcpFlagsTACK_FLAG BpfTcpFlagsT = 16
+ BpfTcpFlagsTURG_FLAG BpfTcpFlagsT = 32
+ BpfTcpFlagsTECE_FLAG BpfTcpFlagsT = 64
+ BpfTcpFlagsTCWR_FLAG BpfTcpFlagsT = 128
+ BpfTcpFlagsTSYN_ACK_FLAG BpfTcpFlagsT = 256
+ BpfTcpFlagsTFIN_ACK_FLAG BpfTcpFlagsT = 512
+ BpfTcpFlagsTRST_ACK_FLAG BpfTcpFlagsT = 1024
+)
+
+type BpfTranslatedFlowT struct {
+ Saddr [16]uint8
+ Daddr [16]uint8
+ Sport uint16
+ Dport uint16
+ ZoneId uint16
+ IcmpId uint8
+ _ [1]byte
+}
+
+// LoadBpf returns the embedded CollectionSpec for Bpf.
+func LoadBpf() (*ebpf.CollectionSpec, error) {
+ reader := bytes.NewReader(_BpfBytes)
+ spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+ if err != nil {
+ return nil, fmt.Errorf("can't load Bpf: %w", err)
+ }
+
+ return spec, err
+}
+
+// LoadBpfObjects loads Bpf and converts it into a struct.
+//
+// The following types are suitable as obj argument:
+//
+// *BpfObjects
+// *BpfPrograms
+// *BpfMaps
+//
+// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
+func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
+ spec, err := LoadBpf()
+ if err != nil {
+ return err
+ }
+
+ return spec.LoadAndAssign(obj, opts)
+}
+
+// BpfSpecs contains maps and programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfSpecs struct {
+ BpfProgramSpecs
+ BpfMapSpecs
+}
+
+// BpfProgramSpecs contains programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfProgramSpecs struct {
+ KfreeSkb *ebpf.ProgramSpec `ebpf:"kfree_skb"`
+ RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"`
+ TcEgressFlowParse *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"`
+ TcEgressPcaParse *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"`
+ TcIngressFlowParse *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"`
+ TcIngressPcaParse *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"`
+ TcpRcvFentry *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"`
+ TcpRcvKprobe *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"`
+ TcxEgressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"`
+ TcxEgressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"`
+ TcxIngressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *ebpf.ProgramSpec `ebpf:"track_nat_manip_pkt"`
+}
+
+// BpfMapSpecs contains maps before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type BpfMapSpecs struct {
+ AdditionalFlowMetrics *ebpf.MapSpec `ebpf:"additional_flow_metrics"`
+ AggregatedFlows *ebpf.MapSpec `ebpf:"aggregated_flows"`
+ DirectFlows *ebpf.MapSpec `ebpf:"direct_flows"`
+ DnsFlows *ebpf.MapSpec `ebpf:"dns_flows"`
+ FilterMap *ebpf.MapSpec `ebpf:"filter_map"`
+ GlobalCounters *ebpf.MapSpec `ebpf:"global_counters"`
+ PacketRecord *ebpf.MapSpec `ebpf:"packet_record"`
+}
+
+// BpfObjects contains all objects after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfObjects struct {
+ BpfPrograms
+ BpfMaps
+}
+
+func (o *BpfObjects) Close() error {
+ return _BpfClose(
+ &o.BpfPrograms,
+ &o.BpfMaps,
+ )
+}
+
+// BpfMaps contains all maps after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfMaps struct {
+ AdditionalFlowMetrics *ebpf.Map `ebpf:"additional_flow_metrics"`
+ AggregatedFlows *ebpf.Map `ebpf:"aggregated_flows"`
+ DirectFlows *ebpf.Map `ebpf:"direct_flows"`
+ DnsFlows *ebpf.Map `ebpf:"dns_flows"`
+ FilterMap *ebpf.Map `ebpf:"filter_map"`
+ GlobalCounters *ebpf.Map `ebpf:"global_counters"`
+ PacketRecord *ebpf.Map `ebpf:"packet_record"`
+}
+
+func (m *BpfMaps) Close() error {
+ return _BpfClose(
+ m.AdditionalFlowMetrics,
+ m.AggregatedFlows,
+ m.DirectFlows,
+ m.DnsFlows,
+ m.FilterMap,
+ m.GlobalCounters,
+ m.PacketRecord,
+ )
+}
+
+// BpfPrograms contains all programs after they have been loaded into the kernel.
+//
+// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type BpfPrograms struct {
+ KfreeSkb *ebpf.Program `ebpf:"kfree_skb"`
+ RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"`
+ TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"`
+ TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"`
+ TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"`
+ TcpRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"`
+ TcpRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"`
+ TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"`
+ TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"`
+ TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *ebpf.Program `ebpf:"track_nat_manip_pkt"`
+}
+
+func (p *BpfPrograms) Close() error {
+ return _BpfClose(
+ p.KfreeSkb,
+ p.RhNetworkEventsMonitoring,
+ p.TcEgressFlowParse,
+ p.TcEgressPcaParse,
+ p.TcIngressFlowParse,
+ p.TcIngressPcaParse,
+ p.TcpRcvFentry,
+ p.TcpRcvKprobe,
+ p.TcxEgressFlowParse,
+ p.TcxEgressPcaParse,
+ p.TcxIngressFlowParse,
+ p.TcxIngressPcaParse,
+ p.TrackNatManipPkt,
+ )
+}
+
+func _BpfClose(closers ...io.Closer) error {
+ for _, closer := range closers {
+ if err := closer.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Do not access this directly.
+//
+//go:embed bpf_x86_bpfel.o
+var _BpfBytes []byte
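For reference, a minimal sketch of how these generated bindings are typically consumed by a caller; the package alias, error handling, and accessed fields are illustrative and not part of this change:

package main

import (
	"log"

	netebpf "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
)

func main() {
	// Load the embedded, architecture-matching BPF object file and assign
	// its programs and maps into the generated wrapper struct.
	var objs netebpf.BpfObjects
	if err := netebpf.LoadBpfObjects(&objs, nil); err != nil {
		log.Fatalf("loading BPF objects: %v", err)
	}
	defer objs.Close()

	// Programs and maps are now ready to be attached or read, for example
	// objs.TcIngressFlowParse or objs.AggregatedFlows.
	_ = objs.AggregatedFlows
}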
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.o
new file mode 100644
index 000000000..9399eaff3
Binary files /dev/null and b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.o differ
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/gen.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/gen.go
new file mode 100644
index 000000000..3ec45b49c
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/gen.go
@@ -0,0 +1,4 @@
+package ebpf
+
+// $BPF_CLANG and $BPF_CFLAGS are set by the Makefile.
+//go:generate bpf2go -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64,ppc64le,s390x -type flow_metrics_t -type flow_id_t -type flow_record_t -type pkt_drops_t -type dns_record_t -type global_counters_key_t -type direction_t -type filter_action_t -type tcp_flags_t -type translated_flow_t Bpf ../../bpf/flows.c -- -I../../bpf/headers
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/direct_flp.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/direct_flp.go
new file mode 100644
index 000000000..3f8822b23
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/direct_flp.go
@@ -0,0 +1,63 @@
+package exporter
+
+import (
+ "fmt"
+
+ flpconfig "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/decode"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "gopkg.in/yaml.v2"
+)
+
+// DirectFLP flow exporter
+type DirectFLP struct {
+ fwd chan flpconfig.GenericMap
+}
+
+func StartDirectFLP(jsonConfig string, bufLen int) (*DirectFLP, error) {
+ var cfg flpconfig.ConfigFileStruct
+ // Note that, despite jsonConfig being json, we use yaml unmarshaler because the json one
+ // is screwed up for HTTPClientConfig in github.com/prometheus/common/config (used for Loki)
+ // This is ok as YAML is a superset of JSON.
+ // E.g. try unmarshaling `{"clientConfig":{"proxy_url":null}}` as an api.WriteLoki
+ // See also https://github.com/prometheus/prometheus/issues/11816
+ if err := yaml.Unmarshal([]byte(jsonConfig), &cfg); err != nil {
+ return nil, fmt.Errorf("failed to read config: %w", err)
+ }
+
+ fwd := make(chan flpconfig.GenericMap, bufLen)
+ err := pipeline.StartFLPInProcess(&cfg, fwd)
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize pipeline %w", err)
+ }
+
+ return &DirectFLP{fwd: fwd}, nil
+}
+
+// ExportFlows accepts slices of *model.Record by its input channel, converts them
+// to GenericMap instances, and submits them to the in-process FLP pipeline.
+func (d *DirectFLP) ExportFlows(input <-chan []*model.Record) {
+ for inputRecords := range input {
+ for _, rec := range inputRecords {
+ d.fwd <- decode.RecordToMap(rec)
+ }
+ }
+}
+
+// ExportPackets accepts slices of *model.PacketRecord by its input channel, converts them
+// to GenericMap instances, and submits them to the in-process FLP pipeline.
+func (d *DirectFLP) ExportPackets(input <-chan []*model.PacketRecord) {
+ for inputPackets := range input {
+ for _, packet := range inputPackets {
+ if len(packet.Stream) != 0 {
+ d.fwd <- packets.PacketToMap(packet)
+ }
+ }
+ }
+}
+
+func (d *DirectFLP) Close() {
+ close(d.fwd)
+}
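A minimal wiring sketch for this exporter, assuming cfgJSON already holds a valid flowlogs-pipeline configuration in JSON form and flows is the agent's record channel (both names and the buffer size are placeholders):

package main

import (
	"github.com/netobserv/netobserv-ebpf-agent/pkg/exporter"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
)

func runDirectFLP(cfgJSON string, flows <-chan []*model.Record) error {
	// Start the in-process pipeline with a 1000-entry forwarding buffer.
	flp, err := exporter.StartDirectFLP(cfgJSON, 1000)
	if err != nil {
		return err
	}
	defer flp.Close()

	// Blocks until the flows channel is closed; each record is converted
	// to a GenericMap and pushed into the embedded pipeline.
	flp.ExportFlows(flows)
	return nil
}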
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_packets.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_packets.go
new file mode 100644
index 000000000..b78ce1e34
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_packets.go
@@ -0,0 +1,67 @@
+package exporter
+
+import (
+ "context"
+ "time"
+
+ grpc "github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets"
+
+ "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+type GRPCPacketProto struct {
+ hostIP string
+ hostPort int
+ clientConn *grpc.ClientConnection
+}
+
+var gplog = logrus.WithField("component", "packet/GRPCPackets")
+
+// writeGRPCPacket writes the given packet data out to gRPC.
+func writeGRPCPacket(time time.Time, data []byte, conn *grpc.ClientConnection) error {
+ bytes, err := packets.GetPacketBytesWithHeader(time, data)
+ if err != nil {
+ return err
+ }
+ _, err = conn.Client().Send(context.TODO(), &pbpacket.Packet{
+ Pcap: &anypb.Any{
+ Value: bytes,
+ },
+ })
+ return err
+}
+
+func StartGRPCPacketSend(hostIP string, hostPort int) (*GRPCPacketProto, error) {
+ clientConn, err := grpc.ConnectClient(hostIP, hostPort)
+ if err != nil {
+ return nil, err
+ }
+ return &GRPCPacketProto{
+ hostIP: hostIP,
+ hostPort: hostPort,
+ clientConn: clientConn,
+ }, nil
+}
+
+func (p *GRPCPacketProto) ExportGRPCPackets(in <-chan []*model.PacketRecord) {
+ for packetRecord := range in {
+ var errs []error
+ for _, packet := range packetRecord {
+ if len(packet.Stream) != 0 {
+ if err := writeGRPCPacket(packet.Time, packet.Stream, p.clientConn); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ }
+ if len(errs) != 0 {
+ gplog.Errorf("%d errors while sending packets:\n%s", len(errs), errs)
+ }
+ }
+ if err := p.clientConn.Close(); err != nil {
+ gplog.WithError(err).Warn("couldn't close packet export client")
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_proto.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_proto.go
new file mode 100644
index 000000000..761f0053c
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_proto.go
@@ -0,0 +1,74 @@
+package exporter
+
+import (
+ "context"
+
+ grpc "github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/utils"
+
+ ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+var glog = logrus.WithField("component", "exporter/GRPCProto")
+
+const componentGRPC = "grpc"
+
+// GRPCProto flow exporter. Its ExportFlows method accepts slices of *model.Record
+// by its input channel, converts them to *pbflow.Records instances, and submits
+// them to the collector.
+type GRPCProto struct {
+ hostIP string
+ hostPort int
+ clientConn *grpc.ClientConnection
+ // maxFlowsPerMessage limits the maximum number of flows per GRPC message.
+ // If a message contains more flows than this number, the GRPC message will be split into
+ // multiple messages.
+ maxFlowsPerMessage int
+ metrics *metrics.Metrics
+ batchCounter prometheus.Counter
+ sampler *ovnobserv.SampleDecoder
+}
+
+func StartGRPCProto(hostIP string, hostPort int, maxFlowsPerMessage int, m *metrics.Metrics, s *ovnobserv.SampleDecoder) (*GRPCProto, error) {
+ clientConn, err := grpc.ConnectClient(hostIP, hostPort)
+ if err != nil {
+ return nil, err
+ }
+ return &GRPCProto{
+ hostIP: hostIP,
+ hostPort: hostPort,
+ clientConn: clientConn,
+ maxFlowsPerMessage: maxFlowsPerMessage,
+ metrics: m,
+ batchCounter: m.CreateBatchCounter(componentGRPC),
+ sampler: s,
+ }, nil
+}
+
+// ExportFlows accepts slices of *model.Record by its input channel, converts them
+// to *pbflow.Records instances, and submits them to the collector.
+func (g *GRPCProto) ExportFlows(input <-chan []*model.Record) {
+ socket := utils.GetSocket(g.hostIP, g.hostPort)
+ log := glog.WithField("collector", socket)
+ for inputRecords := range input {
+ g.metrics.EvictionCounter.WithSource(componentGRPC).Inc()
+ for _, pbRecords := range pbflow.FlowsToPB(inputRecords, g.maxFlowsPerMessage, g.sampler) {
+ log.Debugf("sending %d records", len(pbRecords.Entries))
+ if _, err := g.clientConn.Client().Send(context.TODO(), pbRecords); err != nil {
+ g.metrics.Errors.WithErrorName(componentGRPC, "CannotWriteMessage").Inc()
+ log.WithError(err).Error("couldn't send flow records to collector")
+ }
+ g.batchCounter.Inc()
+ g.metrics.EvictedFlowsCounter.WithSource(componentGRPC).Add(float64(len(pbRecords.Entries)))
+ }
+ }
+ if err := g.clientConn.Close(); err != nil {
+ log.WithError(err).Warn("couldn't close flow export client")
+ g.metrics.Errors.WithErrorName(componentGRPC, "CannotCloseClient").Inc()
+ }
+}
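A wiring sketch for the gRPC flow exporter; the collector host and port are placeholders, m is assumed to be an already-initialized *metrics.Metrics, and the OVN sample decoder is left nil:

package main

import (
	"github.com/netobserv/netobserv-ebpf-agent/pkg/exporter"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
)

func startGRPCExport(m *metrics.Metrics, flows <-chan []*model.Record) error {
	// At most 100 flows per protobuf message; larger batches are split.
	exp, err := exporter.StartGRPCProto("flowlogs-pipeline.netobserv.svc", 2055, 100, m, nil)
	if err != nil {
		return err
	}
	// Runs until the flows channel is closed, then closes the client connection.
	go exp.ExportFlows(flows)
	return nil
}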
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/ipfix.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/ipfix.go
new file mode 100644
index 000000000..379401ccf
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/ipfix.go
@@ -0,0 +1,383 @@
+package exporter
+
+import (
+ "net"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/utils"
+ "github.com/sirupsen/logrus"
+ "github.com/vmware/go-ipfix/pkg/entities"
+ ipfixExporter "github.com/vmware/go-ipfix/pkg/exporter"
+ "github.com/vmware/go-ipfix/pkg/registry"
+)
+
+var ilog = logrus.WithField("component", "exporter/IPFIXProto")
+
+// TODO: encode also the equivalent of the Protobuf's AgentIP field in a format that is binary-
+// compatible with OVN-K.
+
+type IPFIX struct {
+ hostIP string
+ hostPort int
+ exporter *ipfixExporter.ExportingProcess
+ templateIDv4 uint16
+ templateIDv6 uint16
+ entitiesV4 []entities.InfoElementWithValue
+ entitiesV6 []entities.InfoElementWithValue
+}
+
+func addElementToTemplate(log *logrus.Entry, elementName string, value []byte, elements *[]entities.InfoElementWithValue) error {
+ element, err := registry.GetInfoElement(elementName, registry.IANAEnterpriseID)
+ if err != nil {
+ log.WithError(err).Errorf("Did not find the element with name %s", elementName)
+ return err
+ }
+ ie, err := entities.DecodeAndCreateInfoElementWithValue(element, value)
+ if err != nil {
+ log.WithError(err).Errorf("Failed to decode element %s", elementName)
+ return err
+ }
+ *elements = append(*elements, ie)
+ return nil
+}
+
+func AddRecordValuesToTemplate(log *logrus.Entry, elements *[]entities.InfoElementWithValue) error {
+ err := addElementToTemplate(log, "octetDeltaCount", nil, elements)
+ if err != nil {
+ return err
+ }
+ err = addElementToTemplate(log, "tcpControlBits", nil, elements)
+ if err != nil {
+ return err
+ }
+ err = addElementToTemplate(log, "flowStartSeconds", nil, elements)
+ if err != nil {
+ return err
+ }
+ err = addElementToTemplate(log, "flowStartMilliseconds", nil, elements)
+ if err != nil {
+ return err
+ }
+ err = addElementToTemplate(log, "flowEndSeconds", nil, elements)
+ if err != nil {
+ return err
+ }
+ err = addElementToTemplate(log, "flowEndMilliseconds", nil, elements)
+ if err != nil {
+ return err
+ }
+ err = addElementToTemplate(log, "packetDeltaCount", nil, elements)
+ if err != nil {
+ return err
+ }
+ err = addElementToTemplate(log, "interfaceName", nil, elements)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func SendTemplateRecordv4(log *logrus.Entry, exporter *ipfixExporter.ExportingProcess) (uint16, []entities.InfoElementWithValue, error) {
+ templateID := exporter.NewTemplateID()
+ templateSet := entities.NewSet(false)
+ err := templateSet.PrepareSet(entities.Template, templateID)
+ if err != nil {
+ return 0, nil, err
+ }
+ elements := make([]entities.InfoElementWithValue, 0)
+
+ err = addElementToTemplate(log, "ethernetType", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "flowDirection", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "sourceMacAddress", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "destinationMacAddress", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "sourceIPv4Address", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "destinationIPv4Address", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "protocolIdentifier", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "sourceTransportPort", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "destinationTransportPort", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "icmpTypeIPv4", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "icmpCodeIPv4", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = AddRecordValuesToTemplate(log, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = templateSet.AddRecord(elements, templateID)
+ if err != nil {
+ return 0, nil, err
+ }
+ _, err = exporter.SendSet(templateSet)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return templateID, elements, nil
+}
+
+func SendTemplateRecordv6(log *logrus.Entry, exporter *ipfixExporter.ExportingProcess) (uint16, []entities.InfoElementWithValue, error) {
+ templateID := exporter.NewTemplateID()
+ templateSet := entities.NewSet(false)
+ err := templateSet.PrepareSet(entities.Template, templateID)
+ if err != nil {
+ return 0, nil, err
+ }
+ elements := make([]entities.InfoElementWithValue, 0)
+
+ err = addElementToTemplate(log, "ethernetType", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "flowDirection", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "sourceMacAddress", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "destinationMacAddress", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "sourceIPv6Address", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "destinationIPv6Address", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "nextHeaderIPv6", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "sourceTransportPort", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "destinationTransportPort", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "icmpTypeIPv6", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = addElementToTemplate(log, "icmpCodeIPv6", nil, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+ err = AddRecordValuesToTemplate(log, &elements)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ err = templateSet.AddRecord(elements, templateID)
+ if err != nil {
+ return 0, nil, err
+ }
+ _, err = exporter.SendSet(templateSet)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return templateID, elements, nil
+}
+
+// StartIPFIXExporter creates the exporting process and sends the IPv4 and IPv6 template records to the IPFIX collector.
+func StartIPFIXExporter(hostIP string, hostPort int, transportProto string) (*IPFIX, error) {
+ socket := utils.GetSocket(hostIP, hostPort)
+ log := ilog.WithField("collector", socket)
+
+ registry.LoadRegistry()
+ // Create exporter using local server info
+ input := ipfixExporter.ExporterInput{
+ CollectorAddress: socket,
+ CollectorProtocol: transportProto,
+ ObservationDomainID: 1,
+ TempRefTimeout: 1,
+ }
+ exporter, err := ipfixExporter.InitExportingProcess(input)
+ if err != nil {
+ log.Fatalf("Got error when connecting to local server %s: %v", socket, err)
+ return nil, err
+ }
+ log.Infof("Created exporter connecting to local server with address: %s", socket)
+
+ templateIDv4, entitiesV4, err := SendTemplateRecordv4(log, exporter)
+ if err != nil {
+ log.WithError(err).Error("Failed in send IPFIX template v4 record")
+ return nil, err
+ }
+
+ templateIDv6, entitiesV6, err := SendTemplateRecordv6(log, exporter)
+ if err != nil {
+ log.WithError(err).Error("Failed in send IPFIX template v6 record")
+ return nil, err
+ }
+ log.Infof("entities v4 %+v", entitiesV4)
+ log.Infof("entities v6 %+v", entitiesV6)
+
+ return &IPFIX{
+ hostIP: hostIP,
+ hostPort: hostPort,
+ exporter: exporter,
+ templateIDv4: templateIDv4,
+ templateIDv6: templateIDv6,
+ entitiesV4: entitiesV4,
+ entitiesV6: entitiesV6,
+ }, nil
+}
+
+func setIPv4Address(ieValPtr *entities.InfoElementWithValue, ipAddress net.IP) {
+ ieVal := *ieValPtr
+ if ipAddress == nil {
+ ieVal.SetIPAddressValue(net.ParseIP("0.0.0.0"))
+ } else {
+ ieVal.SetIPAddressValue(ipAddress)
+ }
+}
+func setIERecordValue(record *model.Record, ieValPtr *entities.InfoElementWithValue) {
+ ieVal := *ieValPtr
+ switch ieVal.GetName() {
+ case "octetDeltaCount":
+ ieVal.SetUnsigned64Value(record.Metrics.Bytes)
+ case "tcpControlBits":
+ ieVal.SetUnsigned16Value(record.Metrics.Flags)
+ case "flowStartSeconds":
+ ieVal.SetUnsigned32Value(uint32(record.TimeFlowStart.Unix()))
+ case "flowStartMilliseconds":
+ ieVal.SetUnsigned64Value(uint64(record.TimeFlowStart.UnixMilli()))
+ case "flowEndSeconds":
+ ieVal.SetUnsigned32Value(uint32(record.TimeFlowEnd.Unix()))
+ case "flowEndMilliseconds":
+ ieVal.SetUnsigned64Value(uint64(record.TimeFlowEnd.UnixMilli()))
+ case "packetDeltaCount":
+ ieVal.SetUnsigned64Value(uint64(record.Metrics.Packets))
+ case "interfaceName":
+ ieVal.SetStringValue(record.Interface)
+ }
+}
+func setIEValue(record *model.Record, ieValPtr *entities.InfoElementWithValue) {
+ ieVal := *ieValPtr
+ switch ieVal.GetName() {
+ case "ethernetType":
+ ieVal.SetUnsigned16Value(record.Metrics.EthProtocol)
+ case "flowDirection":
+ ieVal.SetUnsigned8Value(record.ID.Direction)
+ case "sourceMacAddress":
+ ieVal.SetMacAddressValue(record.Metrics.SrcMac[:])
+ case "destinationMacAddress":
+ ieVal.SetMacAddressValue(record.Metrics.DstMac[:])
+ case "sourceIPv4Address":
+ setIPv4Address(ieValPtr, model.IP(record.ID.SrcIp).To4())
+ case "destinationIPv4Address":
+ setIPv4Address(ieValPtr, model.IP(record.ID.DstIp).To4())
+ case "sourceIPv6Address":
+ ieVal.SetIPAddressValue(record.ID.SrcIp[:])
+ case "destinationIPv6Address":
+ ieVal.SetIPAddressValue(record.ID.DstIp[:])
+ case "protocolIdentifier", "nextHeaderIPv6":
+ ieVal.SetUnsigned8Value(record.ID.TransportProtocol)
+ case "sourceTransportPort":
+ ieVal.SetUnsigned16Value(record.ID.SrcPort)
+ case "destinationTransportPort":
+ ieVal.SetUnsigned16Value(record.ID.DstPort)
+ case "icmpTypeIPv4", "icmpTypeIPv6":
+ ieVal.SetUnsigned8Value(record.ID.IcmpType)
+ case "icmpCodeIPv4", "icmpCodeIPv6":
+ ieVal.SetUnsigned8Value(record.ID.IcmpCode)
+ }
+}
+func setEntities(record *model.Record, elements *[]entities.InfoElementWithValue) {
+ for _, ieVal := range *elements {
+ setIEValue(record, &ieVal)
+ setIERecordValue(record, &ieVal)
+ }
+}
+func (ipf *IPFIX) sendDataRecord(_ *logrus.Entry, record *model.Record, v6 bool) error {
+ dataSet := entities.NewSet(false)
+ var templateID uint16
+ if v6 {
+ templateID = ipf.templateIDv6
+ setEntities(record, &ipf.entitiesV6)
+ } else {
+ templateID = ipf.templateIDv4
+ setEntities(record, &ipf.entitiesV4)
+ }
+ err := dataSet.PrepareSet(entities.Data, templateID)
+ if err != nil {
+ return err
+ }
+ if v6 {
+ err = dataSet.AddRecord(ipf.entitiesV6, templateID)
+ if err != nil {
+ return err
+ }
+ } else {
+ err = dataSet.AddRecord(ipf.entitiesV4, templateID)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = ipf.exporter.SendSet(dataSet)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// ExportFlows accepts slices of *model.Record by its input channel, converts them
+// to IPFIX Records, and submits them to the collector.
+func (ipf *IPFIX) ExportFlows(input <-chan []*model.Record) {
+ socket := utils.GetSocket(ipf.hostIP, ipf.hostPort)
+ log := ilog.WithField("collector", socket)
+ for inputRecords := range input {
+ for _, record := range inputRecords {
+ if record.Metrics.EthProtocol == model.IPv6Type {
+ err := ipf.sendDataRecord(log, record, true)
+ if err != nil {
+ log.WithError(err).Error("Failed in send IPFIX data record")
+ }
+ } else {
+ err := ipf.sendDataRecord(log, record, false)
+ if err != nil {
+ log.WithError(err).Error("Failed in send IPFIX data record")
+ }
+ }
+ }
+ }
+ ipf.exporter.CloseConnToCollector()
+}
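A usage sketch for the IPFIX exporter; the collector address, port, and "tcp" transport are placeholders:

package main

import (
	"log"

	"github.com/netobserv/netobserv-ebpf-agent/pkg/exporter"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
)

func startIPFIXExport(flows <-chan []*model.Record) {
	// Connects to the collector and sends the IPv4/IPv6 template records up front.
	ipf, err := exporter.StartIPFIXExporter("ipfix-collector.netobserv.svc", 4739, "tcp")
	if err != nil {
		log.Fatalf("starting IPFIX exporter: %v", err)
	}
	// Each incoming record is encoded against the matching template and sent as a data set.
	go ipf.ExportFlows(flows)
}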
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/kafka_proto.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/kafka_proto.go
new file mode 100644
index 000000000..50b5ee3f1
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/kafka_proto.go
@@ -0,0 +1,78 @@
+package exporter
+
+import (
+ "context"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
+
+ ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder"
+ kafkago "github.com/segmentio/kafka-go"
+ "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/proto"
+)
+
+var klog = logrus.WithField("component", "exporter/KafkaProto")
+
+const componentKafka = "kafka"
+
+type kafkaWriter interface {
+ WriteMessages(ctx context.Context, msgs ...kafkago.Message) error
+}
+
+// KafkaProto exports flows over Kafka, encoded as a protobuf that is understandable by the
+// Flowlogs-Pipeline collector
+type KafkaProto struct {
+ Writer kafkaWriter
+ Metrics *metrics.Metrics
+ SampleDecoder *ovnobserv.SampleDecoder
+}
+
+func (kp *KafkaProto) ExportFlows(input <-chan []*model.Record) {
+ klog.Info("starting Kafka exporter")
+ for records := range input {
+ kp.batchAndSubmit(records)
+ }
+}
+
+func getFlowKey(record *model.Record) []byte {
+ // We sort the IP addresses so that flows from one IP to a second IP get the same key regardless of the direction
+ for k := range record.ID.SrcIp {
+ if record.ID.SrcIp[k] < record.ID.DstIp[k] {
+ return append(record.ID.SrcIp[:], record.ID.DstIp[:]...)
+ } else if record.ID.SrcIp[k] > record.ID.DstIp[k] {
+ return append(record.ID.DstIp[:], record.ID.SrcIp[:]...)
+ }
+ }
+ return append(record.ID.SrcIp[:], record.ID.DstIp[:]...)
+}
+
+func (kp *KafkaProto) batchAndSubmit(records []*model.Record) {
+ klog.Debugf("sending %d records", len(records))
+ msgs := make([]kafkago.Message, 0, len(records))
+ for _, record := range records {
+ pbBytes, err := proto.Marshal(pbflow.FlowToPB(record, kp.SampleDecoder))
+ if err != nil {
+ klog.WithError(err).Debug("can't encode protobuf message. Ignoring")
+ kp.Metrics.Errors.WithErrorName(componentKafka, "CannotEncodeMessage").Inc()
+ continue
+ }
+ msgs = append(msgs, kafkago.Message{Value: pbBytes, Key: getFlowKey(record)})
+ }
+
+ if err := kp.Writer.WriteMessages(context.TODO(), msgs...); err != nil {
+ klog.WithError(err).Error("can't write messages into Kafka")
+ kp.Metrics.Errors.WithErrorName(componentKafka, "CannotWriteMessage").Inc()
+ }
+ kp.Metrics.EvictionCounter.WithSource(componentKafka).Inc()
+ kp.Metrics.EvictedFlowsCounter.WithSource(componentKafka).Add(float64(len(records)))
+}
+
+type JSONRecord struct {
+ *model.Record
+ TimeFlowStart int64
+ TimeFlowEnd int64
+ TimeFlowStartMs int64
+ TimeFlowEndMs int64
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/account.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/account.go
new file mode 100644
index 000000000..f850f960d
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/account.go
@@ -0,0 +1,105 @@
+package flow
+
+import (
+ "time"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+
+ "github.com/sirupsen/logrus"
+)
+
+// Accounter accumulates flow metrics in memory and eventually evicts them via an evictor channel.
+// The accounting process is usually done in kernel space. This type reimplements it in user space
+// for the edge case where packets are submitted directly via the ring buffer because the kernel-side
+// accounting map is full.
+type Accounter struct {
+ maxEntries int
+ evictTimeout time.Duration
+ entries map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics
+ clock func() time.Time
+ monoClock func() time.Duration
+ metrics *metrics.Metrics
+}
+
+var alog = logrus.WithField("component", "flow/Accounter")
+
+// NewAccounter creates a new Accounter.
+// The cache has no limit and it's assumed that eviction is done by the caller.
+func NewAccounter(
+ maxEntries int, evictTimeout time.Duration,
+ clock func() time.Time,
+ monoClock func() time.Duration,
+ m *metrics.Metrics,
+) *Accounter {
+ acc := Accounter{
+ maxEntries: maxEntries,
+ evictTimeout: evictTimeout,
+ entries: map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics{},
+ clock: clock,
+ monoClock: monoClock,
+ metrics: m,
+ }
+ return &acc
+}
+
+// Account is meant to run in its own goroutine. It reads all the records from the input channel
+// and accumulates their metrics internally. Once the cache reaches its maximum size
+// or the eviction timer expires, it evicts all the accumulated flows through the output channel.
+func (c *Accounter) Account(in <-chan *model.RawRecord, out chan<- []*model.Record) {
+ evictTick := time.NewTicker(c.evictTimeout)
+ defer evictTick.Stop()
+ for {
+ select {
+ case <-evictTick.C:
+ if len(c.entries) == 0 {
+ break
+ }
+ evictingEntries := c.entries
+ c.entries = map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics{}
+ logrus.WithField("flows", len(evictingEntries)).
+ Debug("evicting flows from userspace accounter on timeout")
+ c.evict(evictingEntries, out, "timeout")
+ case record, ok := <-in:
+ if !ok {
+ alog.Debug("input channel closed. Evicting entries")
+ // if the records channel is closed, we evict the entries in the
+ // same goroutine to wait for all the entries to be sent before
+ // closing the channel
+ c.evict(c.entries, out, "closing")
+ alog.Debug("exiting account routine")
+ return
+ }
+ if stored, ok := c.entries[record.Id]; ok {
+ model.AccumulateBase(stored, &record.Metrics)
+ } else {
+ if len(c.entries) >= c.maxEntries {
+ evictingEntries := c.entries
+ c.entries = map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics{}
+ logrus.WithField("flows", len(evictingEntries)).
+ Debug("evicting flows from userspace accounter after reaching cache max length")
+ c.evict(evictingEntries, out, "full")
+ // Since we evict flows because we reached cacheMaxFlows, reset the
+ // eviction timer to avoid another unnecessary eviction when it expires.
+ evictTick.Reset(c.evictTimeout)
+ }
+ c.entries[record.Id] = &record.Metrics
+ }
+ }
+ c.metrics.BufferSizeGauge.WithBufferName("accounter-entries").Set(float64(len(c.entries)))
+ }
+}
+
+func (c *Accounter) evict(entries map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics, evictor chan<- []*model.Record, reason string) {
+ now := c.clock()
+ monotonicNow := uint64(c.monoClock())
+ records := make([]*model.Record, 0, len(entries))
+ for key, metrics := range entries {
+ records = append(records, model.NewRecord(key, &model.BpfFlowContent{BpfFlowMetrics: metrics}, now, monotonicNow))
+ }
+ c.metrics.EvictionCounter.WithSourceAndReason("accounter", reason).Inc()
+ c.metrics.EvictedFlowsCounter.WithSourceAndReason("accounter", reason).Add(float64(len(records)))
+ alog.WithField("numEntries", len(records)).Debug("records evicted from userspace accounter")
+ evictor <- records
+}
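A sketch of how the accounter is typically wired; the cache size, timeout, and clock functions are illustrative, and m is assumed to be an initialized *metrics.Metrics:

package main

import (
	"time"

	"github.com/netobserv/netobserv-ebpf-agent/pkg/flow"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
)

func startAccounting(m *metrics.Metrics, raw <-chan *model.RawRecord, evicted chan<- []*model.Record) {
	start := time.Now()
	acc := flow.NewAccounter(
		5000,          // evict once 5000 distinct flows are cached
		5*time.Second, // or after 5 seconds of accumulation
		time.Now,      // wall clock, used to timestamp evicted records
		func() time.Duration { return time.Since(start) }, // monotonic clock
		m,
	)
	go acc.Account(raw, evicted)
}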
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/decorator.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/decorator.go
new file mode 100644
index 000000000..75d90bdd3
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/decorator.go
@@ -0,0 +1,24 @@
+package flow
+
+import (
+ "net"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+)
+
+type InterfaceNamer func(ifIndex int) string
+
+// Decorate adds to the flows extra metadata fields that are not directly fetched by eBPF:
+// - The interface name (corresponding to the interface index in the flow).
+// - The IP address of the agent host.
+func Decorate(agentIP net.IP, ifaceNamer InterfaceNamer) func(in <-chan []*model.Record, out chan<- []*model.Record) {
+ return func(in <-chan []*model.Record, out chan<- []*model.Record) {
+ for flows := range in {
+ for _, flow := range flows {
+ flow.Interface = ifaceNamer(int(flow.ID.IfIndex))
+ flow.AgentIP = agentIP
+ }
+ out <- flows
+ }
+ }
+}
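+
+// Illustrative usage sketch (not part of the vendored sources): Decorate returns a
+// pipeline stage; the agent IP and interface namer below are hypothetical:
+//
+//	namer := func(ifIndex int) string { return fmt.Sprintf("if-%d", ifIndex) }
+//	decorate := Decorate(net.ParseIP("10.0.0.1"), namer)
+//	go decorate(in, out)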
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go
new file mode 100644
index 000000000..06f7b07f5
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go
@@ -0,0 +1,177 @@
+package flow
+
+import (
+ "container/list"
+ "reflect"
+ "time"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+)
+
+var dlog = logrus.WithField("component", "flow/Deduper")
+var timeNow = time.Now
+
+// deduperCache implements an LRU cache whose elements are evicted if they haven't been accessed
+// during the expire duration.
+// It is not safe for concurrent access.
+type deduperCache struct {
+ expire time.Duration
+ // key: ebpf.BpfFlowId with the interface and MACs erased, to detect duplicates
+ // value: listElement pointing to a struct entry
+ ifaces map[ebpf.BpfFlowId]*list.Element
+ // element: entry structs of the ifaces map ordered by expiry time
+ entries *list.List
+}
+
+type entry struct {
+ key *ebpf.BpfFlowId
+ dnsRecord *ebpf.BpfDnsRecordT
+ flowRTT *uint64
+ networkEvents *[model.MaxNetworkEvents][model.NetworkEventsMaxEventsMD]uint8
+ ifIndex uint32
+ expiryTime time.Time
+ dupList *[]map[string]uint8
+}
+
+// Dedupe receives flows and filters out those belonging to duplicate interfaces. It forwards
+// the flows from the first interface they are seen on, until that flow expires in the cache
+// (no activity for it during the expiration time).
+// The justMark argument tells the deduper not to drop duplicate flows but to
+// set their Duplicate field instead.
+func Dedupe(expireTime time.Duration, justMark, mergeDup bool, ifaceNamer InterfaceNamer, m *metrics.Metrics) func(in <-chan []*model.Record, out chan<- []*model.Record) {
+ cache := &deduperCache{
+ expire: expireTime,
+ entries: list.New(),
+ ifaces: map[ebpf.BpfFlowId]*list.Element{},
+ }
+ return func(in <-chan []*model.Record, out chan<- []*model.Record) {
+ for records := range in {
+ cache.removeExpired()
+ fwd := make([]*model.Record, 0, len(records))
+ for _, record := range records {
+ cache.checkDupe(record, justMark, mergeDup, &fwd, ifaceNamer)
+ }
+ if len(fwd) > 0 {
+ out <- fwd
+ m.EvictionCounter.WithSource("deduper").Inc()
+ m.EvictedFlowsCounter.WithSource("deduper").Add(float64(len(fwd)))
+ }
+ m.BufferSizeGauge.WithBufferName("deduper-list").Set(float64(cache.entries.Len()))
+ m.BufferSizeGauge.WithBufferName("deduper-map").Set(float64(len(cache.ifaces)))
+ }
+ }
+}
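+
+// Illustrative usage sketch (not part of the vendored sources): Dedupe builds a
+// pipeline stage; the expiry, the flags and the interface namer below are hypothetical:
+//
+//	dedupe := Dedupe(2*time.Minute, false, true, namer, m)
+//	go dedupe(in, out)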
+
+// checkDupe checks whether the current record is already present in the cache and, if not, adds it to the fwd records list
+func (c *deduperCache) checkDupe(r *model.Record, justMark, mergeDup bool, fwd *[]*model.Record, ifaceNamer InterfaceNamer) {
+ mergeEntry := make(map[string]uint8)
+ rk := r.ID
+ // zeroes fields from key that should be ignored from the flow comparison
+ rk.IfIndex = 0
+ rk.Direction = 0
+ if r.Metrics.AdditionalMetrics == nil {
+ r.Metrics.AdditionalMetrics = &ebpf.BpfAdditionalMetrics{}
+ }
+ // If a flow has been accounted previously, whatever its interface was,
+ // it updates the expiry time for that flow
+ if ele, ok := c.ifaces[rk]; ok {
+ fEntry := ele.Value.(*entry)
+ fEntry.expiryTime = timeNow().Add(c.expire)
+ c.entries.MoveToFront(ele)
+		// The input flow is a duplicate if its interface differs from the interface
+		// of the non-duplicate flow that was first registered in the cache.
+		// If the new flow has DNS enrichment, the cached flow is enriched
+		// with the DNS info and the current flow is still marked as duplicate
+ if r.Metrics.AdditionalMetrics.DnsRecord.Latency != 0 && fEntry.dnsRecord.Latency == 0 {
+ // copy DNS record to the cached entry and mark it as duplicate
+ fEntry.dnsRecord.Flags = r.Metrics.AdditionalMetrics.DnsRecord.Flags
+ fEntry.dnsRecord.Id = r.Metrics.AdditionalMetrics.DnsRecord.Id
+ fEntry.dnsRecord.Latency = r.Metrics.AdditionalMetrics.DnsRecord.Latency
+ fEntry.dnsRecord.Errno = r.Metrics.AdditionalMetrics.DnsRecord.Errno
+ }
+		// If the new flow has a flowRTT, enrich the cached flow with the same RTT and mark the new flow as duplicate
+ if r.Metrics.AdditionalMetrics.FlowRtt != 0 && *fEntry.flowRTT == 0 {
+ *fEntry.flowRTT = r.Metrics.AdditionalMetrics.FlowRtt
+ }
+		// If the new flow has network events, enrich the cached flow and mark the new flow as duplicate
+ for i, md := range r.Metrics.AdditionalMetrics.NetworkEvents {
+ if !model.AllZerosMetaData(md) && model.AllZerosMetaData(fEntry.networkEvents[i]) {
+ copy(fEntry.networkEvents[i][:], md[:])
+ }
+ }
+ if fEntry.ifIndex != r.ID.IfIndex {
+ if justMark {
+ r.Duplicate = true
+ *fwd = append(*fwd, r)
+ }
+ if mergeDup {
+ ifName := ifaceNamer(int(r.ID.IfIndex))
+ mergeEntry[ifName] = r.ID.Direction
+ if dupEntryNew(*fEntry.dupList, mergeEntry) {
+ *fEntry.dupList = append(*fEntry.dupList, mergeEntry)
+ dlog.Debugf("merge list entries dump:")
+ for _, entry := range *fEntry.dupList {
+ for k, v := range entry {
+ dlog.Debugf("interface %s dir %d", k, v)
+ }
+ }
+ }
+ }
+ return
+ }
+ *fwd = append(*fwd, r)
+ return
+ }
+ // The flow has not been accounted previously (or was forgotten after expiration)
+ // so we register it for that concrete interface
+ e := entry{
+ key: &rk,
+ dnsRecord: &r.Metrics.AdditionalMetrics.DnsRecord,
+ flowRTT: &r.Metrics.AdditionalMetrics.FlowRtt,
+ networkEvents: &r.Metrics.AdditionalMetrics.NetworkEvents,
+ ifIndex: r.ID.IfIndex,
+ expiryTime: timeNow().Add(c.expire),
+ }
+ if mergeDup {
+ ifName := ifaceNamer(int(r.ID.IfIndex))
+ mergeEntry[ifName] = r.ID.Direction
+ r.DupList = append(r.DupList, mergeEntry)
+ e.dupList = &r.DupList
+ }
+ c.ifaces[rk] = c.entries.PushFront(&e)
+ *fwd = append(*fwd, r)
+}
+
+func dupEntryNew(dupList []map[string]uint8, mergeEntry map[string]uint8) bool {
+ for _, entry := range dupList {
+ if reflect.DeepEqual(entry, mergeEntry) {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *deduperCache) removeExpired() {
+ now := timeNow()
+ ele := c.entries.Back()
+ evicted := 0
+ for ele != nil && now.After(ele.Value.(*entry).expiryTime) {
+ evicted++
+ c.entries.Remove(ele)
+ fEntry := ele.Value.(*entry)
+ fEntry.dupList = nil
+ delete(c.ifaces, *fEntry.key)
+ ele = c.entries.Back()
+ }
+ if evicted > 0 {
+ dlog.WithFields(logrus.Fields{
+ "current": c.entries.Len(),
+ "evicted": evicted,
+ "expiryTime": c.expire,
+ }).Debug("entries evicted from the deduper cache")
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/limiter.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/limiter.go
new file mode 100644
index 000000000..282ad5761
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/limiter.go
@@ -0,0 +1,67 @@
+package flow
+
+import (
+ "time"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/sirupsen/logrus"
+)
+
+const initialLogPeriod = time.Minute
+const maxLogPeriod = time.Hour
+
+var cllog = logrus.WithField("component", "capacity.Limiter")
+
+// CapacityLimiter forwards the flows between two nodes but checks the status of the destination
+// node's buffered channel. If it is already full, it drops the incoming flows and periodically
+// logs a message about the number of lost flows.
+type CapacityLimiter struct {
+ droppedFlows int
+ metrics *metrics.Metrics
+}
+
+func NewCapacityLimiter(m *metrics.Metrics) *CapacityLimiter {
+ return &CapacityLimiter{metrics: m}
+}
+
+func (c *CapacityLimiter) Limit(in <-chan []*model.Record, out chan<- []*model.Record) {
+ go c.logDroppedFlows()
+ for i := range in {
+ if len(out) < cap(out) || cap(out) == 0 {
+ out <- i
+ } else {
+ c.metrics.DroppedFlowsCounter.WithSourceAndReason("limiter", "full").Add(float64(len(i)))
+ c.droppedFlows += len(i)
+ }
+ }
+}
+
+func (c *CapacityLimiter) logDroppedFlows() {
+ logPeriod := initialLogPeriod
+ debugging := logrus.IsLevelEnabled(logrus.DebugLevel)
+ for {
+ time.Sleep(logPeriod)
+
+ // a race condition might happen in this counter but it's not important as it's just for
+ // logging purposes
+ df := c.droppedFlows
+ if df > 0 {
+ c.droppedFlows = 0
+ cllog.Warnf("%d flows were dropped during the last %s because the agent is forwarding "+
+ "more flows than the remote ingestor is able to process. You might "+
+ "want to increase the CACHE_MAX_FLOWS and CACHE_ACTIVE_TIMEOUT property",
+ df, logPeriod)
+
+ // if not debug logs, backoff to avoid flooding the log with warning messages
+ if !debugging && logPeriod < maxLogPeriod {
+ logPeriod *= 2
+ if logPeriod > maxLogPeriod {
+ logPeriod = maxLogPeriod
+ }
+ }
+ } else {
+ logPeriod = initialLogPeriod
+ }
+ }
+}
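+
+// Illustrative usage sketch (not part of the vendored sources): the limiter is placed
+// in front of a bounded channel that feeds a slower consumer; the capacity below is
+// hypothetical (an unbuffered channel never drops):
+//
+//	out := make(chan []*model.Record, 10)
+//	cl := NewCapacityLimiter(m)
+//	go cl.Limit(in, out)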
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/perfbuffer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/perfbuffer.go
new file mode 100644
index 000000000..a795abbaf
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/perfbuffer.go
@@ -0,0 +1,70 @@
+package flow
+
+import (
+ "time"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/sirupsen/logrus"
+)
+
+var plog = logrus.WithField("component", "packet/PerfBuffer")
+
+type PerfBuffer struct {
+ maxEntries int
+ evictTimeout time.Duration
+ entries [](*model.PacketRecord)
+}
+
+func NewPerfBuffer(
+ maxEntries int, evictTimeout time.Duration,
+) *PerfBuffer {
+ return &PerfBuffer{
+ maxEntries: maxEntries,
+ evictTimeout: evictTimeout,
+ entries: []*model.PacketRecord{},
+ }
+}
+
+func (c *PerfBuffer) PBuffer(in <-chan *model.PacketRecord, out chan<- []*model.PacketRecord) {
+ evictTick := time.NewTicker(c.evictTimeout)
+ defer evictTick.Stop()
+ ind := 0
+ for {
+ select {
+ case <-evictTick.C:
+ if len(c.entries) == 0 {
+ break
+ }
+ evictingEntries := c.entries
+ c.entries = []*model.PacketRecord{}
+ logrus.WithField("packets", len(evictingEntries)).
+ Debug("evicting packets from userspace on timeout")
+ c.evict(evictingEntries, out)
+ case packet, ok := <-in:
+ if !ok {
+ plog.Debug("input channel closed. Evicting entries")
+ c.evict(c.entries, out)
+ plog.Debug("exiting perfbuffer routine")
+ return
+ }
+ if len(c.entries) >= c.maxEntries {
+ evictingEntries := c.entries
+ c.entries = []*model.PacketRecord{}
+ logrus.WithField("packets", len(evictingEntries)).
+ Debug("evicting packets from userspace accounter after reaching cache max length")
+ c.evict(evictingEntries, out)
+ }
+ c.entries = append(c.entries, model.NewPacketRecord(packet.Stream, (uint32)(len(packet.Stream)), packet.Time))
+ ind++
+ }
+ }
+}
+
+func (c *PerfBuffer) evict(entries [](*model.PacketRecord), evictor chan<- []*model.PacketRecord) {
+ packets := make([]*model.PacketRecord, 0, len(entries))
+ for _, payload := range entries {
+ packets = append(packets, model.NewPacketRecord(payload.Stream, (uint32)(len(payload.Stream)), payload.Time))
+ }
+ alog.WithField("numEntries", len(packets)).Debug("packets evicted from userspace accounter")
+ evictor <- packets
+}
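+
+// Illustrative usage sketch (not part of the vendored sources); the capacity and
+// eviction timeout below are hypothetical:
+//
+//	pb := NewPerfBuffer(1000, 200*time.Millisecond)
+//	go pb.PBuffer(in, out)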
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go
new file mode 100644
index 000000000..fe9d711ec
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go
@@ -0,0 +1,127 @@
+package flow
+
+import (
+ "context"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+
+ "github.com/gavv/monotime"
+ "github.com/netobserv/gopipes/pkg/node"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+var mtlog = logrus.WithField("component", "flow.MapTracer")
+
+// MapTracer accesses a mapped source of flows (the eBPF PerCPU HashMap), deserializes its
+// entries into flow model.Record structures, and accumulates the per-CPU records of each flow into a single record
+type MapTracer struct {
+ mapFetcher mapFetcher
+ evictionTimeout time.Duration
+ staleEntriesEvictTimeout time.Duration
+ // manages the access to the eviction routines, avoiding two evictions happening at the same time
+ evictionCond *sync.Cond
+ metrics *metrics.Metrics
+ timeSpentinLookupAndDelete prometheus.Histogram
+}
+
+type mapFetcher interface {
+ LookupAndDeleteMap(*metrics.Metrics) map[ebpf.BpfFlowId]model.BpfFlowContent
+ DeleteMapsStaleEntries(timeOut time.Duration)
+}
+
+func NewMapTracer(fetcher mapFetcher, evictionTimeout, staleEntriesEvictTimeout time.Duration, m *metrics.Metrics) *MapTracer {
+ return &MapTracer{
+ mapFetcher: fetcher,
+ evictionTimeout: evictionTimeout,
+ evictionCond: sync.NewCond(&sync.Mutex{}),
+ staleEntriesEvictTimeout: staleEntriesEvictTimeout,
+ metrics: m,
+ timeSpentinLookupAndDelete: m.CreateTimeSpendInLookupAndDelete(),
+ }
+}
+
+// Flush forces reading (and removing) all the flows from the source eBPF map
+// and sending the entries to the next stage in the pipeline
+func (m *MapTracer) Flush() {
+ m.evictionCond.Broadcast()
+}
+
+func (m *MapTracer) TraceLoop(ctx context.Context, forceGC bool) node.StartFunc[[]*model.Record] {
+ return func(out chan<- []*model.Record) {
+ evictionTicker := time.NewTicker(m.evictionTimeout)
+ go m.evictionSynchronization(ctx, forceGC, out)
+ for {
+ select {
+ case <-ctx.Done():
+ evictionTicker.Stop()
+ mtlog.Debug("exiting trace loop due to context cancellation")
+ return
+ case <-evictionTicker.C:
+ mtlog.Debug("triggering flow eviction on timer")
+ m.Flush()
+ }
+ }
+ }
+}
+
+// evictionSynchronization waits for the evictionCond signal and triggers the actual
+// eviction. It makes sure that only one eviction is running at a time.
+func (m *MapTracer) evictionSynchronization(ctx context.Context, forceGC bool, out chan<- []*model.Record) {
+ // flow eviction loop. It just keeps waiting for eviction until someone triggers the
+ // evictionCond.Broadcast signal
+ for {
+ // make sure we only evict once at a time, even if there are multiple eviction signals
+ m.evictionCond.L.Lock()
+ m.evictionCond.Wait()
+ select {
+ case <-ctx.Done():
+ mtlog.Debug("context canceled. Stopping goroutine before evicting flows")
+ return
+ default:
+ mtlog.Debug("evictionSynchronization signal received")
+ m.evictFlows(ctx, forceGC, out)
+ }
+ m.evictionCond.L.Unlock()
+
+ }
+}
+
+func (m *MapTracer) evictFlows(ctx context.Context, forceGC bool, forwardFlows chan<- []*model.Record) {
+ // it's important that this monotonic timer reports same or approximate values as kernel-side bpf_ktime_get_ns()
+ monotonicTimeNow := monotime.Now()
+ currentTime := time.Now()
+
+ var forwardingFlows []*model.Record
+ flows := m.mapFetcher.LookupAndDeleteMap(m.metrics)
+ elapsed := time.Since(currentTime)
+ for flowKey, flowMetrics := range flows {
+ forwardingFlows = append(forwardingFlows, model.NewRecord(
+ flowKey,
+ &flowMetrics,
+ currentTime,
+ uint64(monotonicTimeNow),
+ ))
+ }
+ m.mapFetcher.DeleteMapsStaleEntries(m.staleEntriesEvictTimeout)
+ select {
+ case <-ctx.Done():
+ mtlog.Debug("skipping flow eviction as agent is being stopped")
+ default:
+ forwardFlows <- forwardingFlows
+ }
+
+ if forceGC {
+ runtime.GC()
+ }
+ m.metrics.EvictionCounter.WithSource("hashmap").Inc()
+ m.metrics.EvictedFlowsCounter.WithSource("hashmap").Add(float64(len(forwardingFlows)))
+ m.timeSpentinLookupAndDelete.Observe(elapsed.Seconds())
+ mtlog.Debugf("%d flows evicted", len(forwardingFlows))
+}
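+
+// Illustrative usage sketch (not part of the vendored sources): the map tracer usually
+// sits at the head of the flows pipeline; the timeouts below are hypothetical and
+// fetcher stands for any mapFetcher implementation:
+//
+//	mt := NewMapTracer(fetcher, 5*time.Second, 30*time.Second, m)
+//	startNode := node.AsStart(mt.TraceLoop(ctx, false))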
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_perf.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_perf.go
new file mode 100644
index 000000000..237cbe946
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_perf.go
@@ -0,0 +1,73 @@
+package flow
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/cilium/ebpf/perf"
+ "github.com/netobserv/gopipes/pkg/node"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/sirupsen/logrus"
+)
+
+var pblog = logrus.WithField("component", "flow.PerfTracer")
+
+// PerfTracer receives raw packet records via the eBPF perf event array and submits them to the
+// userspace packets pipeline
+type PerfTracer struct {
+ perfArray perfReader
+ stats stats
+}
+
+type perfReader interface {
+ ReadPerf() (perf.Record, error)
+}
+
+func NewPerfTracer(
+ reader perfReader, logTimeout time.Duration,
+) *PerfTracer {
+ return &PerfTracer{
+ perfArray: reader,
+ stats: stats{loggingTimeout: logTimeout},
+ }
+}
+
+func (m *PerfTracer) TraceLoop(ctx context.Context) node.StartFunc[*model.PacketRecord] {
+ return func(out chan<- *model.PacketRecord) {
+ for {
+ select {
+ case <-ctx.Done():
+ pblog.Debug("exiting trace loop due to context cancellation")
+ return
+ default:
+ if err := m.listenAndForwardPerf(out); err != nil {
+
+ if errors.Is(err, perf.ErrClosed) {
+ pblog.Debug("Received signal, exiting..")
+ return
+ }
+ pblog.WithError(err).Warn("ignoring packet event")
+ continue
+ }
+ }
+ }
+ }
+}
+
+func (m *PerfTracer) listenAndForwardPerf(forwardCh chan<- *model.PacketRecord) error {
+ event, err := m.perfArray.ReadPerf()
+ if err != nil {
+ return fmt.Errorf("reading from perf event array: %w", err)
+ }
+ // Parses the perf event entry into an Event structure.
+ readFlow, err := model.ReadRawPacket(bytes.NewBuffer(event.RawSample))
+ if err != nil {
+ return fmt.Errorf("parsing data received from the perf event array: %w", err)
+ }
+ forwardCh <- readFlow
+ return nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_ringbuf.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_ringbuf.go
new file mode 100644
index 000000000..a92f7d38d
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_ringbuf.go
@@ -0,0 +1,128 @@
+package flow
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+
+ "github.com/cilium/ebpf/ringbuf"
+ "github.com/netobserv/gopipes/pkg/node"
+ "github.com/sirupsen/logrus"
+)
+
+var rtlog = logrus.WithField("component", "flow.RingBufTracer")
+
+// RingBufTracer receives single-packet flows via ringbuffer (usually, those that couldn't be
+// added in the eBPF kernel space due to the map being full or busy) and submits them to the
+// userspace Aggregator map
+type RingBufTracer struct {
+ mapFlusher mapFlusher
+ ringBuffer ringBufReader
+ stats stats
+ metrics *metrics.Metrics
+}
+
+type ringBufReader interface {
+ ReadRingBuf() (ringbuf.Record, error)
+}
+
+// stats supports atomic logging of ringBuffer metrics
+type stats struct {
+ loggingTimeout time.Duration
+ isForwarding int32
+ forwardedFlows int32
+ mapFullErrs int32
+}
+
+type mapFlusher interface {
+ Flush()
+}
+
+func NewRingBufTracer(reader ringBufReader, flusher mapFlusher, logTimeout time.Duration, m *metrics.Metrics) *RingBufTracer {
+ return &RingBufTracer{
+ mapFlusher: flusher,
+ ringBuffer: reader,
+ stats: stats{loggingTimeout: logTimeout},
+ metrics: m,
+ }
+}
+
+func (m *RingBufTracer) TraceLoop(ctx context.Context) node.StartFunc[*model.RawRecord] {
+ return func(out chan<- *model.RawRecord) {
+ debugging := logrus.IsLevelEnabled(logrus.DebugLevel)
+ for {
+ select {
+ case <-ctx.Done():
+ rtlog.Debug("exiting trace loop due to context cancellation")
+ return
+ default:
+ if err := m.listenAndForwardRingBuffer(debugging, out); err != nil {
+ if errors.Is(err, ringbuf.ErrClosed) {
+ rtlog.Debug("Received signal, exiting..")
+ return
+ }
+ rtlog.WithError(err).Warn("ignoring flow event")
+ continue
+ }
+ }
+ }
+ }
+}
+
+func (m *RingBufTracer) listenAndForwardRingBuffer(debugging bool, forwardCh chan<- *model.RawRecord) error {
+ event, err := m.ringBuffer.ReadRingBuf()
+ if err != nil {
+ m.metrics.Errors.WithErrorName("ringbuffer", "CannotReadRingbuffer").Inc()
+ return fmt.Errorf("reading from ring buffer: %w", err)
+ }
+ // Parses the ringbuf event entry into an Event structure.
+ readFlow, err := model.ReadFrom(bytes.NewBuffer(event.RawSample))
+ if err != nil {
+ m.metrics.Errors.WithErrorName("ringbuffer", "CannotParseRingbuffer").Inc()
+ return fmt.Errorf("parsing data received from the ring buffer: %w", err)
+ }
+ mapFullError := readFlow.Metrics.Errno == uint8(syscall.E2BIG)
+ if debugging {
+ m.stats.logRingBufferFlows(mapFullError)
+ }
+ errno := syscall.Errno(readFlow.Metrics.Errno)
+	// In the ring buffer, a "flow" is a single-packet flow that has not gone through aggregation yet, so we use the packet counter metric.
+ m.metrics.EvictedPacketsCounter.WithSourceAndReason("ringbuffer", errno.Error()).Inc()
+	// Forward it to the accounter in any case, to be accounted regardless of whether the flow is complete or ongoing
+ forwardCh <- readFlow
+ return nil
+}
+
+// logRingBufferFlows avoids flooding the logs on long series of forwarded flows by
+// aggregating their count over the logging timeout period
+func (m *stats) logRingBufferFlows(mapFullErr bool) {
+ atomic.AddInt32(&m.forwardedFlows, 1)
+ if mapFullErr {
+ atomic.AddInt32(&m.mapFullErrs, 1)
+ }
+ if atomic.CompareAndSwapInt32(&m.isForwarding, 0, 1) {
+ go func() {
+ time.Sleep(m.loggingTimeout)
+ mfe := atomic.LoadInt32(&m.mapFullErrs)
+ l := rtlog.WithFields(logrus.Fields{
+ "flows": atomic.LoadInt32(&m.forwardedFlows),
+ "mapFullErrs": mfe,
+ })
+ if mfe == 0 {
+ l.Debug("received flows via ringbuffer")
+ } else {
+ l.Debug("received flows via ringbuffer. You might want to increase the CACHE_MAX_FLOWS value")
+ }
+ atomic.StoreInt32(&m.forwardedFlows, 0)
+ atomic.StoreInt32(&m.isForwarding, 0)
+ atomic.StoreInt32(&m.mapFullErrs, 0)
+ }()
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/client.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/client.go
new file mode 100644
index 000000000..350bc619b
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/client.go
@@ -0,0 +1,37 @@
+// Package flowgrpc provides the basic interfaces to build a gRPC+Protobuf flows client & server
+package flowgrpc
+
+import (
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/utils"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+)
+
+// ClientConnection wraps a gRPC+protobuf connection
+type ClientConnection struct {
+ client pbflow.CollectorClient
+ conn *grpc.ClientConn
+}
+
+func ConnectClient(hostIP string, hostPort int) (*ClientConnection, error) {
+ // TODO: allow configuring some options (keepalive, backoff...)
+ socket := utils.GetSocket(hostIP, hostPort)
+ conn, err := grpc.NewClient(socket,
+ grpc.WithTransportCredentials(insecure.NewCredentials()))
+ if err != nil {
+ return nil, err
+ }
+ return &ClientConnection{
+ client: pbflow.NewCollectorClient(conn),
+ conn: conn,
+ }, nil
+}
+
+func (cp *ClientConnection) Client() pbflow.CollectorClient {
+ return cp.client
+}
+
+func (cp *ClientConnection) Close() error {
+ return cp.conn.Close()
+}
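+
+// Illustrative usage sketch (not part of the vendored sources); the host, port and
+// empty Records payload below are hypothetical, and ctx is an existing context:
+//
+//	cc, err := ConnectClient("127.0.0.1", 9999)
+//	if err != nil {
+//		return err
+//	}
+//	defer cc.Close()
+//	_, err = cc.Client().Send(ctx, &pbflow.Records{})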
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/server.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/server.go
new file mode 100644
index 000000000..ca5ee8dae
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/server.go
@@ -0,0 +1,77 @@
+package flowgrpc
+
+import (
+ "context"
+ "fmt"
+ "net"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/reflection"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
+)
+
+// CollectorServer wraps a Flow Collector connection & session
+type CollectorServer struct {
+ grpcServer *grpc.Server
+}
+
+type collectorOptions struct {
+ grpcServerOptions []grpc.ServerOption
+}
+
+// CollectorOption allows overriding the default configuration of the CollectorServer instance.
+// Use them in the StartCollector function.
+type CollectorOption func(options *collectorOptions)
+
+func WithGRPCServerOptions(options ...grpc.ServerOption) CollectorOption {
+ return func(copt *collectorOptions) {
+ copt.grpcServerOptions = options
+ }
+}
+
+// StartCollector listens in the background for gRPC+Protobuf flows on the given port, and forwards each
+// set of *pbflow.Records through the provided channel.
+func StartCollector(
+ port int, recordForwarder chan<- *pbflow.Records, options ...CollectorOption,
+) (*CollectorServer, error) {
+ copts := collectorOptions{}
+ for _, opt := range options {
+ opt(&copts)
+ }
+
+ lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+ if err != nil {
+ return nil, err
+ }
+ grpcServer := grpc.NewServer(copts.grpcServerOptions...)
+ pbflow.RegisterCollectorServer(grpcServer, &collectorAPI{
+ recordForwarder: recordForwarder,
+ })
+ reflection.Register(grpcServer)
+ go func() {
+ if err := grpcServer.Serve(lis); err != nil {
+ panic("error connecting to server: " + err.Error())
+ }
+ }()
+ return &CollectorServer{
+ grpcServer: grpcServer,
+ }, nil
+}
+
+func (c *CollectorServer) Close() error {
+ c.grpcServer.Stop()
+ return nil
+}
+
+type collectorAPI struct {
+ pbflow.UnimplementedCollectorServer
+ recordForwarder chan<- *pbflow.Records
+}
+
+var okReply = &pbflow.CollectorReply{}
+
+func (c *collectorAPI) Send(_ context.Context, records *pbflow.Records) (*pbflow.CollectorReply, error) {
+ c.recordForwarder <- records
+ return okReply, nil
+}
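+
+// Illustrative usage sketch (not part of the vendored sources); the port and channel
+// capacity below are hypothetical:
+//
+//	recordsCh := make(chan *pbflow.Records, 100)
+//	srv, err := StartCollector(9999, recordsCh)
+//	if err != nil {
+//		return err
+//	}
+//	defer srv.Close()
+//	for records := range recordsCh {
+//		// process the received records
+//		_ = records
+//	}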
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/client.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/client.go
new file mode 100644
index 000000000..52d2fef0a
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/client.go
@@ -0,0 +1,37 @@
+// Package pktgrpc provides the basic interfaces to build a gRPC+Protobuf packet client & server
+package pktgrpc
+
+import (
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/utils"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+)
+
+// ClientConnection wraps a gRPC+protobuf connection
+type ClientConnection struct {
+ client pbpacket.CollectorClient
+ conn *grpc.ClientConn
+}
+
+func ConnectClient(hostIP string, hostPort int) (*ClientConnection, error) {
+ // TODO: allow configuring some options (keepalive, backoff...)
+ socket := utils.GetSocket(hostIP, hostPort)
+ conn, err := grpc.NewClient(socket,
+ grpc.WithTransportCredentials(insecure.NewCredentials()))
+ if err != nil {
+ return nil, err
+ }
+ return &ClientConnection{
+ client: pbpacket.NewCollectorClient(conn),
+ conn: conn,
+ }, nil
+}
+
+func (cp *ClientConnection) Client() pbpacket.CollectorClient {
+ return cp.client
+}
+
+func (cp *ClientConnection) Close() error {
+ return cp.conn.Close()
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/server.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/server.go
new file mode 100644
index 000000000..272814857
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/server.go
@@ -0,0 +1,77 @@
+package pktgrpc
+
+import (
+ "context"
+ "fmt"
+ "net"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/reflection"
+)
+
+// CollectorServer wraps a Flow Collector connection & session
+type CollectorServer struct {
+ grpcServer *grpc.Server
+}
+
+type collectorOptions struct {
+ grpcServerOptions []grpc.ServerOption
+}
+
+// CollectorOption allows overriding the default configuration of the CollectorServer instance.
+// Use them in the StartCollector function.
+type CollectorOption func(options *collectorOptions)
+
+func WithGRPCServerOptions(options ...grpc.ServerOption) CollectorOption {
+ return func(copt *collectorOptions) {
+ copt.grpcServerOptions = options
+ }
+}
+
+// StartCollector listens in the background for gRPC+Protobuf packets on the given port, and forwards each
+// *pbpacket.Packet through the provided channel.
+func StartCollector(
+ port int, pktForwarder chan<- *pbpacket.Packet, options ...CollectorOption,
+) (*CollectorServer, error) {
+ copts := collectorOptions{}
+ for _, opt := range options {
+ opt(&copts)
+ }
+
+ lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+ if err != nil {
+ return nil, err
+ }
+ grpcServer := grpc.NewServer(copts.grpcServerOptions...)
+ pbpacket.RegisterCollectorServer(grpcServer, &collectorAPI{
+ pktForwarder: pktForwarder,
+ })
+ reflection.Register(grpcServer)
+ go func() {
+ if err := grpcServer.Serve(lis); err != nil {
+ panic("error connecting to server: " + err.Error())
+ }
+ }()
+ return &CollectorServer{
+ grpcServer: grpcServer,
+ }, nil
+}
+
+func (c *CollectorServer) Close() error {
+ c.grpcServer.Stop()
+ return nil
+}
+
+type collectorAPI struct {
+ pbpacket.UnimplementedCollectorServer
+ pktForwarder chan<- *pbpacket.Packet
+}
+
+var okReply = &pbpacket.CollectorReply{}
+
+func (c *collectorAPI) Send(_ context.Context, pkts *pbpacket.Packet) (*pbpacket.CollectorReply, error) {
+ c.pktForwarder <- pkts
+ return okReply, nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go
new file mode 100644
index 000000000..518010112
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go
@@ -0,0 +1,69 @@
+package ifaces
+
+import (
+ "context"
+ "fmt"
+ "github.com/sirupsen/logrus"
+ "github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
+)
+
+// EventType for an interface: added, deleted
+type EventType int
+
+const (
+ EventAdded EventType = iota
+ EventDeleted
+)
+
+func (e EventType) String() string {
+ switch e {
+ case EventAdded:
+ return "Added"
+ case EventDeleted:
+ return "Deleted"
+ default:
+ return fmt.Sprintf("Unknown (%d)", e)
+ }
+}
+
+var ilog = logrus.WithField("component", "ifaces.Informer")
+
+// Event of a network interface, given the type (added, removed) and the interface name
+type Event struct {
+ Type EventType
+ Interface Interface
+}
+
+type Interface struct {
+ Name string
+ Index int
+ NetNS netns.NsHandle
+}
+
+// Informer provides notifications about each network interface that is added or removed
+// from the host. Production implementations: Poller and Watcher.
+type Informer interface {
+ // Subscribe returns a channel that sends Event instances.
+ Subscribe(ctx context.Context) (<-chan Event, error)
+}
+
+func netInterfaces(nsh netns.NsHandle) ([]Interface, error) {
+ handle, err := netlink.NewHandleAt(nsh)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create handle for netns (%s): %w", nsh.String(), err)
+ }
+ defer handle.Close()
+
+ // Get a list of interfaces in the namespace
+ links, err := handle.LinkList()
+ if err != nil {
+ return nil, fmt.Errorf("failed to list interfaces in netns (%s): %w", nsh.String(), err)
+ }
+
+ names := make([]Interface, len(links))
+ for i, link := range links {
+ names[i] = Interface{Name: link.Attrs().Name, Index: link.Attrs().Index, NetNS: nsh}
+ }
+ return names, nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/poller.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/poller.go
new file mode 100644
index 000000000..e655b9e23
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/poller.go
@@ -0,0 +1,105 @@
+package ifaces
+
+import (
+ "context"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "github.com/vishvananda/netns"
+)
+
+// Poller periodically looks for the network interfaces in the system and forwards Event
+// notifications when interfaces are added or deleted.
+type Poller struct {
+ period time.Duration
+ current map[Interface]struct{}
+ interfaces func(handle netns.NsHandle) ([]Interface, error)
+ bufLen int
+}
+
+func NewPoller(period time.Duration, bufLen int) *Poller {
+ return &Poller{
+ period: period,
+ bufLen: bufLen,
+ interfaces: netInterfaces,
+ current: map[Interface]struct{}{},
+ }
+}
+
+func (np *Poller) Subscribe(ctx context.Context) (<-chan Event, error) {
+
+ out := make(chan Event, np.bufLen)
+ netns, err := getNetNS()
+ if err != nil {
+ go np.pollForEvents(ctx, "", out)
+ } else {
+ for _, n := range netns {
+ go np.pollForEvents(ctx, n, out)
+ }
+ }
+ return out, nil
+}
+
+func (np *Poller) pollForEvents(ctx context.Context, ns string, out chan Event) {
+ log := logrus.WithField("component", "ifaces.Poller")
+ log.WithField("period", np.period).Debug("subscribing to Interface events")
+ ticker := time.NewTicker(np.period)
+ var netnsHandle netns.NsHandle
+ var err error
+
+ if ns == "" {
+ netnsHandle = netns.None()
+ } else {
+ netnsHandle, err = netns.GetFromName(ns)
+ if err != nil {
+ return
+ }
+ }
+
+ defer ticker.Stop()
+ for {
+ if ifaces, err := np.interfaces(netnsHandle); err != nil {
+ log.WithError(err).Warn("fetching interface names")
+ } else {
+ log.WithField("names", ifaces).Debug("fetched interface names")
+ np.diffNames(out, ifaces)
+ }
+ select {
+ case <-ctx.Done():
+ log.Debug("stopped")
+ close(out)
+ return
+ case <-ticker.C:
+ // continue after a period
+ }
+ }
+}
+
+// diffNames compares the latest list of polled interfaces with the internal record and
+// updates it. It forwards Events for any detected addition or removal of interfaces.
+func (np *Poller) diffNames(events chan Event, ifaces []Interface) {
+ // Check for new interfaces
+ acquired := map[Interface]struct{}{}
+ for _, iface := range ifaces {
+ acquired[iface] = struct{}{}
+ if _, ok := np.current[iface]; !ok {
+ ilog.WithField("interface", iface).Debug("added network interface")
+ np.current[iface] = struct{}{}
+ events <- Event{
+ Type: EventAdded,
+ Interface: iface,
+ }
+ }
+ }
+ // Check for deleted interfaces
+ for iface := range np.current {
+ if _, ok := acquired[iface]; !ok {
+ delete(np.current, iface)
+ ilog.WithField("interface", iface).Debug("deleted network interface")
+ events <- Event{
+ Type: EventDeleted,
+ Interface: iface,
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/registerer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/registerer.go
new file mode 100644
index 000000000..e006c6c4b
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/registerer.go
@@ -0,0 +1,73 @@
+package ifaces
+
+import (
+ "context"
+ "net"
+ "sync"
+)
+
+// Registerer is an informer that wraps another informer implementation, and keeps track of
+// the currently existing interfaces in the system, accessible through the IfaceNameForIndex method.
+type Registerer struct {
+ m sync.RWMutex
+ inner Informer
+ ifaces map[int]string
+ bufLen int
+}
+
+func NewRegisterer(inner Informer, bufLen int) *Registerer {
+ return &Registerer{
+ inner: inner,
+ bufLen: bufLen,
+ ifaces: map[int]string{},
+ }
+}
+
+func (r *Registerer) Subscribe(ctx context.Context) (<-chan Event, error) {
+ innerCh, err := r.inner.Subscribe(ctx)
+ if err != nil {
+ return nil, err
+ }
+ out := make(chan Event, r.bufLen)
+ go func() {
+ for ev := range innerCh {
+ switch ev.Type {
+ case EventAdded:
+ r.m.Lock()
+ r.ifaces[ev.Interface.Index] = ev.Interface.Name
+ r.m.Unlock()
+ case EventDeleted:
+ r.m.Lock()
+ name, ok := r.ifaces[ev.Interface.Index]
+ // prevent removing an interface with the same index but different name
+ // e.g. due to an out-of-order add/delete signaling
+ if ok && name == ev.Interface.Name {
+ delete(r.ifaces, ev.Interface.Index)
+ }
+ r.m.Unlock()
+ }
+ out <- ev
+ }
+ }()
+ return out, nil
+}
+
+// IfaceNameForIndex gets the interface name given an index as recorded by the underlying
+// interfaces' informer. It falls back to the net.InterfaceByIndex function if the interface
+// has not been previously registered
+func (r *Registerer) IfaceNameForIndex(idx int) (string, bool) {
+ r.m.RLock()
+ name, ok := r.ifaces[idx]
+ r.m.RUnlock()
+ if !ok {
+ iface, err := net.InterfaceByIndex(idx)
+ if err != nil {
+ return "", false
+ }
+ name = iface.Name
+ r.m.Lock()
+ r.ifaces[idx] = name
+ r.m.Unlock()
+ }
+ return name, ok
+}
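+
+// Illustrative usage sketch (not part of the vendored sources): the registerer wraps
+// another informer (Poller or Watcher) so that interface names can later be resolved
+// by index; the buffer lengths and index below are hypothetical:
+//
+//	reg := NewRegisterer(NewWatcher(10), 10)
+//	if _, err := reg.Subscribe(ctx); err != nil {
+//		return err
+//	}
+//	name, ok := reg.IfaceNameForIndex(2)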
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go
new file mode 100644
index 000000000..e35bb9c90
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go
@@ -0,0 +1,232 @@
+package ifaces
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/fsnotify/fsnotify"
+ "github.com/sirupsen/logrus"
+ "github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
+const (
+ netnsVolume = "/var/run/netns"
+)
+
+// Watcher uses the system's netlink to receive real-time events about the addition or removal
+// of network interfaces.
+type Watcher struct {
+ bufLen int
+ current map[Interface]struct{}
+ interfaces func(handle netns.NsHandle) ([]Interface, error)
+ // linkSubscriber abstracts netlink.LinkSubscribe implementation, allowing the injection of
+ // mocks for unit testing
+ linkSubscriberAt func(ns netns.NsHandle, ch chan<- netlink.LinkUpdate, done <-chan struct{}) error
+ mutex *sync.Mutex
+ netnsWatcher *fsnotify.Watcher
+ nsDone map[string]chan struct{}
+}
+
+func NewWatcher(bufLen int) *Watcher {
+ return &Watcher{
+ bufLen: bufLen,
+ current: map[Interface]struct{}{},
+ interfaces: netInterfaces,
+ linkSubscriberAt: netlink.LinkSubscribeAt,
+ mutex: &sync.Mutex{},
+ netnsWatcher: &fsnotify.Watcher{},
+ nsDone: make(map[string]chan struct{}),
+ }
+}
+
+func (w *Watcher) Subscribe(ctx context.Context) (<-chan Event, error) {
+ out := make(chan Event, w.bufLen)
+ netns, err := getNetNS()
+ if err != nil {
+ w.nsDone[""] = make(chan struct{})
+ go w.sendUpdates(ctx, "", out)
+ } else {
+ for _, n := range netns {
+ w.nsDone[n] = make(chan struct{})
+ go w.sendUpdates(ctx, n, out)
+ }
+ }
+	// register to get notifications when a netns is created or deleted, and register for link updates in new netns
+ w.netnsNotify(ctx, out)
+ return out, nil
+}
+
+func (w *Watcher) sendUpdates(ctx context.Context, ns string, out chan Event) {
+ var netnsHandle netns.NsHandle
+ var err error
+ log := logrus.WithField("component", "ifaces.Watcher")
+ doneChan := w.nsDone[ns]
+ defer func() {
+ close(doneChan)
+ delete(w.nsDone, ns)
+ }()
+ // subscribe for interface events
+ links := make(chan netlink.LinkUpdate)
+ if err = wait.PollUntilContextTimeout(ctx, 50*time.Microsecond, time.Second, true, func(_ context.Context) (done bool, err error) {
+ if ns == "" {
+ netnsHandle = netns.None()
+ } else {
+ if netnsHandle, err = netns.GetFromName(ns); err != nil {
+ return false, nil
+ }
+ }
+
+ if err = w.linkSubscriberAt(netnsHandle, links, doneChan); err != nil {
+ log.WithFields(logrus.Fields{
+ "netns": ns,
+ "netnsHandle": netnsHandle.String(),
+ "error": err,
+ }).Debug("linkSubscribe failed retry")
+ if err := netnsHandle.Close(); err != nil {
+ log.WithError(err).Warn("netnsHandle close failed")
+ }
+ return false, nil
+ }
+
+ log.WithFields(logrus.Fields{
+ "netns": ns,
+ "netnsHandle": netnsHandle.String(),
+ }).Debug("linkSubscribe to receive links update")
+ return true, nil
+ }); err != nil {
+ log.WithError(err).Errorf("can't subscribe to links netns %s netnsHandle %s", ns, netnsHandle.String())
+ return
+ }
+
+ // before sending netlink updates, send all the existing interfaces at the moment of starting
+ // the Watcher
+ if netnsHandle.IsOpen() || netnsHandle.Equal(netns.None()) {
+ if names, err := w.interfaces(netnsHandle); err != nil {
+ log.WithError(err).Error("can't fetch network interfaces. You might be missing flows")
+ } else {
+ for _, name := range names {
+ iface := Interface{Name: name.Name, Index: name.Index, NetNS: netnsHandle}
+ w.mutex.Lock()
+ w.current[iface] = struct{}{}
+ w.mutex.Unlock()
+ out <- Event{Type: EventAdded, Interface: iface}
+ }
+ }
+ }
+
+ for link := range links {
+ attrs := link.Attrs()
+ if attrs == nil {
+ log.WithField("link", link).Debug("received link update without attributes. Ignoring")
+ continue
+ }
+ iface := Interface{Name: attrs.Name, Index: attrs.Index, NetNS: netnsHandle}
+ w.mutex.Lock()
+ if link.Flags&(syscall.IFF_UP|syscall.IFF_RUNNING) != 0 && attrs.OperState == netlink.OperUp {
+ log.WithFields(logrus.Fields{
+ "operstate": attrs.OperState,
+ "flags": attrs.Flags,
+ "name": attrs.Name,
+ "netns": netnsHandle.String(),
+ }).Debug("Interface up and running")
+ if _, ok := w.current[iface]; !ok {
+ w.current[iface] = struct{}{}
+ out <- Event{Type: EventAdded, Interface: iface}
+ }
+ } else {
+ log.WithFields(logrus.Fields{
+ "operstate": attrs.OperState,
+ "flags": attrs.Flags,
+ "name": attrs.Name,
+ "netns": netnsHandle.String(),
+ }).Debug("Interface down or not running")
+ if _, ok := w.current[iface]; ok {
+ delete(w.current, iface)
+ out <- Event{Type: EventDeleted, Interface: iface}
+ }
+ }
+ w.mutex.Unlock()
+ }
+}
+
+func getNetNS() ([]string, error) {
+ log := logrus.WithField("component", "ifaces.Watcher")
+ files, err := os.ReadDir(netnsVolume)
+ if err != nil {
+ log.Warningf("can't detect any network-namespaces err: %v [Ignore if the agent privileged flag is not set]", err)
+ return nil, fmt.Errorf("failed to list network-namespaces: %w", err)
+ }
+
+ netns := []string{""}
+ if len(files) == 0 {
+ log.WithField("netns", files).Debug("empty network-namespaces list")
+ return netns, nil
+ }
+ for _, f := range files {
+ ns := f.Name()
+ netns = append(netns, ns)
+ log.WithFields(logrus.Fields{
+ "netns": ns,
+ }).Debug("Detected network-namespace")
+ }
+
+ return netns, nil
+}
+
+func (w *Watcher) netnsNotify(ctx context.Context, out chan Event) {
+ var err error
+ log := logrus.WithField("component", "ifaces.Watcher")
+
+ w.netnsWatcher, err = fsnotify.NewWatcher()
+ if err != nil {
+ log.WithError(err).Error("can't subscribe fsnotify")
+ return
+ }
+ // Start a goroutine to handle netns events
+ go func() {
+ for {
+ select {
+ case event, ok := <-w.netnsWatcher.Events:
+ if !ok {
+ return
+ }
+ if event.Op&fsnotify.Create == fsnotify.Create {
+ ns := filepath.Base(event.Name)
+ log.WithField("netns", ns).Debug("netns create notification")
+ if _, ok := w.nsDone[ns]; ok {
+ log.WithField("netns", ns).Debug("netns channel already exists, delete it")
+ delete(w.nsDone, ns)
+ }
+ w.nsDone[ns] = make(chan struct{})
+ go w.sendUpdates(ctx, ns, out)
+ }
+ if event.Op&fsnotify.Remove == fsnotify.Remove {
+ ns := filepath.Base(event.Name)
+ log.WithField("netns", ns).Debug("netns delete notification")
+ if _, ok := w.nsDone[ns]; ok {
+ w.nsDone[ns] <- struct{}{}
+ } else {
+ log.WithField("netns", ns).Debug("netns delete but there is no channel to send events to")
+ }
+ }
+ case err, ok := <-w.netnsWatcher.Errors:
+ if !ok {
+ return
+ }
+ log.WithError(err).Error("netns watcher detected an error")
+ }
+ }
+ }()
+
+ err = w.netnsWatcher.Add(netnsVolume)
+ if err != nil {
+ log.Warningf("failed to add watcher to netns directory err: %v [Ignore if the agent privileged flag is not set]", err)
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/kernel/kernel_utils.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/kernel/kernel_utils.go
new file mode 100644
index 000000000..4f81c7fa4
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/kernel/kernel_utils.go
@@ -0,0 +1,122 @@
+package kernel
+
+import (
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ versionRegex = regexp.MustCompile(`^(\d+)\.(\d+)\.(\d+)(?:-(\d+))?`)
+ rtRegex = regexp.MustCompile(`[.-]rt`)
+ kernelVersion uint32
+ log = logrus.WithField("component", "kernel")
+)
+
+func init() {
+ var err error
+ kernelVersion, err = currentKernelVersion()
+ if err != nil {
+ log.Errorf("failed to get current kernel version: %v", err)
+ }
+}
+
+func IsKernelOlderThan(version string) bool {
+ refVersion, err := kernelVersionFromReleaseString(version)
+ if err != nil {
+ log.Warnf("failed to get kernel version from release string: %v", err)
+ return false
+ }
+ return kernelVersion != 0 && kernelVersion < refVersion
+}
+
+// kernelVersionFromReleaseString converts a release string with format
+// 4.4.2[-1] to a kernel version number in LINUX_VERSION_CODE format.
+// That is, for kernel "a.b.c-d", the version number will be (a<<24 + b<<16 + c<<8 + d)
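+// For example, the (hypothetical) release string "5.14.0-427" encodes to
+// 5<<24 + 14<<16 + 0<<8 + (427 & 0xFF) = 0x050E00AB.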
+func kernelVersionFromReleaseString(releaseString string) (uint32, error) {
+ versionParts := versionRegex.FindStringSubmatch(releaseString)
+ if len(versionParts) == 0 {
+ return 0, fmt.Errorf("got invalid release version %q (expected format '4.3.2-1')", releaseString)
+ }
+ major, err := strconv.Atoi(versionParts[1])
+ if err != nil {
+ return 0, err
+ }
+
+ minor, err := strconv.Atoi(versionParts[2])
+ if err != nil {
+ return 0, err
+ }
+
+ patch, err := strconv.Atoi(versionParts[3])
+ if err != nil {
+ return 0, err
+ }
+ extraNumeric := 0
+ if versionParts[4] != "" {
+ extraNumeric, err = strconv.Atoi(versionParts[4])
+ if err != nil {
+ return 0, err
+ }
+ }
+ out := major*256*256*256 + minor*256*256 + patch*256 + (extraNumeric & 0xFF)
+ return uint32(out), nil
+}
+
+func currentKernelVersion() (uint32, error) {
+ var buf syscall.Utsname
+ if err := syscall.Uname(&buf); err != nil {
+ return 0, err
+ }
+ releaseString := strings.Trim(utsnameStr(buf.Release[:]), "\x00")
+ return kernelVersionFromReleaseString(releaseString)
+}
+
+func utsnameStr[T int8 | uint8](in []T) string {
+ out := make([]byte, len(in))
+ for i := 0; i < len(in); i++ {
+ if in[i] == 0 {
+ break
+ }
+ out = append(out, byte(in[i]))
+ }
+ return string(out)
+}
+
+func getKernelVersion() (string, error) {
+ output, err := exec.Command("uname", "-r").Output()
+ if err != nil {
+ return "", err
+ }
+ if len(output) == 0 {
+ return "", fmt.Errorf("kernel version not found")
+ }
+ return strings.TrimSpace(string(output)), nil
+}
+
+func isRealTimeKernel(kernelVersion string) bool {
+ rt := rtRegex.FindStringSubmatch(kernelVersion)
+ return len(rt) != 0
+}
+
+func IsRealTimeKernel() bool {
+ version, err := getKernelVersion()
+ if err != nil {
+ log.Errorf("failed to get kernel version: %v", err)
+ return false
+ }
+
+ if len(version) == 0 {
+ return false
+ }
+ if isRealTimeKernel(version) {
+ log.Debugf("kernel version: %v is realtime", version)
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go
new file mode 100644
index 000000000..77e3ac28d
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go
@@ -0,0 +1,285 @@
+package metrics
+
+import (
+ "errors"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+type MetricDefinition struct {
+ Name string
+ Help string
+ Type metricType
+ Labels []string
+}
+
+type PromTLS struct {
+ CertPath string
+ KeyPath string
+}
+
+type PromConnectionInfo struct {
+ Address string
+ Port int
+ TLS *PromTLS
+}
+
+type Settings struct {
+ PromConnectionInfo
+ Prefix string
+}
+
+type metricType string
+
+const (
+ TypeCounter metricType = "counter"
+ TypeGauge metricType = "gauge"
+ TypeHistogram metricType = "histogram"
+)
+
+var allMetrics = []MetricDefinition{}
+
+func defineMetric(name, help string, t metricType, labels ...string) MetricDefinition {
+ def := MetricDefinition{
+ Name: name,
+ Help: help,
+ Type: t,
+ Labels: labels,
+ }
+ allMetrics = append(allMetrics, def)
+ return def
+}
+
+var (
+ evictionsTotal = defineMetric(
+ "evictions_total",
+ "Number of eviction events",
+ TypeCounter,
+ "source",
+ "reason",
+ )
+ evictedFlowsTotal = defineMetric(
+ "evicted_flows_total",
+ "Number of evicted flows",
+ TypeCounter,
+ "source",
+ "reason",
+ )
+ evictedPktTotal = defineMetric(
+ "evicted_packets_total",
+ "Number of evicted packets",
+ TypeCounter,
+ "source",
+ "reason",
+ )
+ lookupAndDeleteMapDurationSeconds = defineMetric(
+ "lookup_and_delete_map_duration_seconds",
+ "Lookup and delete map duration in seconds",
+ TypeHistogram,
+ )
+ droppedFlows = defineMetric(
+ "dropped_flows_total",
+ "Number of dropped flows",
+ TypeCounter,
+ "source",
+ "reason",
+ )
+ filterFlows = defineMetric(
+ "filtered_flows_total",
+ "Number of filtered flows",
+ TypeCounter,
+ "source",
+ "reason",
+ )
+ networkEvents = defineMetric(
+ "network_events_total",
+ "Number of Network Events flows",
+ TypeCounter,
+ "source",
+ "reason",
+ )
+ bufferSize = defineMetric(
+ "buffer_size",
+ "Buffer size",
+ TypeGauge,
+ "name",
+ )
+ exportedBatchCounterTotal = defineMetric(
+ "exported_batch_total",
+ "Exported batches",
+ TypeCounter,
+ "exporter",
+ )
+ samplingRate = defineMetric(
+ "sampling_rate",
+ "Sampling rate",
+ TypeGauge,
+ )
+ errorsCounter = defineMetric(
+ "errors_total",
+ "errors counter",
+ TypeCounter,
+ "component",
+ "error",
+ )
+)
+
+func (def *MetricDefinition) mapLabels(labels []string) prometheus.Labels {
+ if len(labels) != len(def.Labels) {
+ logrus.Errorf("Could not map labels, length differ in def %s [%v / %v]", def.Name, def.Labels, labels)
+ }
+ labelsMap := prometheus.Labels{}
+ for i, label := range labels {
+ labelsMap[def.Labels[i]] = label
+ }
+ return labelsMap
+}
+
+func verifyMetricType(def *MetricDefinition, t metricType) {
+ if def.Type != t {
+ logrus.Panicf("operational metric %q is of type %q but is being registered as %q", def.Name, def.Type, t)
+ }
+}
+
+type Metrics struct {
+ Settings *Settings
+
+ // Shared metrics:
+ EvictionCounter *EvictionCounter
+ EvictedFlowsCounter *EvictionCounter
+ EvictedPacketsCounter *EvictionCounter
+ DroppedFlowsCounter *EvictionCounter
+ FilteredFlowsCounter *EvictionCounter
+ NetworkEventsCounter *EvictionCounter
+ BufferSizeGauge *BufferSizeGauge
+ Errors *ErrorCounter
+}
+
+func NewMetrics(settings *Settings) *Metrics {
+ m := &Metrics{
+ Settings: settings,
+ }
+ m.EvictionCounter = &EvictionCounter{vec: m.NewCounterVec(&evictionsTotal)}
+ m.EvictedFlowsCounter = &EvictionCounter{vec: m.NewCounterVec(&evictedFlowsTotal)}
+ m.EvictedPacketsCounter = &EvictionCounter{vec: m.NewCounterVec(&evictedPktTotal)}
+ m.DroppedFlowsCounter = &EvictionCounter{vec: m.NewCounterVec(&droppedFlows)}
+ m.FilteredFlowsCounter = &EvictionCounter{vec: m.NewCounterVec(&filterFlows)}
+ m.NetworkEventsCounter = &EvictionCounter{vec: m.NewCounterVec(&networkEvents)}
+ m.BufferSizeGauge = &BufferSizeGauge{vec: m.NewGaugeVec(&bufferSize)}
+ m.Errors = &ErrorCounter{vec: m.NewCounterVec(&errorsCounter)}
+ return m
+}
+
+// register will register against the default registry. May panic or not depending on settings
+func (m *Metrics) register(c prometheus.Collector, name string) {
+ err := prometheus.DefaultRegisterer.Register(c)
+ if err != nil {
+ if errors.As(err, &prometheus.AlreadyRegisteredError{}) {
+ logrus.Warningf("metrics registration error [%s]: %v", name, err)
+ } else {
+ logrus.Panicf("metrics registration error [%s]: %v", name, err)
+ }
+ }
+}
+
+func (m *Metrics) NewCounter(def *MetricDefinition, labels ...string) prometheus.Counter {
+ verifyMetricType(def, TypeCounter)
+ fullName := m.Settings.Prefix + def.Name
+ c := prometheus.NewCounter(prometheus.CounterOpts{
+ Name: fullName,
+ Help: def.Help,
+ ConstLabels: def.mapLabels(labels),
+ })
+ m.register(c, fullName)
+ return c
+}
+
+func (m *Metrics) NewCounterVec(def *MetricDefinition) *prometheus.CounterVec {
+ verifyMetricType(def, TypeCounter)
+ fullName := m.Settings.Prefix + def.Name
+ c := prometheus.NewCounterVec(prometheus.CounterOpts{
+ Name: fullName,
+ Help: def.Help,
+ }, def.Labels)
+ m.register(c, fullName)
+ return c
+}
+
+func (m *Metrics) NewGauge(def *MetricDefinition, labels ...string) prometheus.Gauge {
+ verifyMetricType(def, TypeGauge)
+ fullName := m.Settings.Prefix + def.Name
+ c := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: fullName,
+ Help: def.Help,
+ ConstLabels: def.mapLabels(labels),
+ })
+ m.register(c, fullName)
+ return c
+}
+
+func (m *Metrics) NewGaugeVec(def *MetricDefinition) *prometheus.GaugeVec {
+ verifyMetricType(def, TypeGauge)
+ fullName := m.Settings.Prefix + def.Name
+ g := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Name: fullName,
+ Help: def.Help,
+ }, def.Labels)
+ m.register(g, fullName)
+ return g
+}
+
+func (m *Metrics) NewHistogram(def *MetricDefinition, buckets []float64, labels ...string) prometheus.Histogram {
+ verifyMetricType(def, TypeHistogram)
+ fullName := m.Settings.Prefix + def.Name
+ c := prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: fullName,
+ Help: def.Help,
+ Buckets: buckets,
+ ConstLabels: def.mapLabels(labels),
+ })
+ m.register(c, fullName)
+ return c
+}
+
+// EvictionCounter provides syntactic sugar hiding prom's counter for eviction purposes
+type EvictionCounter struct {
+ vec *prometheus.CounterVec
+}
+
+func (c *EvictionCounter) WithSourceAndReason(source, reason string) prometheus.Counter {
+ return c.vec.WithLabelValues(source, reason)
+}
+
+func (c *EvictionCounter) WithSource(source string) prometheus.Counter {
+ return c.vec.WithLabelValues(source, "")
+}
+
+func (m *Metrics) CreateTimeSpendInLookupAndDelete() prometheus.Histogram {
+ return m.NewHistogram(&lookupAndDeleteMapDurationSeconds, []float64{.001, .01, .1, 1, 10, 100, 1000, 10000})
+}
+
+// BufferSizeGauge provides syntactic sugar hiding prom's gauge tailored for buffer size
+type BufferSizeGauge struct {
+ vec *prometheus.GaugeVec
+}
+
+func (g *BufferSizeGauge) WithBufferName(bufferName string) prometheus.Gauge {
+ return g.vec.WithLabelValues(bufferName)
+}
+
+func (m *Metrics) CreateBatchCounter(exporter string) prometheus.Counter {
+ return m.NewCounter(&exportedBatchCounterTotal, exporter)
+}
+
+func (m *Metrics) CreateSamplingRate() prometheus.Gauge {
+ return m.NewGauge(&samplingRate)
+}
+
+type ErrorCounter struct {
+ vec *prometheus.CounterVec
+}
+
+func (c *ErrorCounter) WithErrorName(component, errName string) prometheus.Counter {
+ return c.vec.WithLabelValues(component, errName)
+}
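+
+// Illustrative usage sketch (not part of the vendored sources); the prefix and the
+// values below are hypothetical:
+//
+//	m := NewMetrics(&Settings{Prefix: "ebpf_agent_"})
+//	m.EvictionCounter.WithSource("hashmap").Inc()
+//	m.BufferSizeGauge.WithBufferName("accounter-entries").Set(42)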
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/flow_content.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/flow_content.go
new file mode 100644
index 000000000..111303565
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/flow_content.go
@@ -0,0 +1,110 @@
+package model
+
+import "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+
+type BpfFlowContent struct {
+ *ebpf.BpfFlowMetrics
+ AdditionalMetrics *ebpf.BpfAdditionalMetrics
+}
+
+type BpfFlowContents []BpfFlowContent
+
+func (a *BpfFlowContents) Accumulate() BpfFlowContent {
+ res := BpfFlowContent{}
+ for _, p := range *a {
+ res.AccumulateBase(p.BpfFlowMetrics)
+ res.AccumulateAdditional(p.AdditionalMetrics)
+ }
+ return res
+}
+
+func (p *BpfFlowContent) AccumulateBase(other *ebpf.BpfFlowMetrics) {
+ p.BpfFlowMetrics = AccumulateBase(p.BpfFlowMetrics, other)
+}
+
+func AccumulateBase(p *ebpf.BpfFlowMetrics, other *ebpf.BpfFlowMetrics) *ebpf.BpfFlowMetrics {
+ if other == nil {
+ return p
+ }
+ if p == nil {
+ return other
+ }
+	// time == 0 if the value has not been set yet
+ if p.StartMonoTimeTs == 0 || (p.StartMonoTimeTs > other.StartMonoTimeTs && other.StartMonoTimeTs != 0) {
+ p.StartMonoTimeTs = other.StartMonoTimeTs
+ }
+ if p.EndMonoTimeTs == 0 || p.EndMonoTimeTs < other.EndMonoTimeTs {
+ p.EndMonoTimeTs = other.EndMonoTimeTs
+ }
+ p.Bytes += other.Bytes
+ p.Packets += other.Packets
+ p.Flags |= other.Flags
+ if other.EthProtocol != 0 {
+ p.EthProtocol = other.EthProtocol
+ }
+ if allZerosMac(p.SrcMac) {
+ p.SrcMac = other.SrcMac
+ }
+ if allZerosMac(p.DstMac) {
+ p.DstMac = other.DstMac
+ }
+ if other.Dscp != 0 {
+ p.Dscp = other.Dscp
+ }
+ if other.Sampling != 0 {
+ p.Sampling = other.Sampling
+ }
+ return p
+}
+
+func (p *BpfFlowContent) AccumulateAdditional(other *ebpf.BpfAdditionalMetrics) {
+ if other == nil {
+ return
+ }
+ if p.AdditionalMetrics == nil {
+ p.AdditionalMetrics = other
+ return
+ }
+ // DNS
+ p.AdditionalMetrics.DnsRecord.Flags |= other.DnsRecord.Flags
+ if other.DnsRecord.Id != 0 {
+ p.AdditionalMetrics.DnsRecord.Id = other.DnsRecord.Id
+ }
+ if p.AdditionalMetrics.DnsRecord.Errno != other.DnsRecord.Errno {
+ p.AdditionalMetrics.DnsRecord.Errno = other.DnsRecord.Errno
+ }
+ if p.AdditionalMetrics.DnsRecord.Latency < other.DnsRecord.Latency {
+ p.AdditionalMetrics.DnsRecord.Latency = other.DnsRecord.Latency
+ }
+ // Drop statistics
+ p.AdditionalMetrics.PktDrops.Bytes += other.PktDrops.Bytes
+ p.AdditionalMetrics.PktDrops.Packets += other.PktDrops.Packets
+ p.AdditionalMetrics.PktDrops.LatestFlags |= other.PktDrops.LatestFlags
+ if other.PktDrops.LatestDropCause != 0 {
+ p.AdditionalMetrics.PktDrops.LatestDropCause = other.PktDrops.LatestDropCause
+ }
+ // RTT
+ if p.AdditionalMetrics.FlowRtt < other.FlowRtt {
+ p.AdditionalMetrics.FlowRtt = other.FlowRtt
+ }
+ // Network events
+ for _, md := range other.NetworkEvents {
+ if !AllZerosMetaData(md) && !networkEventsMDExist(p.AdditionalMetrics.NetworkEvents, md) {
+ copy(p.AdditionalMetrics.NetworkEvents[p.AdditionalMetrics.NetworkEventsIdx][:], md[:])
+ p.AdditionalMetrics.NetworkEventsIdx = (p.AdditionalMetrics.NetworkEventsIdx + 1) % MaxNetworkEvents
+ }
+ }
+ // Packet Translations
+ if !AllZeroIP(IP(other.TranslatedFlow.Saddr)) && !AllZeroIP(IP(other.TranslatedFlow.Daddr)) {
+ p.AdditionalMetrics.TranslatedFlow = other.TranslatedFlow
+ }
+}
+
+func allZerosMac(s [6]uint8) bool {
+ for _, v := range s {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
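
For reference, AccumulateBase above merges multiple observations of the same flow: byte and packet counters are summed, TCP flags are OR-ed, and the earliest start / latest end monotonic timestamps are kept. A small sketch with made-up field values, assuming the vendored module paths shown in this diff:

package main

import (
	"fmt"

	"github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
)

func main() {
	a := &ebpf.BpfFlowMetrics{Bytes: 100, Packets: 2, Flags: 0x02, StartMonoTimeTs: 10, EndMonoTimeTs: 20}
	b := &ebpf.BpfFlowMetrics{Bytes: 50, Packets: 1, Flags: 0x10, StartMonoTimeTs: 5, EndMonoTimeTs: 30}
	merged := model.AccumulateBase(a, b)
	fmt.Println(merged.Bytes, merged.Packets, merged.Flags)     // 150 3 18 (0x12)
	fmt.Println(merged.StartMonoTimeTs, merged.EndMonoTimeTs)   // 5 30
}
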
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/packet_record.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/packet_record.go
new file mode 100644
index 000000000..85af86136
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/packet_record.go
@@ -0,0 +1,52 @@
+package model
+
+import (
+ "encoding/binary"
+ "io"
+ "time"
+
+ "github.com/gavv/monotime"
+)
+
+type RawByte byte
+
+type PacketRecord struct {
+ Stream []byte
+ Time time.Time
+}
+
+// NewPacketRecord creates a PacketRecord from the given packet bytes and capture timestamp
+func NewPacketRecord(
+ stream []byte,
+ len uint32,
+ ts time.Time,
+) *PacketRecord {
+ pr := PacketRecord{}
+ pr.Time = ts
+ pr.Stream = make([]byte, len)
+ pr.Stream = stream
+ return &pr
+}
+
+// ReadRawPacket reads a PacketRecord from a binary source, in LittleEndian order
+func ReadRawPacket(reader io.Reader) (*PacketRecord, error) {
+ var pr PacketRecord
+ currentTime := time.Now()
+ monotonicTimeNow := monotime.Now()
+ getLen := make([]byte, 4)
+ packetTimestamp := make([]byte, 8)
+	// Read IfIndex and discard it: to be used in other use cases
+ _ = binary.Read(reader, binary.LittleEndian, make([]byte, 4))
+ // Read Length of packet
+ _ = binary.Read(reader, binary.LittleEndian, getLen)
+ pr.Stream = make([]byte, binary.LittleEndian.Uint32(getLen))
+ // Read TimeStamp of packet
+ _ = binary.Read(reader, binary.LittleEndian, packetTimestamp)
+	// The assumption is that the monotonic clock is close to the time recorded by eBPF,
+	// so the difference gives the delta to subtract from the current wall-clock time.
+ tsDelta := time.Duration(uint64(monotonicTimeNow) - binary.LittleEndian.Uint64(packetTimestamp))
+ pr.Time = currentTime.Add(-tsDelta)
+
+ err := binary.Read(reader, binary.LittleEndian, &pr.Stream)
+ return &pr, err
+}
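
For reference, ReadRawPacket expects, in little-endian order, a 4-byte interface index (read and discarded), a 4-byte payload length, an 8-byte monotonic capture timestamp, and then the payload itself. A minimal sketch building such a buffer and parsing it back; the payload bytes and interface index are illustrative:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/gavv/monotime"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
)

func main() {
	payload := []byte{0xde, 0xad, 0xbe, 0xef}
	buf := new(bytes.Buffer)
	_ = binary.Write(buf, binary.LittleEndian, uint32(3))              // ifindex (discarded by the reader)
	_ = binary.Write(buf, binary.LittleEndian, uint32(len(payload)))   // payload length
	_ = binary.Write(buf, binary.LittleEndian, uint64(monotime.Now())) // monotonic capture timestamp
	buf.Write(payload)                                                 // packet bytes

	pr, err := model.ReadRawPacket(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(pr.Time, pr.Stream) // capture time (close to now) and the 4 payload bytes
}
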
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/record.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/record.go
new file mode 100644
index 000000000..8f6b83230
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/record.go
@@ -0,0 +1,157 @@
+package model
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+)
+
+// Values according to field 61 in https://www.iana.org/assignments/ipfix/ipfix.xhtml
+const (
+ DirectionIngress = uint8(0)
+ DirectionEgress = uint8(1)
+)
+const MacLen = 6
+
+// IPv4Type / IPv6Type value as defined in IEEE 802: https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml
+const (
+ IPv6Type = 0x86DD
+ NetworkEventsMaxEventsMD = 8
+ MaxNetworkEvents = 4
+)
+
+type HumanBytes uint64
+type MacAddr [MacLen]uint8
+type Direction uint8
+
+// IPAddr encodes v4 and v6 IPs with a fixed length.
+// IPv4 addresses are encoded as IPv6 addresses with prefix ::ffff/96
+// as described in https://datatracker.ietf.org/doc/html/rfc4038#section-4.2
+// (same behavior as Go's net.IP type)
+type IPAddr [net.IPv6len]uint8
+
+// record structure as parsed from eBPF
+type RawRecord ebpf.BpfFlowRecordT
+
+// Record contains accumulated metrics from a flow
+type Record struct {
+ ID ebpf.BpfFlowId
+ Metrics BpfFlowContent
+
+ // TODO: redundant field from RecordMetrics. Reorganize structs
+ TimeFlowStart time.Time
+ TimeFlowEnd time.Time
+ DNSLatency time.Duration
+ Interface string
+ // Duplicate tells whether this flow has another duplicate so it has to be excluded from
+ // any metrics' aggregation (e.g. bytes/second rates between two pods).
+	// The reason for this field is that the same flow can be observed from multiple interfaces,
+	// so the agent needs to choose only one view of the same flow and mark the others as
+	// "exclude from aggregation". Otherwise rates, sums, and other aggregated values would be
+	// multiplied by the number of interfaces this flow is observed from.
+ Duplicate bool
+
+ // AgentIP provides information about the source of the flow (the Agent that traced it)
+ AgentIP net.IP
+	// Calculated RTT, set when the record is created by calling NewRecord
+ TimeFlowRtt time.Duration
+ DupList []map[string]uint8
+ NetworkMonitorEventsMD []config.GenericMap
+}
+
+func NewRecord(
+ key ebpf.BpfFlowId,
+ metrics *BpfFlowContent,
+ currentTime time.Time,
+ monotonicCurrentTime uint64,
+) *Record {
+ startDelta := time.Duration(monotonicCurrentTime - metrics.StartMonoTimeTs)
+ endDelta := time.Duration(monotonicCurrentTime - metrics.EndMonoTimeTs)
+
+ var record = Record{
+ ID: key,
+ Metrics: *metrics,
+ TimeFlowStart: currentTime.Add(-startDelta),
+ TimeFlowEnd: currentTime.Add(-endDelta),
+ }
+ if metrics.AdditionalMetrics != nil {
+ if metrics.AdditionalMetrics.FlowRtt != 0 {
+ record.TimeFlowRtt = time.Duration(metrics.AdditionalMetrics.FlowRtt)
+ }
+ if metrics.AdditionalMetrics.DnsRecord.Latency != 0 {
+ record.DNSLatency = time.Duration(metrics.AdditionalMetrics.DnsRecord.Latency)
+ }
+ }
+ record.DupList = make([]map[string]uint8, 0)
+ record.NetworkMonitorEventsMD = make([]config.GenericMap, 0)
+ return &record
+}
+
+func networkEventsMDExist(events [MaxNetworkEvents][NetworkEventsMaxEventsMD]uint8, md [NetworkEventsMaxEventsMD]uint8) bool {
+ for _, e := range events {
+ if reflect.DeepEqual(e, md) {
+ return true
+ }
+ }
+ return false
+}
+
+// IP returns the net.IP equivalent object
+func IP(ia IPAddr) net.IP {
+ return ia[:]
+}
+
+// IntEncodeV4 encodes an IPv4 address as an integer (in network encoding, big endian).
+// It assumes that the passed IP is already IPv4. Otherwise, it would just encode the
+// last 4 bytes of an IPv6 address
+func IntEncodeV4(ia [net.IPv6len]uint8) uint32 {
+ return binary.BigEndian.Uint32(ia[net.IPv6len-net.IPv4len : net.IPv6len])
+}
+
+// IPAddrFromNetIP returns IPAddr from net.IP
+func IPAddrFromNetIP(netIP net.IP) IPAddr {
+ var arr [net.IPv6len]uint8
+ copy(arr[:], (netIP)[0:net.IPv6len])
+ return arr
+}
+
+func (ia *IPAddr) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + IP(*ia).String() + `"`), nil
+}
+
+func (m *MacAddr) String() string {
+ return fmt.Sprintf("%02X:%02X:%02X:%02X:%02X:%02X", m[0], m[1], m[2], m[3], m[4], m[5])
+}
+
+func (m *MacAddr) MarshalJSON() ([]byte, error) {
+ return []byte("\"" + m.String() + "\""), nil
+}
+
+// ReadFrom reads a Record from a binary source, in LittleEndian order
+func ReadFrom(reader io.Reader) (*RawRecord, error) {
+ var fr RawRecord
+ err := binary.Read(reader, binary.LittleEndian, &fr)
+ return &fr, err
+}
+
+func AllZerosMetaData(s [NetworkEventsMaxEventsMD]uint8) bool {
+ for _, v := range s {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func AllZeroIP(ip net.IP) bool {
+ if ip.Equal(net.IPv4zero) || ip.Equal(net.IPv6zero) {
+ return true
+ }
+ return false
+}
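
For reference, IPAddr stores IPv4 addresses in the 16-byte ::ffff-mapped form, so conversions through net.IP round-trip cleanly and IntEncodeV4 simply reads the last four bytes as a big-endian integer. A short sketch using an illustrative address:

package main

import (
	"fmt"
	"net"

	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
)

func main() {
	// ParseIP already yields the 16-byte v4-mapped form; To16 is a safe no-op here.
	ia := model.IPAddrFromNetIP(net.ParseIP("10.0.0.1").To16())
	fmt.Println(model.IP(ia))          // 10.0.0.1
	fmt.Println(model.IntEncodeV4(ia)) // 167772161 (0x0A000001)
}
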
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go
new file mode 100644
index 000000000..c1ce04bb8
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go
@@ -0,0 +1,1103 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.1
+// protoc v3.19.4
+// source: proto/flow.proto
+
+package pbflow
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// as defined by field 61 in
+// https://www.iana.org/assignments/ipfix/ipfix.xhtml
+type Direction int32
+
+const (
+ Direction_INGRESS Direction = 0
+ Direction_EGRESS Direction = 1
+)
+
+// Enum value maps for Direction.
+var (
+ Direction_name = map[int32]string{
+ 0: "INGRESS",
+ 1: "EGRESS",
+ }
+ Direction_value = map[string]int32{
+ "INGRESS": 0,
+ "EGRESS": 1,
+ }
+)
+
+func (x Direction) Enum() *Direction {
+ p := new(Direction)
+ *p = x
+ return p
+}
+
+func (x Direction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Direction) Descriptor() protoreflect.EnumDescriptor {
+ return file_proto_flow_proto_enumTypes[0].Descriptor()
+}
+
+func (Direction) Type() protoreflect.EnumType {
+ return &file_proto_flow_proto_enumTypes[0]
+}
+
+func (x Direction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Direction.Descriptor instead.
+func (Direction) EnumDescriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{0}
+}
+
+// intentionally empty
+type CollectorReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CollectorReply) Reset() {
+ *x = CollectorReply{}
+ mi := &file_proto_flow_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CollectorReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectorReply) ProtoMessage() {}
+
+func (x *CollectorReply) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectorReply.ProtoReflect.Descriptor instead.
+func (*CollectorReply) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{0}
+}
+
+type Records struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries []*Record `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Records) Reset() {
+ *x = Records{}
+ mi := &file_proto_flow_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Records) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Records) ProtoMessage() {}
+
+func (x *Records) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Records.ProtoReflect.Descriptor instead.
+func (*Records) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Records) GetEntries() []*Record {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type DupMapEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Interface string `protobuf:"bytes,1,opt,name=interface,proto3" json:"interface,omitempty"`
+ Direction Direction `protobuf:"varint,2,opt,name=direction,proto3,enum=pbflow.Direction" json:"direction,omitempty"`
+}
+
+func (x *DupMapEntry) Reset() {
+ *x = DupMapEntry{}
+ mi := &file_proto_flow_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DupMapEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DupMapEntry) ProtoMessage() {}
+
+func (x *DupMapEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DupMapEntry.ProtoReflect.Descriptor instead.
+func (*DupMapEntry) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DupMapEntry) GetInterface() string {
+ if x != nil {
+ return x.Interface
+ }
+ return ""
+}
+
+func (x *DupMapEntry) GetDirection() Direction {
+ if x != nil {
+ return x.Direction
+ }
+ return Direction_INGRESS
+}
+
+type NetworkEvent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Events map[string]string `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *NetworkEvent) Reset() {
+ *x = NetworkEvent{}
+ mi := &file_proto_flow_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *NetworkEvent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetworkEvent) ProtoMessage() {}
+
+func (x *NetworkEvent) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetworkEvent.ProtoReflect.Descriptor instead.
+func (*NetworkEvent) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *NetworkEvent) GetEvents() map[string]string {
+ if x != nil {
+ return x.Events
+ }
+ return nil
+}
+
+type Record struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // protocol as defined by ETH_P_* in linux/if_ether.h
+ // https://github.com/torvalds/linux/blob/master/include/uapi/linux/if_ether.h
+ EthProtocol uint32 `protobuf:"varint,1,opt,name=eth_protocol,json=ethProtocol,proto3" json:"eth_protocol,omitempty"`
+ Direction Direction `protobuf:"varint,2,opt,name=direction,proto3,enum=pbflow.Direction" json:"direction,omitempty"`
+ TimeFlowStart *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=time_flow_start,json=timeFlowStart,proto3" json:"time_flow_start,omitempty"`
+ TimeFlowEnd *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=time_flow_end,json=timeFlowEnd,proto3" json:"time_flow_end,omitempty"`
+ // OSI-layer attributes
+ DataLink *DataLink `protobuf:"bytes,5,opt,name=data_link,json=dataLink,proto3" json:"data_link,omitempty"`
+ Network *Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"`
+ Transport *Transport `protobuf:"bytes,7,opt,name=transport,proto3" json:"transport,omitempty"`
+ Bytes uint64 `protobuf:"varint,8,opt,name=bytes,proto3" json:"bytes,omitempty"`
+ Packets uint64 `protobuf:"varint,9,opt,name=packets,proto3" json:"packets,omitempty"`
+ Interface string `protobuf:"bytes,10,opt,name=interface,proto3" json:"interface,omitempty"`
+ // if true, the same flow has been recorded via another interface.
+ // From all the duplicate flows, one will set this value to false and the rest will be true.
+ Duplicate bool `protobuf:"varint,11,opt,name=duplicate,proto3" json:"duplicate,omitempty"`
+ // Agent IP address to help identifying the source of the flow
+ AgentIp *IP `protobuf:"bytes,12,opt,name=agent_ip,json=agentIp,proto3" json:"agent_ip,omitempty"`
+ Flags uint32 `protobuf:"varint,13,opt,name=flags,proto3" json:"flags,omitempty"`
+ IcmpType uint32 `protobuf:"varint,14,opt,name=icmp_type,json=icmpType,proto3" json:"icmp_type,omitempty"`
+ IcmpCode uint32 `protobuf:"varint,15,opt,name=icmp_code,json=icmpCode,proto3" json:"icmp_code,omitempty"`
+ PktDropBytes uint64 `protobuf:"varint,16,opt,name=pkt_drop_bytes,json=pktDropBytes,proto3" json:"pkt_drop_bytes,omitempty"`
+ PktDropPackets uint64 `protobuf:"varint,17,opt,name=pkt_drop_packets,json=pktDropPackets,proto3" json:"pkt_drop_packets,omitempty"`
+ PktDropLatestFlags uint32 `protobuf:"varint,18,opt,name=pkt_drop_latest_flags,json=pktDropLatestFlags,proto3" json:"pkt_drop_latest_flags,omitempty"`
+ PktDropLatestState uint32 `protobuf:"varint,19,opt,name=pkt_drop_latest_state,json=pktDropLatestState,proto3" json:"pkt_drop_latest_state,omitempty"`
+ PktDropLatestDropCause uint32 `protobuf:"varint,20,opt,name=pkt_drop_latest_drop_cause,json=pktDropLatestDropCause,proto3" json:"pkt_drop_latest_drop_cause,omitempty"`
+ DnsId uint32 `protobuf:"varint,21,opt,name=dns_id,json=dnsId,proto3" json:"dns_id,omitempty"`
+ DnsFlags uint32 `protobuf:"varint,22,opt,name=dns_flags,json=dnsFlags,proto3" json:"dns_flags,omitempty"`
+ DnsLatency *durationpb.Duration `protobuf:"bytes,23,opt,name=dns_latency,json=dnsLatency,proto3" json:"dns_latency,omitempty"`
+ TimeFlowRtt *durationpb.Duration `protobuf:"bytes,24,opt,name=time_flow_rtt,json=timeFlowRtt,proto3" json:"time_flow_rtt,omitempty"`
+ DnsErrno uint32 `protobuf:"varint,25,opt,name=dns_errno,json=dnsErrno,proto3" json:"dns_errno,omitempty"`
+ DupList []*DupMapEntry `protobuf:"bytes,26,rep,name=dup_list,json=dupList,proto3" json:"dup_list,omitempty"`
+ NetworkEventsMetadata []*NetworkEvent `protobuf:"bytes,27,rep,name=network_events_metadata,json=networkEventsMetadata,proto3" json:"network_events_metadata,omitempty"`
+ Xlat *Xlat `protobuf:"bytes,28,opt,name=xlat,proto3" json:"xlat,omitempty"`
+ Sampling uint32 `protobuf:"varint,29,opt,name=sampling,proto3" json:"sampling,omitempty"`
+}
+
+func (x *Record) Reset() {
+ *x = Record{}
+ mi := &file_proto_flow_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Record) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Record) ProtoMessage() {}
+
+func (x *Record) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Record.ProtoReflect.Descriptor instead.
+func (*Record) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Record) GetEthProtocol() uint32 {
+ if x != nil {
+ return x.EthProtocol
+ }
+ return 0
+}
+
+func (x *Record) GetDirection() Direction {
+ if x != nil {
+ return x.Direction
+ }
+ return Direction_INGRESS
+}
+
+func (x *Record) GetTimeFlowStart() *timestamppb.Timestamp {
+ if x != nil {
+ return x.TimeFlowStart
+ }
+ return nil
+}
+
+func (x *Record) GetTimeFlowEnd() *timestamppb.Timestamp {
+ if x != nil {
+ return x.TimeFlowEnd
+ }
+ return nil
+}
+
+func (x *Record) GetDataLink() *DataLink {
+ if x != nil {
+ return x.DataLink
+ }
+ return nil
+}
+
+func (x *Record) GetNetwork() *Network {
+ if x != nil {
+ return x.Network
+ }
+ return nil
+}
+
+func (x *Record) GetTransport() *Transport {
+ if x != nil {
+ return x.Transport
+ }
+ return nil
+}
+
+func (x *Record) GetBytes() uint64 {
+ if x != nil {
+ return x.Bytes
+ }
+ return 0
+}
+
+func (x *Record) GetPackets() uint64 {
+ if x != nil {
+ return x.Packets
+ }
+ return 0
+}
+
+func (x *Record) GetInterface() string {
+ if x != nil {
+ return x.Interface
+ }
+ return ""
+}
+
+func (x *Record) GetDuplicate() bool {
+ if x != nil {
+ return x.Duplicate
+ }
+ return false
+}
+
+func (x *Record) GetAgentIp() *IP {
+ if x != nil {
+ return x.AgentIp
+ }
+ return nil
+}
+
+func (x *Record) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
+func (x *Record) GetIcmpType() uint32 {
+ if x != nil {
+ return x.IcmpType
+ }
+ return 0
+}
+
+func (x *Record) GetIcmpCode() uint32 {
+ if x != nil {
+ return x.IcmpCode
+ }
+ return 0
+}
+
+func (x *Record) GetPktDropBytes() uint64 {
+ if x != nil {
+ return x.PktDropBytes
+ }
+ return 0
+}
+
+func (x *Record) GetPktDropPackets() uint64 {
+ if x != nil {
+ return x.PktDropPackets
+ }
+ return 0
+}
+
+func (x *Record) GetPktDropLatestFlags() uint32 {
+ if x != nil {
+ return x.PktDropLatestFlags
+ }
+ return 0
+}
+
+func (x *Record) GetPktDropLatestState() uint32 {
+ if x != nil {
+ return x.PktDropLatestState
+ }
+ return 0
+}
+
+func (x *Record) GetPktDropLatestDropCause() uint32 {
+ if x != nil {
+ return x.PktDropLatestDropCause
+ }
+ return 0
+}
+
+func (x *Record) GetDnsId() uint32 {
+ if x != nil {
+ return x.DnsId
+ }
+ return 0
+}
+
+func (x *Record) GetDnsFlags() uint32 {
+ if x != nil {
+ return x.DnsFlags
+ }
+ return 0
+}
+
+func (x *Record) GetDnsLatency() *durationpb.Duration {
+ if x != nil {
+ return x.DnsLatency
+ }
+ return nil
+}
+
+func (x *Record) GetTimeFlowRtt() *durationpb.Duration {
+ if x != nil {
+ return x.TimeFlowRtt
+ }
+ return nil
+}
+
+func (x *Record) GetDnsErrno() uint32 {
+ if x != nil {
+ return x.DnsErrno
+ }
+ return 0
+}
+
+func (x *Record) GetDupList() []*DupMapEntry {
+ if x != nil {
+ return x.DupList
+ }
+ return nil
+}
+
+func (x *Record) GetNetworkEventsMetadata() []*NetworkEvent {
+ if x != nil {
+ return x.NetworkEventsMetadata
+ }
+ return nil
+}
+
+func (x *Record) GetXlat() *Xlat {
+ if x != nil {
+ return x.Xlat
+ }
+ return nil
+}
+
+func (x *Record) GetSampling() uint32 {
+ if x != nil {
+ return x.Sampling
+ }
+ return 0
+}
+
+type DataLink struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SrcMac uint64 `protobuf:"varint,1,opt,name=src_mac,json=srcMac,proto3" json:"src_mac,omitempty"`
+ DstMac uint64 `protobuf:"varint,2,opt,name=dst_mac,json=dstMac,proto3" json:"dst_mac,omitempty"`
+}
+
+func (x *DataLink) Reset() {
+ *x = DataLink{}
+ mi := &file_proto_flow_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DataLink) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DataLink) ProtoMessage() {}
+
+func (x *DataLink) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DataLink.ProtoReflect.Descriptor instead.
+func (*DataLink) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DataLink) GetSrcMac() uint64 {
+ if x != nil {
+ return x.SrcMac
+ }
+ return 0
+}
+
+func (x *DataLink) GetDstMac() uint64 {
+ if x != nil {
+ return x.DstMac
+ }
+ return 0
+}
+
+type Network struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SrcAddr *IP `protobuf:"bytes,1,opt,name=src_addr,json=srcAddr,proto3" json:"src_addr,omitempty"`
+ DstAddr *IP `protobuf:"bytes,2,opt,name=dst_addr,json=dstAddr,proto3" json:"dst_addr,omitempty"`
+ Dscp uint32 `protobuf:"varint,3,opt,name=dscp,proto3" json:"dscp,omitempty"`
+}
+
+func (x *Network) Reset() {
+ *x = Network{}
+ mi := &file_proto_flow_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Network) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Network) ProtoMessage() {}
+
+func (x *Network) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Network.ProtoReflect.Descriptor instead.
+func (*Network) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Network) GetSrcAddr() *IP {
+ if x != nil {
+ return x.SrcAddr
+ }
+ return nil
+}
+
+func (x *Network) GetDstAddr() *IP {
+ if x != nil {
+ return x.DstAddr
+ }
+ return nil
+}
+
+func (x *Network) GetDscp() uint32 {
+ if x != nil {
+ return x.Dscp
+ }
+ return 0
+}
+
+type IP struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to IpFamily:
+ //
+ // *IP_Ipv4
+ // *IP_Ipv6
+ IpFamily isIP_IpFamily `protobuf_oneof:"ip_family"`
+}
+
+func (x *IP) Reset() {
+ *x = IP{}
+ mi := &file_proto_flow_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *IP) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IP) ProtoMessage() {}
+
+func (x *IP) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IP.ProtoReflect.Descriptor instead.
+func (*IP) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{7}
+}
+
+func (m *IP) GetIpFamily() isIP_IpFamily {
+ if m != nil {
+ return m.IpFamily
+ }
+ return nil
+}
+
+func (x *IP) GetIpv4() uint32 {
+ if x, ok := x.GetIpFamily().(*IP_Ipv4); ok {
+ return x.Ipv4
+ }
+ return 0
+}
+
+func (x *IP) GetIpv6() []byte {
+ if x, ok := x.GetIpFamily().(*IP_Ipv6); ok {
+ return x.Ipv6
+ }
+ return nil
+}
+
+type isIP_IpFamily interface {
+ isIP_IpFamily()
+}
+
+type IP_Ipv4 struct {
+ Ipv4 uint32 `protobuf:"fixed32,1,opt,name=ipv4,proto3,oneof"`
+}
+
+type IP_Ipv6 struct {
+ Ipv6 []byte `protobuf:"bytes,2,opt,name=ipv6,proto3,oneof"`
+}
+
+func (*IP_Ipv4) isIP_IpFamily() {}
+
+func (*IP_Ipv6) isIP_IpFamily() {}
+
+type Transport struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SrcPort uint32 `protobuf:"varint,1,opt,name=src_port,json=srcPort,proto3" json:"src_port,omitempty"`
+ DstPort uint32 `protobuf:"varint,2,opt,name=dst_port,json=dstPort,proto3" json:"dst_port,omitempty"`
+ // protocol as defined by IPPROTO_* in linux/in.h
+ // https://github.com/torvalds/linux/blob/master/include/uapi/linux/in.h
+ Protocol uint32 `protobuf:"varint,3,opt,name=protocol,proto3" json:"protocol,omitempty"`
+}
+
+func (x *Transport) Reset() {
+ *x = Transport{}
+ mi := &file_proto_flow_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Transport) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Transport) ProtoMessage() {}
+
+func (x *Transport) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Transport.ProtoReflect.Descriptor instead.
+func (*Transport) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Transport) GetSrcPort() uint32 {
+ if x != nil {
+ return x.SrcPort
+ }
+ return 0
+}
+
+func (x *Transport) GetDstPort() uint32 {
+ if x != nil {
+ return x.DstPort
+ }
+ return 0
+}
+
+func (x *Transport) GetProtocol() uint32 {
+ if x != nil {
+ return x.Protocol
+ }
+ return 0
+}
+
+type Xlat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SrcAddr *IP `protobuf:"bytes,1,opt,name=src_addr,json=srcAddr,proto3" json:"src_addr,omitempty"`
+ DstAddr *IP `protobuf:"bytes,2,opt,name=dst_addr,json=dstAddr,proto3" json:"dst_addr,omitempty"`
+ SrcPort uint32 `protobuf:"varint,3,opt,name=src_port,json=srcPort,proto3" json:"src_port,omitempty"`
+ DstPort uint32 `protobuf:"varint,4,opt,name=dst_port,json=dstPort,proto3" json:"dst_port,omitempty"`
+ ZoneId uint32 `protobuf:"varint,5,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"`
+ IcmpId uint32 `protobuf:"varint,7,opt,name=icmp_id,json=icmpId,proto3" json:"icmp_id,omitempty"`
+}
+
+func (x *Xlat) Reset() {
+ *x = Xlat{}
+ mi := &file_proto_flow_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Xlat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Xlat) ProtoMessage() {}
+
+func (x *Xlat) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_flow_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Xlat.ProtoReflect.Descriptor instead.
+func (*Xlat) Descriptor() ([]byte, []int) {
+ return file_proto_flow_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Xlat) GetSrcAddr() *IP {
+ if x != nil {
+ return x.SrcAddr
+ }
+ return nil
+}
+
+func (x *Xlat) GetDstAddr() *IP {
+ if x != nil {
+ return x.DstAddr
+ }
+ return nil
+}
+
+func (x *Xlat) GetSrcPort() uint32 {
+ if x != nil {
+ return x.SrcPort
+ }
+ return 0
+}
+
+func (x *Xlat) GetDstPort() uint32 {
+ if x != nil {
+ return x.DstPort
+ }
+ return 0
+}
+
+func (x *Xlat) GetZoneId() uint32 {
+ if x != nil {
+ return x.ZoneId
+ }
+ return 0
+}
+
+func (x *Xlat) GetIcmpId() uint32 {
+ if x != nil {
+ return x.IcmpId
+ }
+ return 0
+}
+
+var File_proto_flow_proto protoreflect.FileDescriptor
+
+var file_proto_flow_proto_rawDesc = []byte{
+ 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x06, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x10, 0x0a, 0x0e, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x33, 0x0a,
+ 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72,
+ 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x66, 0x6c,
+ 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69,
+ 0x65, 0x73, 0x22, 0x5c, 0x0a, 0x0b, 0x44, 0x75, 0x70, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12,
+ 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x72, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x12, 0x38, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f,
+ 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc8, 0x09, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72,
+ 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65,
+ 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x74, 0x69,
+ 0x6d, 0x65, 0x46, 0x6c, 0x6f, 0x77, 0x45, 0x6e, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x64, 0x61, 0x74,
+ 0x61, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70,
+ 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x08,
+ 0x64, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x29, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77,
+ 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x66, 0x6c,
+ 0x6f, 0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77,
+ 0x6f, 0x72, 0x6b, 0x12, 0x2f, 0x0a, 0x09, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
+ 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x09, 0x74, 0x72, 0x61, 0x6e, 0x73,
+ 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x61, 0x63,
+ 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63,
+ 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61,
+ 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x12, 0x25, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07,
+ 0x61, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73,
+ 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a,
+ 0x09, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x08, 0x69, 0x63, 0x6d, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x63,
+ 0x6d, 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69,
+ 0x63, 0x6d, 0x70, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x6b, 0x74, 0x5f, 0x64,
+ 0x72, 0x6f, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x0c, 0x70, 0x6b, 0x74, 0x44, 0x72, 0x6f, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x28, 0x0a,
+ 0x10, 0x70, 0x6b, 0x74, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74,
+ 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x70, 0x6b, 0x74, 0x44, 0x72, 0x6f, 0x70,
+ 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x70, 0x6b, 0x74, 0x5f, 0x64,
+ 0x72, 0x6f, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73,
+ 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x70, 0x6b, 0x74, 0x44, 0x72, 0x6f, 0x70, 0x4c,
+ 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x70, 0x6b,
+ 0x74, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x70, 0x6b, 0x74, 0x44, 0x72,
+ 0x6f, 0x70, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3a, 0x0a,
+ 0x1a, 0x70, 0x6b, 0x74, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74,
+ 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x63, 0x61, 0x75, 0x73, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x16, 0x70, 0x6b, 0x74, 0x44, 0x72, 0x6f, 0x70, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74,
+ 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x75, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x64, 0x6e, 0x73,
+ 0x5f, 0x69, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x6e, 0x73, 0x49, 0x64,
+ 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x16, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x3a, 0x0a,
+ 0x0b, 0x64, 0x6e, 0x73, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x17, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x64,
+ 0x6e, 0x73, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x74, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x74, 0x69, 0x6d,
+ 0x65, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x74, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f,
+ 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x6e, 0x73,
+ 0x45, 0x72, 0x72, 0x6e, 0x6f, 0x12, 0x2e, 0x0a, 0x08, 0x64, 0x75, 0x70, 0x5f, 0x6c, 0x69, 0x73,
+ 0x74, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x44, 0x75, 0x70, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x75,
+ 0x70, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4c, 0x0a, 0x17, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
+ 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
+ 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x6e, 0x65,
+ 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x12, 0x20, 0x0a, 0x04, 0x78, 0x6c, 0x61, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x58, 0x6c, 0x61, 0x74, 0x52,
+ 0x04, 0x78, 0x6c, 0x61, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e,
+ 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e,
+ 0x67, 0x22, 0x3c, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x17, 0x0a,
+ 0x07, 0x73, 0x72, 0x63, 0x5f, 0x6d, 0x61, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06,
+ 0x73, 0x72, 0x63, 0x4d, 0x61, 0x63, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f, 0x6d, 0x61,
+ 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x64, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x22,
+ 0x6b, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x25, 0x0a, 0x08, 0x73, 0x72,
+ 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70,
+ 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x73, 0x72, 0x63, 0x41, 0x64, 0x64,
+ 0x72, 0x12, 0x25, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52,
+ 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x73, 0x63, 0x70,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x64, 0x73, 0x63, 0x70, 0x22, 0x3d, 0x0a, 0x02,
+ 0x49, 0x50, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07,
+ 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x42, 0x0b,
+ 0x0a, 0x09, 0x69, 0x70, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x22, 0x5d, 0x0a, 0x09, 0x54,
+ 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f,
+ 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x72, 0x63, 0x50,
+ 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x64, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a,
+ 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xbc, 0x01, 0x0a, 0x04, 0x58,
+ 0x6c, 0x61, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49,
+ 0x50, 0x52, 0x07, 0x73, 0x72, 0x63, 0x41, 0x64, 0x64, 0x72, 0x12, 0x25, 0x0a, 0x08, 0x64, 0x73,
+ 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70,
+ 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64,
+ 0x72, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x72, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08,
+ 0x64, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07,
+ 0x64, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x7a, 0x6f, 0x6e, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x49, 0x64,
+ 0x12, 0x17, 0x0a, 0x07, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x06, 0x69, 0x63, 0x6d, 0x70, 0x49, 0x64, 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53,
+ 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x32,
+ 0x3e, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04,
+ 0x53, 0x65, 0x6e, 0x64, 0x12, 0x0f, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42,
+ 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_proto_flow_proto_rawDescOnce sync.Once
+ file_proto_flow_proto_rawDescData = file_proto_flow_proto_rawDesc
+)
+
+func file_proto_flow_proto_rawDescGZIP() []byte {
+ file_proto_flow_proto_rawDescOnce.Do(func() {
+ file_proto_flow_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_flow_proto_rawDescData)
+ })
+ return file_proto_flow_proto_rawDescData
+}
+
+var file_proto_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_proto_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
+var file_proto_flow_proto_goTypes = []any{
+ (Direction)(0), // 0: pbflow.Direction
+ (*CollectorReply)(nil), // 1: pbflow.CollectorReply
+ (*Records)(nil), // 2: pbflow.Records
+ (*DupMapEntry)(nil), // 3: pbflow.DupMapEntry
+ (*NetworkEvent)(nil), // 4: pbflow.NetworkEvent
+ (*Record)(nil), // 5: pbflow.Record
+ (*DataLink)(nil), // 6: pbflow.DataLink
+ (*Network)(nil), // 7: pbflow.Network
+ (*IP)(nil), // 8: pbflow.IP
+ (*Transport)(nil), // 9: pbflow.Transport
+ (*Xlat)(nil), // 10: pbflow.Xlat
+ nil, // 11: pbflow.NetworkEvent.EventsEntry
+ (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 13: google.protobuf.Duration
+}
+var file_proto_flow_proto_depIdxs = []int32{
+ 5, // 0: pbflow.Records.entries:type_name -> pbflow.Record
+ 0, // 1: pbflow.DupMapEntry.direction:type_name -> pbflow.Direction
+ 11, // 2: pbflow.NetworkEvent.events:type_name -> pbflow.NetworkEvent.EventsEntry
+ 0, // 3: pbflow.Record.direction:type_name -> pbflow.Direction
+ 12, // 4: pbflow.Record.time_flow_start:type_name -> google.protobuf.Timestamp
+ 12, // 5: pbflow.Record.time_flow_end:type_name -> google.protobuf.Timestamp
+ 6, // 6: pbflow.Record.data_link:type_name -> pbflow.DataLink
+ 7, // 7: pbflow.Record.network:type_name -> pbflow.Network
+ 9, // 8: pbflow.Record.transport:type_name -> pbflow.Transport
+ 8, // 9: pbflow.Record.agent_ip:type_name -> pbflow.IP
+ 13, // 10: pbflow.Record.dns_latency:type_name -> google.protobuf.Duration
+ 13, // 11: pbflow.Record.time_flow_rtt:type_name -> google.protobuf.Duration
+ 3, // 12: pbflow.Record.dup_list:type_name -> pbflow.DupMapEntry
+ 4, // 13: pbflow.Record.network_events_metadata:type_name -> pbflow.NetworkEvent
+ 10, // 14: pbflow.Record.xlat:type_name -> pbflow.Xlat
+ 8, // 15: pbflow.Network.src_addr:type_name -> pbflow.IP
+ 8, // 16: pbflow.Network.dst_addr:type_name -> pbflow.IP
+ 8, // 17: pbflow.Xlat.src_addr:type_name -> pbflow.IP
+ 8, // 18: pbflow.Xlat.dst_addr:type_name -> pbflow.IP
+ 2, // 19: pbflow.Collector.Send:input_type -> pbflow.Records
+ 1, // 20: pbflow.Collector.Send:output_type -> pbflow.CollectorReply
+ 20, // [20:21] is the sub-list for method output_type
+ 19, // [19:20] is the sub-list for method input_type
+ 19, // [19:19] is the sub-list for extension type_name
+ 19, // [19:19] is the sub-list for extension extendee
+ 0, // [0:19] is the sub-list for field type_name
+}
+
+func init() { file_proto_flow_proto_init() }
+func file_proto_flow_proto_init() {
+ if File_proto_flow_proto != nil {
+ return
+ }
+ file_proto_flow_proto_msgTypes[7].OneofWrappers = []any{
+ (*IP_Ipv4)(nil),
+ (*IP_Ipv6)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_proto_flow_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 11,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_proto_flow_proto_goTypes,
+ DependencyIndexes: file_proto_flow_proto_depIdxs,
+ EnumInfos: file_proto_flow_proto_enumTypes,
+ MessageInfos: file_proto_flow_proto_msgTypes,
+ }.Build()
+ File_proto_flow_proto = out.File
+ file_proto_flow_proto_rawDesc = nil
+ file_proto_flow_proto_goTypes = nil
+ file_proto_flow_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow_grpc.pb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow_grpc.pb.go
new file mode 100644
index 000000000..ac00dda6d
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow_grpc.pb.go
@@ -0,0 +1,121 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v3.19.4
+// source: proto/flow.proto
+
+package pbflow
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Collector_Send_FullMethodName = "/pbflow.Collector/Send"
+)
+
+// CollectorClient is the client API for Collector service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type CollectorClient interface {
+ Send(ctx context.Context, in *Records, opts ...grpc.CallOption) (*CollectorReply, error)
+}
+
+type collectorClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewCollectorClient(cc grpc.ClientConnInterface) CollectorClient {
+ return &collectorClient{cc}
+}
+
+func (c *collectorClient) Send(ctx context.Context, in *Records, opts ...grpc.CallOption) (*CollectorReply, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(CollectorReply)
+ err := c.cc.Invoke(ctx, Collector_Send_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// CollectorServer is the server API for Collector service.
+// All implementations must embed UnimplementedCollectorServer
+// for forward compatibility.
+type CollectorServer interface {
+ Send(context.Context, *Records) (*CollectorReply, error)
+ mustEmbedUnimplementedCollectorServer()
+}
+
+// UnimplementedCollectorServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedCollectorServer struct{}
+
+func (UnimplementedCollectorServer) Send(context.Context, *Records) (*CollectorReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
+}
+func (UnimplementedCollectorServer) mustEmbedUnimplementedCollectorServer() {}
+func (UnimplementedCollectorServer) testEmbeddedByValue() {}
+
+// UnsafeCollectorServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to CollectorServer will
+// result in compilation errors.
+type UnsafeCollectorServer interface {
+ mustEmbedUnimplementedCollectorServer()
+}
+
+func RegisterCollectorServer(s grpc.ServiceRegistrar, srv CollectorServer) {
+	// If the following call panics, it indicates UnimplementedCollectorServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&Collector_ServiceDesc, srv)
+}
+
+func _Collector_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Records)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(CollectorServer).Send(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Collector_Send_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(CollectorServer).Send(ctx, req.(*Records))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Collector_ServiceDesc is the grpc.ServiceDesc for Collector service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Collector_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "pbflow.Collector",
+ HandlerType: (*CollectorServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Send",
+ Handler: _Collector_Send_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "proto/flow.proto",
+}
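
For reference, a minimal client-side sketch of the generated Collector service follows; the target address and the use of insecure transport credentials are illustrative assumptions, not the agent's actual exporter configuration.

package main

import (
	"context"
	"log"

	"github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Illustrative endpoint; the real collector address comes from the agent configuration.
	conn, err := grpc.NewClient("collector.example:2055", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pbflow.NewCollectorClient(conn)
	// Send an empty batch; real callers would pass records built by FlowToPB/FlowsToPB.
	if _, err := client.Send(context.Background(), &pbflow.Records{Entries: []*pbflow.Record{}}); err != nil {
		log.Fatal(err)
	}
}
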
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go
new file mode 100644
index 000000000..27b7e57c0
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go
@@ -0,0 +1,285 @@
+package pbflow
+
+import (
+ "encoding/binary"
+ "net"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ ovnmodel "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model"
+ ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder"
+ "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/types/known/durationpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+var protoLog = logrus.WithField("component", "pbflow")
+
+// FlowsToPB is an auxiliary function to convert flow records, as returned by the eBPF agent,
+// into protobuf-encoded messages ready to be sent to the collector via gRPC
+func FlowsToPB(inputRecords []*model.Record, maxLen int, s *ovnobserv.SampleDecoder) []*Records {
+ entries := make([]*Record, 0, len(inputRecords))
+ for _, record := range inputRecords {
+ entries = append(entries, FlowToPB(record, s))
+ }
+ var records []*Records
+ for len(entries) > 0 {
+ end := len(entries)
+ if end > maxLen {
+ end = maxLen
+ }
+ records = append(records, &Records{Entries: entries[:end]})
+ entries = entries[end:]
+ }
+ return records
+}
+
+// FlowToPB is an auxiliary function to convert a single flow record, as returned by the eBPF agent,
+// into a protobuf-encoded message ready to be sent to the collector via Kafka
+func FlowToPB(fr *model.Record, s *ovnobserv.SampleDecoder) *Record {
+ var pbflowRecord = Record{
+ EthProtocol: uint32(fr.Metrics.EthProtocol),
+ Direction: Direction(fr.ID.Direction),
+ DataLink: &DataLink{
+ SrcMac: macToUint64(&fr.Metrics.SrcMac),
+ DstMac: macToUint64(&fr.Metrics.DstMac),
+ },
+ Network: &Network{
+ Dscp: uint32(fr.Metrics.Dscp),
+ },
+ Transport: &Transport{
+ Protocol: uint32(fr.ID.TransportProtocol),
+ SrcPort: uint32(fr.ID.SrcPort),
+ DstPort: uint32(fr.ID.DstPort),
+ },
+ IcmpType: uint32(fr.ID.IcmpType),
+ IcmpCode: uint32(fr.ID.IcmpCode),
+ Bytes: fr.Metrics.Bytes,
+ TimeFlowStart: ×tamppb.Timestamp{
+ Seconds: fr.TimeFlowStart.Unix(),
+ Nanos: int32(fr.TimeFlowStart.Nanosecond()),
+ },
+ TimeFlowEnd: ×tamppb.Timestamp{
+ Seconds: fr.TimeFlowEnd.Unix(),
+ Nanos: int32(fr.TimeFlowEnd.Nanosecond()),
+ },
+ Packets: uint64(fr.Metrics.Packets),
+ Duplicate: fr.Duplicate,
+ AgentIp: agentIP(fr.AgentIP),
+ Flags: uint32(fr.Metrics.Flags),
+ Interface: fr.Interface,
+ TimeFlowRtt: durationpb.New(fr.TimeFlowRtt),
+ Sampling: fr.Metrics.Sampling,
+ }
+ if fr.Metrics.AdditionalMetrics != nil {
+ pbflowRecord.PktDropBytes = fr.Metrics.AdditionalMetrics.PktDrops.Bytes
+ pbflowRecord.PktDropPackets = uint64(fr.Metrics.AdditionalMetrics.PktDrops.Packets)
+ pbflowRecord.PktDropLatestFlags = uint32(fr.Metrics.AdditionalMetrics.PktDrops.LatestFlags)
+ pbflowRecord.PktDropLatestState = uint32(fr.Metrics.AdditionalMetrics.PktDrops.LatestState)
+ pbflowRecord.PktDropLatestDropCause = fr.Metrics.AdditionalMetrics.PktDrops.LatestDropCause
+ pbflowRecord.DnsId = uint32(fr.Metrics.AdditionalMetrics.DnsRecord.Id)
+ pbflowRecord.DnsFlags = uint32(fr.Metrics.AdditionalMetrics.DnsRecord.Flags)
+ pbflowRecord.DnsErrno = uint32(fr.Metrics.AdditionalMetrics.DnsRecord.Errno)
+ if fr.Metrics.AdditionalMetrics.DnsRecord.Latency != 0 {
+ pbflowRecord.DnsLatency = durationpb.New(fr.DNSLatency)
+ }
+ pbflowRecord.Xlat = &Xlat{
+ SrcPort: uint32(fr.Metrics.AdditionalMetrics.TranslatedFlow.Sport),
+ DstPort: uint32(fr.Metrics.AdditionalMetrics.TranslatedFlow.Dport),
+ ZoneId: uint32(fr.Metrics.AdditionalMetrics.TranslatedFlow.ZoneId),
+ IcmpId: uint32(fr.Metrics.AdditionalMetrics.TranslatedFlow.IcmpId),
+ }
+ }
+ if len(fr.DupList) != 0 {
+ pbflowRecord.DupList = make([]*DupMapEntry, 0)
+ for _, m := range fr.DupList {
+ for key, value := range m {
+ pbflowRecord.DupList = append(pbflowRecord.DupList, &DupMapEntry{
+ Interface: key,
+ Direction: Direction(value),
+ })
+ }
+ }
+ }
+ if fr.Metrics.EthProtocol == model.IPv6Type {
+ pbflowRecord.Network.SrcAddr = &IP{IpFamily: &IP_Ipv6{Ipv6: fr.ID.SrcIp[:]}}
+ pbflowRecord.Network.DstAddr = &IP{IpFamily: &IP_Ipv6{Ipv6: fr.ID.DstIp[:]}}
+ if fr.Metrics.AdditionalMetrics != nil {
+ pbflowRecord.Xlat.SrcAddr = &IP{IpFamily: &IP_Ipv6{Ipv6: fr.Metrics.AdditionalMetrics.TranslatedFlow.Saddr[:]}}
+ pbflowRecord.Xlat.DstAddr = &IP{IpFamily: &IP_Ipv6{Ipv6: fr.Metrics.AdditionalMetrics.TranslatedFlow.Daddr[:]}}
+ }
+ } else {
+ pbflowRecord.Network.SrcAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: model.IntEncodeV4(fr.ID.SrcIp)}}
+ pbflowRecord.Network.DstAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: model.IntEncodeV4(fr.ID.DstIp)}}
+ if fr.Metrics.AdditionalMetrics != nil {
+ pbflowRecord.Xlat.SrcAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: model.IntEncodeV4(fr.Metrics.AdditionalMetrics.TranslatedFlow.Saddr)}}
+ pbflowRecord.Xlat.DstAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: model.IntEncodeV4(fr.Metrics.AdditionalMetrics.TranslatedFlow.Daddr)}}
+ }
+ }
+ if s != nil && fr.Metrics.AdditionalMetrics != nil {
+ seen := make(map[string]bool)
+ pbflowRecord.NetworkEventsMetadata = make([]*NetworkEvent, 0)
+ for _, metadata := range fr.Metrics.AdditionalMetrics.NetworkEvents {
+ var pbEvent NetworkEvent
+ if !model.AllZerosMetaData(metadata) {
+ if md, err := s.DecodeCookie8Bytes(metadata); err == nil {
+ acl, ok := md.(*ovnmodel.ACLEvent)
+ mdStr := md.String()
+ protoLog.Debugf("Network Events Metadata %v decoded Cookie: %v decoded string: %s", metadata, md, mdStr)
+ if !seen[mdStr] {
+ if ok {
+ pbEvent = NetworkEvent{
+ Events: map[string]string{
+ "Action": acl.Action,
+ "Type": acl.Actor,
+ "Feature": "acl",
+ "Name": acl.Name,
+ "Namespace": acl.Namespace,
+ "Direction": acl.Direction,
+ },
+ }
+ } else {
+ pbEvent = NetworkEvent{
+ Events: map[string]string{
+ "Message": mdStr,
+ },
+ }
+ }
+ pbflowRecord.NetworkEventsMetadata = append(pbflowRecord.NetworkEventsMetadata, &pbEvent)
+ seen[mdStr] = true
+ }
+ } else {
+ protoLog.Errorf("unable to decode Network events cookie: %v", err)
+ }
+ }
+ }
+ }
+ return &pbflowRecord
+}
+
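+// PBToFlow is the reverse of FlowToPB: it converts a protobuf-encoded Record back into the agent's internal model.Record.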
+func PBToFlow(pb *Record) *model.Record {
+ if pb == nil {
+ return nil
+ }
+ out := model.Record{
+ ID: ebpf.BpfFlowId{
+ Direction: uint8(pb.Direction),
+ TransportProtocol: uint8(pb.Transport.Protocol),
+ SrcIp: ipToIPAddr(pb.Network.GetSrcAddr()),
+ DstIp: ipToIPAddr(pb.Network.GetDstAddr()),
+ SrcPort: uint16(pb.Transport.SrcPort),
+ DstPort: uint16(pb.Transport.DstPort),
+ IcmpType: uint8(pb.IcmpType),
+ IcmpCode: uint8(pb.IcmpCode),
+ },
+ Metrics: model.BpfFlowContent{
+ BpfFlowMetrics: &ebpf.BpfFlowMetrics{
+ EthProtocol: uint16(pb.EthProtocol),
+ SrcMac: macToUint8(pb.DataLink.GetSrcMac()),
+ DstMac: macToUint8(pb.DataLink.GetDstMac()),
+ Bytes: pb.Bytes,
+ Packets: uint32(pb.Packets),
+ Flags: uint16(pb.Flags),
+ Dscp: uint8(pb.Network.Dscp),
+ Sampling: pb.Sampling,
+ },
+ AdditionalMetrics: &ebpf.BpfAdditionalMetrics{
+ PktDrops: ebpf.BpfPktDropsT{
+ Bytes: pb.PktDropBytes,
+ Packets: uint32(pb.PktDropPackets),
+ LatestFlags: uint16(pb.PktDropLatestFlags),
+ LatestState: uint8(pb.PktDropLatestState),
+ LatestDropCause: pb.PktDropLatestDropCause,
+ },
+ DnsRecord: ebpf.BpfDnsRecordT{
+ Id: uint16(pb.DnsId),
+ Flags: uint16(pb.DnsFlags),
+ Errno: uint8(pb.DnsErrno),
+ Latency: uint64(pb.DnsLatency.AsDuration()),
+ },
+ TranslatedFlow: ebpf.BpfTranslatedFlowT{
+ Saddr: ipToIPAddr(pb.Xlat.GetSrcAddr()),
+ Daddr: ipToIPAddr(pb.Xlat.GetDstAddr()),
+ Sport: uint16(pb.Xlat.GetSrcPort()),
+ Dport: uint16(pb.Xlat.GetDstPort()),
+ ZoneId: uint16(pb.Xlat.GetZoneId()),
+ IcmpId: uint8(pb.Xlat.GetIcmpId()),
+ },
+ },
+ },
+ TimeFlowStart: pb.TimeFlowStart.AsTime(),
+ TimeFlowEnd: pb.TimeFlowEnd.AsTime(),
+ AgentIP: pbIPToNetIP(pb.AgentIp),
+ Duplicate: pb.Duplicate,
+ Interface: pb.Interface,
+ TimeFlowRtt: pb.TimeFlowRtt.AsDuration(),
+ DNSLatency: pb.DnsLatency.AsDuration(),
+ }
+
+ if len(pb.GetDupList()) != 0 {
+ for _, entry := range pb.GetDupList() {
+ intf := entry.Interface
+ dir := uint8(entry.Direction)
+ out.DupList = append(out.DupList, map[string]uint8{intf: dir})
+ }
+ }
+ if len(pb.GetNetworkEventsMetadata()) != 0 {
+ for _, e := range pb.GetNetworkEventsMetadata() {
+ m := config.GenericMap{}
+ for k, v := range e.Events {
+ m[k] = v
+ }
+ out.NetworkMonitorEventsMD = append(out.NetworkMonitorEventsMD, m)
+ }
+ protoLog.Debugf("decoded Network events monitor metadata: %v", out.NetworkMonitorEventsMD)
+ }
+
+ return &out
+}
+
+// Mac bytes are encoded in the same order as in the array. This is, a Mac
+// like 11:22:33:44:55:66 will be encoded as 0x112233445566
+func macToUint64(m *[model.MacLen]uint8) uint64 {
+ return uint64(m[5]) |
+ (uint64(m[4]) << 8) |
+ (uint64(m[3]) << 16) |
+ (uint64(m[2]) << 24) |
+ (uint64(m[1]) << 32) |
+ (uint64(m[0]) << 40)
+}
+
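+// agentIP wraps a net.IP into the protobuf IP type, using the compact IPv4 representation when possible.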
+func agentIP(nip net.IP) *IP {
+ if ip := nip.To4(); ip != nil {
+ return &IP{IpFamily: &IP_Ipv4{Ipv4: binary.BigEndian.Uint32(ip)}}
+ }
+ // IPv6 address
+ return &IP{IpFamily: &IP_Ipv6{Ipv6: nip}}
+}
+
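+// pbIPToNetIP converts a protobuf IP (IPv4 or IPv6 family) back into a net.IP.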
+func pbIPToNetIP(ip *IP) net.IP {
+ if ip.GetIpv6() != nil {
+ return net.IP(ip.GetIpv6())
+ }
+ n := ip.GetIpv4()
+ return net.IPv4(
+ byte((n>>24)&0xFF),
+ byte((n>>16)&0xFF),
+ byte((n>>8)&0xFF),
+ byte(n&0xFF))
+}
+
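+// ipToIPAddr converts a protobuf IP into the agent's model.IPAddr representation.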
+func ipToIPAddr(ip *IP) model.IPAddr {
+ return model.IPAddrFromNetIP(pbIPToNetIP(ip))
+}
+
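+// macToUint8 is the reverse of macToUint64: it splits an encoded MAC back into its 6 bytes.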
+func macToUint8(mac uint64) [6]uint8 {
+ return [6]uint8{
+ uint8(mac >> 40),
+ uint8(mac >> 32),
+ uint8(mac >> 24),
+ uint8(mac >> 16),
+ uint8(mac >> 8),
+ uint8(mac),
+ }
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet.pb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet.pb.go
new file mode 100644
index 000000000..30916b7d4
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet.pb.go
@@ -0,0 +1,178 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.1
+// protoc v3.19.4
+// source: proto/packet.proto
+
+package pbpacket
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The request message containing the Packet
+type Packet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Pcap *anypb.Any `protobuf:"bytes,1,opt,name=pcap,proto3" json:"pcap,omitempty"`
+}
+
+func (x *Packet) Reset() {
+ *x = Packet{}
+ mi := &file_proto_packet_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Packet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Packet) ProtoMessage() {}
+
+func (x *Packet) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_packet_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Packet.ProtoReflect.Descriptor instead.
+func (*Packet) Descriptor() ([]byte, []int) {
+ return file_proto_packet_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Packet) GetPcap() *anypb.Any {
+ if x != nil {
+ return x.Pcap
+ }
+ return nil
+}
+
+// intentionally empty
+type CollectorReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CollectorReply) Reset() {
+ *x = CollectorReply{}
+ mi := &file_proto_packet_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CollectorReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectorReply) ProtoMessage() {}
+
+func (x *CollectorReply) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_packet_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectorReply.ProtoReflect.Descriptor instead.
+func (*CollectorReply) Descriptor() ([]byte, []int) {
+ return file_proto_packet_proto_rawDescGZIP(), []int{1}
+}
+
+var File_proto_packet_proto protoreflect.FileDescriptor
+
+var file_proto_packet_proto_rawDesc = []byte{
+ 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x70, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x19,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x32, 0x0a, 0x06, 0x50, 0x61, 0x63,
+ 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x70, 0x63, 0x61, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x70, 0x63, 0x61, 0x70, 0x22, 0x10, 0x0a,
+ 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32,
+ 0x41, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x34, 0x0a, 0x04,
+ 0x53, 0x65, 0x6e, 0x64, 0x12, 0x10, 0x2e, 0x70, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x2e,
+ 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65,
+ 0x74, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x22, 0x00, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 0x70, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_proto_packet_proto_rawDescOnce sync.Once
+ file_proto_packet_proto_rawDescData = file_proto_packet_proto_rawDesc
+)
+
+func file_proto_packet_proto_rawDescGZIP() []byte {
+ file_proto_packet_proto_rawDescOnce.Do(func() {
+ file_proto_packet_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_packet_proto_rawDescData)
+ })
+ return file_proto_packet_proto_rawDescData
+}
+
+var file_proto_packet_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_proto_packet_proto_goTypes = []any{
+ (*Packet)(nil), // 0: pbpacket.Packet
+ (*CollectorReply)(nil), // 1: pbpacket.CollectorReply
+ (*anypb.Any)(nil), // 2: google.protobuf.Any
+}
+var file_proto_packet_proto_depIdxs = []int32{
+ 2, // 0: pbpacket.Packet.pcap:type_name -> google.protobuf.Any
+ 0, // 1: pbpacket.Collector.Send:input_type -> pbpacket.Packet
+ 1, // 2: pbpacket.Collector.Send:output_type -> pbpacket.CollectorReply
+ 2, // [2:3] is the sub-list for method output_type
+ 1, // [1:2] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_proto_packet_proto_init() }
+func file_proto_packet_proto_init() {
+ if File_proto_packet_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_proto_packet_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_proto_packet_proto_goTypes,
+ DependencyIndexes: file_proto_packet_proto_depIdxs,
+ MessageInfos: file_proto_packet_proto_msgTypes,
+ }.Build()
+ File_proto_packet_proto = out.File
+ file_proto_packet_proto_rawDesc = nil
+ file_proto_packet_proto_goTypes = nil
+ file_proto_packet_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet_grpc.pb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet_grpc.pb.go
new file mode 100644
index 000000000..fce168986
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet_grpc.pb.go
@@ -0,0 +1,121 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v3.19.4
+// source: proto/packet.proto
+
+package pbpacket
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Collector_Send_FullMethodName = "/pbpacket.Collector/Send"
+)
+
+// CollectorClient is the client API for Collector service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type CollectorClient interface {
+ Send(ctx context.Context, in *Packet, opts ...grpc.CallOption) (*CollectorReply, error)
+}
+
+type collectorClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewCollectorClient(cc grpc.ClientConnInterface) CollectorClient {
+ return &collectorClient{cc}
+}
+
+func (c *collectorClient) Send(ctx context.Context, in *Packet, opts ...grpc.CallOption) (*CollectorReply, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(CollectorReply)
+ err := c.cc.Invoke(ctx, Collector_Send_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// CollectorServer is the server API for Collector service.
+// All implementations must embed UnimplementedCollectorServer
+// for forward compatibility.
+type CollectorServer interface {
+ Send(context.Context, *Packet) (*CollectorReply, error)
+ mustEmbedUnimplementedCollectorServer()
+}
+
+// UnimplementedCollectorServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedCollectorServer struct{}
+
+func (UnimplementedCollectorServer) Send(context.Context, *Packet) (*CollectorReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
+}
+func (UnimplementedCollectorServer) mustEmbedUnimplementedCollectorServer() {}
+func (UnimplementedCollectorServer) testEmbeddedByValue() {}
+
+// UnsafeCollectorServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to CollectorServer will
+// result in compilation errors.
+type UnsafeCollectorServer interface {
+ mustEmbedUnimplementedCollectorServer()
+}
+
+func RegisterCollectorServer(s grpc.ServiceRegistrar, srv CollectorServer) {
+ // If the following call panics, it indicates UnimplementedCollectorServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&Collector_ServiceDesc, srv)
+}
+
+func _Collector_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Packet)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(CollectorServer).Send(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Collector_Send_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(CollectorServer).Send(ctx, req.(*Packet))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Collector_ServiceDesc is the grpc.ServiceDesc for Collector service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Collector_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "pbpacket.Collector",
+ HandlerType: (*CollectorServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Send",
+ Handler: _Collector_Send_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "proto/packet.proto",
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus/prom_server.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus/prom_server.go
new file mode 100644
index 000000000..c50639a2e
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus/prom_server.go
@@ -0,0 +1,103 @@
+package prometheus
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+
+ prom "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ plog = logrus.WithField("component", "prometheus")
+ maybePanic = plog.Fatalf
+)
+
+// InitializePrometheus starts the global Prometheus server, used for operational metrics and prom-encode stages if they don't override the server settings
+func InitializePrometheus(settings *metrics.Settings) *http.Server {
+ return StartServerAsync(settings, nil)
+}
+
+// StartServerAsync listens for prometheus resource usage requests
+func StartServerAsync(conn *metrics.Settings, registry *prom.Registry) *http.Server {
+ // create prometheus server for operational metrics
+ // if value of address is empty, then by default it will take 0.0.0.0
+ port := conn.Port
+ if port == 0 {
+ port = 9090
+ }
+ addr := fmt.Sprintf("%s:%v", conn.Address, port)
+ plog.Infof("StartServerAsync: addr = %s", addr)
+
+ httpServer := &http.Server{
+ Addr: addr,
+ // TLS clients must use TLS 1.2 or higher
+ TLSConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ },
+ }
+ // The Handler function provides a default handler to expose metrics
+ // via an HTTP server. "/metrics" is the usual endpoint for that.
+ mux := http.NewServeMux()
+ if registry == nil {
+ mux.Handle("/metrics", promhttp.Handler())
+ } else {
+ mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
+ }
+ httpServer.Handler = mux
+ httpServer = defaultServer(httpServer)
+
+ go func() {
+ var err error
+ if conn.TLS != nil {
+ err = httpServer.ListenAndServeTLS(conn.TLS.CertPath, conn.TLS.KeyPath)
+ } else {
+ err = httpServer.ListenAndServe()
+ }
+ if err != nil && err != http.ErrServerClosed {
+ maybePanic("error in http.ListenAndServe: %v", err)
+ }
+ }()
+
+ return httpServer
+}
+
+func defaultServer(srv *http.Server) *http.Server {
+ // defaults taken from https://bruinsslot.jp/post/go-secure-webserver/, can be overridden by the caller
+ if srv.Handler != nil {
+ // No more than 2MB body
+ srv.Handler = http.MaxBytesHandler(srv.Handler, 2<<20)
+ } else {
+ plog.Warnf("Handler not yet set on server while securing defaults. Make sure a MaxByte middleware is used.")
+ }
+ if srv.ReadTimeout == 0 {
+ srv.ReadTimeout = 10 * time.Second
+ }
+ if srv.ReadHeaderTimeout == 0 {
+ srv.ReadHeaderTimeout = 5 * time.Second
+ }
+ if srv.WriteTimeout == 0 {
+ srv.WriteTimeout = 10 * time.Second
+ }
+ if srv.IdleTimeout == 0 {
+ srv.IdleTimeout = 120 * time.Second
+ }
+ if srv.MaxHeaderBytes == 0 {
+ srv.MaxHeaderBytes = 1 << 20 // 1MB
+ }
+ if srv.TLSConfig == nil {
+ srv.TLSConfig = &tls.Config{}
+ }
+ if srv.TLSConfig.MinVersion == 0 {
+ srv.TLSConfig.MinVersion = tls.VersionTLS13
+ }
+ // Disable http/2
+ srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0)
+
+ return srv
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/flow_filter.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/flow_filter.go
new file mode 100644
index 000000000..b29e2dcfb
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/flow_filter.go
@@ -0,0 +1,266 @@
+package tracer
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "syscall"
+
+ cilium "github.com/cilium/ebpf"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+type FilterConfig struct {
+ FilterDirection string
+ FilterIPCIDR string
+ FilterProtocol string
+ FilterSourcePort intstr.IntOrString
+ FilterDestinationPort intstr.IntOrString
+ FilterPort intstr.IntOrString
+ FilterIcmpType int
+ FilterIcmpCode int
+ FilterPeerIP string
+ FilterAction string
+ FilterTCPFlags string
+ FilterDrops bool
+ FilterSample uint32
+}
+
+type Filter struct {
+ // eBPF objs to create/update eBPF maps
+ objects *ebpf.BpfObjects
+ config []*FilterConfig
+}
+
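+// NewFilter returns a Filter that can program the given filter configurations into the eBPF filter map.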
+func NewFilter(objects *ebpf.BpfObjects, cfg []*FilterConfig) *Filter {
+ return &Filter{
+ objects: objects,
+ config: cfg,
+ }
+}
+
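+// ProgramFilter writes each configured filter rule into the eBPF filter map, keyed by its CIDR.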
+func (f *Filter) ProgramFilter() error {
+
+ for _, config := range f.config {
+ log.Infof("Flow filter config: %v", f.config)
+ key, err := f.getFilterKey(config)
+ if err != nil {
+ return fmt.Errorf("failed to get filter key: %w", err)
+ }
+
+ val, err := f.getFilterValue(config)
+ if err != nil {
+ return fmt.Errorf("failed to get filter value: %w", err)
+ }
+
+ err = f.objects.FilterMap.Update(key, val, cilium.UpdateAny)
+ if err != nil {
+ return fmt.Errorf("failed to update filter map: %w", err)
+ }
+
+ log.Infof("Programmed filter with key: %v, value: %v", key, val)
+ }
+ return nil
+}
+
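+// getFilterKey builds the eBPF filter map key from the configured CIDR, defaulting to 0.0.0.0/0.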
+func (f *Filter) getFilterKey(config *FilterConfig) (ebpf.BpfFilterKeyT, error) {
+ key := ebpf.BpfFilterKeyT{}
+ if config.FilterIPCIDR == "" {
+ config.FilterIPCIDR = "0.0.0.0/0"
+ }
+ ip, ipNet, err := net.ParseCIDR(config.FilterIPCIDR)
+ if err != nil {
+ return key, fmt.Errorf("failed to parse FlowFilterIPCIDR: %w", err)
+ }
+ if ip.To4() != nil {
+ copy(key.IpData[:], ip.To4())
+ } else {
+ copy(key.IpData[:], ip.To16())
+ }
+ pfLen, _ := ipNet.Mask.Size()
+ key.PrefixLen = uint32(pfLen)
+
+ return key, nil
+}
+
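+// getFilterValue translates a FilterConfig into the eBPF filter map value (direction, action, protocol, ports, TCP flags, and so on).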
+// nolint:cyclop
+func (f *Filter) getFilterValue(config *FilterConfig) (ebpf.BpfFilterValueT, error) {
+ val := ebpf.BpfFilterValueT{}
+
+ switch config.FilterDirection {
+ case "Ingress":
+ val.Direction = ebpf.BpfDirectionTINGRESS
+ case "Egress":
+ val.Direction = ebpf.BpfDirectionTEGRESS
+ default:
+ val.Direction = ebpf.BpfDirectionTMAX_DIRECTION
+ }
+
+ switch config.FilterAction {
+ case "Reject":
+ val.Action = ebpf.BpfFilterActionTREJECT
+ case "Accept":
+ val.Action = ebpf.BpfFilterActionTACCEPT
+ default:
+ val.Action = ebpf.BpfFilterActionTMAX_FILTER_ACTIONS
+ }
+
+ switch config.FilterProtocol {
+ case "TCP":
+ val.Protocol = syscall.IPPROTO_TCP
+ case "UDP":
+ val.Protocol = syscall.IPPROTO_UDP
+ case "SCTP":
+ val.Protocol = syscall.IPPROTO_SCTP
+ case "ICMP":
+ val.Protocol = syscall.IPPROTO_ICMP
+ case "ICMPv6":
+ val.Protocol = syscall.IPPROTO_ICMPV6
+ }
+
+ val.DstPortStart, val.DstPortEnd = getDstPortsRange(config)
+ val.DstPort1, val.DstPort2 = getDstPorts(config)
+ val.SrcPortStart, val.SrcPortEnd = getSrcPortsRange(config)
+ val.SrcPort1, val.SrcPort2 = getSrcPorts(config)
+ val.PortStart, val.PortEnd = getPortsRange(config)
+ val.Port1, val.Port2 = getPorts(config)
+ val.IcmpType = uint8(config.FilterIcmpType)
+ val.IcmpCode = uint8(config.FilterIcmpCode)
+
+ if config.FilterPeerIP != "" {
+ ip := net.ParseIP(config.FilterPeerIP)
+ if ip.To4() != nil {
+ copy(val.Ip[:], ip.To4())
+ } else {
+ copy(val.Ip[:], ip.To16())
+ }
+ }
+
+ switch config.FilterTCPFlags {
+ case "SYN":
+ val.TcpFlags = ebpf.BpfTcpFlagsTSYN_FLAG
+ case "SYN-ACK":
+ val.TcpFlags = ebpf.BpfTcpFlagsTSYN_ACK_FLAG
+ case "ACK":
+ val.TcpFlags = ebpf.BpfTcpFlagsTACK_FLAG
+ case "FIN":
+ val.TcpFlags = ebpf.BpfTcpFlagsTFIN_FLAG
+ case "RST":
+ val.TcpFlags = ebpf.BpfTcpFlagsTRST_FLAG
+ case "PUSH":
+ val.TcpFlags = ebpf.BpfTcpFlagsTPSH_FLAG
+ case "URG":
+ val.TcpFlags = ebpf.BpfTcpFlagsTURG_FLAG
+ case "ECE":
+ val.TcpFlags = ebpf.BpfTcpFlagsTECE_FLAG
+ case "CWR":
+ val.TcpFlags = ebpf.BpfTcpFlagsTCWR_FLAG
+ case "FIN-ACK":
+ val.TcpFlags = ebpf.BpfTcpFlagsTFIN_ACK_FLAG
+ case "RST-ACK":
+ val.TcpFlags = ebpf.BpfTcpFlagsTRST_ACK_FLAG
+ }
+
+ if config.FilterDrops {
+ val.FilterDrops = 1
+ }
+
+ if config.FilterSample != 0 {
+ val.Sample = config.FilterSample
+ }
+ return val, nil
+}
+
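+// getSrcPortsRange returns either a single source port (with end set to 0) or a "start-end" range parsed from the config.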
+func getSrcPortsRange(config *FilterConfig) (uint16, uint16) {
+ if config.FilterSourcePort.Type == intstr.Int {
+ return uint16(config.FilterSourcePort.IntVal), 0
+ }
+ start, end, err := getPortsFromString(config.FilterSourcePort.String(), "-")
+ if err != nil {
+ return 0, 0
+ }
+ return start, end
+}
+
+func getSrcPorts(config *FilterConfig) (uint16, uint16) {
+ port1, port2, err := getPortsFromString(config.FilterSourcePort.String(), ",")
+ if err != nil {
+ return 0, 0
+ }
+ return port1, port2
+}
+
+func getDstPortsRange(config *FilterConfig) (uint16, uint16) {
+ if config.FilterDestinationPort.Type == intstr.Int {
+ return uint16(config.FilterDestinationPort.IntVal), 0
+ }
+ start, end, err := getPortsFromString(config.FilterDestinationPort.String(), "-")
+ if err != nil {
+ return 0, 0
+ }
+ return start, end
+}
+
+func getDstPorts(config *FilterConfig) (uint16, uint16) {
+ port1, port2, err := getPortsFromString(config.FilterDestinationPort.String(), ",")
+ if err != nil {
+ return 0, 0
+ }
+ return port1, port2
+}
+
+func getPortsRange(config *FilterConfig) (uint16, uint16) {
+ if config.FilterPort.Type == intstr.Int {
+ return uint16(config.FilterPort.IntVal), 0
+ }
+ start, end, err := getPortsFromString(config.FilterPort.String(), "-")
+ if err != nil {
+ return 0, 0
+ }
+ return start, end
+}
+
+func getPorts(config *FilterConfig) (uint16, uint16) {
+ port1, port2, err := getPortsFromString(config.FilterPort.String(), ",")
+ if err != nil {
+ return 0, 0
+ }
+ return port1, port2
+}
+
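+// getPortsFromString parses two ports separated by sep (either a "-" range or a "," pair) and validates them.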
+func getPortsFromString(s, sep string) (uint16, uint16, error) {
+ ps := strings.SplitN(s, sep, 2)
+ if len(ps) != 2 {
+ return 0, 0, fmt.Errorf("invalid ports range. Expected two integers separated by %s but found %s", sep, s)
+ }
+ startPort, err := strconv.ParseUint(ps[0], 10, 16)
+ if err != nil {
+ return 0, 0, fmt.Errorf("invalid start port number %w", err)
+ }
+ endPort, err := strconv.ParseUint(ps[1], 10, 16)
+ if err != nil {
+ return 0, 0, fmt.Errorf("invalid end port number %w", err)
+ }
+ if sep == "-" && startPort > endPort {
+ return 0, 0, fmt.Errorf("invalid port range. Start port is greater than end port")
+ }
+ if startPort == endPort {
+ return 0, 0, fmt.Errorf("invalid port range. Start and end port are equal. Remove the %s and enter a single port", sep)
+ }
+ if startPort == 0 {
+ return 0, 0, fmt.Errorf("invalid start port 0")
+ }
+ return uint16(startPort), uint16(endPort), nil
+}
+
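+// ConvertFilterPortsToInstr wraps a single port, a port range, or a port pair into one intstr.IntOrString value.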
+func ConvertFilterPortsToInstr(intPort int32, rangePorts, ports string) intstr.IntOrString {
+ if rangePorts != "" {
+ return intstr.FromString(rangePorts)
+ }
+ if ports != "" {
+ return intstr.FromString(ports)
+ }
+ return intstr.FromInt32(intPort)
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer.go
new file mode 100644
index 000000000..6b9ab6cc3
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer.go
@@ -0,0 +1,1674 @@
+package tracer
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/kernel"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+ "github.com/prometheus/client_golang/prometheus"
+
+ cilium "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/link"
+ "github.com/cilium/ebpf/perf"
+ "github.com/cilium/ebpf/ringbuf"
+ "github.com/cilium/ebpf/rlimit"
+ "github.com/gavv/monotime"
+ "github.com/sirupsen/logrus"
+ "github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
+ "golang.org/x/sys/unix"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+const (
+ qdiscType = "clsact"
+ // ebpf map names as defined in bpf/maps_definition.h
+ aggregatedFlowsMap = "aggregated_flows"
+ additionalFlowMetrics = "additional_flow_metrics"
+ dnsLatencyMap = "dns_flows"
+ // constants defined in flows.c as "volatile const"
+ constSampling = "sampling"
+ constTraceMessages = "trace_messages"
+ constEnableRtt = "enable_rtt"
+ constEnableDNSTracking = "enable_dns_tracking"
+ constDNSTrackingPort = "dns_port"
+ dnsDefaultPort = 53
+ constEnableFlowFiltering = "enable_flows_filtering"
+ constEnableNetworkEventsMonitoring = "enable_network_events_monitoring"
+ constNetworkEventsMonitoringGroupID = "network_events_monitoring_groupid"
+ constEnablePktTranslation = "enable_pkt_translation_tracking"
+ pktDropHook = "kfree_skb"
+ constPcaEnable = "enable_pca"
+ pcaRecordsMap = "packet_record"
+ tcEgressFilterName = "tc/tc_egress_flow_parse"
+ tcIngressFilterName = "tc/tc_ingress_flow_parse"
+ tcpFentryHook = "tcp_rcv_fentry"
+ tcpRcvKprobe = "tcp_rcv_kprobe"
+ rhNetworkEventsMonitoringHook = "rh_psample_sample_packet"
+ networkEventsMonitoringHook = "psample_sample_packet"
+ defaultNetworkEventsGroupID = 10
+)
+
+var log = logrus.WithField("component", "ebpf.FlowFetcher")
+var plog = logrus.WithField("component", "ebpf.PacketFetcher")
+
+// FlowFetcher reads and forwards the Flows from the Traffic Control hooks in the eBPF kernel space.
+// It provides access both to flows that are aggregated in the kernel space (via PerfCPU hashmap)
+// and to flows that are forwarded by the kernel via ringbuffer because could not be aggregated
+// in the map
+type FlowFetcher struct {
+ objects *ebpf.BpfObjects
+ qdiscs map[ifaces.Interface]*netlink.GenericQdisc
+ egressFilters map[ifaces.Interface]*netlink.BpfFilter
+ ingressFilters map[ifaces.Interface]*netlink.BpfFilter
+ ringbufReader *ringbuf.Reader
+ cacheMaxSize int
+ enableIngress bool
+ enableEgress bool
+ pktDropsTracePoint link.Link
+ rttFentryLink link.Link
+ rttKprobeLink link.Link
+ egressTCXLink map[ifaces.Interface]link.Link
+ ingressTCXLink map[ifaces.Interface]link.Link
+ networkEventsMonitoringLink link.Link
+ nfNatManIPLink link.Link
+ lookupAndDeleteSupported bool
+ useEbpfManager bool
+ pinDir string
+}
+
+type FlowFetcherConfig struct {
+ EnableIngress bool
+ EnableEgress bool
+ Debug bool
+ Sampling int
+ CacheMaxSize int
+ EnablePktDrops bool
+ EnableDNSTracker bool
+ DNSTrackerPort uint16
+ EnableRTT bool
+ EnableNetworkEventsMonitoring bool
+ NetworkEventsMonitoringGroupID int
+ EnableFlowFilter bool
+ EnablePCA bool
+ EnablePktTranslation bool
+ UseEbpfManager bool
+ BpfManBpfFSPath string
+ FilterConfig []*FilterConfig
+}
+
+// nolint:golint,cyclop
+func NewFlowFetcher(cfg *FlowFetcherConfig) (*FlowFetcher, error) {
+ var pktDropsLink, networkEventsMonitoringLink, rttFentryLink, rttKprobeLink link.Link
+ var nfNatManIPLink link.Link
+ var err error
+ objects := ebpf.BpfObjects{}
+ var pinDir string
+
+ if !cfg.UseEbpfManager {
+ if err := rlimit.RemoveMemlock(); err != nil {
+ log.WithError(err).
+ Warn("can't remove mem lock. The agent will not be able to start eBPF programs")
+ }
+ spec, err := ebpf.LoadBpf()
+ if err != nil {
+ return nil, fmt.Errorf("loading BPF data: %w", err)
+ }
+
+ // Resize maps according to user-provided configuration
+ spec.Maps[aggregatedFlowsMap].MaxEntries = uint32(cfg.CacheMaxSize)
+ if isEBPFFeaturesEnabled(cfg) {
+ spec.Maps[additionalFlowMetrics].MaxEntries = uint32(cfg.CacheMaxSize)
+ } else {
+ spec.Maps[additionalFlowMetrics].MaxEntries = 1
+ }
+ // remove pinning from all maps
+ maps2Name := []string{"aggregated_flows", "additional_flow_metrics", "direct_flows", "dns_flows", "filter_map", "global_counters", "packet_record"}
+ for _, m := range maps2Name {
+ spec.Maps[m].Pinning = 0
+ }
+
+ traceMsgs := 0
+ if cfg.Debug {
+ traceMsgs = 1
+ }
+
+ enableRtt := 0
+ if cfg.EnableRTT {
+ enableRtt = 1
+ }
+
+ enableDNSTracking := 0
+ dnsTrackerPort := uint16(dnsDefaultPort)
+ if cfg.EnableDNSTracker {
+ enableDNSTracking = 1
+ if cfg.DNSTrackerPort != 0 {
+ dnsTrackerPort = cfg.DNSTrackerPort
+ }
+ }
+
+ if enableDNSTracking == 0 {
+ spec.Maps[dnsLatencyMap].MaxEntries = 1
+ }
+
+ enableFlowFiltering := 0
+ if cfg.EnableFlowFilter {
+ enableFlowFiltering = 1
+ }
+ enableNetworkEventsMonitoring := 0
+ if cfg.EnableNetworkEventsMonitoring {
+ enableNetworkEventsMonitoring = 1
+ }
+ networkEventsMonitoringGroupID := defaultNetworkEventsGroupID
+ if cfg.NetworkEventsMonitoringGroupID > 0 {
+ networkEventsMonitoringGroupID = cfg.NetworkEventsMonitoringGroupID
+ }
+ enablePktTranslation := 0
+ if cfg.EnablePktTranslation {
+ enablePktTranslation = 1
+ }
+ if err := spec.RewriteConstants(map[string]interface{}{
+ constSampling: uint32(cfg.Sampling),
+ constTraceMessages: uint8(traceMsgs),
+ constEnableRtt: uint8(enableRtt),
+ constEnableDNSTracking: uint8(enableDNSTracking),
+ constDNSTrackingPort: dnsTrackerPort,
+ constEnableFlowFiltering: uint8(enableFlowFiltering),
+ constEnableNetworkEventsMonitoring: uint8(enableNetworkEventsMonitoring),
+ constNetworkEventsMonitoringGroupID: uint8(networkEventsMonitoringGroupID),
+ constEnablePktTranslation: uint8(enablePktTranslation),
+ }); err != nil {
+ return nil, fmt.Errorf("rewriting BPF constants definition: %w", err)
+ }
+
+ oldKernel := kernel.IsKernelOlderThan("5.14.0")
+ if oldKernel {
+ log.Infof("kernel older than 5.14.0 detected: not all hooks are supported")
+ }
+ rtOldKernel := kernel.IsRealTimeKernel() && kernel.IsKernelOlderThan("5.14.0-292")
+ if rtOldKernel {
+ log.Infof("kernel is realtime and older than 5.14.0-292 not all hooks are supported")
+ }
+ supportNetworkEvents := !kernel.IsKernelOlderThan("5.14.0-427")
+ objects, err = kernelSpecificLoadAndAssign(oldKernel, rtOldKernel, supportNetworkEvents, spec, pinDir)
+ if err != nil {
+ return nil, err
+ }
+
+ log.Debugf("Deleting specs for PCA")
+ // Deleting specs for PCA
+ // Always set pcaRecordsMap to the minimum in FlowFetcher - PCA and Flow Fetcher are mutually exclusive.
+ spec.Maps[pcaRecordsMap].MaxEntries = 1
+
+ objects.TcxEgressPcaParse = nil
+ objects.TcIngressPcaParse = nil
+ delete(spec.Programs, constPcaEnable)
+
+ if cfg.EnablePktDrops && !oldKernel && !rtOldKernel {
+ pktDropsLink, err = link.Tracepoint("skb", pktDropHook, objects.KfreeSkb, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to attach the BPF program to kfree_skb tracepoint: %w", err)
+ }
+ }
+
+ if cfg.EnableNetworkEventsMonitoring {
+ if supportNetworkEvents {
+ // Enable the following logic with RHEL9.6 when its available
+ if !kernel.IsKernelOlderThan("5.16.0") {
+ //revive:disable
+ /*
+ networkEventsMonitoringLink, err = link.Kprobe(networkEventsMonitoringHook, objects.NetworkEventsMonitoring, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to attach the BPF program network events monitoring kprobe: %w", err)
+ }
+ */
+ } else {
+ log.Infof("kernel older than 5.16.0 detected: use custom network_events_monitoring hook")
+ networkEventsMonitoringLink, err = link.Kprobe(rhNetworkEventsMonitoringHook, objects.RhNetworkEventsMonitoring, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to attach the BPF program network events monitoring kprobe: %w", err)
+ }
+ }
+ } else {
+ log.Infof("kernel older than 5.14.0-427 detected: it does not support network_events_monitoring hook, skip")
+ }
+ }
+
+ if cfg.EnableRTT {
+ if !oldKernel {
+ rttFentryLink, err = link.AttachTracing(link.TracingOptions{
+ Program: objects.BpfPrograms.TcpRcvFentry,
+ })
+ if err == nil {
+ goto next
+ }
+ if err != nil {
+ log.Warningf("failed to attach the BPF program to tcpReceiveFentry: %v fallback to use kprobe", err)
+ // Fall through to use kprobe
+ }
+ }
+ // try to use kprobe for older kernels
+ if !rtOldKernel {
+ rttKprobeLink, err = link.Kprobe("tcp_rcv_established", objects.TcpRcvKprobe, nil)
+ if err != nil {
+ log.Warningf("failed to attach the BPF program to kprobe: %v", err)
+ return nil, fmt.Errorf("failed to attach the BPF program to tcpReceiveKprobe: %w", err)
+ }
+ }
+ }
+ next:
+ if cfg.EnablePktTranslation {
+ nfNatManIPLink, err = link.Kprobe("nf_nat_manip_pkt", objects.TrackNatManipPkt, nil)
+ if err != nil {
+ log.Warningf("failed to attach the BPF program to nat_manip kprobe: %v", err)
+ return nil, fmt.Errorf("failed to attach the BPF program to nat_manip kprobe: %w", err)
+ }
+ }
+ } else {
+ pinDir = cfg.BpfManBpfFSPath
+ opts := &cilium.LoadPinOptions{
+ ReadOnly: false,
+ WriteOnly: false,
+ Flags: 0,
+ }
+
+ log.Info("BPFManager mode: loading aggregated flows pinned maps")
+ mPath := path.Join(pinDir, "aggregated_flows")
+ objects.BpfMaps.AggregatedFlows, err = cilium.LoadPinnedMap(mPath, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load %s: %w", mPath, err)
+ }
+ log.Info("BPFManager mode: loading additional flow metrics pinned maps")
+ mPath = path.Join(pinDir, "additional_flow_metrics")
+ objects.BpfMaps.AdditionalFlowMetrics, err = cilium.LoadPinnedMap(mPath, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load %s: %w", mPath, err)
+ }
+ log.Info("BPFManager mode: loading direct flows pinned maps")
+ mPath = path.Join(pinDir, "direct_flows")
+ objects.BpfMaps.DirectFlows, err = cilium.LoadPinnedMap(mPath, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load %s: %w", mPath, err)
+ }
+ log.Infof("BPFManager mode: loading DNS flows pinned maps")
+ mPath = path.Join(pinDir, "dns_flows")
+ objects.BpfMaps.DnsFlows, err = cilium.LoadPinnedMap(mPath, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load %s: %w", mPath, err)
+ }
+ log.Infof("BPFManager mode: loading filter pinned maps")
+ mPath = path.Join(pinDir, "filter_map")
+ objects.BpfMaps.FilterMap, err = cilium.LoadPinnedMap(mPath, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load %s: %w", mPath, err)
+ }
+ log.Infof("BPFManager mode: loading global counters pinned maps")
+ mPath = path.Join(pinDir, "global_counters")
+ objects.BpfMaps.GlobalCounters, err = cilium.LoadPinnedMap(mPath, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load %s: %w", mPath, err)
+ }
+ log.Infof("BPFManager mode: loading packet record pinned maps")
+ mPath = path.Join(pinDir, "packet_record")
+ objects.BpfMaps.PacketRecord, err = cilium.LoadPinnedMap(mPath, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load %s: %w", mPath, err)
+ }
+ }
+
+ if cfg.EnableFlowFilter {
+ f := NewFilter(&objects, cfg.FilterConfig)
+ if err := f.ProgramFilter(); err != nil {
+ return nil, fmt.Errorf("programming flow filter: %w", err)
+ }
+ }
+
+ flows, err := ringbuf.NewReader(objects.BpfMaps.DirectFlows)
+ if err != nil {
+ return nil, fmt.Errorf("accessing to ringbuffer: %w", err)
+ }
+
+ return &FlowFetcher{
+ objects: &objects,
+ ringbufReader: flows,
+ egressFilters: map[ifaces.Interface]*netlink.BpfFilter{},
+ ingressFilters: map[ifaces.Interface]*netlink.BpfFilter{},
+ qdiscs: map[ifaces.Interface]*netlink.GenericQdisc{},
+ cacheMaxSize: cfg.CacheMaxSize,
+ enableIngress: cfg.EnableIngress,
+ enableEgress: cfg.EnableEgress,
+ pktDropsTracePoint: pktDropsLink,
+ rttFentryLink: rttFentryLink,
+ rttKprobeLink: rttKprobeLink,
+ nfNatManIPLink: nfNatManIPLink,
+ egressTCXLink: map[ifaces.Interface]link.Link{},
+ ingressTCXLink: map[ifaces.Interface]link.Link{},
+ networkEventsMonitoringLink: networkEventsMonitoringLink,
+ lookupAndDeleteSupported: true, // this will be turned off later if found to be not supported
+ useEbpfManager: cfg.UseEbpfManager,
+ pinDir: pinDir,
+ }, nil
+}
+
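+// isEBPFFeaturesEnabled tells whether any optional eBPF feature that requires additional flow metrics is enabled.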
+func isEBPFFeaturesEnabled(cfg *FlowFetcherConfig) bool {
+ if cfg.EnableNetworkEventsMonitoring || cfg.EnableRTT || cfg.EnablePktDrops || cfg.EnableDNSTracker || cfg.EnablePktTranslation {
+ return true
+ }
+ return false
+}
+
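+// AttachTCX attaches the TCX ingress/egress eBPF programs to the given interface, switching to its network namespace when needed.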
+func (m *FlowFetcher) AttachTCX(iface ifaces.Interface) error {
+ ilog := log.WithField("iface", iface)
+ if iface.NetNS != netns.None() {
+ originalNs, err := netns.Get()
+ if err != nil {
+ return fmt.Errorf("failed to get current netns: %w", err)
+ }
+ defer func() {
+ if err := netns.Set(originalNs); err != nil {
+ ilog.WithError(err).Error("failed to set netns back")
+ }
+ originalNs.Close()
+ }()
+ if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil {
+ return fmt.Errorf("failed to setns to %s: %w", iface.NetNS, err)
+ }
+ }
+
+ if m.enableEgress {
+ egrLink, err := link.AttachTCX(link.TCXOptions{
+ Program: m.objects.BpfPrograms.TcxEgressFlowParse,
+ Attach: cilium.AttachTCXEgress,
+ Interface: iface.Index,
+ })
+ if err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ // The interface already has a TCX egress hook
+ log.WithField("iface", iface.Name).Debug("interface already has a TCX egress hook ignore")
+ } else {
+ return fmt.Errorf("failed to attach TCX egress: %w", err)
+ }
+ }
+ m.egressTCXLink[iface] = egrLink
+ ilog.WithField("interface", iface.Name).Debug("successfully attach egressTCX hook")
+ }
+
+ if m.enableIngress {
+ ingLink, err := link.AttachTCX(link.TCXOptions{
+ Program: m.objects.BpfPrograms.TcxIngressFlowParse,
+ Attach: cilium.AttachTCXIngress,
+ Interface: iface.Index,
+ })
+ if err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ // The interface already has a TCX ingress hook
+ log.WithField("iface", iface.Name).Debug("interface already has a TCX ingress hook ignore")
+ } else {
+ return fmt.Errorf("failed to attach TCX ingress: %w", err)
+ }
+ }
+ m.ingressTCXLink[iface] = ingLink
+ ilog.WithField("interface", iface.Name).Debug("successfully attach ingressTCX hook")
+ }
+
+ return nil
+}
+
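+// DetachTCX closes the TCX ingress/egress links previously attached to the given interface.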
+func (m *FlowFetcher) DetachTCX(iface ifaces.Interface) error {
+ ilog := log.WithField("iface", iface)
+ if iface.NetNS != netns.None() {
+ originalNs, err := netns.Get()
+ if err != nil {
+ return fmt.Errorf("failed to get current netns: %w", err)
+ }
+ defer func() {
+ if err := netns.Set(originalNs); err != nil {
+ ilog.WithError(err).Error("failed to set netns back")
+ }
+ originalNs.Close()
+ }()
+ if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil {
+ return fmt.Errorf("failed to setns to %s: %w", iface.NetNS, err)
+ }
+ }
+ if m.enableEgress {
+ if l := m.egressTCXLink[iface]; l != nil {
+ if err := l.Close(); err != nil {
+ return fmt.Errorf("TCX: failed to close egress link: %w", err)
+ }
+ ilog.WithField("interface", iface.Name).Debug("successfully detach egressTCX hook")
+ } else {
+ return fmt.Errorf("egress link does not have a TCX egress hook")
+ }
+ }
+
+ if m.enableIngress {
+ if l := m.ingressTCXLink[iface]; l != nil {
+ if err := l.Close(); err != nil {
+ return fmt.Errorf("TCX: failed to close ingress link: %w", err)
+ }
+ ilog.WithField("interface", iface.Name).Debug("successfully detach ingressTCX hook")
+ } else {
+ return fmt.Errorf("ingress link does not have a TCX ingress hook")
+ }
+ }
+
+ return nil
+}
+
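+// removeTCFilters deletes all TC filters installed in the given direction on the named interface.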
+func removeTCFilters(ifName string, tcDir uint32) error {
+ link, err := netlink.LinkByName(ifName)
+ if err != nil {
+ return err
+ }
+
+ filters, err := netlink.FilterList(link, tcDir)
+ if err != nil {
+ return err
+ }
+ var errs []error
+ for _, f := range filters {
+ if err := netlink.FilterDel(f); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return kerrors.NewAggregate(errs)
+}
+
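+// unregister removes any stale TC filters previously installed by the agent on the given interface.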
+func unregister(iface ifaces.Interface) error {
+ ilog := log.WithField("iface", iface)
+ ilog.Debugf("looking for previously installed TC filters on %s", iface.Name)
+ links, err := netlink.LinkList()
+ if err != nil {
+ return fmt.Errorf("retrieving all netlink devices: %w", err)
+ }
+
+ egressDevs := []netlink.Link{}
+ ingressDevs := []netlink.Link{}
+ for _, l := range links {
+ if l.Attrs().Name != iface.Name {
+ continue
+ }
+ ingressFilters, err := netlink.FilterList(l, netlink.HANDLE_MIN_INGRESS)
+ if err != nil {
+ return fmt.Errorf("listing ingress filters: %w", err)
+ }
+ for _, filter := range ingressFilters {
+ if bpfFilter, ok := filter.(*netlink.BpfFilter); ok {
+ if strings.HasPrefix(bpfFilter.Name, tcIngressFilterName) {
+ ingressDevs = append(ingressDevs, l)
+ }
+ }
+ }
+
+ egressFilters, err := netlink.FilterList(l, netlink.HANDLE_MIN_EGRESS)
+ if err != nil {
+ return fmt.Errorf("listing egress filters: %w", err)
+ }
+ for _, filter := range egressFilters {
+ if bpfFilter, ok := filter.(*netlink.BpfFilter); ok {
+ if strings.HasPrefix(bpfFilter.Name, tcEgressFilterName) {
+ egressDevs = append(egressDevs, l)
+ }
+ }
+ }
+ }
+
+ for _, dev := range ingressDevs {
+ ilog.Debugf("removing ingress stale tc filters from %s", dev.Attrs().Name)
+ err = removeTCFilters(dev.Attrs().Name, netlink.HANDLE_MIN_INGRESS)
+ if err != nil {
+ ilog.WithError(err).Errorf("couldn't remove ingress tc filters from %s", dev.Attrs().Name)
+ }
+ }
+
+ for _, dev := range egressDevs {
+ ilog.Debugf("removing egress stale tc filters from %s", dev.Attrs().Name)
+ err = removeTCFilters(dev.Attrs().Name, netlink.HANDLE_MIN_EGRESS)
+ if err != nil {
+ ilog.WithError(err).Errorf("couldn't remove egress tc filters from %s", dev.Attrs().Name)
+ }
+ }
+
+ return nil
+}
+
+func (m *FlowFetcher) UnRegister(iface ifaces.Interface) error {
+ // qdiscs, ingress and egress filters are automatically deleted so we don't need to
+ // specifically detach them from the ebpfFetcher
+ return unregister(iface)
+}
+
+// Register attaches and links the eBPF fetcher to the given interface. The program should invoke
+// UnRegister before exiting.
+func (m *FlowFetcher) Register(iface ifaces.Interface) error {
+ ilog := log.WithField("iface", iface)
+ handle, err := netlink.NewHandleAt(iface.NetNS)
+ if err != nil {
+ return fmt.Errorf("failed to create handle for netns (%s): %w", iface.NetNS.String(), err)
+ }
+ defer handle.Close()
+
+ // Load pre-compiled programs and maps into the kernel, and rewrites the configuration
+ ipvlan, err := handle.LinkByIndex(iface.Index)
+ if err != nil {
+ return fmt.Errorf("failed to lookup ipvlan device %d (%s): %w", iface.Index, iface.Name, err)
+ }
+ qdiscAttrs := netlink.QdiscAttrs{
+ LinkIndex: ipvlan.Attrs().Index,
+ Handle: netlink.MakeHandle(0xffff, 0),
+ Parent: netlink.HANDLE_CLSACT,
+ }
+ qdisc := &netlink.GenericQdisc{
+ QdiscAttrs: qdiscAttrs,
+ QdiscType: qdiscType,
+ }
+ if err := handle.QdiscDel(qdisc); err == nil {
+ ilog.Warn("qdisc clsact already existed. Deleted it")
+ }
+ if err := handle.QdiscAdd(qdisc); err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ ilog.WithError(err).Warn("qdisc clsact already exists. Ignoring")
+ } else {
+ return fmt.Errorf("failed to create clsact qdisc on %d (%s): %w", iface.Index, iface.Name, err)
+ }
+ }
+ m.qdiscs[iface] = qdisc
+
+ // Remove previously installed filters
+ if err := unregister(iface); err != nil {
+ return fmt.Errorf("failed to remove previous filters: %w", err)
+ }
+
+ if err := m.registerEgress(iface, ipvlan, handle); err != nil {
+ return err
+ }
+
+ return m.registerIngress(iface, ipvlan, handle)
+}
+
+func (m *FlowFetcher) registerEgress(iface ifaces.Interface, ipvlan netlink.Link, handle *netlink.Handle) error {
+ ilog := log.WithField("iface", iface)
+ if !m.enableEgress {
+ ilog.Debug("ignoring egress traffic, according to user configuration")
+ return nil
+ }
+ // Fetch events on egress
+ egressAttrs := netlink.FilterAttrs{
+ LinkIndex: ipvlan.Attrs().Index,
+ Parent: netlink.HANDLE_MIN_EGRESS,
+ Handle: netlink.MakeHandle(0, 1),
+ Protocol: 3,
+ Priority: 1,
+ }
+ egressFilter := &netlink.BpfFilter{
+ FilterAttrs: egressAttrs,
+ Fd: m.objects.TcEgressFlowParse.FD(),
+ Name: tcEgressFilterName,
+ DirectAction: true,
+ }
+ if err := handle.FilterDel(egressFilter); err == nil {
+ ilog.Warn("egress filter already existed. Deleted it")
+ }
+ if err := handle.FilterAdd(egressFilter); err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ ilog.WithError(err).Warn("egress filter already exists. Ignoring")
+ } else {
+ return fmt.Errorf("failed to create egress filter: %w", err)
+ }
+ }
+ m.egressFilters[iface] = egressFilter
+ return nil
+}
+
+func (m *FlowFetcher) registerIngress(iface ifaces.Interface, ipvlan netlink.Link, handle *netlink.Handle) error {
+ ilog := log.WithField("iface", iface)
+ if !m.enableIngress {
+ ilog.Debug("ignoring ingress traffic, according to user configuration")
+ return nil
+ }
+ // Fetch events on ingress
+ ingressAttrs := netlink.FilterAttrs{
+ LinkIndex: ipvlan.Attrs().Index,
+ Parent: netlink.HANDLE_MIN_INGRESS,
+ Handle: netlink.MakeHandle(0, 1),
+ Protocol: unix.ETH_P_ALL,
+ Priority: 1,
+ }
+ ingressFilter := &netlink.BpfFilter{
+ FilterAttrs: ingressAttrs,
+ Fd: m.objects.TcIngressFlowParse.FD(),
+ Name: tcIngressFilterName,
+ DirectAction: true,
+ }
+ if err := handle.FilterDel(ingressFilter); err == nil {
+ ilog.Warn("ingress filter already existed. Deleted it")
+ }
+ if err := handle.FilterAdd(ingressFilter); err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ ilog.WithError(err).Warn("ingress filter already exists. Ignoring")
+ } else {
+ return fmt.Errorf("failed to create ingress filter: %w", err)
+ }
+ }
+ m.ingressFilters[iface] = ingressFilter
+ return nil
+}
+
+// Close the eBPF fetcher from the system.
+// We don't need a "Close(iface)" method because the filters and qdiscs
+// are automatically removed when the interface is down
+// nolint:cyclop
+func (m *FlowFetcher) Close() error {
+ log.Debug("unregistering eBPF objects")
+
+ var errs []error
+
+ if m.pktDropsTracePoint != nil {
+ if err := m.pktDropsTracePoint.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if m.rttFentryLink != nil {
+ if err := m.rttFentryLink.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if m.rttKprobeLink != nil {
+ if err := m.rttKprobeLink.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if m.networkEventsMonitoringLink != nil {
+ if err := m.networkEventsMonitoringLink.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if m.nfNatManIPLink != nil {
+ if err := m.nfNatManIPLink.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ // m.ringbufReader.Read is a blocking operation, so we need to close the ring buffer
+ // from another goroutine to avoid the system not being able to exit if there
+ // isn't traffic in a given interface
+ if m.ringbufReader != nil {
+ if err := m.ringbufReader.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if m.objects != nil {
+ if err := m.objects.TcEgressFlowParse.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := m.objects.TcIngressFlowParse.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := m.objects.TcxEgressFlowParse.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := m.objects.TcxIngressFlowParse.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := m.objects.AggregatedFlows.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := m.objects.DirectFlows.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := m.objects.DnsFlows.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := m.objects.GlobalCounters.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := m.objects.FilterMap.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if len(errs) == 0 {
+ m.objects = nil
+ }
+ }
+
+ for iface, ef := range m.egressFilters {
+ log := log.WithField("interface", iface)
+ log.Debug("deleting egress filter")
+ if err := doIgnoreNoDev(netlink.FilterDel, netlink.Filter(ef), log); err != nil {
+ errs = append(errs, fmt.Errorf("deleting egress filter: %w", err))
+ }
+ }
+ m.egressFilters = map[ifaces.Interface]*netlink.BpfFilter{}
+ for iface, igf := range m.ingressFilters {
+ log := log.WithField("interface", iface)
+ log.Debug("deleting ingress filter")
+ if err := doIgnoreNoDev(netlink.FilterDel, netlink.Filter(igf), log); err != nil {
+ errs = append(errs, fmt.Errorf("deleting ingress filter: %w", err))
+ }
+ }
+ m.ingressFilters = map[ifaces.Interface]*netlink.BpfFilter{}
+ for iface, qd := range m.qdiscs {
+ log := log.WithField("interface", iface)
+ log.Debug("deleting Qdisc")
+ if err := doIgnoreNoDev(netlink.QdiscDel, netlink.Qdisc(qd), log); err != nil {
+ errs = append(errs, fmt.Errorf("deleting qdisc: %w", err))
+ }
+ }
+ m.qdiscs = map[ifaces.Interface]*netlink.GenericQdisc{}
+
+ for iface, l := range m.egressTCXLink {
+ log := log.WithField("interface", iface)
+ log.Debug("detach egress TCX hook")
+ l.Close()
+ }
+ m.egressTCXLink = map[ifaces.Interface]link.Link{}
+ for iface, l := range m.ingressTCXLink {
+ log := log.WithField("interface", iface)
+ log.Debug("detach ingress TCX hook")
+ l.Close()
+ }
+ m.ingressTCXLink = map[ifaces.Interface]link.Link{}
+
+ if !m.useEbpfManager {
+ if err := m.removeAllPins(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+
+ var errStrings []string
+ for _, err := range errs {
+ errStrings = append(errStrings, err.Error())
+ }
+ return errors.New(`errors: "` + strings.Join(errStrings, `", "`) + `"`)
+}
+
+// removeAllPins removes all pinned maps from the pin directory, then removes the directory itself.
+func (m *FlowFetcher) removeAllPins() error {
+ files, err := os.ReadDir(m.pinDir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ for _, file := range files {
+ if err := os.Remove(path.Join(m.pinDir, file.Name())); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err := os.Remove(m.pinDir); err != nil {
+ return err
+ }
+ return nil
+}
+
+// doIgnoreNoDev runs the provided syscall over the provided device and ignores the error
+// if the cause is a non-existing device (just logs the error as debug).
+// If the agent is deployed as part of the Network Observability pipeline, normally
+// undeploying the FlowCollector could cause the agent to try to remove resources
+// from Pods that have been removed immediately before (e.g. flowlogs-pipeline or the
+// console plugin), so we avoid logging some errors that would unnecessarily raise the
+// user's attention.
+// This function uses generics because the set of provided functions accept different argument
+// types.
+func doIgnoreNoDev[T any](sysCall func(T) error, dev T, log *logrus.Entry) error {
+ if err := sysCall(dev); err != nil {
+ if errors.Is(err, unix.ENODEV) {
+ log.WithError(err).Error("can't delete. Ignore this error if other pods or interfaces " +
+ " are also being deleted at this moment. For example, if you are undeploying " +
+ " a FlowCollector or Deployment where this agent is part of")
+ } else {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *FlowFetcher) ReadRingBuf() (ringbuf.Record, error) {
+ return m.ringbufReader.Read()
+}
+
+// LookupAndDeleteMap reads all the entries from the eBPF map and removes them from it.
+// TODO: detect whether BatchLookupAndDelete is supported (Kernel>=5.6) and use it selectively
+// Supported Lookup/Delete operations by kernel: https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md
+func (m *FlowFetcher) LookupAndDeleteMap(met *metrics.Metrics) map[ebpf.BpfFlowId]model.BpfFlowContent {
+ if !m.lookupAndDeleteSupported {
+ return m.legacyLookupAndDeleteMap(met)
+ }
+
+ flowMap := m.objects.AggregatedFlows
+
+ iterator := flowMap.Iterate()
+ var flows = make(map[ebpf.BpfFlowId]model.BpfFlowContent, m.cacheMaxSize)
+ var ids []ebpf.BpfFlowId
+ var id ebpf.BpfFlowId
+ var baseMetrics ebpf.BpfFlowMetrics
+ var additionalMetrics []ebpf.BpfAdditionalMetrics
+
+ // First, get all ids and don't care about metrics (we need lookup+delete to be atomic)
+ for iterator.Next(&id, &baseMetrics) {
+ ids = append(ids, id)
+ }
+
+ count := 0
+ // Run the atomic Lookup+Delete; if new ids have been inserted in the meantime, they'll be fetched next time
+ for i, id := range ids {
+ count++
+ if err := flowMap.LookupAndDelete(&id, &baseMetrics); err != nil {
+ if i == 0 && errors.Is(err, cilium.ErrNotSupported) {
+ log.WithError(err).Warnf("switching to legacy mode")
+ m.lookupAndDeleteSupported = false
+ return m.legacyLookupAndDeleteMap(met)
+ }
+ log.WithError(err).WithField("flowId", id).Warnf("couldn't lookup/delete flow entry")
+ met.Errors.WithErrorName("flow-fetcher", "CannotDeleteFlows").Inc()
+ continue
+ }
+ flowPayload := model.BpfFlowContent{BpfFlowMetrics: &ebpf.BpfFlowMetrics{}}
+ flowPayload.AccumulateBase(&baseMetrics)
+
+ // Fetch additional metrics; ids are without direction and interface
+ shorterID := id
+ shorterID.Direction = 0
+ shorterID.IfIndex = 0
+ if err := m.objects.AdditionalFlowMetrics.LookupAndDelete(&shorterID, &additionalMetrics); err != nil {
+ if !errors.Is(err, cilium.ErrKeyNotExist) {
+ log.WithError(err).WithField("flowId", id).Warnf("couldn't lookup/delete additional metrics entry")
+ met.Errors.WithErrorName("flow-fetcher", "CannotDeleteAdditionalMetric").Inc()
+ }
+ } else {
+ for i := range additionalMetrics {
+ flowPayload.AccumulateAdditional(&additionalMetrics[i])
+ }
+ }
+ flows[id] = flowPayload
+ }
+ met.BufferSizeGauge.WithBufferName("hashmap-total").Set(float64(count))
+ met.BufferSizeGauge.WithBufferName("hashmap-unique").Set(float64(len(flows)))
+
+ m.ReadGlobalCounter(met)
+ return flows
+}
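+
+// Illustrative usage sketch (not part of the upstream code): callers usually drain the map on a
+// timer and forward the aggregated flows. The 5s period and the "met" and "export" identifiers
+// below are hypothetical.
+//
+//	for range time.Tick(5 * time.Second) {
+//		for id, flow := range fetcher.LookupAndDeleteMap(met) {
+//			export(id, flow)
+//		}
+//	}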
+
+// ReadGlobalCounter reads the global counters map and updates the dropped, filtered, and network-events counter metrics
+func (m *FlowFetcher) ReadGlobalCounter(met *metrics.Metrics) {
+ var allCPUValue []uint32
+ globalCounters := map[ebpf.BpfGlobalCountersKeyT]prometheus.Counter{
+ ebpf.BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED: met.DroppedFlowsCounter.WithSourceAndReason("flow-fetcher", "CannotUpdateFlowsHashMap"),
+ ebpf.BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS: met.DroppedFlowsCounter.WithSourceAndReason("flow-fetcher", "CannotUpdateDNSHashMap"),
+ ebpf.BpfGlobalCountersKeyTFILTER_REJECT: met.FilteredFlowsCounter.WithSourceAndReason("flow-filtering", "FilterReject"),
+ ebpf.BpfGlobalCountersKeyTFILTER_ACCEPT: met.FilteredFlowsCounter.WithSourceAndReason("flow-filtering", "FilterAccept"),
+ ebpf.BpfGlobalCountersKeyTFILTER_NOMATCH: met.FilteredFlowsCounter.WithSourceAndReason("flow-filtering", "FilterNoMatch"),
+ ebpf.BpfGlobalCountersKeyTNETWORK_EVENTS_ERR: met.NetworkEventsCounter.WithSourceAndReason("network-events", "NetworkEventsErrors"),
+ ebpf.BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH: met.NetworkEventsCounter.WithSourceAndReason("network-events", "NetworkEventsErrorsGroupIDMismatch"),
+ ebpf.BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS: met.NetworkEventsCounter.WithSourceAndReason("network-events", "NetworkEventsErrorsFlowMapUpdate"),
+ ebpf.BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD: met.NetworkEventsCounter.WithSourceAndReason("network-events", "NetworkEventsGoodEvent"),
+ }
+ zeroCounters := make([]uint32, cilium.MustPossibleCPU())
+ for key := ebpf.BpfGlobalCountersKeyT(0); key < ebpf.BpfGlobalCountersKeyTMAX_COUNTERS; key++ {
+ if err := m.objects.GlobalCounters.Lookup(key, &allCPUValue); err != nil {
+ log.WithError(err).Warnf("couldn't read global counter")
+ return
+ }
+ metric := globalCounters[key]
+ if metric != nil {
+ // aggregate all the counters
+ for _, counter := range allCPUValue {
+ metric.Add(float64(counter))
+ }
+ }
+ // reset the global counter-map entry
+ if err := m.objects.GlobalCounters.Put(key, zeroCounters); err != nil {
+			log.WithError(err).Warnf("couldn't reset global counter")
+ return
+ }
+ }
+}
+
+// DeleteMapsStaleEntries looks for any stale entries in the feature maps and deletes them
+func (m *FlowFetcher) DeleteMapsStaleEntries(timeOut time.Duration) {
+ m.lookupAndDeleteDNSMap(timeOut)
+}
+
+// lookupAndDeleteDNSMap iterates over the DNS queries map and deletes any stale DNS request
+// entries that never got a response.
+func (m *FlowFetcher) lookupAndDeleteDNSMap(timeOut time.Duration) {
+ monotonicTimeNow := monotime.Now()
+ dnsMap := m.objects.DnsFlows
+ var dnsKey ebpf.BpfDnsFlowId
+ var keysToDelete []ebpf.BpfDnsFlowId
+ var dnsVal uint64
+
+ if dnsMap != nil {
+ // Ideally the Lookup + Delete should be atomic, however we cannot use LookupAndDelete since the deletion is conditional
+ // Do not delete while iterating, as it causes severe performance degradation
+ iterator := dnsMap.Iterate()
+ for iterator.Next(&dnsKey, &dnsVal) {
+ if time.Duration(uint64(monotonicTimeNow)-dnsVal) >= timeOut {
+ keysToDelete = append(keysToDelete, dnsKey)
+ }
+ }
+ for _, dnsKey = range keysToDelete {
+ if err := dnsMap.Delete(dnsKey); err != nil {
+ log.WithError(err).WithField("dnsKey", dnsKey).Warnf("couldn't delete DNS record entry")
+ }
+ }
+ }
+}
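+
+// Illustrative usage sketch (not part of the upstream code): stale DNS entries can be evicted on
+// a timer; the 10s period and 30s timeout below are assumptions.
+//
+//	go func() {
+//		for range time.Tick(10 * time.Second) {
+//			fetcher.DeleteMapsStaleEntries(30 * time.Second)
+//		}
+//	}()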
+
+// kernelSpecificLoadAndAssign loads and assigns only the eBPF hooks supported by the detected kernel
+func kernelSpecificLoadAndAssign(oldKernel, rtKernel, supportNetworkEvents bool, spec *cilium.CollectionSpec, pinDir string) (ebpf.BpfObjects, error) {
+ objects := ebpf.BpfObjects{}
+
+ // Helper to remove common hooks
+ removeCommonHooks := func() {
+ delete(spec.Programs, pktDropHook)
+ delete(spec.Programs, rhNetworkEventsMonitoringHook)
+ }
+
+ // Helper to load and assign BPF objects
+ loadAndAssign := func(objects interface{}) error {
+ if err := spec.LoadAndAssign(objects, &cilium.CollectionOptions{Maps: cilium.MapOptions{PinPath: pinDir}}); err != nil {
+ var ve *cilium.VerifierError
+ if errors.As(err, &ve) {
+ log.Infof("Verifier error: %+v", ve)
+ }
+ return fmt.Errorf("loading and assigning BPF objects: %w", err)
+ }
+ return nil
+ }
+
+ // Configure BPF programs based on the kernel type
+ switch {
+ case oldKernel && rtKernel:
+ type newBpfPrograms struct {
+ TcEgressFlowParse *cilium.Program `ebpf:"tc_egress_flow_parse"`
+ TcIngressFlowParse *cilium.Program `ebpf:"tc_ingress_flow_parse"`
+ TcxEgressFlowParse *cilium.Program `ebpf:"tcx_egress_flow_parse"`
+ TcxIngressFlowParse *cilium.Program `ebpf:"tcx_ingress_flow_parse"`
+ TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"`
+ TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"`
+ TrackNatManipPkt *cilium.Program `ebpf:"track_nat_manip_pkt"`
+ }
+ type newBpfObjects struct {
+ newBpfPrograms
+ ebpf.BpfMaps
+ }
+ var newObjects newBpfObjects
+ removeCommonHooks()
+ delete(spec.Programs, tcpRcvKprobe)
+ delete(spec.Programs, tcpFentryHook)
+
+ if err := loadAndAssign(&newObjects); err != nil {
+ return objects, err
+ }
+
+ objects = ebpf.BpfObjects{
+ BpfPrograms: ebpf.BpfPrograms{
+ TcEgressFlowParse: newObjects.TcEgressFlowParse,
+ TcIngressFlowParse: newObjects.TcIngressFlowParse,
+ TcxEgressFlowParse: newObjects.TcxEgressFlowParse,
+ TcxIngressFlowParse: newObjects.TcxIngressFlowParse,
+ TcEgressPcaParse: newObjects.TcEgressPcaParse,
+ TcIngressPcaParse: newObjects.TcIngressPcaParse,
+ TcxEgressPcaParse: newObjects.TcxEgressPcaParse,
+ TcxIngressPcaParse: newObjects.TcxIngressPcaParse,
+ TrackNatManipPkt: newObjects.TrackNatManipPkt,
+ TcpRcvKprobe: nil,
+ TcpRcvFentry: nil,
+ KfreeSkb: nil,
+ RhNetworkEventsMonitoring: nil,
+ },
+ BpfMaps: ebpf.BpfMaps{
+ DirectFlows: newObjects.DirectFlows,
+ AggregatedFlows: newObjects.AggregatedFlows,
+ DnsFlows: newObjects.DnsFlows,
+ FilterMap: newObjects.FilterMap,
+ GlobalCounters: newObjects.GlobalCounters,
+ },
+ }
+
+ case oldKernel:
+ type newBpfPrograms struct {
+ TcEgressFlowParse *cilium.Program `ebpf:"tc_egress_flow_parse"`
+ TcIngressFlowParse *cilium.Program `ebpf:"tc_ingress_flow_parse"`
+ TcxEgressFlowParse *cilium.Program `ebpf:"tcx_egress_flow_parse"`
+ TcxIngressFlowParse *cilium.Program `ebpf:"tcx_ingress_flow_parse"`
+ TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"`
+ TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"`
+ TCPRcvKprobe *cilium.Program `ebpf:"tcp_rcv_kprobe"`
+ TrackNatManipPkt *cilium.Program `ebpf:"track_nat_manip_pkt"`
+ }
+ type newBpfObjects struct {
+ newBpfPrograms
+ ebpf.BpfMaps
+ }
+ var newObjects newBpfObjects
+ removeCommonHooks()
+ delete(spec.Programs, tcpFentryHook)
+
+ if err := loadAndAssign(&newObjects); err != nil {
+ return objects, err
+ }
+
+ objects = ebpf.BpfObjects{
+ BpfPrograms: ebpf.BpfPrograms{
+ TcEgressFlowParse: newObjects.TcEgressFlowParse,
+ TcIngressFlowParse: newObjects.TcIngressFlowParse,
+ TcxEgressFlowParse: newObjects.TcxEgressFlowParse,
+ TcxIngressFlowParse: newObjects.TcxIngressFlowParse,
+ TcEgressPcaParse: newObjects.TcEgressPcaParse,
+ TcIngressPcaParse: newObjects.TcIngressPcaParse,
+ TcxEgressPcaParse: newObjects.TcxEgressPcaParse,
+ TcxIngressPcaParse: newObjects.TcxIngressPcaParse,
+ TcpRcvKprobe: newObjects.TCPRcvKprobe,
+ TrackNatManipPkt: newObjects.TrackNatManipPkt,
+ TcpRcvFentry: nil,
+ KfreeSkb: nil,
+ RhNetworkEventsMonitoring: nil,
+ },
+ BpfMaps: ebpf.BpfMaps{
+ DirectFlows: newObjects.DirectFlows,
+ AggregatedFlows: newObjects.AggregatedFlows,
+ DnsFlows: newObjects.DnsFlows,
+ FilterMap: newObjects.FilterMap,
+ GlobalCounters: newObjects.GlobalCounters,
+ },
+ }
+
+ case rtKernel:
+ type newBpfPrograms struct {
+ TcEgressFlowParse *cilium.Program `ebpf:"tc_egress_flow_parse"`
+ TcIngressFlowParse *cilium.Program `ebpf:"tc_ingress_flow_parse"`
+ TcxEgressFlowParse *cilium.Program `ebpf:"tcx_egress_flow_parse"`
+ TcxIngressFlowParse *cilium.Program `ebpf:"tcx_ingress_flow_parse"`
+ TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"`
+ TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"`
+ TCPRcvFentry *cilium.Program `ebpf:"tcp_rcv_fentry"`
+ TrackNatManipPkt *cilium.Program `ebpf:"track_nat_manip_pkt"`
+ }
+ type newBpfObjects struct {
+ newBpfPrograms
+ ebpf.BpfMaps
+ }
+ var newObjects newBpfObjects
+ removeCommonHooks()
+ delete(spec.Programs, tcpRcvKprobe)
+
+ if err := loadAndAssign(&newObjects); err != nil {
+ return objects, err
+ }
+
+ objects = ebpf.BpfObjects{
+ BpfPrograms: ebpf.BpfPrograms{
+ TcEgressFlowParse: newObjects.TcEgressFlowParse,
+ TcIngressFlowParse: newObjects.TcIngressFlowParse,
+ TcxEgressFlowParse: newObjects.TcxEgressFlowParse,
+ TcxIngressFlowParse: newObjects.TcxIngressFlowParse,
+ TcEgressPcaParse: newObjects.TcEgressPcaParse,
+ TcIngressPcaParse: newObjects.TcIngressPcaParse,
+ TcxEgressPcaParse: newObjects.TcxEgressPcaParse,
+ TcxIngressPcaParse: newObjects.TcxIngressPcaParse,
+ TcpRcvFentry: newObjects.TCPRcvFentry,
+ TrackNatManipPkt: newObjects.TrackNatManipPkt,
+ TcpRcvKprobe: nil,
+ KfreeSkb: nil,
+ RhNetworkEventsMonitoring: nil,
+ },
+ BpfMaps: ebpf.BpfMaps{
+ DirectFlows: newObjects.DirectFlows,
+ AggregatedFlows: newObjects.AggregatedFlows,
+ DnsFlows: newObjects.DnsFlows,
+ FilterMap: newObjects.FilterMap,
+ GlobalCounters: newObjects.GlobalCounters,
+ },
+ }
+
+ case !supportNetworkEvents:
+ type newBpfPrograms struct {
+ TcEgressFlowParse *cilium.Program `ebpf:"tc_egress_flow_parse"`
+ TcIngressFlowParse *cilium.Program `ebpf:"tc_ingress_flow_parse"`
+ TcxEgressFlowParse *cilium.Program `ebpf:"tcx_egress_flow_parse"`
+ TcxIngressFlowParse *cilium.Program `ebpf:"tcx_ingress_flow_parse"`
+ TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"`
+ TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"`
+ TCPRcvFentry *cilium.Program `ebpf:"tcp_rcv_fentry"`
+ TCPRcvKprobe *cilium.Program `ebpf:"tcp_rcv_kprobe"`
+ KfreeSkb *cilium.Program `ebpf:"kfree_skb"`
+ TrackNatManipPkt *cilium.Program `ebpf:"track_nat_manip_pkt"`
+ }
+ type newBpfObjects struct {
+ newBpfPrograms
+ ebpf.BpfMaps
+ }
+ var newObjects newBpfObjects
+ delete(spec.Programs, rhNetworkEventsMonitoringHook)
+
+ if err := loadAndAssign(&newObjects); err != nil {
+ return objects, err
+ }
+
+ objects = ebpf.BpfObjects{
+ BpfPrograms: ebpf.BpfPrograms{
+ TcEgressFlowParse: newObjects.TcEgressFlowParse,
+ TcIngressFlowParse: newObjects.TcIngressFlowParse,
+ TcxEgressFlowParse: newObjects.TcxEgressFlowParse,
+ TcxIngressFlowParse: newObjects.TcxIngressFlowParse,
+ TcEgressPcaParse: newObjects.TcEgressPcaParse,
+ TcIngressPcaParse: newObjects.TcIngressPcaParse,
+ TcxEgressPcaParse: newObjects.TcxEgressPcaParse,
+ TcxIngressPcaParse: newObjects.TcxIngressPcaParse,
+ TcpRcvFentry: newObjects.TCPRcvFentry,
+ TcpRcvKprobe: newObjects.TCPRcvKprobe,
+ KfreeSkb: newObjects.KfreeSkb,
+ TrackNatManipPkt: newObjects.TrackNatManipPkt,
+ RhNetworkEventsMonitoring: nil,
+ },
+ BpfMaps: ebpf.BpfMaps{
+ DirectFlows: newObjects.DirectFlows,
+ AggregatedFlows: newObjects.AggregatedFlows,
+ DnsFlows: newObjects.DnsFlows,
+ FilterMap: newObjects.FilterMap,
+ GlobalCounters: newObjects.GlobalCounters,
+ },
+ }
+
+ default:
+ if err := loadAndAssign(&objects); err != nil {
+ return objects, err
+ }
+ }
+
+ // Release cached kernel BTF memory
+ btf.FlushKernelSpec()
+
+ return objects, nil
+}
+
+// PacketFetcher provides access to packets from kernel space (via the PacketRecord perf buffer)
+type PacketFetcher struct {
+ objects *ebpf.BpfObjects
+ qdiscs map[ifaces.Interface]*netlink.GenericQdisc
+ egressFilters map[ifaces.Interface]*netlink.BpfFilter
+ ingressFilters map[ifaces.Interface]*netlink.BpfFilter
+ perfReader *perf.Reader
+ cacheMaxSize int
+ enableIngress bool
+ enableEgress bool
+ egressTCXLink map[ifaces.Interface]link.Link
+ ingressTCXLink map[ifaces.Interface]link.Link
+ lookupAndDeleteSupported bool
+}
+
+func NewPacketFetcher(cfg *FlowFetcherConfig) (*PacketFetcher, error) {
+ if err := rlimit.RemoveMemlock(); err != nil {
+ log.WithError(err).
+			Warn("can't remove memlock. The agent might not be able to start eBPF programs")
+ }
+
+ objects := ebpf.BpfObjects{}
+ spec, err := ebpf.LoadBpf()
+ if err != nil {
+ return nil, err
+ }
+ pcaEnable := 0
+ if cfg.EnablePCA {
+ pcaEnable = 1
+ }
+
+ if err := spec.RewriteConstants(map[string]interface{}{
+ constSampling: uint32(cfg.Sampling),
+ constPcaEnable: uint8(pcaEnable),
+ }); err != nil {
+ return nil, fmt.Errorf("rewriting BPF constants definition: %w", err)
+ }
+
+ type pcaBpfPrograms struct {
+ TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"`
+ TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"`
+ TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"`
+ TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"`
+ }
+ type newBpfObjects struct {
+ pcaBpfPrograms
+ ebpf.BpfMaps
+ }
+ var newObjects newBpfObjects
+ delete(spec.Programs, pktDropHook)
+ delete(spec.Programs, rhNetworkEventsMonitoringHook)
+ delete(spec.Programs, tcpRcvKprobe)
+ delete(spec.Programs, tcpFentryHook)
+ delete(spec.Programs, aggregatedFlowsMap)
+ delete(spec.Programs, additionalFlowMetrics)
+ delete(spec.Programs, constSampling)
+ delete(spec.Programs, constTraceMessages)
+ delete(spec.Programs, constEnableDNSTracking)
+ delete(spec.Programs, constDNSTrackingPort)
+ delete(spec.Programs, constEnableRtt)
+ delete(spec.Programs, constEnableFlowFiltering)
+ delete(spec.Programs, constEnableNetworkEventsMonitoring)
+ delete(spec.Programs, constNetworkEventsMonitoringGroupID)
+
+ if err := spec.LoadAndAssign(&newObjects, nil); err != nil {
+ var ve *cilium.VerifierError
+ if errors.As(err, &ve) {
+ // Using %+v will print the whole verifier error, not just the last
+ // few lines.
+ plog.Infof("Verifier error: %+v", ve)
+ }
+ return nil, fmt.Errorf("loading and assigning BPF objects: %w", err)
+ }
+
+ objects = ebpf.BpfObjects{
+ BpfPrograms: ebpf.BpfPrograms{
+ TcEgressPcaParse: newObjects.TcEgressPcaParse,
+ TcIngressPcaParse: newObjects.TcIngressPcaParse,
+ TcxEgressPcaParse: newObjects.TcxEgressPcaParse,
+ TcxIngressPcaParse: newObjects.TcxIngressPcaParse,
+ TcEgressFlowParse: nil,
+ TcIngressFlowParse: nil,
+ TcxEgressFlowParse: nil,
+ TcxIngressFlowParse: nil,
+ TcpRcvFentry: nil,
+ TcpRcvKprobe: nil,
+ KfreeSkb: nil,
+ RhNetworkEventsMonitoring: nil,
+ },
+ BpfMaps: ebpf.BpfMaps{
+ PacketRecord: newObjects.PacketRecord,
+ FilterMap: newObjects.FilterMap,
+ },
+ }
+
+ f := NewFilter(&objects, cfg.FilterConfig)
+ if err := f.ProgramFilter(); err != nil {
+ return nil, fmt.Errorf("programming flow filter: %w", err)
+ }
+
+	// read packets from the ingress+egress perf array
+ packets, err := perf.NewReader(objects.PacketRecord, os.Getpagesize())
+ if err != nil {
+ return nil, fmt.Errorf("accessing to perf: %w", err)
+ }
+
+ return &PacketFetcher{
+ objects: &objects,
+ perfReader: packets,
+ egressFilters: map[ifaces.Interface]*netlink.BpfFilter{},
+ ingressFilters: map[ifaces.Interface]*netlink.BpfFilter{},
+ qdiscs: map[ifaces.Interface]*netlink.GenericQdisc{},
+ cacheMaxSize: cfg.CacheMaxSize,
+ enableIngress: cfg.EnableIngress,
+ enableEgress: cfg.EnableEgress,
+ egressTCXLink: map[ifaces.Interface]link.Link{},
+ ingressTCXLink: map[ifaces.Interface]link.Link{},
+ lookupAndDeleteSupported: true, // this will be turned off later if found to be not supported
+ }, nil
+}
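+
+// Illustrative usage sketch (not part of the upstream code): a minimal capture loop with the
+// PacketFetcher. The "cfg", "iface", and "handle" identifiers below are hypothetical.
+//
+//	pf, err := NewPacketFetcher(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	if err := pf.Register(iface); err != nil {
+//		return err
+//	}
+//	for {
+//		rec, err := pf.ReadPerf()
+//		if err != nil {
+//			break
+//		}
+//		handle(rec.RawSample)
+//	}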
+
+func registerInterface(iface ifaces.Interface) (*netlink.GenericQdisc, netlink.Link, error) {
+ ilog := plog.WithField("iface", iface)
+ handle, err := netlink.NewHandleAt(iface.NetNS)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create handle for netns (%s): %w", iface.NetNS.String(), err)
+ }
+ defer handle.Close()
+
+	// Look up the network device for this interface index within its namespace
+ ipvlan, err := handle.LinkByIndex(iface.Index)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to lookup ipvlan device %d (%s): %w", iface.Index, iface.Name, err)
+ }
+ qdiscAttrs := netlink.QdiscAttrs{
+ LinkIndex: ipvlan.Attrs().Index,
+ Handle: netlink.MakeHandle(0xffff, 0),
+ Parent: netlink.HANDLE_CLSACT,
+ }
+ qdisc := &netlink.GenericQdisc{
+ QdiscAttrs: qdiscAttrs,
+ QdiscType: qdiscType,
+ }
+ if err := handle.QdiscDel(qdisc); err == nil {
+ ilog.Warn("qdisc clsact already existed. Deleted it")
+ }
+ if err := handle.QdiscAdd(qdisc); err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ ilog.WithError(err).Warn("qdisc clsact already exists. Ignoring")
+ } else {
+ return nil, nil, fmt.Errorf("failed to create clsact qdisc on %d (%s): %w", iface.Index, iface.Name, err)
+ }
+ }
+ return qdisc, ipvlan, nil
+}
+
+func (p *PacketFetcher) UnRegister(iface ifaces.Interface) error {
+ // qdiscs, ingress and egress filters are automatically deleted so we don't need to
+ // specifically detach them from the ebpfFetcher
+ return unregister(iface)
+}
+
+func (p *PacketFetcher) Register(iface ifaces.Interface) error {
+ qdisc, ipvlan, err := registerInterface(iface)
+ if err != nil {
+ return err
+ }
+ p.qdiscs[iface] = qdisc
+
+ if err := p.registerEgress(iface, ipvlan); err != nil {
+ return err
+ }
+ return p.registerIngress(iface, ipvlan)
+}
+
+func (p *PacketFetcher) DetachTCX(iface ifaces.Interface) error {
+ ilog := log.WithField("iface", iface)
+ if iface.NetNS != netns.None() {
+ originalNs, err := netns.Get()
+ if err != nil {
+ return fmt.Errorf("PCA failed to get current netns: %w", err)
+ }
+ defer func() {
+ if err := netns.Set(originalNs); err != nil {
+ ilog.WithError(err).Error("PCA failed to set netns back")
+ }
+ originalNs.Close()
+ }()
+ if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil {
+ return fmt.Errorf("PCA failed to setns to %s: %w", iface.NetNS, err)
+ }
+ }
+ if p.enableEgress {
+ if l := p.egressTCXLink[iface]; l != nil {
+ if err := l.Close(); err != nil {
+ return fmt.Errorf("TCX: failed to close egress link: %w", err)
+ }
+			ilog.WithField("interface", iface.Name).Debug("successfully detached egress TCX hook")
+ } else {
+ return fmt.Errorf("egress link does not support TCX hook")
+ }
+ }
+
+ if p.enableIngress {
+ if l := p.ingressTCXLink[iface]; l != nil {
+ if err := l.Close(); err != nil {
+ return fmt.Errorf("TCX: failed to close ingress link: %w", err)
+ }
+			ilog.WithField("interface", iface.Name).Debug("successfully detached ingress TCX hook")
+ } else {
+ return fmt.Errorf("ingress link does not support TCX hook")
+ }
+ }
+ return nil
+}
+
+func (p *PacketFetcher) AttachTCX(iface ifaces.Interface) error {
+ ilog := log.WithField("iface", iface)
+ if iface.NetNS != netns.None() {
+ originalNs, err := netns.Get()
+ if err != nil {
+ return fmt.Errorf("PCA failed to get current netns: %w", err)
+ }
+ defer func() {
+ if err := netns.Set(originalNs); err != nil {
+ ilog.WithError(err).Error("PCA failed to set netns back")
+ }
+ originalNs.Close()
+ }()
+ if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil {
+ return fmt.Errorf("PCA failed to setns to %s: %w", iface.NetNS, err)
+ }
+ }
+
+ if p.enableEgress {
+ egrLink, err := link.AttachTCX(link.TCXOptions{
+ Program: p.objects.BpfPrograms.TcxEgressPcaParse,
+ Attach: cilium.AttachTCXEgress,
+ Interface: iface.Index,
+ })
+ if err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ // The interface already has a TCX egress hook
+				log.WithField("iface", iface.Name).Debug("interface already has a TCX PCA egress hook. Ignoring")
+ } else {
+ return fmt.Errorf("failed to attach PCA TCX egress: %w", err)
+ }
+ }
+ p.egressTCXLink[iface] = egrLink
+		ilog.WithField("interface", iface.Name).Debug("successfully attached PCA egress TCX hook")
+ }
+
+ if p.enableIngress {
+ ingLink, err := link.AttachTCX(link.TCXOptions{
+ Program: p.objects.BpfPrograms.TcxIngressPcaParse,
+ Attach: cilium.AttachTCXIngress,
+ Interface: iface.Index,
+ })
+ if err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ // The interface already has a TCX ingress hook
+				log.WithField("iface", iface.Name).Debug("interface already has a TCX PCA ingress hook. Ignoring")
+ } else {
+ return fmt.Errorf("failed to attach PCA TCX ingress: %w", err)
+ }
+ }
+ p.ingressTCXLink[iface] = ingLink
+		ilog.WithField("interface", iface.Name).Debug("successfully attached PCA ingress TCX hook")
+ }
+
+ return nil
+}
+
+func fetchEgressEvents(iface ifaces.Interface, ipvlan netlink.Link, parser *cilium.Program, name string) (*netlink.BpfFilter, error) {
+ ilog := plog.WithField("iface", iface)
+ egressAttrs := netlink.FilterAttrs{
+ LinkIndex: ipvlan.Attrs().Index,
+ Parent: netlink.HANDLE_MIN_EGRESS,
+ Handle: netlink.MakeHandle(0, 1),
+ Protocol: 3,
+ Priority: 1,
+ }
+ egressFilter := &netlink.BpfFilter{
+ FilterAttrs: egressAttrs,
+ Fd: parser.FD(),
+ Name: "tc/" + name,
+ DirectAction: true,
+ }
+ if err := netlink.FilterDel(egressFilter); err == nil {
+ ilog.Warn("egress filter already existed. Deleted it")
+ }
+ if err := netlink.FilterAdd(egressFilter); err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ ilog.WithError(err).Warn("egress filter already exists. Ignoring")
+ } else {
+ return nil, fmt.Errorf("failed to create egress filter: %w", err)
+ }
+ }
+ return egressFilter, nil
+}
+
+func (p *PacketFetcher) registerEgress(iface ifaces.Interface, ipvlan netlink.Link) error {
+ egressFilter, err := fetchEgressEvents(iface, ipvlan, p.objects.TcEgressPcaParse, "tc_egress_pca_parse")
+ if err != nil {
+ return err
+ }
+
+ p.egressFilters[iface] = egressFilter
+ return nil
+}
+
+func fetchIngressEvents(iface ifaces.Interface, ipvlan netlink.Link, parser *cilium.Program, name string) (*netlink.BpfFilter, error) {
+ ilog := plog.WithField("iface", iface)
+ ingressAttrs := netlink.FilterAttrs{
+ LinkIndex: ipvlan.Attrs().Index,
+ Parent: netlink.HANDLE_MIN_INGRESS,
+ Handle: netlink.MakeHandle(0, 1),
+ Protocol: 3,
+ Priority: 1,
+ }
+ ingressFilter := &netlink.BpfFilter{
+ FilterAttrs: ingressAttrs,
+ Fd: parser.FD(),
+ Name: "tc/" + name,
+ DirectAction: true,
+ }
+ if err := netlink.FilterDel(ingressFilter); err == nil {
+		ilog.Warn("ingress filter already existed. Deleted it")
+ }
+ if err := netlink.FilterAdd(ingressFilter); err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ ilog.WithError(err).Warn("ingress filter already exists. Ignoring")
+ } else {
+			return nil, fmt.Errorf("failed to create ingress filter: %w", err)
+ }
+ }
+ return ingressFilter, nil
+}
+
+func (p *PacketFetcher) registerIngress(iface ifaces.Interface, ipvlan netlink.Link) error {
+ ingressFilter, err := fetchIngressEvents(iface, ipvlan, p.objects.TcIngressPcaParse, "tc_ingress_pca_parse")
+ if err != nil {
+ return err
+ }
+
+ p.ingressFilters[iface] = ingressFilter
+ return nil
+}
+
+// Close releases the eBPF fetcher resources.
+// We don't need a "Close(iface)" method because the filters and qdiscs
+// are automatically removed when the interface goes down.
+func (p *PacketFetcher) Close() error {
+ log.Debug("unregistering eBPF objects")
+
+ var errs []error
+ if p.perfReader != nil {
+ if err := p.perfReader.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if p.objects != nil {
+ if err := p.objects.TcEgressPcaParse.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := p.objects.TcIngressPcaParse.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := p.objects.TcxEgressPcaParse.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := p.objects.TcxIngressPcaParse.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := p.objects.PacketRecord.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ p.objects = nil
+ }
+ for iface, ef := range p.egressFilters {
+ log.WithField("interface", iface).Debug("deleting egress filter")
+ if err := netlink.FilterDel(ef); err != nil {
+ errs = append(errs, fmt.Errorf("deleting egress filter: %w", err))
+ }
+ }
+ p.egressFilters = map[ifaces.Interface]*netlink.BpfFilter{}
+ for iface, igf := range p.ingressFilters {
+ log.WithField("interface", iface).Debug("deleting ingress filter")
+ if err := netlink.FilterDel(igf); err != nil {
+ errs = append(errs, fmt.Errorf("deleting ingress filter: %w", err))
+ }
+ }
+ p.ingressFilters = map[ifaces.Interface]*netlink.BpfFilter{}
+ for iface, qd := range p.qdiscs {
+ log.WithField("interface", iface).Debug("deleting Qdisc")
+ if err := netlink.QdiscDel(qd); err != nil {
+ errs = append(errs, fmt.Errorf("deleting qdisc: %w", err))
+ }
+ }
+ p.qdiscs = map[ifaces.Interface]*netlink.GenericQdisc{}
+
+ for iface, l := range p.egressTCXLink {
+ log := log.WithField("interface", iface)
+ log.Debug("detach egress TCX hook")
+ l.Close()
+
+ }
+ p.egressTCXLink = map[ifaces.Interface]link.Link{}
+ for iface, l := range p.ingressTCXLink {
+ log := log.WithField("interface", iface)
+ log.Debug("detach ingress TCX hook")
+ l.Close()
+ }
+ p.ingressTCXLink = map[ifaces.Interface]link.Link{}
+
+	if len(errs) == 0 {
+		return nil
+	}
+
+ var errStrings []string
+ for _, err := range errs {
+ errStrings = append(errStrings, err.Error())
+ }
+ return errors.New(`errors: "` + strings.Join(errStrings, `", "`) + `"`)
+}
+
+func (p *PacketFetcher) ReadPerf() (perf.Record, error) {
+ return p.perfReader.Read()
+}
+
+func (p *PacketFetcher) LookupAndDeleteMap(met *metrics.Metrics) map[int][]*byte {
+ if !p.lookupAndDeleteSupported {
+ return p.legacyLookupAndDeleteMap(met)
+ }
+
+ packetMap := p.objects.PacketRecord
+ iterator := packetMap.Iterate()
+ packets := make(map[int][]*byte, p.cacheMaxSize)
+ var id int
+ var ids []int
+ var packet []*byte
+
+ // First, get all ids and ignore content (we need lookup+delete to be atomic)
+ for iterator.Next(&id, &packet) {
+ ids = append(ids, id)
+ }
+
+ // Run the atomic Lookup+Delete; if new ids have been inserted in the meantime, they'll be fetched next time
+ for i, id := range ids {
+ if err := packetMap.LookupAndDelete(&id, &packet); err != nil {
+ if i == 0 && errors.Is(err, cilium.ErrNotSupported) {
+ log.WithError(err).Warnf("switching to legacy mode")
+ p.lookupAndDeleteSupported = false
+ return p.legacyLookupAndDeleteMap(met)
+ }
+ log.WithError(err).WithField("packetID", id).Warnf("couldn't delete entry")
+ met.Errors.WithErrorName("pkt-fetcher", "CannotDeleteEntry").Inc()
+ }
+ packets[id] = packet
+ }
+
+ return packets
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer_legacy.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer_legacy.go
new file mode 100644
index 000000000..d573146a5
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer_legacy.go
@@ -0,0 +1,58 @@
+package tracer
+
+import (
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+)
+
+// This file contains legacy implementations kept for old kernels
+
+func (m *FlowFetcher) legacyLookupAndDeleteMap(met *metrics.Metrics) map[ebpf.BpfFlowId]model.BpfFlowContent {
+ flowMap := m.objects.AggregatedFlows
+
+ iterator := flowMap.Iterate()
+ var flows = make(map[ebpf.BpfFlowId]model.BpfFlowContent, m.cacheMaxSize)
+ var id ebpf.BpfFlowId
+ var metrics []ebpf.BpfFlowMetrics
+ count := 0
+
+	// Deleting while iterating is really bad for performance (like, really!) as it causes the same key to be seen multiple times
+ // This is solved in >=4.20 kernels with LookupAndDelete
+ for iterator.Next(&id, &metrics) {
+ count++
+ if err := flowMap.Delete(id); err != nil {
+ log.WithError(err).WithField("flowId", id).Warnf("couldn't delete flow entry")
+ met.Errors.WithErrorName("flow-fetcher-legacy", "CannotDeleteFlows").Inc()
+ }
+		// We observed that the eBPF PerCPU map might insert the same key multiple times
+		// (probably due to race conditions), so we need to re-join the metrics in userspace
+ aggr := model.BpfFlowContent{}
+ for i := range metrics {
+ aggr.AccumulateBase(&metrics[i])
+ }
+ flows[id] = aggr
+ }
+ met.BufferSizeGauge.WithBufferName("hashmap-legacy-total").Set(float64(count))
+ met.BufferSizeGauge.WithBufferName("hashmap-legacy-unique").Set(float64(len(flows)))
+
+ m.ReadGlobalCounter(met)
+ return flows
+}
+
+func (p *PacketFetcher) legacyLookupAndDeleteMap(met *metrics.Metrics) map[int][]*byte {
+ packetMap := p.objects.PacketRecord
+ iterator := packetMap.Iterate()
+ packets := make(map[int][]*byte, p.cacheMaxSize)
+
+ var id int
+ var packet []*byte
+ for iterator.Next(&id, &packet) {
+ if err := packetMap.Delete(id); err != nil {
+			log.WithError(err).WithField("packetID", id).Warnf("couldn't delete entry")
+ met.Errors.WithErrorName("pkt-fetcher-legacy", "CannotDeleteEntry").Inc()
+ }
+ packets[id] = append(packets[id], packet...)
+ }
+ return packets
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets/packets.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets/packets.go
new file mode 100644
index 000000000..af3f4d2f6
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets/packets.go
@@ -0,0 +1,61 @@
+package packets
+
+import (
+ "encoding/binary"
+ "fmt"
+ "time"
+
+ "github.com/gopacket/gopacket"
+ "github.com/gopacket/gopacket/layers"
+)
+
+// PCAP magic number; readers use it to detect the byte order of the file.
+const pcapMagicNumber = 0xA1B2C3D4
+const versionMajor = 2
+const versionMinor = 4
+const nanosPerMicro = 1000
+
+func GetPCAPFileHeader(snaplen uint32, linktype layers.LinkType) []byte {
+ var buf [24]byte
+ binary.LittleEndian.PutUint32(buf[0:4], pcapMagicNumber)
+ binary.LittleEndian.PutUint16(buf[4:6], versionMajor)
+ binary.LittleEndian.PutUint16(buf[6:8], versionMinor)
+ binary.LittleEndian.PutUint32(buf[16:20], snaplen)
+ binary.LittleEndian.PutUint32(buf[20:24], uint32(linktype))
+ return buf[:]
+}
+
+func GetPacketHeader(ci gopacket.CaptureInfo) ([]byte, error) {
+ var buf [16]byte
+ t := ci.Timestamp
+ if t.IsZero() {
+ return nil, fmt.Errorf("incoming packet does not have a timestamp. Ignoring packet")
+ }
+ secs := t.Unix()
+ usecs := t.Nanosecond() / nanosPerMicro
+ binary.LittleEndian.PutUint32(buf[0:4], uint32(secs))
+ binary.LittleEndian.PutUint32(buf[4:8], uint32(usecs))
+ binary.LittleEndian.PutUint32(buf[8:12], uint32(ci.CaptureLength))
+ binary.LittleEndian.PutUint32(buf[12:16], uint32(ci.Length))
+ return buf[:], nil
+}
+
+func GetPacketBytesWithHeader(time time.Time, data []byte) ([]byte, error) {
+ ci := gopacket.CaptureInfo{
+ Timestamp: time,
+ CaptureLength: len(data),
+ Length: len(data),
+ }
+ if ci.CaptureLength != len(data) {
+ return nil, fmt.Errorf("capture length %d does not match data length %d", ci.CaptureLength, len(data))
+ }
+ if ci.CaptureLength > ci.Length {
+ return nil, fmt.Errorf("invalid capture info %+v: capture length > length", ci)
+ }
+ b, err := GetPacketHeader(ci)
+ if err != nil {
+ return nil, fmt.Errorf("error writing packet header: %w", err)
+ }
+ // append 16 byte packet header & data all at once
+ return append(b, data...), nil
+}
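+
+// Illustrative usage sketch (not part of the upstream code): writing a minimal little-endian PCAP
+// file with these helpers. The output file, the 65535 snaplen, the Ethernet link type, and the raw
+// frame "pkt" below are assumptions.
+//
+//	out, _ := os.Create("capture.pcap")
+//	defer out.Close()
+//	out.Write(GetPCAPFileHeader(65535, layers.LinkTypeEthernet))
+//	if rec, err := GetPacketBytesWithHeader(time.Now(), pkt); err == nil {
+//		out.Write(rec)
+//	}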
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/utils.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/utils.go
new file mode 100644
index 000000000..f0cb22cf3
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/utils.go
@@ -0,0 +1,16 @@
+package utils
+
+import (
+ "fmt"
+ "net"
+)
+
+// GetSocket returns the socket address string in the correct format for the address family (IPv6 addresses are bracketed)
+func GetSocket(hostIP string, hostPort int) string {
+ socket := fmt.Sprintf("%s:%d", hostIP, hostPort)
+ ipAddr := net.ParseIP(hostIP)
+ if ipAddr != nil && ipAddr.To4() == nil {
+ socket = fmt.Sprintf("[%s]:%d", hostIP, hostPort)
+ }
+ return socket
+}
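+
+// Illustrative examples (not part of the upstream code):
+//
+//	GetSocket("10.0.0.1", 2055)    // "10.0.0.1:2055"
+//	GetSocket("2001:db8::1", 2055) // "[2001:db8::1]:2055"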
diff --git a/vendor/github.com/netsampler/goflow2/LICENSE b/vendor/github.com/netsampler/goflow2/LICENSE
new file mode 100644
index 000000000..a2fee077c
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2021, NetSampler
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/netsampler/goflow2/decoders/decoder.go b/vendor/github.com/netsampler/goflow2/decoders/decoder.go
new file mode 100644
index 000000000..8eebaf3bd
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/decoder.go
@@ -0,0 +1,115 @@
+package decoder
+
+import (
+ "time"
+)
+
+type Message interface{}
+type MessageDecoded interface{}
+
+type DecoderFunc func(Message interface{}) error
+type DoneCallback func(string, int, time.Time, time.Time)
+type ErrorCallback func(string, int, time.Time, time.Time, error)
+
+// Worker structure
+type Worker struct {
+ Id int
+ DecoderParams DecoderParams
+ WorkerPool chan chan Message
+ Name string
+ InMsg chan Message
+ Quit chan bool
+}
+
+// Create a worker and add it to the pool.
+func CreateWorker(workerPool chan chan Message, decoderParams DecoderParams, id int, name string) Worker {
+ return Worker{
+ Id: id,
+ DecoderParams: decoderParams,
+ WorkerPool: workerPool,
+ Name: name,
+ InMsg: make(chan Message),
+ Quit: make(chan bool),
+ }
+}
+
+// Start launches a goroutine that processes incoming messages.
+// The worker adds its input channel of messages to decode to the pool.
+func (w Worker) Start() {
+ go func() {
+ //log.Debugf("Worker %v started", w.Id)
+ for {
+ select {
+ case <-w.Quit:
+ break
+ case w.WorkerPool <- w.InMsg:
+ msg := <-w.InMsg
+ timeTrackStart := time.Now()
+ err := w.DecoderParams.DecoderFunc(msg)
+ timeTrackStop := time.Now()
+
+ if err != nil && w.DecoderParams.ErrorCallback != nil {
+ w.DecoderParams.ErrorCallback(w.Name, w.Id, timeTrackStart, timeTrackStop, err)
+ } else if err == nil && w.DecoderParams.DoneCallback != nil {
+ w.DecoderParams.DoneCallback(w.Name, w.Id, timeTrackStart, timeTrackStop)
+ }
+ }
+ }
+ //log.Debugf("Worker %v done", w.Id)
+ }()
+}
+
+// Stop the worker.
+func (w Worker) Stop() {
+ //log.Debugf("Stopping worker %v", w.Id)
+ w.Quit <- true
+}
+
+// Processor structure
+type Processor struct {
+ workerpool chan chan Message
+ workerlist []Worker
+ DecoderParams DecoderParams
+ Name string
+}
+
+// Decoder structure. Define the function to call and the config specific to the type of packets.
+type DecoderParams struct {
+ DecoderFunc DecoderFunc
+ DoneCallback DoneCallback
+ ErrorCallback ErrorCallback
+}
+
+// CreateProcessor creates a message processor that creates all the workers and sets up the pool.
+func CreateProcessor(numWorkers int, decoderParams DecoderParams, name string) Processor {
+ processor := Processor{
+ workerpool: make(chan chan Message),
+ workerlist: make([]Worker, numWorkers),
+ DecoderParams: decoderParams,
+ Name: name,
+ }
+ for i := 0; i < numWorkers; i++ {
+ worker := CreateWorker(processor.workerpool, decoderParams, i, name)
+ processor.workerlist[i] = worker
+ }
+ return processor
+}
+
+// Start message processor
+func (p Processor) Start() {
+ for _, worker := range p.workerlist {
+ worker.Start()
+ }
+}
+
+func (p Processor) Stop() {
+ for _, worker := range p.workerlist {
+ worker.Stop()
+ }
+}
+
+// Send a message to be decoded to the pool.
+func (p Processor) ProcessMessage(msg Message) {
+ sendChannel := <-p.workerpool
+ sendChannel <- msg
+}
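+
+// Illustrative usage sketch (not part of the upstream code): wiring a Processor with a decoder
+// function. The worker count, the processor name, and the "payload" message below are hypothetical.
+//
+//	p := CreateProcessor(4, DecoderParams{
+//		DecoderFunc: func(msg Message) error {
+//			// decode msg here
+//			return nil
+//		},
+//	}, "NetFlow")
+//	p.Start()
+//	p.ProcessMessage(payload) // handed to the next idle worker
+//	p.Stop()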
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/ipfix.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/ipfix.go
new file mode 100644
index 000000000..954b7d38c
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/ipfix.go
@@ -0,0 +1,989 @@
+package netflow
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ IPFIX_FIELD_Reserved = 0
+ IPFIX_FIELD_octetDeltaCount = 1
+ IPFIX_FIELD_packetDeltaCount = 2
+ IPFIX_FIELD_deltaFlowCount = 3
+ IPFIX_FIELD_protocolIdentifier = 4
+ IPFIX_FIELD_ipClassOfService = 5
+ IPFIX_FIELD_tcpControlBits = 6
+ IPFIX_FIELD_sourceTransportPort = 7
+ IPFIX_FIELD_sourceIPv4Address = 8
+ IPFIX_FIELD_sourceIPv4PrefixLength = 9
+ IPFIX_FIELD_ingressInterface = 10
+ IPFIX_FIELD_destinationTransportPort = 11
+ IPFIX_FIELD_destinationIPv4Address = 12
+ IPFIX_FIELD_destinationIPv4PrefixLength = 13
+ IPFIX_FIELD_egressInterface = 14
+ IPFIX_FIELD_ipNextHopIPv4Address = 15
+ IPFIX_FIELD_bgpSourceAsNumber = 16
+ IPFIX_FIELD_bgpDestinationAsNumber = 17
+ IPFIX_FIELD_bgpNextHopIPv4Address = 18
+ IPFIX_FIELD_postMCastPacketDeltaCount = 19
+ IPFIX_FIELD_postMCastOctetDeltaCount = 20
+ IPFIX_FIELD_flowEndSysUpTime = 21
+ IPFIX_FIELD_flowStartSysUpTime = 22
+ IPFIX_FIELD_postOctetDeltaCount = 23
+ IPFIX_FIELD_postPacketDeltaCount = 24
+ IPFIX_FIELD_minimumIpTotalLength = 25
+ IPFIX_FIELD_maximumIpTotalLength = 26
+ IPFIX_FIELD_sourceIPv6Address = 27
+ IPFIX_FIELD_destinationIPv6Address = 28
+ IPFIX_FIELD_sourceIPv6PrefixLength = 29
+ IPFIX_FIELD_destinationIPv6PrefixLength = 30
+ IPFIX_FIELD_flowLabelIPv6 = 31
+ IPFIX_FIELD_icmpTypeCodeIPv4 = 32
+ IPFIX_FIELD_igmpType = 33
+ IPFIX_FIELD_samplingInterval = 34
+ IPFIX_FIELD_samplingAlgorithm = 35
+ IPFIX_FIELD_flowActiveTimeout = 36
+ IPFIX_FIELD_flowIdleTimeout = 37
+ IPFIX_FIELD_engineType = 38
+ IPFIX_FIELD_engineId = 39
+ IPFIX_FIELD_exportedOctetTotalCount = 40
+ IPFIX_FIELD_exportedMessageTotalCount = 41
+ IPFIX_FIELD_exportedFlowRecordTotalCount = 42
+ IPFIX_FIELD_ipv4RouterSc = 43
+ IPFIX_FIELD_sourceIPv4Prefix = 44
+ IPFIX_FIELD_destinationIPv4Prefix = 45
+ IPFIX_FIELD_mplsTopLabelType = 46
+ IPFIX_FIELD_mplsTopLabelIPv4Address = 47
+ IPFIX_FIELD_samplerId = 48
+ IPFIX_FIELD_samplerMode = 49
+ IPFIX_FIELD_samplerRandomInterval = 50
+ IPFIX_FIELD_classId = 51
+ IPFIX_FIELD_minimumTTL = 52
+ IPFIX_FIELD_maximumTTL = 53
+ IPFIX_FIELD_fragmentIdentification = 54
+ IPFIX_FIELD_postIpClassOfService = 55
+ IPFIX_FIELD_sourceMacAddress = 56
+ IPFIX_FIELD_postDestinationMacAddress = 57
+ IPFIX_FIELD_vlanId = 58
+ IPFIX_FIELD_postVlanId = 59
+ IPFIX_FIELD_ipVersion = 60
+ IPFIX_FIELD_flowDirection = 61
+ IPFIX_FIELD_ipNextHopIPv6Address = 62
+ IPFIX_FIELD_bgpNextHopIPv6Address = 63
+ IPFIX_FIELD_ipv6ExtensionHeaders = 64
+ IPFIX_FIELD_mplsTopLabelStackSection = 70
+ IPFIX_FIELD_mplsLabelStackSection2 = 71
+ IPFIX_FIELD_mplsLabelStackSection3 = 72
+ IPFIX_FIELD_mplsLabelStackSection4 = 73
+ IPFIX_FIELD_mplsLabelStackSection5 = 74
+ IPFIX_FIELD_mplsLabelStackSection6 = 75
+ IPFIX_FIELD_mplsLabelStackSection7 = 76
+ IPFIX_FIELD_mplsLabelStackSection8 = 77
+ IPFIX_FIELD_mplsLabelStackSection9 = 78
+ IPFIX_FIELD_mplsLabelStackSection10 = 79
+ IPFIX_FIELD_destinationMacAddress = 80
+ IPFIX_FIELD_postSourceMacAddress = 81
+ IPFIX_FIELD_interfaceName = 82
+ IPFIX_FIELD_interfaceDescription = 83
+ IPFIX_FIELD_samplerName = 84
+ IPFIX_FIELD_octetTotalCount = 85
+ IPFIX_FIELD_packetTotalCount = 86
+ IPFIX_FIELD_flagsAndSamplerId = 87
+ IPFIX_FIELD_fragmentOffset = 88
+ IPFIX_FIELD_forwardingStatus = 89
+ IPFIX_FIELD_mplsVpnRouteDistinguisher = 90
+ IPFIX_FIELD_mplsTopLabelPrefixLength = 91
+ IPFIX_FIELD_srcTrafficIndex = 92
+ IPFIX_FIELD_dstTrafficIndex = 93
+ IPFIX_FIELD_applicationDescription = 94
+ IPFIX_FIELD_applicationId = 95
+ IPFIX_FIELD_applicationName = 96
+ IPFIX_FIELD_postIpDiffServCodePoint = 98
+ IPFIX_FIELD_multicastReplicationFactor = 99
+ IPFIX_FIELD_className = 100
+ IPFIX_FIELD_classificationEngineId = 101
+ IPFIX_FIELD_layer2packetSectionOffset = 102
+ IPFIX_FIELD_layer2packetSectionSize = 103
+ IPFIX_FIELD_layer2packetSectionData = 104
+ IPFIX_FIELD_bgpNextAdjacentAsNumber = 128
+ IPFIX_FIELD_bgpPrevAdjacentAsNumber = 129
+ IPFIX_FIELD_exporterIPv4Address = 130
+ IPFIX_FIELD_exporterIPv6Address = 131
+ IPFIX_FIELD_droppedOctetDeltaCount = 132
+ IPFIX_FIELD_droppedPacketDeltaCount = 133
+ IPFIX_FIELD_droppedOctetTotalCount = 134
+ IPFIX_FIELD_droppedPacketTotalCount = 135
+ IPFIX_FIELD_flowEndReason = 136
+ IPFIX_FIELD_commonPropertiesId = 137
+ IPFIX_FIELD_observationPointId = 138
+ IPFIX_FIELD_icmpTypeCodeIPv6 = 139
+ IPFIX_FIELD_mplsTopLabelIPv6Address = 140
+ IPFIX_FIELD_lineCardId = 141
+ IPFIX_FIELD_portId = 142
+ IPFIX_FIELD_meteringProcessId = 143
+ IPFIX_FIELD_exportingProcessId = 144
+ IPFIX_FIELD_templateId = 145
+ IPFIX_FIELD_wlanChannelId = 146
+ IPFIX_FIELD_wlanSSID = 147
+ IPFIX_FIELD_flowId = 148
+ IPFIX_FIELD_observationDomainId = 149
+ IPFIX_FIELD_flowStartSeconds = 150
+ IPFIX_FIELD_flowEndSeconds = 151
+ IPFIX_FIELD_flowStartMilliseconds = 152
+ IPFIX_FIELD_flowEndMilliseconds = 153
+ IPFIX_FIELD_flowStartMicroseconds = 154
+ IPFIX_FIELD_flowEndMicroseconds = 155
+ IPFIX_FIELD_flowStartNanoseconds = 156
+ IPFIX_FIELD_flowEndNanoseconds = 157
+ IPFIX_FIELD_flowStartDeltaMicroseconds = 158
+ IPFIX_FIELD_flowEndDeltaMicroseconds = 159
+ IPFIX_FIELD_systemInitTimeMilliseconds = 160
+ IPFIX_FIELD_flowDurationMilliseconds = 161
+ IPFIX_FIELD_flowDurationMicroseconds = 162
+ IPFIX_FIELD_observedFlowTotalCount = 163
+ IPFIX_FIELD_ignoredPacketTotalCount = 164
+ IPFIX_FIELD_ignoredOctetTotalCount = 165
+ IPFIX_FIELD_notSentFlowTotalCount = 166
+ IPFIX_FIELD_notSentPacketTotalCount = 167
+ IPFIX_FIELD_notSentOctetTotalCount = 168
+ IPFIX_FIELD_destinationIPv6Prefix = 169
+ IPFIX_FIELD_sourceIPv6Prefix = 170
+ IPFIX_FIELD_postOctetTotalCount = 171
+ IPFIX_FIELD_postPacketTotalCount = 172
+ IPFIX_FIELD_flowKeyIndicator = 173
+ IPFIX_FIELD_postMCastPacketTotalCount = 174
+ IPFIX_FIELD_postMCastOctetTotalCount = 175
+ IPFIX_FIELD_icmpTypeIPv4 = 176
+ IPFIX_FIELD_icmpCodeIPv4 = 177
+ IPFIX_FIELD_icmpTypeIPv6 = 178
+ IPFIX_FIELD_icmpCodeIPv6 = 179
+ IPFIX_FIELD_udpSourcePort = 180
+ IPFIX_FIELD_udpDestinationPort = 181
+ IPFIX_FIELD_tcpSourcePort = 182
+ IPFIX_FIELD_tcpDestinationPort = 183
+ IPFIX_FIELD_tcpSequenceNumber = 184
+ IPFIX_FIELD_tcpAcknowledgementNumber = 185
+ IPFIX_FIELD_tcpWindowSize = 186
+ IPFIX_FIELD_tcpUrgentPointer = 187
+ IPFIX_FIELD_tcpHeaderLength = 188
+ IPFIX_FIELD_ipHeaderLength = 189
+ IPFIX_FIELD_totalLengthIPv4 = 190
+ IPFIX_FIELD_payloadLengthIPv6 = 191
+ IPFIX_FIELD_ipTTL = 192
+ IPFIX_FIELD_nextHeaderIPv6 = 193
+ IPFIX_FIELD_mplsPayloadLength = 194
+ IPFIX_FIELD_ipDiffServCodePoint = 195
+ IPFIX_FIELD_ipPrecedence = 196
+ IPFIX_FIELD_fragmentFlags = 197
+ IPFIX_FIELD_octetDeltaSumOfSquares = 198
+ IPFIX_FIELD_octetTotalSumOfSquares = 199
+ IPFIX_FIELD_mplsTopLabelTTL = 200
+ IPFIX_FIELD_mplsLabelStackLength = 201
+ IPFIX_FIELD_mplsLabelStackDepth = 202
+ IPFIX_FIELD_mplsTopLabelExp = 203
+ IPFIX_FIELD_ipPayloadLength = 204
+ IPFIX_FIELD_udpMessageLength = 205
+ IPFIX_FIELD_isMulticast = 206
+ IPFIX_FIELD_ipv4IHL = 207
+ IPFIX_FIELD_ipv4Options = 208
+ IPFIX_FIELD_tcpOptions = 209
+ IPFIX_FIELD_paddingOctets = 210
+ IPFIX_FIELD_collectorIPv4Address = 211
+ IPFIX_FIELD_collectorIPv6Address = 212
+ IPFIX_FIELD_exportInterface = 213
+ IPFIX_FIELD_exportProtocolVersion = 214
+ IPFIX_FIELD_exportTransportProtocol = 215
+ IPFIX_FIELD_collectorTransportPort = 216
+ IPFIX_FIELD_exporterTransportPort = 217
+ IPFIX_FIELD_tcpSynTotalCount = 218
+ IPFIX_FIELD_tcpFinTotalCount = 219
+ IPFIX_FIELD_tcpRstTotalCount = 220
+ IPFIX_FIELD_tcpPshTotalCount = 221
+ IPFIX_FIELD_tcpAckTotalCount = 222
+ IPFIX_FIELD_tcpUrgTotalCount = 223
+ IPFIX_FIELD_ipTotalLength = 224
+ IPFIX_FIELD_postNATSourceIPv4Address = 225
+ IPFIX_FIELD_postNATDestinationIPv4Address = 226
+ IPFIX_FIELD_postNAPTSourceTransportPort = 227
+ IPFIX_FIELD_postNAPTDestinationTransportPort = 228
+ IPFIX_FIELD_natOriginatingAddressRealm = 229
+ IPFIX_FIELD_natEvent = 230
+ IPFIX_FIELD_initiatorOctets = 231
+ IPFIX_FIELD_responderOctets = 232
+ IPFIX_FIELD_firewallEvent = 233
+ IPFIX_FIELD_ingressVRFID = 234
+ IPFIX_FIELD_egressVRFID = 235
+ IPFIX_FIELD_VRFname = 236
+ IPFIX_FIELD_postMplsTopLabelExp = 237
+ IPFIX_FIELD_tcpWindowScale = 238
+ IPFIX_FIELD_biflowDirection = 239
+ IPFIX_FIELD_ethernetHeaderLength = 240
+ IPFIX_FIELD_ethernetPayloadLength = 241
+ IPFIX_FIELD_ethernetTotalLength = 242
+ IPFIX_FIELD_dot1qVlanId = 243
+ IPFIX_FIELD_dot1qPriority = 244
+ IPFIX_FIELD_dot1qCustomerVlanId = 245
+ IPFIX_FIELD_dot1qCustomerPriority = 246
+ IPFIX_FIELD_metroEvcId = 247
+ IPFIX_FIELD_metroEvcType = 248
+ IPFIX_FIELD_pseudoWireId = 249
+ IPFIX_FIELD_pseudoWireType = 250
+ IPFIX_FIELD_pseudoWireControlWord = 251
+ IPFIX_FIELD_ingressPhysicalInterface = 252
+ IPFIX_FIELD_egressPhysicalInterface = 253
+ IPFIX_FIELD_postDot1qVlanId = 254
+ IPFIX_FIELD_postDot1qCustomerVlanId = 255
+ IPFIX_FIELD_ethernetType = 256
+ IPFIX_FIELD_postIpPrecedence = 257
+ IPFIX_FIELD_collectionTimeMilliseconds = 258
+ IPFIX_FIELD_exportSctpStreamId = 259
+ IPFIX_FIELD_maxExportSeconds = 260
+ IPFIX_FIELD_maxFlowEndSeconds = 261
+ IPFIX_FIELD_messageMD5Checksum = 262
+ IPFIX_FIELD_messageScope = 263
+ IPFIX_FIELD_minExportSeconds = 264
+ IPFIX_FIELD_minFlowStartSeconds = 265
+ IPFIX_FIELD_opaqueOctets = 266
+ IPFIX_FIELD_sessionScope = 267
+ IPFIX_FIELD_maxFlowEndMicroseconds = 268
+ IPFIX_FIELD_maxFlowEndMilliseconds = 269
+ IPFIX_FIELD_maxFlowEndNanoseconds = 270
+ IPFIX_FIELD_minFlowStartMicroseconds = 271
+ IPFIX_FIELD_minFlowStartMilliseconds = 272
+ IPFIX_FIELD_minFlowStartNanoseconds = 273
+ IPFIX_FIELD_collectorCertificate = 274
+ IPFIX_FIELD_exporterCertificate = 275
+ IPFIX_FIELD_dataRecordsReliability = 276
+ IPFIX_FIELD_observationPointType = 277
+ IPFIX_FIELD_newConnectionDeltaCount = 278
+ IPFIX_FIELD_connectionSumDurationSeconds = 279
+ IPFIX_FIELD_connectionTransactionId = 280
+ IPFIX_FIELD_postNATSourceIPv6Address = 281
+ IPFIX_FIELD_postNATDestinationIPv6Address = 282
+ IPFIX_FIELD_natPoolId = 283
+ IPFIX_FIELD_natPoolName = 284
+ IPFIX_FIELD_anonymizationFlags = 285
+ IPFIX_FIELD_anonymizationTechnique = 286
+ IPFIX_FIELD_informationElementIndex = 287
+ IPFIX_FIELD_p2pTechnology = 288
+ IPFIX_FIELD_tunnelTechnology = 289
+ IPFIX_FIELD_encryptedTechnology = 290
+ IPFIX_FIELD_basicList = 291
+ IPFIX_FIELD_subTemplateList = 292
+ IPFIX_FIELD_subTemplateMultiList = 293
+ IPFIX_FIELD_bgpValidityState = 294
+ IPFIX_FIELD_IPSecSPI = 295
+ IPFIX_FIELD_greKey = 296
+ IPFIX_FIELD_natType = 297
+ IPFIX_FIELD_initiatorPackets = 298
+ IPFIX_FIELD_responderPackets = 299
+ IPFIX_FIELD_observationDomainName = 300
+ IPFIX_FIELD_selectionSequenceId = 301
+ IPFIX_FIELD_selectorId = 302
+ IPFIX_FIELD_informationElementId = 303
+ IPFIX_FIELD_selectorAlgorithm = 304
+ IPFIX_FIELD_samplingPacketInterval = 305
+ IPFIX_FIELD_samplingPacketSpace = 306
+ IPFIX_FIELD_samplingTimeInterval = 307
+ IPFIX_FIELD_samplingTimeSpace = 308
+ IPFIX_FIELD_samplingSize = 309
+ IPFIX_FIELD_samplingPopulation = 310
+ IPFIX_FIELD_samplingProbability = 311
+ IPFIX_FIELD_dataLinkFrameSize = 312
+ IPFIX_FIELD_ipHeaderPacketSection = 313
+ IPFIX_FIELD_ipPayloadPacketSection = 314
+ IPFIX_FIELD_dataLinkFrameSection = 315
+ IPFIX_FIELD_mplsLabelStackSection = 316
+ IPFIX_FIELD_mplsPayloadPacketSection = 317
+ IPFIX_FIELD_selectorIdTotalPktsObserved = 318
+ IPFIX_FIELD_selectorIdTotalPktsSelected = 319
+ IPFIX_FIELD_absoluteError = 320
+ IPFIX_FIELD_relativeError = 321
+ IPFIX_FIELD_observationTimeSeconds = 322
+ IPFIX_FIELD_observationTimeMilliseconds = 323
+ IPFIX_FIELD_observationTimeMicroseconds = 324
+ IPFIX_FIELD_observationTimeNanoseconds = 325
+ IPFIX_FIELD_digestHashValue = 326
+ IPFIX_FIELD_hashIPPayloadOffset = 327
+ IPFIX_FIELD_hashIPPayloadSize = 328
+ IPFIX_FIELD_hashOutputRangeMin = 329
+ IPFIX_FIELD_hashOutputRangeMax = 330
+ IPFIX_FIELD_hashSelectedRangeMin = 331
+ IPFIX_FIELD_hashSelectedRangeMax = 332
+ IPFIX_FIELD_hashDigestOutput = 333
+ IPFIX_FIELD_hashInitialiserValue = 334
+ IPFIX_FIELD_selectorName = 335
+ IPFIX_FIELD_upperCILimit = 336
+ IPFIX_FIELD_lowerCILimit = 337
+ IPFIX_FIELD_confidenceLevel = 338
+ IPFIX_FIELD_informationElementDataType = 339
+ IPFIX_FIELD_informationElementDescription = 340
+ IPFIX_FIELD_informationElementName = 341
+ IPFIX_FIELD_informationElementRangeBegin = 342
+ IPFIX_FIELD_informationElementRangeEnd = 343
+ IPFIX_FIELD_informationElementSemantics = 344
+ IPFIX_FIELD_informationElementUnits = 345
+ IPFIX_FIELD_privateEnterpriseNumber = 346
+ IPFIX_FIELD_virtualStationInterfaceId = 347
+ IPFIX_FIELD_virtualStationInterfaceName = 348
+ IPFIX_FIELD_virtualStationUUID = 349
+ IPFIX_FIELD_virtualStationName = 350
+ IPFIX_FIELD_layer2SegmentId = 351
+ IPFIX_FIELD_layer2OctetDeltaCount = 352
+ IPFIX_FIELD_layer2OctetTotalCount = 353
+ IPFIX_FIELD_ingressUnicastPacketTotalCount = 354
+ IPFIX_FIELD_ingressMulticastPacketTotalCount = 355
+ IPFIX_FIELD_ingressBroadcastPacketTotalCount = 356
+ IPFIX_FIELD_egressUnicastPacketTotalCount = 357
+ IPFIX_FIELD_egressBroadcastPacketTotalCount = 358
+ IPFIX_FIELD_monitoringIntervalStartMilliSeconds = 359
+ IPFIX_FIELD_monitoringIntervalEndMilliSeconds = 360
+ IPFIX_FIELD_portRangeStart = 361
+ IPFIX_FIELD_portRangeEnd = 362
+ IPFIX_FIELD_portRangeStepSize = 363
+ IPFIX_FIELD_portRangeNumPorts = 364
+ IPFIX_FIELD_staMacAddress = 365
+ IPFIX_FIELD_staIPv4Address = 366
+ IPFIX_FIELD_wtpMacAddress = 367
+ IPFIX_FIELD_ingressInterfaceType = 368
+ IPFIX_FIELD_egressInterfaceType = 369
+ IPFIX_FIELD_rtpSequenceNumber = 370
+ IPFIX_FIELD_userName = 371
+ IPFIX_FIELD_applicationCategoryName = 372
+ IPFIX_FIELD_applicationSubCategoryName = 373
+ IPFIX_FIELD_applicationGroupName = 374
+ IPFIX_FIELD_originalFlowsPresent = 375
+ IPFIX_FIELD_originalFlowsInitiated = 376
+ IPFIX_FIELD_originalFlowsCompleted = 377
+ IPFIX_FIELD_distinctCountOfSourceIPAddress = 378
+ IPFIX_FIELD_distinctCountOfDestinationIPAddress = 379
+ IPFIX_FIELD_distinctCountOfSourceIPv4Address = 380
+ IPFIX_FIELD_distinctCountOfDestinationIPv4Address = 381
+ IPFIX_FIELD_distinctCountOfSourceIPv6Address = 382
+ IPFIX_FIELD_distinctCountOfDestinationIPv6Address = 383
+ IPFIX_FIELD_valueDistributionMethod = 384
+ IPFIX_FIELD_rfc3550JitterMilliseconds = 385
+ IPFIX_FIELD_rfc3550JitterMicroseconds = 386
+ IPFIX_FIELD_rfc3550JitterNanoseconds = 387
+ IPFIX_FIELD_dot1qDEI = 388
+ IPFIX_FIELD_dot1qCustomerDEI = 389
+ IPFIX_FIELD_flowSelectorAlgorithm = 390
+ IPFIX_FIELD_flowSelectedOctetDeltaCount = 391
+ IPFIX_FIELD_flowSelectedPacketDeltaCount = 392
+ IPFIX_FIELD_flowSelectedFlowDeltaCount = 393
+ IPFIX_FIELD_selectorIDTotalFlowsObserved = 394
+ IPFIX_FIELD_selectorIDTotalFlowsSelected = 395
+ IPFIX_FIELD_samplingFlowInterval = 396
+ IPFIX_FIELD_samplingFlowSpacing = 397
+ IPFIX_FIELD_flowSamplingTimeInterval = 398
+ IPFIX_FIELD_flowSamplingTimeSpacing = 399
+ IPFIX_FIELD_hashFlowDomain = 400
+ IPFIX_FIELD_transportOctetDeltaCount = 401
+ IPFIX_FIELD_transportPacketDeltaCount = 402
+ IPFIX_FIELD_originalExporterIPv4Address = 403
+ IPFIX_FIELD_originalExporterIPv6Address = 404
+ IPFIX_FIELD_originalObservationDomainId = 405
+ IPFIX_FIELD_intermediateProcessId = 406
+ IPFIX_FIELD_ignoredDataRecordTotalCount = 407
+ IPFIX_FIELD_dataLinkFrameType = 408
+ IPFIX_FIELD_sectionOffset = 409
+ IPFIX_FIELD_sectionExportedOctets = 410
+ IPFIX_FIELD_dot1qServiceInstanceTag = 411
+ IPFIX_FIELD_dot1qServiceInstanceId = 412
+ IPFIX_FIELD_dot1qServiceInstancePriority = 413
+ IPFIX_FIELD_dot1qCustomerSourceMacAddress = 414
+ IPFIX_FIELD_dot1qCustomerDestinationMacAddress = 415
+ IPFIX_FIELD_postLayer2OctetDeltaCount = 417
+ IPFIX_FIELD_postMCastLayer2OctetDeltaCount = 418
+ IPFIX_FIELD_postLayer2OctetTotalCount = 420
+ IPFIX_FIELD_postMCastLayer2OctetTotalCount = 421
+ IPFIX_FIELD_minimumLayer2TotalLength = 422
+ IPFIX_FIELD_maximumLayer2TotalLength = 423
+ IPFIX_FIELD_droppedLayer2OctetDeltaCount = 424
+ IPFIX_FIELD_droppedLayer2OctetTotalCount = 425
+ IPFIX_FIELD_ignoredLayer2OctetTotalCount = 426
+ IPFIX_FIELD_notSentLayer2OctetTotalCount = 427
+ IPFIX_FIELD_layer2OctetDeltaSumOfSquares = 428
+ IPFIX_FIELD_layer2OctetTotalSumOfSquares = 429
+ IPFIX_FIELD_layer2FrameDeltaCount = 430
+ IPFIX_FIELD_layer2FrameTotalCount = 431
+ IPFIX_FIELD_pseudoWireDestinationIPv4Address = 432
+ IPFIX_FIELD_ignoredLayer2FrameTotalCount = 433
+ IPFIX_FIELD_mibObjectValueInteger = 434
+ IPFIX_FIELD_mibObjectValueOctetString = 435
+ IPFIX_FIELD_mibObjectValueOID = 436
+ IPFIX_FIELD_mibObjectValueBits = 437
+ IPFIX_FIELD_mibObjectValueIPAddress = 438
+ IPFIX_FIELD_mibObjectValueCounter = 439
+ IPFIX_FIELD_mibObjectValueGauge = 440
+ IPFIX_FIELD_mibObjectValueTimeTicks = 441
+ IPFIX_FIELD_mibObjectValueUnsigned = 442
+ IPFIX_FIELD_mibObjectValueTable = 443
+ IPFIX_FIELD_mibObjectValueRow = 444
+ IPFIX_FIELD_mibObjectIdentifier = 445
+ IPFIX_FIELD_mibSubIdentifier = 446
+ IPFIX_FIELD_mibIndexIndicator = 447
+ IPFIX_FIELD_mibCaptureTimeSemantics = 448
+ IPFIX_FIELD_mibContextEngineID = 449
+ IPFIX_FIELD_mibContextName = 450
+ IPFIX_FIELD_mibObjectName = 451
+ IPFIX_FIELD_mibObjectDescription = 452
+ IPFIX_FIELD_mibObjectSyntax = 453
+ IPFIX_FIELD_mibModuleName = 454
+ IPFIX_FIELD_mobileIMSI = 455
+ IPFIX_FIELD_mobileMSISDN = 456
+ IPFIX_FIELD_httpStatusCode = 457
+ IPFIX_FIELD_sourceTransportPortsLimit = 458
+ IPFIX_FIELD_httpRequestMethod = 459
+ IPFIX_FIELD_httpRequestHost = 460
+ IPFIX_FIELD_httpRequestTarget = 461
+ IPFIX_FIELD_httpMessageVersion = 462
+ IPFIX_FIELD_natInstanceID = 463
+ IPFIX_FIELD_internalAddressRealm = 464
+ IPFIX_FIELD_externalAddressRealm = 465
+ IPFIX_FIELD_natQuotaExceededEvent = 466
+ IPFIX_FIELD_natThresholdEvent = 467
+)
+
+type IPFIXPacket struct {
+ Version uint16
+ Length uint16
+ ExportTime uint32
+ SequenceNumber uint32
+ ObservationDomainId uint32
+ FlowSets []interface{}
+}
+
+type IPFIXOptionsTemplateFlowSet struct {
+ FlowSetHeader
+ Records []IPFIXOptionsTemplateRecord
+}
+
+type IPFIXOptionsTemplateRecord struct {
+ TemplateId uint16
+ FieldCount uint16
+ ScopeFieldCount uint16
+ Options []Field
+ Scopes []Field
+}
+
+func IPFIXTypeToString(typeId uint16) string {
+
+ nameList := map[uint16]string{
+ 0: "Reserved",
+ 1: "octetDeltaCount",
+ 2: "packetDeltaCount",
+ 3: "deltaFlowCount",
+ 4: "protocolIdentifier",
+ 5: "ipClassOfService",
+ 6: "tcpControlBits",
+ 7: "sourceTransportPort",
+ 8: "sourceIPv4Address",
+ 9: "sourceIPv4PrefixLength",
+ 10: "ingressInterface",
+ 11: "destinationTransportPort",
+ 12: "destinationIPv4Address",
+ 13: "destinationIPv4PrefixLength",
+ 14: "egressInterface",
+ 15: "ipNextHopIPv4Address",
+ 16: "bgpSourceAsNumber",
+ 17: "bgpDestinationAsNumber",
+ 18: "bgpNextHopIPv4Address",
+ 19: "postMCastPacketDeltaCount",
+ 20: "postMCastOctetDeltaCount",
+ 21: "flowEndSysUpTime",
+ 22: "flowStartSysUpTime",
+ 23: "postOctetDeltaCount",
+ 24: "postPacketDeltaCount",
+ 25: "minimumIpTotalLength",
+ 26: "maximumIpTotalLength",
+ 27: "sourceIPv6Address",
+ 28: "destinationIPv6Address",
+ 29: "sourceIPv6PrefixLength",
+ 30: "destinationIPv6PrefixLength",
+ 31: "flowLabelIPv6",
+ 32: "icmpTypeCodeIPv4",
+ 33: "igmpType",
+ 34: "samplingInterval",
+ 35: "samplingAlgorithm",
+ 36: "flowActiveTimeout",
+ 37: "flowIdleTimeout",
+ 38: "engineType",
+ 39: "engineId",
+ 40: "exportedOctetTotalCount",
+ 41: "exportedMessageTotalCount",
+ 42: "exportedFlowRecordTotalCount",
+ 43: "ipv4RouterSc",
+ 44: "sourceIPv4Prefix",
+ 45: "destinationIPv4Prefix",
+ 46: "mplsTopLabelType",
+ 47: "mplsTopLabelIPv4Address",
+ 48: "samplerId",
+ 49: "samplerMode",
+ 50: "samplerRandomInterval",
+ 51: "classId",
+ 52: "minimumTTL",
+ 53: "maximumTTL",
+ 54: "fragmentIdentification",
+ 55: "postIpClassOfService",
+ 56: "sourceMacAddress",
+ 57: "postDestinationMacAddress",
+ 58: "vlanId",
+ 59: "postVlanId",
+ 60: "ipVersion",
+ 61: "flowDirection",
+ 62: "ipNextHopIPv6Address",
+ 63: "bgpNextHopIPv6Address",
+ 64: "ipv6ExtensionHeaders",
+ 65: "Assigned for NetFlow v9 compatibility",
+ 66: "Assigned for NetFlow v9 compatibility",
+ 67: "Assigned for NetFlow v9 compatibility",
+ 68: "Assigned for NetFlow v9 compatibility",
+ 69: "Assigned for NetFlow v9 compatibility",
+ 70: "mplsTopLabelStackSection",
+ 71: "mplsLabelStackSection2",
+ 72: "mplsLabelStackSection3",
+ 73: "mplsLabelStackSection4",
+ 74: "mplsLabelStackSection5",
+ 75: "mplsLabelStackSection6",
+ 76: "mplsLabelStackSection7",
+ 77: "mplsLabelStackSection8",
+ 78: "mplsLabelStackSection9",
+ 79: "mplsLabelStackSection10",
+ 80: "destinationMacAddress",
+ 81: "postSourceMacAddress",
+ 82: "interfaceName",
+ 83: "interfaceDescription",
+ 84: "samplerName",
+ 85: "octetTotalCount",
+ 86: "packetTotalCount",
+ 87: "flagsAndSamplerId",
+ 88: "fragmentOffset",
+ 89: "forwardingStatus",
+ 90: "mplsVpnRouteDistinguisher",
+ 91: "mplsTopLabelPrefixLength",
+ 92: "srcTrafficIndex",
+ 93: "dstTrafficIndex",
+ 94: "applicationDescription",
+ 95: "applicationId",
+ 96: "applicationName",
+ 97: "Assigned for NetFlow v9 compatibility",
+ 98: "postIpDiffServCodePoint",
+ 99: "multicastReplicationFactor",
+ 100: "className",
+ 101: "classificationEngineId",
+ 102: "layer2packetSectionOffset",
+ 103: "layer2packetSectionSize",
+ 104: "layer2packetSectionData",
+ 128: "bgpNextAdjacentAsNumber",
+ 129: "bgpPrevAdjacentAsNumber",
+ 130: "exporterIPv4Address",
+ 131: "exporterIPv6Address",
+ 132: "droppedOctetDeltaCount",
+ 133: "droppedPacketDeltaCount",
+ 134: "droppedOctetTotalCount",
+ 135: "droppedPacketTotalCount",
+ 136: "flowEndReason",
+ 137: "commonPropertiesId",
+ 138: "observationPointId",
+ 139: "icmpTypeCodeIPv6",
+ 140: "mplsTopLabelIPv6Address",
+ 141: "lineCardId",
+ 142: "portId",
+ 143: "meteringProcessId",
+ 144: "exportingProcessId",
+ 145: "templateId",
+ 146: "wlanChannelId",
+ 147: "wlanSSID",
+ 148: "flowId",
+ 149: "observationDomainId",
+ 150: "flowStartSeconds",
+ 151: "flowEndSeconds",
+ 152: "flowStartMilliseconds",
+ 153: "flowEndMilliseconds",
+ 154: "flowStartMicroseconds",
+ 155: "flowEndMicroseconds",
+ 156: "flowStartNanoseconds",
+ 157: "flowEndNanoseconds",
+ 158: "flowStartDeltaMicroseconds",
+ 159: "flowEndDeltaMicroseconds",
+ 160: "systemInitTimeMilliseconds",
+ 161: "flowDurationMilliseconds",
+ 162: "flowDurationMicroseconds",
+ 163: "observedFlowTotalCount",
+ 164: "ignoredPacketTotalCount",
+ 165: "ignoredOctetTotalCount",
+ 166: "notSentFlowTotalCount",
+ 167: "notSentPacketTotalCount",
+ 168: "notSentOctetTotalCount",
+ 169: "destinationIPv6Prefix",
+ 170: "sourceIPv6Prefix",
+ 171: "postOctetTotalCount",
+ 172: "postPacketTotalCount",
+ 173: "flowKeyIndicator",
+ 174: "postMCastPacketTotalCount",
+ 175: "postMCastOctetTotalCount",
+ 176: "icmpTypeIPv4",
+ 177: "icmpCodeIPv4",
+ 178: "icmpTypeIPv6",
+ 179: "icmpCodeIPv6",
+ 180: "udpSourcePort",
+ 181: "udpDestinationPort",
+ 182: "tcpSourcePort",
+ 183: "tcpDestinationPort",
+ 184: "tcpSequenceNumber",
+ 185: "tcpAcknowledgementNumber",
+ 186: "tcpWindowSize",
+ 187: "tcpUrgentPointer",
+ 188: "tcpHeaderLength",
+ 189: "ipHeaderLength",
+ 190: "totalLengthIPv4",
+ 191: "payloadLengthIPv6",
+ 192: "ipTTL",
+ 193: "nextHeaderIPv6",
+ 194: "mplsPayloadLength",
+ 195: "ipDiffServCodePoint",
+ 196: "ipPrecedence",
+ 197: "fragmentFlags",
+ 198: "octetDeltaSumOfSquares",
+ 199: "octetTotalSumOfSquares",
+ 200: "mplsTopLabelTTL",
+ 201: "mplsLabelStackLength",
+ 202: "mplsLabelStackDepth",
+ 203: "mplsTopLabelExp",
+ 204: "ipPayloadLength",
+ 205: "udpMessageLength",
+ 206: "isMulticast",
+ 207: "ipv4IHL",
+ 208: "ipv4Options",
+ 209: "tcpOptions",
+ 210: "paddingOctets",
+ 211: "collectorIPv4Address",
+ 212: "collectorIPv6Address",
+ 213: "exportInterface",
+ 214: "exportProtocolVersion",
+ 215: "exportTransportProtocol",
+ 216: "collectorTransportPort",
+ 217: "exporterTransportPort",
+ 218: "tcpSynTotalCount",
+ 219: "tcpFinTotalCount",
+ 220: "tcpRstTotalCount",
+ 221: "tcpPshTotalCount",
+ 222: "tcpAckTotalCount",
+ 223: "tcpUrgTotalCount",
+ 224: "ipTotalLength",
+ 225: "postNATSourceIPv4Address",
+ 226: "postNATDestinationIPv4Address",
+ 227: "postNAPTSourceTransportPort",
+ 228: "postNAPTDestinationTransportPort",
+ 229: "natOriginatingAddressRealm",
+ 230: "natEvent",
+ 231: "initiatorOctets",
+ 232: "responderOctets",
+ 233: "firewallEvent",
+ 234: "ingressVRFID",
+ 235: "egressVRFID",
+ 236: "VRFname",
+ 237: "postMplsTopLabelExp",
+ 238: "tcpWindowScale",
+ 239: "biflowDirection",
+ 240: "ethernetHeaderLength",
+ 241: "ethernetPayloadLength",
+ 242: "ethernetTotalLength",
+ 243: "dot1qVlanId",
+ 244: "dot1qPriority",
+ 245: "dot1qCustomerVlanId",
+ 246: "dot1qCustomerPriority",
+ 247: "metroEvcId",
+ 248: "metroEvcType",
+ 249: "pseudoWireId",
+ 250: "pseudoWireType",
+ 251: "pseudoWireControlWord",
+ 252: "ingressPhysicalInterface",
+ 253: "egressPhysicalInterface",
+ 254: "postDot1qVlanId",
+ 255: "postDot1qCustomerVlanId",
+ 256: "ethernetType",
+ 257: "postIpPrecedence",
+ 258: "collectionTimeMilliseconds",
+ 259: "exportSctpStreamId",
+ 260: "maxExportSeconds",
+ 261: "maxFlowEndSeconds",
+ 262: "messageMD5Checksum",
+ 263: "messageScope",
+ 264: "minExportSeconds",
+ 265: "minFlowStartSeconds",
+ 266: "opaqueOctets",
+ 267: "sessionScope",
+ 268: "maxFlowEndMicroseconds",
+ 269: "maxFlowEndMilliseconds",
+ 270: "maxFlowEndNanoseconds",
+ 271: "minFlowStartMicroseconds",
+ 272: "minFlowStartMilliseconds",
+ 273: "minFlowStartNanoseconds",
+ 274: "collectorCertificate",
+ 275: "exporterCertificate",
+ 276: "dataRecordsReliability",
+ 277: "observationPointType",
+ 278: "newConnectionDeltaCount",
+ 279: "connectionSumDurationSeconds",
+ 280: "connectionTransactionId",
+ 281: "postNATSourceIPv6Address",
+ 282: "postNATDestinationIPv6Address",
+ 283: "natPoolId",
+ 284: "natPoolName",
+ 285: "anonymizationFlags",
+ 286: "anonymizationTechnique",
+ 287: "informationElementIndex",
+ 288: "p2pTechnology",
+ 289: "tunnelTechnology",
+ 290: "encryptedTechnology",
+ 291: "basicList",
+ 292: "subTemplateList",
+ 293: "subTemplateMultiList",
+ 294: "bgpValidityState",
+ 295: "IPSecSPI",
+ 296: "greKey",
+ 297: "natType",
+ 298: "initiatorPackets",
+ 299: "responderPackets",
+ 300: "observationDomainName",
+ 301: "selectionSequenceId",
+ 302: "selectorId",
+ 303: "informationElementId",
+ 304: "selectorAlgorithm",
+ 305: "samplingPacketInterval",
+ 306: "samplingPacketSpace",
+ 307: "samplingTimeInterval",
+ 308: "samplingTimeSpace",
+ 309: "samplingSize",
+ 310: "samplingPopulation",
+ 311: "samplingProbability",
+ 312: "dataLinkFrameSize",
+ 313: "ipHeaderPacketSection",
+ 314: "ipPayloadPacketSection",
+ 315: "dataLinkFrameSection",
+ 316: "mplsLabelStackSection",
+ 317: "mplsPayloadPacketSection",
+ 318: "selectorIdTotalPktsObserved",
+ 319: "selectorIdTotalPktsSelected",
+ 320: "absoluteError",
+ 321: "relativeError",
+ 322: "observationTimeSeconds",
+ 323: "observationTimeMilliseconds",
+ 324: "observationTimeMicroseconds",
+ 325: "observationTimeNanoseconds",
+ 326: "digestHashValue",
+ 327: "hashIPPayloadOffset",
+ 328: "hashIPPayloadSize",
+ 329: "hashOutputRangeMin",
+ 330: "hashOutputRangeMax",
+ 331: "hashSelectedRangeMin",
+ 332: "hashSelectedRangeMax",
+ 333: "hashDigestOutput",
+ 334: "hashInitialiserValue",
+ 335: "selectorName",
+ 336: "upperCILimit",
+ 337: "lowerCILimit",
+ 338: "confidenceLevel",
+ 339: "informationElementDataType",
+ 340: "informationElementDescription",
+ 341: "informationElementName",
+ 342: "informationElementRangeBegin",
+ 343: "informationElementRangeEnd",
+ 344: "informationElementSemantics",
+ 345: "informationElementUnits",
+ 346: "privateEnterpriseNumber",
+ 347: "virtualStationInterfaceId",
+ 348: "virtualStationInterfaceName",
+ 349: "virtualStationUUID",
+ 350: "virtualStationName",
+ 351: "layer2SegmentId",
+ 352: "layer2OctetDeltaCount",
+ 353: "layer2OctetTotalCount",
+ 354: "ingressUnicastPacketTotalCount",
+ 355: "ingressMulticastPacketTotalCount",
+ 356: "ingressBroadcastPacketTotalCount",
+ 357: "egressUnicastPacketTotalCount",
+ 358: "egressBroadcastPacketTotalCount",
+ 359: "monitoringIntervalStartMilliSeconds",
+ 360: "monitoringIntervalEndMilliSeconds",
+ 361: "portRangeStart",
+ 362: "portRangeEnd",
+ 363: "portRangeStepSize",
+ 364: "portRangeNumPorts",
+ 365: "staMacAddress",
+ 366: "staIPv4Address",
+ 367: "wtpMacAddress",
+ 368: "ingressInterfaceType",
+ 369: "egressInterfaceType",
+ 370: "rtpSequenceNumber",
+ 371: "userName",
+ 372: "applicationCategoryName",
+ 373: "applicationSubCategoryName",
+ 374: "applicationGroupName",
+ 375: "originalFlowsPresent",
+ 376: "originalFlowsInitiated",
+ 377: "originalFlowsCompleted",
+ 378: "distinctCountOfSourceIPAddress",
+ 379: "distinctCountOfDestinationIPAddress",
+ 380: "distinctCountOfSourceIPv4Address",
+ 381: "distinctCountOfDestinationIPv4Address",
+ 382: "distinctCountOfSourceIPv6Address",
+ 383: "distinctCountOfDestinationIPv6Address",
+ 384: "valueDistributionMethod",
+ 385: "rfc3550JitterMilliseconds",
+ 386: "rfc3550JitterMicroseconds",
+ 387: "rfc3550JitterNanoseconds",
+ 388: "dot1qDEI",
+ 389: "dot1qCustomerDEI",
+ 390: "flowSelectorAlgorithm",
+ 391: "flowSelectedOctetDeltaCount",
+ 392: "flowSelectedPacketDeltaCount",
+ 393: "flowSelectedFlowDeltaCount",
+ 394: "selectorIDTotalFlowsObserved",
+ 395: "selectorIDTotalFlowsSelected",
+ 396: "samplingFlowInterval",
+ 397: "samplingFlowSpacing",
+ 398: "flowSamplingTimeInterval",
+ 399: "flowSamplingTimeSpacing",
+ 400: "hashFlowDomain",
+ 401: "transportOctetDeltaCount",
+ 402: "transportPacketDeltaCount",
+ 403: "originalExporterIPv4Address",
+ 404: "originalExporterIPv6Address",
+ 405: "originalObservationDomainId",
+ 406: "intermediateProcessId",
+ 407: "ignoredDataRecordTotalCount",
+ 408: "dataLinkFrameType",
+ 409: "sectionOffset",
+ 410: "sectionExportedOctets",
+ 411: "dot1qServiceInstanceTag",
+ 412: "dot1qServiceInstanceId",
+ 413: "dot1qServiceInstancePriority",
+ 414: "dot1qCustomerSourceMacAddress",
+ 415: "dot1qCustomerDestinationMacAddress",
+ 416: "",
+ 417: "postLayer2OctetDeltaCount",
+ 418: "postMCastLayer2OctetDeltaCount",
+ 419: "",
+ 420: "postLayer2OctetTotalCount",
+ 421: "postMCastLayer2OctetTotalCount",
+ 422: "minimumLayer2TotalLength",
+ 423: "maximumLayer2TotalLength",
+ 424: "droppedLayer2OctetDeltaCount",
+ 425: "droppedLayer2OctetTotalCount",
+ 426: "ignoredLayer2OctetTotalCount",
+ 427: "notSentLayer2OctetTotalCount",
+ 428: "layer2OctetDeltaSumOfSquares",
+ 429: "layer2OctetTotalSumOfSquares",
+ 430: "layer2FrameDeltaCount",
+ 431: "layer2FrameTotalCount",
+ 432: "pseudoWireDestinationIPv4Address",
+ 433: "ignoredLayer2FrameTotalCount",
+ 434: "mibObjectValueInteger",
+ 435: "mibObjectValueOctetString",
+ 436: "mibObjectValueOID",
+ 437: "mibObjectValueBits",
+ 438: "mibObjectValueIPAddress",
+ 439: "mibObjectValueCounter",
+ 440: "mibObjectValueGauge",
+ 441: "mibObjectValueTimeTicks",
+ 442: "mibObjectValueUnsigned",
+ 443: "mibObjectValueTable",
+ 444: "mibObjectValueRow",
+ 445: "mibObjectIdentifier",
+ 446: "mibSubIdentifier",
+ 447: "mibIndexIndicator",
+ 448: "mibCaptureTimeSemantics",
+ 449: "mibContextEngineID",
+ 450: "mibContextName",
+ 451: "mibObjectName",
+ 452: "mibObjectDescription",
+ 453: "mibObjectSyntax",
+ 454: "mibModuleName",
+ 455: "mobileIMSI",
+ 456: "mobileMSISDN",
+ 457: "httpStatusCode",
+ 458: "sourceTransportPortsLimit",
+ 459: "httpRequestMethod",
+ 460: "httpRequestHost",
+ 461: "httpRequestTarget",
+ 462: "httpMessageVersion",
+ 463: "natInstanceID",
+ 464: "internalAddressRealm",
+ 465: "externalAddressRealm",
+ 466: "natQuotaExceededEvent",
+ 467: "natThresholdEvent",
+ }
+
+ if typeId >= 105 && typeId <= 127 {
+ return "Assigned for NetFlow v9 compatibility"
+ } else if typeId >= 468 && typeId <= 32767 {
+ return "Unassigned"
+ } else {
+ return nameList[typeId]
+ }
+}
+
+func (flowSet IPFIXOptionsTemplateFlowSet) String(TypeToString func(uint16) string) string {
+ str := fmt.Sprintf(" Id %v\n", flowSet.Id)
+ str += fmt.Sprintf(" Length: %v\n", flowSet.Length)
+ str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records))
+
+ for j, record := range flowSet.Records {
+ str += fmt.Sprintf(" - Record %v:\n", j)
+ str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId)
+ str += fmt.Sprintf(" FieldCount: %v\n", record.FieldCount)
+ str += fmt.Sprintf(" ScopeFieldCount: %v\n", record.ScopeFieldCount)
+
+ str += fmt.Sprintf(" Scopes (%v):\n", len(record.Scopes))
+
+ for k, field := range record.Scopes {
+ str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length)
+ }
+
+ str += fmt.Sprintf(" Options (%v):\n", len(record.Options))
+
+ for k, field := range record.Options {
+ str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length)
+ }
+
+ }
+
+ return str
+}
+
+func (p IPFIXPacket) String() string {
+ str := "Flow Packet\n"
+ str += "------------\n"
+ str += fmt.Sprintf(" Version: %v\n", p.Version)
+ str += fmt.Sprintf(" Length: %v\n", p.Length)
+
+ exportTime := time.Unix(int64(p.ExportTime), 0)
+ str += fmt.Sprintf(" ExportTime: %v\n", exportTime.String())
+ str += fmt.Sprintf(" SequenceNumber: %v\n", p.SequenceNumber)
+ str += fmt.Sprintf(" ObservationDomainId: %v\n", p.ObservationDomainId)
+ str += fmt.Sprintf(" FlowSets (%v):\n", len(p.FlowSets))
+
+ for i, flowSet := range p.FlowSets {
+ switch flowSet := flowSet.(type) {
+ case TemplateFlowSet:
+ str += fmt.Sprintf(" - TemplateFlowSet %v:\n", i)
+ str += flowSet.String(IPFIXTypeToString)
+ case IPFIXOptionsTemplateFlowSet:
+ str += fmt.Sprintf(" - OptionsTemplateFlowSet %v:\n", i)
+ str += flowSet.String(IPFIXTypeToString)
+ case DataFlowSet:
+ str += fmt.Sprintf(" - DataFlowSet %v:\n", i)
+ str += flowSet.String(IPFIXTypeToString)
+ case OptionsDataFlowSet:
+ str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i)
+ str += flowSet.String(IPFIXTypeToString, IPFIXTypeToString)
+ default:
+ str += fmt.Sprintf(" - (unknown type) %v: %v\n", i, flowSet)
+ }
+ }
+
+ return str
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go
new file mode 100644
index 000000000..8191b632f
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go
@@ -0,0 +1,537 @@
+package netflow
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "github.com/netsampler/goflow2/decoders/netflow/templates"
+ "github.com/netsampler/goflow2/decoders/utils"
+)
+
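+// FlowBaseTemplateSet stores templates indexed by NetFlow/IPFIX version, then
+// by observation domain ID, then by template ID.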
+type FlowBaseTemplateSet map[uint16]map[uint32]map[uint16]interface{}
+
+type NetFlowTemplateSystem interface {
+ GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error)
+ AddTemplate(version uint16, obsDomainId uint32, template interface{})
+}
+
+// Transition structure to ease the conversion with the new template systems
+type TemplateWrapper struct {
+ Ctx context.Context
+ Key string
+ Inner templates.TemplateInterface
+}
+
+func (w *TemplateWrapper) getTemplateId(template interface{}) (templateId uint16) {
+ switch templateIdConv := template.(type) {
+ case IPFIXOptionsTemplateRecord:
+ templateId = templateIdConv.TemplateId
+ case NFv9OptionsTemplateRecord:
+ templateId = templateIdConv.TemplateId
+ case TemplateRecord:
+ templateId = templateIdConv.TemplateId
+ }
+ return templateId
+}
+
+func (w TemplateWrapper) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) {
+ return w.Inner.GetTemplate(w.Ctx, &templates.TemplateKey{w.Key, version, obsDomainId, templateId})
+}
+
+func (w TemplateWrapper) AddTemplate(version uint16, obsDomainId uint32, template interface{}) {
+ w.Inner.AddTemplate(w.Ctx, &templates.TemplateKey{w.Key, version, obsDomainId, w.getTemplateId(template)}, template)
+}
+
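+// DecodeNFv9OptionsTemplateSet parses the records of a NetFlow v9 Options
+// Template FlowSet (FlowSet ID 1); scope and option lengths are expressed in
+// bytes, with each field descriptor taking 4 bytes.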
+func DecodeNFv9OptionsTemplateSet(payload *bytes.Buffer) ([]NFv9OptionsTemplateRecord, error) {
+ var records []NFv9OptionsTemplateRecord
+ var err error
+ for payload.Len() >= 4 {
+ optsTemplateRecord := NFv9OptionsTemplateRecord{}
+ err = utils.BinaryDecoder(payload, &optsTemplateRecord.TemplateId, &optsTemplateRecord.ScopeLength, &optsTemplateRecord.OptionLength)
+ if err != nil {
+ return records, err
+ }
+
+ sizeScope := int(optsTemplateRecord.ScopeLength) / 4
+ sizeOptions := int(optsTemplateRecord.OptionLength) / 4
+ if sizeScope < 0 || sizeOptions < 0 {
+ return records, fmt.Errorf("Error decoding OptionsTemplateSet: negative length.")
+ }
+
+ fields := make([]Field, sizeScope)
+ for i := 0; i < sizeScope; i++ {
+ field := Field{}
+ if err := DecodeField(payload, &field, false); err != nil {
+ return records, err
+ }
+ fields[i] = field
+ }
+ optsTemplateRecord.Scopes = fields
+
+ fields = make([]Field, sizeOptions)
+ for i := 0; i < sizeOptions; i++ {
+ field := Field{}
+ if err := DecodeField(payload, &field, false); err != nil {
+ return records, err
+ }
+ fields[i] = field
+ }
+ optsTemplateRecord.Options = fields
+
+ records = append(records, optsTemplateRecord)
+ }
+
+ return records, err
+}
+
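+// DecodeField reads one field descriptor (type and length). When pen is true
+// and the enterprise bit (0x8000) of the type is set, the 4-byte IPFIX Private
+// Enterprise Number that follows is decoded as well.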
+func DecodeField(payload *bytes.Buffer, field *Field, pen bool) error {
+ err := utils.BinaryDecoder(payload, &field.Type, &field.Length)
+ if pen && err == nil && field.Type&0x8000 != 0 {
+ field.PenProvided = true
+ err = utils.BinaryDecoder(payload, &field.Pen)
+ }
+ return err
+}
+
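+// DecodeIPFIXOptionsTemplateSet parses the records of an IPFIX Options Template
+// Set (Set ID 3); the scope fields come first, followed by the remaining
+// FieldCount-ScopeFieldCount option fields.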
+func DecodeIPFIXOptionsTemplateSet(payload *bytes.Buffer) ([]IPFIXOptionsTemplateRecord, error) {
+ var records []IPFIXOptionsTemplateRecord
+ var err error
+ for payload.Len() >= 4 {
+ optsTemplateRecord := IPFIXOptionsTemplateRecord{}
+ err = utils.BinaryDecoder(payload, &optsTemplateRecord.TemplateId, &optsTemplateRecord.FieldCount, &optsTemplateRecord.ScopeFieldCount)
+ if err != nil {
+ return records, err
+ }
+
+ fields := make([]Field, int(optsTemplateRecord.ScopeFieldCount))
+ for i := 0; i < int(optsTemplateRecord.ScopeFieldCount); i++ {
+ field := Field{}
+ if err := DecodeField(payload, &field, true); err != nil {
+ return records, err
+ }
+ fields[i] = field
+ }
+ optsTemplateRecord.Scopes = fields
+
+ optionsSize := int(optsTemplateRecord.FieldCount) - int(optsTemplateRecord.ScopeFieldCount)
+ if optionsSize < 0 {
+ return records, fmt.Errorf("Error decoding OptionsTemplateSet: negative length.")
+ }
+ fields = make([]Field, optionsSize)
+ for i := 0; i < optionsSize; i++ {
+ field := Field{}
+ if err := DecodeField(payload, &field, true); err != nil {
+ return records, err
+ }
+ fields[i] = field
+ }
+ optsTemplateRecord.Options = fields
+
+ records = append(records, optsTemplateRecord)
+ }
+
+ return records, nil
+}
+
+func DecodeTemplateSet(version uint16, payload *bytes.Buffer) ([]TemplateRecord, error) {
+ var records []TemplateRecord
+ var err error
+ for payload.Len() >= 4 {
+ templateRecord := TemplateRecord{}
+ err = utils.BinaryDecoder(payload, &templateRecord.TemplateId, &templateRecord.FieldCount)
+ if err != nil {
+ return records, err
+ }
+
+ if templateRecord.FieldCount == 0 {
+ return records, fmt.Errorf("Error decoding TemplateSet: zero count.")
+ }
+
+ fields := make([]Field, int(templateRecord.FieldCount))
+ for i := 0; i < int(templateRecord.FieldCount); i++ {
+ field := Field{}
+ err := utils.BinaryDecoder(payload, &field.Type, &field.Length)
+ if err == nil && version == 10 && field.Type&0x8000 != 0 {
+ field.PenProvided = true
+ field.Type = field.Type ^ 0x8000
+ err = utils.BinaryDecoder(payload, &field.Pen)
+ }
+ if err != nil {
+ return records, err
+ }
+ fields[i] = field
+ }
+ templateRecord.Fields = fields
+ records = append(records, templateRecord)
+ }
+
+ return records, nil
+}
+
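+// GetTemplateSize returns the number of bytes taken by the fixed-length fields
+// of a template; variable-length fields (length 0xffff) are skipped because
+// their size is only known once the data record is decoded.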
+func GetTemplateSize(version uint16, template []Field) int {
+ sum := 0
+ for _, templateField := range template {
+ if templateField.Length == 0xffff {
+ continue
+ }
+
+ sum += int(templateField.Length)
+ }
+ return sum
+}
+
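+// DecodeDataSetUsingFields decodes one data record laid out as described by
+// listFields. Variable-length fields (template length 0xffff) carry their own
+// length: a single byte, or 0xFF followed by a 2-byte length for values of
+// 255 bytes or more (RFC 7011, Section 7).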
+func DecodeDataSetUsingFields(version uint16, payload *bytes.Buffer, listFields []Field) []DataField {
+ for payload.Len() >= GetTemplateSize(version, listFields) {
+
+ dataFields := make([]DataField, len(listFields))
+ for i, templateField := range listFields {
+
+ finalLength := int(templateField.Length)
+ if templateField.Length == 0xffff {
+ var variableLen8 byte
+ var variableLen16 uint16
+ err := utils.BinaryDecoder(payload, &variableLen8)
+ if err != nil {
+ return []DataField{}
+ }
+ if variableLen8 == 0xff {
+ err := utils.BinaryDecoder(payload, &variableLen16)
+ if err != nil {
+ return []DataField{}
+ }
+ finalLength = int(variableLen16)
+ } else {
+ finalLength = int(variableLen8)
+ }
+ }
+
+ value := payload.Next(finalLength)
+ nfvalue := DataField{
+ Type: templateField.Type,
+ PenProvided: templateField.PenProvided,
+ Pen: templateField.Pen,
+ Value: value,
+ }
+ dataFields[i] = nfvalue
+ }
+ return dataFields
+ }
+ return []DataField{}
+}
+
+type ErrorTemplateNotFound struct {
+ version uint16
+ obsDomainId uint32
+ templateId uint16
+ typeTemplate string
+}
+
+func NewErrorTemplateNotFound(version uint16, obsDomainId uint32, templateId uint16, typeTemplate string) *ErrorTemplateNotFound {
+ return &ErrorTemplateNotFound{
+ version: version,
+ obsDomainId: obsDomainId,
+ templateId: templateId,
+ typeTemplate: typeTemplate,
+ }
+}
+
+func (e *ErrorTemplateNotFound) Error() string {
+ return fmt.Sprintf("No %v template %v found for version %v and domain id %v", e.typeTemplate, e.templateId, e.version, e.obsDomainId)
+}
+
+func DecodeOptionsDataSet(version uint16, payload *bytes.Buffer, listFieldsScopes, listFieldsOption []Field) ([]OptionsDataRecord, error) {
+ var records []OptionsDataRecord
+
+ listFieldsScopesSize := GetTemplateSize(version, listFieldsScopes)
+ listFieldsOptionSize := GetTemplateSize(version, listFieldsOption)
+
+ for payload.Len() >= listFieldsScopesSize+listFieldsOptionSize {
+ scopeValues := DecodeDataSetUsingFields(version, payload, listFieldsScopes)
+ optionValues := DecodeDataSetUsingFields(version, payload, listFieldsOption)
+
+ record := OptionsDataRecord{
+ ScopesValues: scopeValues,
+ OptionsValues: optionValues,
+ }
+
+ records = append(records, record)
+ }
+ return records, nil
+}
+
+func DecodeDataSet(version uint16, payload *bytes.Buffer, listFields []Field) ([]DataRecord, error) {
+ var records []DataRecord
+
+ listFieldsSize := GetTemplateSize(version, listFields)
+ for payload.Len() >= listFieldsSize {
+ values := DecodeDataSetUsingFields(version, payload, listFields)
+
+ record := DataRecord{
+ Values: values,
+ }
+
+ records = append(records, record)
+ }
+ return records, nil
+}
+
+func (ts *BasicTemplateSystem) GetTemplates() map[uint16]map[uint32]map[uint16]interface{} {
+ ts.templateslock.RLock()
+ tmp := ts.templates
+ ts.templateslock.RUnlock()
+ return tmp
+}
+
+func (ts *BasicTemplateSystem) AddTemplate(version uint16, obsDomainId uint32, template interface{}) {
+ ts.templateslock.Lock()
+ defer ts.templateslock.Unlock()
+ _, exists := ts.templates[version]
+ if !exists {
+ ts.templates[version] = make(map[uint32]map[uint16]interface{})
+ }
+ _, exists = ts.templates[version][obsDomainId]
+ if !exists {
+ ts.templates[version][obsDomainId] = make(map[uint16]interface{})
+ }
+ var templateId uint16
+ switch templateIdConv := template.(type) {
+ case IPFIXOptionsTemplateRecord:
+ templateId = templateIdConv.TemplateId
+ case NFv9OptionsTemplateRecord:
+ templateId = templateIdConv.TemplateId
+ case TemplateRecord:
+ templateId = templateIdConv.TemplateId
+ }
+ ts.templates[version][obsDomainId][templateId] = template
+}
+
+func (ts *BasicTemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) {
+ ts.templateslock.RLock()
+ defer ts.templateslock.RUnlock()
+ templatesVersion, okver := ts.templates[version]
+ if okver {
+ templatesObsDom, okobs := templatesVersion[obsDomainId]
+ if okobs {
+ template, okid := templatesObsDom[templateId]
+ if okid {
+ return template, nil
+ }
+ }
+ }
+ return nil, NewErrorTemplateNotFound(version, obsDomainId, templateId, "info")
+}
+
+type BasicTemplateSystem struct {
+ templates FlowBaseTemplateSet
+ templateslock *sync.RWMutex
+}
+
+func CreateTemplateSystem() *BasicTemplateSystem {
+ ts := &BasicTemplateSystem{
+ templates: make(FlowBaseTemplateSet),
+ templateslock: &sync.RWMutex{},
+ }
+ return ts
+}
+
+func DecodeMessage(payload *bytes.Buffer, templates NetFlowTemplateSystem) (interface{}, error) {
+ return DecodeMessageContext(context.Background(), payload, "", templates)
+}
+
+func DecodeMessageContext(ctx context.Context, payload *bytes.Buffer, templateKey string, tpli NetFlowTemplateSystem) (interface{}, error) {
+ var size uint16
+ packetNFv9 := NFv9Packet{}
+ packetIPFIX := IPFIXPacket{}
+ var returnItem interface{}
+
+ var version uint16
+ var obsDomainId uint32
+ if err := utils.BinaryRead(payload, binary.BigEndian, &version); err != nil {
+ return nil, fmt.Errorf("Error decoding version: %v", err)
+ }
+
+ if version == 9 {
+ err := utils.BinaryDecoder(payload, &packetNFv9.Count, &packetNFv9.SystemUptime, &packetNFv9.UnixSeconds, &packetNFv9.SequenceNumber, &packetNFv9.SourceId)
+ if err != nil {
+ return nil, fmt.Errorf("Error decoding NetFlow v9 header: %v", err)
+ }
+ size = packetNFv9.Count
+ packetNFv9.Version = version
+ returnItem = packetNFv9
+ obsDomainId = packetNFv9.SourceId
+ } else if version == 10 {
+ err := utils.BinaryDecoder(payload, &packetIPFIX.Length, &packetIPFIX.ExportTime, &packetIPFIX.SequenceNumber, &packetIPFIX.ObservationDomainId)
+ if err != nil {
+ return nil, fmt.Errorf("Error decoding IPFIX header: %v", err)
+ }
+ size = packetIPFIX.Length
+ packetIPFIX.Version = version
+ returnItem = packetIPFIX
+ obsDomainId = packetIPFIX.ObservationDomainId
+ } else {
+ return nil, fmt.Errorf("NetFlow/IPFIX version error: %d", version)
+ }
+ read := 16
+ startSize := payload.Len()
+
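+ // NetFlow v9 bounds this loop by the header record count, while IPFIX bounds
+ // it by the message length in bytes (the 16 bytes already read cover the header).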
+ for i := 0; ((i < int(size) && version == 9) || (uint16(read) < size && version == 10)) && payload.Len() > 0; i++ {
+ fsheader := FlowSetHeader{}
+ if err := utils.BinaryDecoder(payload, &fsheader.Id, &fsheader.Length); err != nil {
+ return returnItem, fmt.Errorf("Error decoding FlowSet header: %v", err)
+ }
+
+ nextrelpos := int(fsheader.Length) - binary.Size(fsheader)
+ if nextrelpos < 0 {
+ return returnItem, fmt.Errorf("Error decoding packet: non-terminated stream")
+ }
+
+ var flowSet interface{}
+
+ if fsheader.Id == 0 && version == 9 {
+ templateReader := bytes.NewBuffer(payload.Next(nextrelpos))
+ records, err := DecodeTemplateSet(version, templateReader)
+ if err != nil {
+ return returnItem, fmt.Errorf("Error decoding FlowSet header: %v", err)
+ }
+ templatefs := TemplateFlowSet{
+ FlowSetHeader: fsheader,
+ Records: records,
+ }
+
+ flowSet = templatefs
+
+ if tpli != nil {
+ for _, record := range records {
+ tpli.AddTemplate(version, obsDomainId, record)
+ //tpli.AddTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, record.TemplateId), record)
+ }
+ }
+
+ } else if fsheader.Id == 1 && version == 9 {
+ templateReader := bytes.NewBuffer(payload.Next(nextrelpos))
+ records, err := DecodeNFv9OptionsTemplateSet(templateReader)
+ if err != nil {
+ return returnItem, fmt.Errorf("Error decoding NetFlow OptionsTemplateSet: %v", err)
+ }
+ optsTemplatefs := NFv9OptionsTemplateFlowSet{
+ FlowSetHeader: fsheader,
+ Records: records,
+ }
+ flowSet = optsTemplatefs
+
+ if tpli != nil {
+ for _, record := range records {
+ tpli.AddTemplate(version, obsDomainId, record)
+ //tpli.AddTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, record.TemplateId), record)
+ }
+ }
+
+ } else if fsheader.Id == 2 && version == 10 {
+ templateReader := bytes.NewBuffer(payload.Next(nextrelpos))
+ records, err := DecodeTemplateSet(version, templateReader)
+ if err != nil {
+ return returnItem, fmt.Errorf("Error decoding IPFIX TemplateSet: %v", err)
+ }
+ templatefs := TemplateFlowSet{
+ FlowSetHeader: fsheader,
+ Records: records,
+ }
+ flowSet = templatefs
+
+ if tpli != nil {
+ for _, record := range records {
+ tpli.AddTemplate(version, obsDomainId, record)
+ //tpli.AddTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, record.TemplateId), record)
+ }
+ }
+
+ } else if fsheader.Id == 3 && version == 10 {
+ templateReader := bytes.NewBuffer(payload.Next(nextrelpos))
+ records, err := DecodeIPFIXOptionsTemplateSet(templateReader)
+ if err != nil {
+ return returnItem, fmt.Errorf("Error decoding IPFIX OptionsTemplateSet: %v", err)
+ }
+ optsTemplatefs := IPFIXOptionsTemplateFlowSet{
+ FlowSetHeader: fsheader,
+ Records: records,
+ }
+ flowSet = optsTemplatefs
+
+ if tpli != nil {
+ for _, record := range records {
+ tpli.AddTemplate(version, obsDomainId, record)
+ //tpli.AddTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, record.TemplateId), record)
+ }
+ }
+
+ } else if fsheader.Id >= 256 {
+ dataReader := bytes.NewBuffer(payload.Next(nextrelpos))
+
+ if tpli == nil {
+ continue
+ }
+
+ template, err := tpli.GetTemplate(version, obsDomainId, fsheader.Id)
+ //template, err := tpli.GetTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, fsheader.Id))
+
+ if err == nil {
+ switch templatec := template.(type) {
+ case TemplateRecord:
+ records, err := DecodeDataSet(version, dataReader, templatec.Fields)
+ if err != nil {
+ return returnItem, fmt.Errorf("Error decoding DataSet: %v", err)
+ }
+ datafs := DataFlowSet{
+ FlowSetHeader: fsheader,
+ Records: records,
+ }
+ flowSet = datafs
+ case IPFIXOptionsTemplateRecord:
+ records, err := DecodeOptionsDataSet(version, dataReader, templatec.Scopes, templatec.Options)
+ if err != nil {
+ return returnItem, fmt.Errorf("Error decoding DataSet: %v", err)
+ }
+
+ datafs := OptionsDataFlowSet{
+ FlowSetHeader: fsheader,
+ Records: records,
+ }
+ flowSet = datafs
+ case NFv9OptionsTemplateRecord:
+ records, err := DecodeOptionsDataSet(version, dataReader, templatec.Scopes, templatec.Options)
+ if err != nil {
+ return returnItem, fmt.Errorf("Error decoding OptionDataSet: %v", err)
+ }
+
+ datafs := OptionsDataFlowSet{
+ FlowSetHeader: fsheader,
+ Records: records,
+ }
+ flowSet = datafs
+ }
+ } else {
+ return returnItem, err
+ }
+ } else {
+ return returnItem, fmt.Errorf("Error with ID %d", fsheader.Id)
+ }
+
+ if version == 9 && flowSet != nil {
+ packetNFv9.FlowSets = append(packetNFv9.FlowSets, flowSet)
+ } else if version == 10 && flowSet != nil {
+ packetIPFIX.FlowSets = append(packetIPFIX.FlowSets, flowSet)
+ }
+ read = startSize - payload.Len() + 16
+ }
+
+ if version == 9 {
+ return packetNFv9, nil
+ } else if version == 10 {
+ return packetIPFIX, nil
+ } else {
+ return returnItem, fmt.Errorf("Unknown version: %d", version)
+ }
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/nfv9.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/nfv9.go
new file mode 100644
index 000000000..64fe227d8
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/nfv9.go
@@ -0,0 +1,317 @@
+package netflow
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ NFV9_FIELD_IN_BYTES = 1
+ NFV9_FIELD_IN_PKTS = 2
+ NFV9_FIELD_FLOWS = 3
+ NFV9_FIELD_PROTOCOL = 4
+ NFV9_FIELD_SRC_TOS = 5
+ NFV9_FIELD_TCP_FLAGS = 6
+ NFV9_FIELD_L4_SRC_PORT = 7
+ NFV9_FIELD_IPV4_SRC_ADDR = 8
+ NFV9_FIELD_SRC_MASK = 9
+ NFV9_FIELD_INPUT_SNMP = 10
+ NFV9_FIELD_L4_DST_PORT = 11
+ NFV9_FIELD_IPV4_DST_ADDR = 12
+ NFV9_FIELD_DST_MASK = 13
+ NFV9_FIELD_OUTPUT_SNMP = 14
+ NFV9_FIELD_IPV4_NEXT_HOP = 15
+ NFV9_FIELD_SRC_AS = 16
+ NFV9_FIELD_DST_AS = 17
+ NFV9_FIELD_BGP_IPV4_NEXT_HOP = 18
+ NFV9_FIELD_MUL_DST_PKTS = 19
+ NFV9_FIELD_MUL_DST_BYTES = 20
+ NFV9_FIELD_LAST_SWITCHED = 21
+ NFV9_FIELD_FIRST_SWITCHED = 22
+ NFV9_FIELD_OUT_BYTES = 23
+ NFV9_FIELD_OUT_PKTS = 24
+ NFV9_FIELD_MIN_PKT_LNGTH = 25
+ NFV9_FIELD_MAX_PKT_LNGTH = 26
+ NFV9_FIELD_IPV6_SRC_ADDR = 27
+ NFV9_FIELD_IPV6_DST_ADDR = 28
+ NFV9_FIELD_IPV6_SRC_MASK = 29
+ NFV9_FIELD_IPV6_DST_MASK = 30
+ NFV9_FIELD_IPV6_FLOW_LABEL = 31
+ NFV9_FIELD_ICMP_TYPE = 32
+ NFV9_FIELD_MUL_IGMP_TYPE = 33
+ NFV9_FIELD_SAMPLING_INTERVAL = 34
+ NFV9_FIELD_SAMPLING_ALGORITHM = 35
+ NFV9_FIELD_FLOW_ACTIVE_TIMEOUT = 36
+ NFV9_FIELD_FLOW_INACTIVE_TIMEOUT = 37
+ NFV9_FIELD_ENGINE_TYPE = 38
+ NFV9_FIELD_ENGINE_ID = 39
+ NFV9_FIELD_TOTAL_BYTES_EXP = 40
+ NFV9_FIELD_TOTAL_PKTS_EXP = 41
+ NFV9_FIELD_TOTAL_FLOWS_EXP = 42
+ NFV9_FIELD_IPV4_SRC_PREFIX = 44
+ NFV9_FIELD_IPV4_DST_PREFIX = 45
+ NFV9_FIELD_MPLS_TOP_LABEL_TYPE = 46
+ NFV9_FIELD_MPLS_TOP_LABEL_IP_ADDR = 47
+ NFV9_FIELD_FLOW_SAMPLER_ID = 48
+ NFV9_FIELD_FLOW_SAMPLER_MODE = 49
+ NFV9_FIELD_FLOW_SAMPLER_RANDOM_INTERVAL = 50
+ NFV9_FIELD_MIN_TTL = 52
+ NFV9_FIELD_MAX_TTL = 53
+ NFV9_FIELD_IPV4_IDENT = 54
+ NFV9_FIELD_DST_TOS = 55
+ NFV9_FIELD_IN_SRC_MAC = 56
+ NFV9_FIELD_OUT_DST_MAC = 57
+ NFV9_FIELD_SRC_VLAN = 58
+ NFV9_FIELD_DST_VLAN = 59
+ NFV9_FIELD_IP_PROTOCOL_VERSION = 60
+ NFV9_FIELD_DIRECTION = 61
+ NFV9_FIELD_IPV6_NEXT_HOP = 62
+ NFV9_FIELD_BGP_IPV6_NEXT_HOP = 63
+ NFV9_FIELD_IPV6_OPTION_HEADERS = 64
+ NFV9_FIELD_MPLS_LABEL_1 = 70
+ NFV9_FIELD_MPLS_LABEL_2 = 71
+ NFV9_FIELD_MPLS_LABEL_3 = 72
+ NFV9_FIELD_MPLS_LABEL_4 = 73
+ NFV9_FIELD_MPLS_LABEL_5 = 74
+ NFV9_FIELD_MPLS_LABEL_6 = 75
+ NFV9_FIELD_MPLS_LABEL_7 = 76
+ NFV9_FIELD_MPLS_LABEL_8 = 77
+ NFV9_FIELD_MPLS_LABEL_9 = 78
+ NFV9_FIELD_MPLS_LABEL_10 = 79
+ NFV9_FIELD_IN_DST_MAC = 80
+ NFV9_FIELD_OUT_SRC_MAC = 81
+ NFV9_FIELD_IF_NAME = 82
+ NFV9_FIELD_IF_DESC = 83
+ NFV9_FIELD_SAMPLER_NAME = 84
+ NFV9_FIELD_IN_PERMANENT_BYTES = 85
+ NFV9_FIELD_IN_PERMANENT_PKTS = 86
+ NFV9_FIELD_FRAGMENT_OFFSET = 88
+ NFV9_FIELD_FORWARDING_STATUS = 89
+ NFV9_FIELD_MPLS_PAL_RD = 90
+ NFV9_FIELD_MPLS_PREFIX_LEN = 91
+ NFV9_FIELD_SRC_TRAFFIC_INDEX = 92
+ NFV9_FIELD_DST_TRAFFIC_INDEX = 93
+ NFV9_FIELD_APPLICATION_DESCRIPTION = 94
+ NFV9_FIELD_APPLICATION_TAG = 95
+ NFV9_FIELD_APPLICATION_NAME = 96
+ NFV9_FIELD_postipDiffServCodePoint = 98
+ NFV9_FIELD_replication_factor = 99
+ NFV9_FIELD_layer2packetSectionOffset = 102
+ NFV9_FIELD_layer2packetSectionSize = 103
+ NFV9_FIELD_layer2packetSectionData = 104
+)
+
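+// NFv9Packet is a decoded NetFlow v9 export packet: the packet header followed
+// by its FlowSets.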
+type NFv9Packet struct {
+ Version uint16
+ Count uint16
+ SystemUptime uint32
+ UnixSeconds uint32
+ SequenceNumber uint32
+ SourceId uint32
+ FlowSets []interface{}
+}
+
+type NFv9OptionsTemplateFlowSet struct {
+ FlowSetHeader
+ Records []NFv9OptionsTemplateRecord
+}
+
+type NFv9OptionsTemplateRecord struct {
+ TemplateId uint16
+ ScopeLength uint16
+ OptionLength uint16
+ Scopes []Field
+ Options []Field
+}
+
+func NFv9TypeToString(typeId uint16) string {
+
+ nameList := map[uint16]string{
+ 1: "IN_BYTES",
+ 2: "IN_PKTS",
+ 3: "FLOWS",
+ 4: "PROTOCOL",
+ 5: "SRC_TOS",
+ 6: "TCP_FLAGS",
+ 7: "L4_SRC_PORT",
+ 8: "IPV4_SRC_ADDR",
+ 9: "SRC_MASK",
+ 10: "INPUT_SNMP",
+ 11: "L4_DST_PORT",
+ 12: "IPV4_DST_ADDR",
+ 13: "DST_MASK",
+ 14: "OUTPUT_SNMP",
+ 15: "IPV4_NEXT_HOP",
+ 16: "SRC_AS",
+ 17: "DST_AS",
+ 18: "BGP_IPV4_NEXT_HOP",
+ 19: "MUL_DST_PKTS",
+ 20: "MUL_DST_BYTES",
+ 21: "LAST_SWITCHED",
+ 22: "FIRST_SWITCHED",
+ 23: "OUT_BYTES",
+ 24: "OUT_PKTS",
+ 25: "MIN_PKT_LNGTH",
+ 26: "MAX_PKT_LNGTH",
+ 27: "IPV6_SRC_ADDR",
+ 28: "IPV6_DST_ADDR",
+ 29: "IPV6_SRC_MASK",
+ 30: "IPV6_DST_MASK",
+ 31: "IPV6_FLOW_LABEL",
+ 32: "ICMP_TYPE",
+ 33: "MUL_IGMP_TYPE",
+ 34: "SAMPLING_INTERVAL",
+ 35: "SAMPLING_ALGORITHM",
+ 36: "FLOW_ACTIVE_TIMEOUT",
+ 37: "FLOW_INACTIVE_TIMEOUT",
+ 38: "ENGINE_TYPE",
+ 39: "ENGINE_ID",
+ 40: "TOTAL_BYTES_EXP",
+ 41: "TOTAL_PKTS_EXP",
+ 42: "TOTAL_FLOWS_EXP",
+ 43: "*Vendor Proprietary*",
+ 44: "IPV4_SRC_PREFIX",
+ 45: "IPV4_DST_PREFIX",
+ 46: "MPLS_TOP_LABEL_TYPE",
+ 47: "MPLS_TOP_LABEL_IP_ADDR",
+ 48: "FLOW_SAMPLER_ID",
+ 49: "FLOW_SAMPLER_MODE",
+ 50: "FLOW_SAMPLER_RANDOM_INTERVAL",
+ 51: "*Vendor Proprietary*",
+ 52: "MIN_TTL",
+ 53: "MAX_TTL",
+ 54: "IPV4_IDENT",
+ 55: "DST_TOS",
+ 56: "IN_SRC_MAC",
+ 57: "OUT_DST_MAC",
+ 58: "SRC_VLAN",
+ 59: "DST_VLAN",
+ 60: "IP_PROTOCOL_VERSION",
+ 61: "DIRECTION",
+ 62: "IPV6_NEXT_HOP",
+ 63: "BPG_IPV6_NEXT_HOP",
+ 64: "IPV6_OPTION_HEADERS",
+ 65: "*Vendor Proprietary*",
+ 66: "*Vendor Proprietary*",
+ 67: "*Vendor Proprietary*",
+ 68: "*Vendor Proprietary*",
+ 69: "*Vendor Proprietary*",
+ 70: "MPLS_LABEL_1",
+ 71: "MPLS_LABEL_2",
+ 72: "MPLS_LABEL_3",
+ 73: "MPLS_LABEL_4",
+ 74: "MPLS_LABEL_5",
+ 75: "MPLS_LABEL_6",
+ 76: "MPLS_LABEL_7",
+ 77: "MPLS_LABEL_8",
+ 78: "MPLS_LABEL_9",
+ 79: "MPLS_LABEL_10",
+ 80: "IN_DST_MAC",
+ 81: "OUT_SRC_MAC",
+ 82: "IF_NAME",
+ 83: "IF_DESC",
+ 84: "SAMPLER_NAME",
+ 85: "IN_ PERMANENT _BYTES",
+ 86: "IN_ PERMANENT _PKTS",
+ 87: "*Vendor Proprietary*",
+ 88: "FRAGMENT_OFFSET",
+ 89: "FORWARDING STATUS",
+ 90: "MPLS PAL RD",
+ 91: "MPLS PREFIX LEN",
+ 92: "SRC TRAFFIC INDEX",
+ 93: "DST TRAFFIC INDEX",
+ 94: "APPLICATION DESCRIPTION",
+ 95: "APPLICATION TAG",
+ 96: "APPLICATION NAME",
+ 98: "postipDiffServCodePoint",
+ 99: "replication factor",
+ 100: "DEPRECATED",
+ 102: "layer2packetSectionOffset",
+ 103: "layer2packetSectionSize",
+ 104: "layer2packetSectionData",
+ 234: "ingressVRFID",
+ 235: "egressVRFID",
+ }
+
+ if name, ok := nameList[typeId]; ok {
+ return name
+ }
+ return "Unassigned"
+}
+
+func NFv9ScopeToString(scopeId uint16) string {
+ nameList := map[uint16]string{
+ 1: "System",
+ 2: "Interface",
+ 3: "Line Card",
+ 4: "NetFlow Cache",
+ 5: "Template",
+ }
+
+ if scopeId >= 1 && scopeId <= 5 {
+ return nameList[scopeId]
+ } else {
+ return "Unassigned"
+ }
+}
+
+func (flowSet NFv9OptionsTemplateFlowSet) String(TypeToString func(uint16) string) string {
+ str := fmt.Sprintf(" Id %v\n", flowSet.Id)
+ str += fmt.Sprintf(" Length: %v\n", flowSet.Length)
+ str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records))
+
+ for j, record := range flowSet.Records {
+ str += fmt.Sprintf(" - Record %v:\n", j)
+ str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId)
+ str += fmt.Sprintf(" ScopeLength: %v\n", record.ScopeLength)
+ str += fmt.Sprintf(" OptionLength: %v\n", record.OptionLength)
+ str += fmt.Sprintf(" Scopes (%v):\n", len(record.Scopes))
+
+ for k, field := range record.Scopes {
+ str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, NFv9ScopeToString(field.Type), field.Type, field.Length)
+ }
+
+ str += fmt.Sprintf(" Options (%v):\n", len(record.Options))
+
+ for k, field := range record.Options {
+ str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length)
+ }
+ }
+
+ return str
+}
+
+func (p NFv9Packet) String() string {
+ str := "Flow Packet\n"
+ str += "------------\n"
+ str += fmt.Sprintf(" Version: %v\n", p.Version)
+ str += fmt.Sprintf(" Count: %v\n", p.Count)
+
+ unixSeconds := time.Unix(int64(p.UnixSeconds), 0)
+ str += fmt.Sprintf(" SystemUptime: %v\n", p.SystemUptime)
+ str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.UTC().String())
+ str += fmt.Sprintf(" SequenceNumber: %v\n", p.SequenceNumber)
+ str += fmt.Sprintf(" SourceId: %v\n", p.SourceId)
+ str += fmt.Sprintf(" FlowSets (%v):\n", len(p.FlowSets))
+
+ for i, flowSet := range p.FlowSets {
+ switch flowSet := flowSet.(type) {
+ case TemplateFlowSet:
+ str += fmt.Sprintf(" - TemplateFlowSet %v:\n", i)
+ str += flowSet.String(NFv9TypeToString)
+ case NFv9OptionsTemplateFlowSet:
+ str += fmt.Sprintf(" - OptionsTemplateFlowSet %v:\n", i)
+ str += flowSet.String(NFv9TypeToString)
+ case DataFlowSet:
+ str += fmt.Sprintf(" - DataFlowSet %v:\n", i)
+ str += flowSet.String(NFv9TypeToString)
+ case OptionsDataFlowSet:
+ str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i)
+ str += flowSet.String(NFv9TypeToString, NFv9ScopeToString)
+ default:
+ str += fmt.Sprintf(" - (unknown type) %v: %v\n", i, flowSet)
+ }
+ }
+ return str
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/packet.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/packet.go
new file mode 100644
index 000000000..3e3707d64
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/packet.go
@@ -0,0 +1,158 @@
+package netflow
+
+import (
+ "fmt"
+)
+
+// FlowSetHeader contains fields shared by all Flow Sets (DataFlowSet,
+// TemplateFlowSet, OptionsTemplateFlowSet).
+type FlowSetHeader struct {
+ // FlowSet ID:
+ // 0 for TemplateFlowSet
+ // 1 for OptionsTemplateFlowSet
+ // 256-65535 for DataFlowSet (used as TemplateId)
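+ // (in IPFIX, Set IDs 2 and 3 identify the Template and Options Template Sets)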
+ Id uint16
+
+ // The total length of this FlowSet in bytes (including padding).
+ Length uint16
+}
+
+// TemplateFlowSet is a collection of templates that describe the structure of
+// Data Records (actual NetFlow data).
+type TemplateFlowSet struct {
+ FlowSetHeader
+
+ // List of Template Records
+ Records []TemplateRecord
+}
+
+// DataFlowSet is a collection of Data Records (actual NetFlow data) and Options
+// Data Records (metadata).
+type DataFlowSet struct {
+ FlowSetHeader
+
+ Records []DataRecord
+}
+
+type OptionsDataFlowSet struct {
+ FlowSetHeader
+
+ Records []OptionsDataRecord
+}
+
+// TemplateRecord is a single template that describes the structure of a Flow
+// Record (actual NetFlow data).
+type TemplateRecord struct {
+ // Each of the newly generated Template Records is given a unique
+ // Template ID. This uniqueness is local to the Observation Domain that
+ // generated the Template ID. Template IDs of Data FlowSets are numbered
+ // from 256 to 65535.
+ TemplateId uint16
+
+ // Number of fields in this Template Record. Because a Template FlowSet
+ // usually contains multiple Template Records, this field allows the
+ // Collector to determine the end of the current Template Record and
+ // the start of the next.
+ FieldCount uint16
+
+ // List of fields in this Template Record.
+ Fields []Field
+}
+
+type DataRecord struct {
+ Values []DataField
+}
+
+// OptionsDataRecord is metadata sent alongside actual NetFlow data. Combined
+// with an OptionsTemplateRecord it can be decoded into a single data row.
+type OptionsDataRecord struct {
+ // List of Scope values stored in raw format as []byte
+ ScopesValues []DataField
+
+ // List of Options values stored in raw format as []byte
+ OptionsValues []DataField
+}
+
+// Field describes the type and length of a single value in a Flow Data Record.
+// Field does not contain the record value itself; it is just a description of
+// what the record value will look like.
+type Field struct {
+ PenProvided bool
+ // A numeric value that represents the type of field.
+ Type uint16
+
+ // The length (in bytes) of the field.
+ Length uint16
+
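+ // Pen is the IANA Private Enterprise Number of an enterprise-specific field;
+ // it is only set when PenProvided is true.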
+ Pen uint32
+}
+
+type DataField struct {
+ PenProvided bool
+ // A numeric value that represents the type of field.
+ Type uint16
+ Pen uint32
+
+ // The value (in bytes) of the field.
+ Value interface{}
+ //Value []byte
+}
+
+func (flowSet OptionsDataFlowSet) String(TypeToString func(uint16) string, ScopeToString func(uint16) string) string {
+ str := fmt.Sprintf(" Id %v\n", flowSet.Id)
+ str += fmt.Sprintf(" Length: %v\n", flowSet.Length)
+ str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records))
+
+ for j, record := range flowSet.Records {
+ str += fmt.Sprintf(" - Record %v:\n", j)
+ str += fmt.Sprintf(" Scopes (%v):\n", len(record.ScopesValues))
+
+ for k, value := range record.ScopesValues {
+ str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, ScopeToString(value.Type), value.Type, value.Value)
+ }
+
+ str += fmt.Sprintf(" Options (%v):\n", len(record.OptionsValues))
+
+ for k, value := range record.OptionsValues {
+ str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value)
+ }
+ }
+
+ return str
+}
+
+func (flowSet DataFlowSet) String(TypeToString func(uint16) string) string {
+ str := fmt.Sprintf(" Id %v\n", flowSet.Id)
+ str += fmt.Sprintf(" Length: %v\n", flowSet.Length)
+ str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records))
+
+ for j, record := range flowSet.Records {
+ str += fmt.Sprintf(" - Record %v:\n", j)
+ str += fmt.Sprintf(" Values (%v):\n", len(record.Values))
+
+ for k, value := range record.Values {
+ str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value)
+ }
+ }
+
+ return str
+}
+
+func (flowSet TemplateFlowSet) String(TypeToString func(uint16) string) string {
+ str := fmt.Sprintf(" Id %v\n", flowSet.Id)
+ str += fmt.Sprintf(" Length: %v\n", flowSet.Length)
+ str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records))
+
+ for j, record := range flowSet.Records {
+ str += fmt.Sprintf(" - %v. Record:\n", j)
+ str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId)
+ str += fmt.Sprintf(" FieldCount: %v\n", record.FieldCount)
+ str += fmt.Sprintf(" Fields (%v):\n", len(record.Fields))
+
+ for k, field := range record.Fields {
+ str += fmt.Sprintf(" - %v. %v (%v/%v): %v\n", k, TypeToString(field.Type), field.Type, field.PenProvided, field.Length)
+ }
+ }
+
+ return str
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/memory/memory.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/memory/memory.go
new file mode 100644
index 000000000..0f16b241e
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/memory/memory.go
@@ -0,0 +1,73 @@
+package memory
+
+import (
+ "context"
+ "github.com/netsampler/goflow2/decoders/netflow/templates"
+ "sync"
+)
+
+var (
+ Driver = &MemoryDriver{}
+)
+
+type templateData struct {
+ key *templates.TemplateKey
+ data interface{}
+}
+
+type MemoryDriver struct {
+ lock *sync.RWMutex
+ templates map[string]templateData
+}
+
+func (d *MemoryDriver) Prepare() error {
+ // could have an expiry
+ return nil
+}
+
+func (d *MemoryDriver) Init(context.Context) error {
+ d.lock = &sync.RWMutex{}
+ d.templates = make(map[string]templateData)
+ return nil
+}
+
+func (d *MemoryDriver) Close(context.Context) error {
+ return nil
+}
+
+func (d *MemoryDriver) ListTemplates(ctx context.Context, ch chan *templates.TemplateKey) error {
+ d.lock.RLock()
+ defer d.lock.RUnlock()
+ for _, v := range d.templates {
+ select {
+ case ch <- v.key:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
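+ // a trailing nil key is sent to mark the end of the listing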
+ select {
+ case ch <- nil:
+ }
+ return nil
+}
+
+func (d *MemoryDriver) AddTemplate(ctx context.Context, key *templates.TemplateKey, template interface{}) error {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ d.templates[key.String()] = templateData{
+ key: key,
+ data: template,
+ }
+ return nil
+}
+
+func (d *MemoryDriver) GetTemplate(ctx context.Context, key *templates.TemplateKey) (interface{}, error) {
+ d.lock.RLock()
+ defer d.lock.RUnlock()
+ return d.templates[key.String()].data, nil
+}
+
+func init() {
+ templates.RegisterTemplateDriver("memory", Driver)
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/templates.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/templates.go
new file mode 100644
index 000000000..525e6b10e
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/templates.go
@@ -0,0 +1,139 @@
+package templates
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+var (
+ templateDrivers = make(map[string]TemplateDriver) // might be better to change into "factory"
+ lock = &sync.RWMutex{}
+)
+
+type TemplateDriver interface {
+ TemplateInterface
+
+ Prepare() error // Prepare driver (eg: flag registration)
+ Init(context.Context) error // Initialize driver (eg: parse keying)
+ Close(context.Context) error // Close driver (eg: close file)
+}
+
+type TemplateKey struct {
+ TemplateKey string
+ Version uint16
+ ObsDomainId uint32
+ TemplateId uint16
+}
+
+func NewTemplateKey(templateKey string, version uint16, obsDomainId uint32, templateId uint16) *TemplateKey {
+ return &TemplateKey{
+ TemplateKey: templateKey,
+ Version: version,
+ ObsDomainId: obsDomainId,
+ TemplateId: templateId,
+ }
+}
+
+func (k *TemplateKey) String() string {
+ return fmt.Sprintf("%s-%d-%d-%d", k.TemplateKey, k.Version, k.ObsDomainId, k.TemplateId)
+}
+
+func ParseTemplateKey(key string, k *TemplateKey) error {
+ if k == nil {
+ return nil
+ }
+ var version uint16
+ var obsDomainId uint32
+ var templateId uint16
+
+ keySplit := strings.Split(key, "-")
+ if len(keySplit) != 4 {
+ return fmt.Errorf("template key format is invalid")
+ }
+ templateKey := keySplit[0]
+ if val, err := strconv.ParseUint(keySplit[1], 10, 64); err != nil {
+ return fmt.Errorf("template key version is invalid")
+ } else {
+ version = uint16(val)
+ }
+ if val, err := strconv.ParseUint(keySplit[2], 10, 64); err != nil {
+ return fmt.Errorf("template key observation domain ID is invalid")
+ } else {
+ obsDomainId = uint32(val)
+ }
+ if val, err := strconv.ParseUint(keySplit[3], 10, 64); err != nil {
+ return fmt.Errorf("template key template ID is invalid")
+ } else {
+ templateId = uint16(val)
+ }
+
+ k.TemplateKey = templateKey
+ k.Version = version
+ k.ObsDomainId = obsDomainId
+ k.TemplateId = templateId
+
+ return nil
+}
+
+type TemplateInterface interface {
+ ListTemplates(ctx context.Context, ch chan *TemplateKey) error
+ GetTemplate(ctx context.Context, key *TemplateKey) (interface{}, error)
+ AddTemplate(ctx context.Context, key *TemplateKey, template interface{}) error // add expiration
+}
+
+type TemplateSystem struct {
+ driver TemplateDriver
+}
+
+func (t *TemplateSystem) ListTemplates(ctx context.Context, ch chan *TemplateKey) error {
+ return t.driver.ListTemplates(ctx, ch)
+}
+
+func (t *TemplateSystem) AddTemplate(ctx context.Context, key *TemplateKey, template interface{}) error {
+ return t.driver.AddTemplate(ctx, key, template)
+}
+
+func (t *TemplateSystem) GetTemplate(ctx context.Context, key *TemplateKey) (interface{}, error) {
+ return t.driver.GetTemplate(ctx, key)
+}
+
+func (t *TemplateSystem) Close(ctx context.Context) error {
+ return t.driver.Close(ctx)
+}
+
+func RegisterTemplateDriver(name string, t TemplateDriver) {
+ lock.Lock()
+ templateDrivers[name] = t
+ lock.Unlock()
+
+ if err := t.Prepare(); err != nil {
+ panic(err)
+ }
+}
+
+func FindTemplateSystem(ctx context.Context, name string) (*TemplateSystem, error) {
+ lock.RLock()
+ t, ok := templateDrivers[name]
+ lock.RUnlock()
+ if !ok {
+ return nil, fmt.Errorf("Template %s not found", name)
+ }
+
+ err := t.Init(ctx)
+ return &TemplateSystem{t}, err
+}
+
+func GetTemplates() []string {
+ lock.RLock()
+ defer lock.RUnlock()
+ t := make([]string, len(templateDrivers))
+ var i int
+ for k := range templateDrivers {
+ t[i] = k
+ i++
+ }
+ return t
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/netflow.go b/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/netflow.go
new file mode 100644
index 000000000..955f82cef
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/netflow.go
@@ -0,0 +1,90 @@
+package netflowlegacy
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/netsampler/goflow2/decoders/utils"
+)
+
+const (
+ MAX_COUNT = 1536
+)
+
+type ErrorVersion struct {
+ version uint16
+}
+
+func NewErrorVersion(version uint16) *ErrorVersion {
+ return &ErrorVersion{
+ version: version,
+ }
+}
+
+func (e *ErrorVersion) Error() string {
+ return fmt.Sprintf("Unknown NetFlow version %v (only decodes v5)", e.version)
+}
+
+func DecodeMessage(payload *bytes.Buffer) (interface{}, error) {
+ var version uint16
+ err := utils.BinaryDecoder(payload, &version)
+ if err != nil {
+ return nil, err
+ }
+ packet := PacketNetFlowV5{}
+ if version == 5 {
+ packet.Version = version
+
+ utils.BinaryDecoder(payload,
+ &(packet.Count),
+ &(packet.SysUptime),
+ &(packet.UnixSecs),
+ &(packet.UnixNSecs),
+ &(packet.FlowSequence),
+ &(packet.EngineType),
+ &(packet.EngineId),
+ &(packet.SamplingInterval),
+ )
+
+ packet.SamplingInterval = packet.SamplingInterval & 0x3FFF
+
+ if packet.Count > MAX_COUNT {
+ return nil, fmt.Errorf("Too many samples (%d > %d) in packet", packet.Count, MAX_COUNT)
+ }
+
+ packet.Records = make([]RecordsNetFlowV5, int(packet.Count))
+ for i := 0; i < int(packet.Count) && payload.Len() >= 48; i++ {
+ record := RecordsNetFlowV5{}
+ err := utils.BinaryDecoder(payload,
+ &record.SrcAddr,
+ &record.DstAddr,
+ &record.NextHop,
+ &record.Input,
+ &record.Output,
+ &record.DPkts,
+ &record.DOctets,
+ &record.First,
+ &record.Last,
+ &record.SrcPort,
+ &record.DstPort,
+ &record.Pad1,
+ &record.TCPFlags,
+ &record.Proto,
+ &record.Tos,
+ &record.SrcAS,
+ &record.DstAS,
+ &record.SrcMask,
+ &record.DstMask,
+ &record.Pad2,
+ )
+ if err != nil {
+ return packet, err
+ }
+ packet.Records[i] = record
+ }
+
+ return packet, nil
+ } else {
+ return nil, NewErrorVersion(version)
+ }
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/packet.go b/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/packet.go
new file mode 100644
index 000000000..078bba4df
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/packet.go
@@ -0,0 +1,96 @@
+package netflowlegacy
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+ "time"
+)
+
+type PacketNetFlowV5 struct {
+ Version uint16
+ Count uint16
+ SysUptime uint32
+ UnixSecs uint32
+ UnixNSecs uint32
+ FlowSequence uint32
+ EngineType uint8
+ EngineId uint8
+ SamplingInterval uint16
+ Records []RecordsNetFlowV5
+}
+
+type RecordsNetFlowV5 struct {
+ SrcAddr uint32
+ DstAddr uint32
+ NextHop uint32
+ Input uint16
+ Output uint16
+ DPkts uint32
+ DOctets uint32
+ First uint32
+ Last uint32
+ SrcPort uint16
+ DstPort uint16
+ Pad1 byte
+ TCPFlags uint8
+ Proto uint8
+ Tos uint8
+ SrcAS uint16
+ DstAS uint16
+ SrcMask uint8
+ DstMask uint8
+ Pad2 uint16
+}
+
+func (p PacketNetFlowV5) String() string {
+ str := "NetFlow v5 Packet\n"
+ str += "-----------------\n"
+ str += fmt.Sprintf(" Version: %v\n", p.Version)
+ str += fmt.Sprintf(" Count: %v\n", p.Count)
+
+ unixSeconds := time.Unix(int64(p.UnixSecs), int64(p.UnixNSecs))
+ str += fmt.Sprintf(" SystemUptime: %v\n", time.Duration(p.SysUptime)*time.Millisecond)
+ str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.String())
+ str += fmt.Sprintf(" FlowSequence: %v\n", p.FlowSequence)
+ str += fmt.Sprintf(" EngineType: %v\n", p.EngineType)
+ str += fmt.Sprintf(" EngineId: %v\n", p.EngineId)
+ str += fmt.Sprintf(" SamplingInterval: %v\n", p.SamplingInterval)
+ str += fmt.Sprintf(" Records (%v):\n", len(p.Records))
+
+ for i, record := range p.Records {
+ str += fmt.Sprintf(" Record %v:\n", i)
+ str += record.String()
+ }
+ return str
+}
+
+func (r RecordsNetFlowV5) String() string {
+ srcaddr := make(net.IP, 4)
+ binary.BigEndian.PutUint32(srcaddr, r.SrcAddr)
+ dstaddr := make(net.IP, 4)
+ binary.BigEndian.PutUint32(dstaddr, r.DstAddr)
+ nexthop := make(net.IP, 4)
+ binary.BigEndian.PutUint32(nexthop, r.NextHop)
+
+ str := fmt.Sprintf(" SrcAddr: %v\n", srcaddr.String())
+ str += fmt.Sprintf(" DstAddr: %v\n", dstaddr.String())
+ str += fmt.Sprintf(" NextHop: %v\n", nexthop.String())
+ str += fmt.Sprintf(" Input: %v\n", r.Input)
+ str += fmt.Sprintf(" Output: %v\n", r.Output)
+ str += fmt.Sprintf(" DPkts: %v\n", r.DPkts)
+ str += fmt.Sprintf(" DOctets: %v\n", r.DOctets)
+ str += fmt.Sprintf(" First: %v\n", time.Duration(r.First)*time.Millisecond)
+ str += fmt.Sprintf(" Last: %v\n", time.Duration(r.Last)*time.Millisecond)
+ str += fmt.Sprintf(" SrcPort: %v\n", r.SrcPort)
+ str += fmt.Sprintf(" DstPort: %v\n", r.DstPort)
+ str += fmt.Sprintf(" TCPFlags: %v\n", r.TCPFlags)
+ str += fmt.Sprintf(" Proto: %v\n", r.Proto)
+ str += fmt.Sprintf(" Tos: %v\n", r.Tos)
+ str += fmt.Sprintf(" SrcAS: %v\n", r.SrcAS)
+ str += fmt.Sprintf(" DstAS: %v\n", r.DstAS)
+ str += fmt.Sprintf(" SrcMask: %v\n", r.SrcMask)
+ str += fmt.Sprintf(" DstMask: %v\n", r.DstMask)
+
+ return str
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/sflow/datastructure.go b/vendor/github.com/netsampler/goflow2/decoders/sflow/datastructure.go
new file mode 100644
index 000000000..670652a2c
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/sflow/datastructure.go
@@ -0,0 +1,103 @@
+package sflow
+
+type SampledHeader struct {
+ Protocol uint32
+ FrameLength uint32
+ Stripped uint32
+ OriginalLength uint32
+ HeaderData []byte
+}
+
+type SampledEthernet struct {
+ Length uint32
+ SrcMac []byte
+ DstMac []byte
+ EthType uint32
+}
+
+type SampledIP_Base struct {
+ Length uint32
+ Protocol uint32
+ SrcIP []byte
+ DstIP []byte
+ SrcPort uint32
+ DstPort uint32
+ TcpFlags uint32
+}
+
+type SampledIPv4 struct {
+ Base SampledIP_Base
+ Tos uint32
+}
+
+type SampledIPv6 struct {
+ Base SampledIP_Base
+ Priority uint32
+}
+
+type ExtendedSwitch struct {
+ SrcVlan uint32
+ SrcPriority uint32
+ DstVlan uint32
+ DstPriority uint32
+}
+
+type ExtendedRouter struct {
+ NextHopIPVersion uint32
+ NextHop []byte
+ SrcMaskLen uint32
+ DstMaskLen uint32
+}
+
+type ExtendedGateway struct {
+ NextHopIPVersion uint32
+ NextHop []byte
+ AS uint32
+ SrcAS uint32
+ SrcPeerAS uint32
+ ASDestinations uint32
+ ASPathType uint32
+ ASPathLength uint32
+ ASPath []uint32
+ CommunitiesLength uint32
+ Communities []uint32
+ LocalPref uint32
+}
+
+type IfCounters struct {
+ IfIndex uint32
+ IfType uint32
+ IfSpeed uint64
+ IfDirection uint32
+ IfStatus uint32
+ IfInOctets uint64
+ IfInUcastPkts uint32
+ IfInMulticastPkts uint32
+ IfInBroadcastPkts uint32
+ IfInDiscards uint32
+ IfInErrors uint32
+ IfInUnknownProtos uint32
+ IfOutOctets uint64
+ IfOutUcastPkts uint32
+ IfOutMulticastPkts uint32
+ IfOutBroadcastPkts uint32
+ IfOutDiscards uint32
+ IfOutErrors uint32
+ IfPromiscuousMode uint32
+}
+
+type EthernetCounters struct {
+ Dot3StatsAlignmentErrors uint32
+ Dot3StatsFCSErrors uint32
+ Dot3StatsSingleCollisionFrames uint32
+ Dot3StatsMultipleCollisionFrames uint32
+ Dot3StatsSQETestErrors uint32
+ Dot3StatsDeferredTransmissions uint32
+ Dot3StatsLateCollisions uint32
+ Dot3StatsExcessiveCollisions uint32
+ Dot3StatsInternalMacTransmitErrors uint32
+ Dot3StatsCarrierSenseErrors uint32
+ Dot3StatsFrameTooLongs uint32
+ Dot3StatsInternalMacReceiveErrors uint32
+ Dot3StatsSymbolErrors uint32
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/sflow/packet.go b/vendor/github.com/netsampler/goflow2/decoders/sflow/packet.go
new file mode 100644
index 000000000..647f83db3
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/sflow/packet.go
@@ -0,0 +1,73 @@
+package sflow
+
+type Packet struct {
+ Version uint32
+ IPVersion uint32
+ AgentIP []byte
+ SubAgentId uint32
+ SequenceNumber uint32
+ Uptime uint32
+ SamplesCount uint32
+ Samples []interface{}
+}
+
+type SampleHeader struct {
+ Format uint32
+ Length uint32
+
+ SampleSequenceNumber uint32
+ SourceIdType uint32
+ SourceIdValue uint32
+}
+
+type FlowSample struct {
+ Header SampleHeader
+
+ SamplingRate uint32
+ SamplePool uint32
+ Drops uint32
+ Input uint32
+ Output uint32
+ FlowRecordsCount uint32
+ Records []FlowRecord
+}
+
+type CounterSample struct {
+ Header SampleHeader
+
+ CounterRecordsCount uint32
+ Records []CounterRecord
+}
+
+type ExpandedFlowSample struct {
+ Header SampleHeader
+
+ SamplingRate uint32
+ SamplePool uint32
+ Drops uint32
+ InputIfFormat uint32
+ InputIfValue uint32
+ OutputIfFormat uint32
+ OutputIfValue uint32
+ FlowRecordsCount uint32
+ Records []FlowRecord
+}
+
+type RecordHeader struct {
+ DataFormat uint32
+ Length uint32
+}
+
+type FlowRecord struct {
+ Header RecordHeader
+ Data interface{}
+}
+
+type FlowRecordRaw struct {
+ Data []byte
+}
+
+type CounterRecord struct {
+ Header RecordHeader
+ Data interface{}
+}
diff --git a/vendor/github.com/netsampler/goflow2/decoders/sflow/sflow.go b/vendor/github.com/netsampler/goflow2/decoders/sflow/sflow.go
new file mode 100644
index 000000000..309e965aa
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/sflow/sflow.go
@@ -0,0 +1,482 @@
+package sflow
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "github.com/netsampler/goflow2/decoders/utils"
+)
+
+const (
+ FORMAT_EXT_SWITCH = 1001
+ FORMAT_EXT_ROUTER = 1002
+ FORMAT_EXT_GATEWAY = 1003
+ FORMAT_RAW_PKT = 1
+ FORMAT_ETH = 2
+ FORMAT_IPV4 = 3
+ FORMAT_IPV6 = 4
+
+ MAX_SAMPLES = 512
+ MAX_RECORDS = 8192
+ MAX_ATTRS = 16383
+)
+
+type ErrorDecodingSFlow struct {
+ msg string
+}
+
+func NewErrorDecodingSFlow(msg string) *ErrorDecodingSFlow {
+ return &ErrorDecodingSFlow{
+ msg: msg,
+ }
+}
+
+func (e *ErrorDecodingSFlow) Error() string {
+ return fmt.Sprintf("Error decoding sFlow: %v", e.msg)
+}
+
+type ErrorDataFormat struct {
+ dataformat uint32
+}
+
+func NewErrorDataFormat(dataformat uint32) *ErrorDataFormat {
+ return &ErrorDataFormat{
+ dataformat: dataformat,
+ }
+}
+
+func (e *ErrorDataFormat) Error() string {
+ return fmt.Sprintf("Unknown data format %v", e.dataformat)
+}
+
+type ErrorIPVersion struct {
+ version uint32
+}
+
+func NewErrorIPVersion(version uint32) *ErrorIPVersion {
+ return &ErrorIPVersion{
+ version: version,
+ }
+}
+
+func (e *ErrorIPVersion) Error() string {
+ return fmt.Sprintf("Unknown IP version: %v", e.version)
+}
+
+type ErrorVersion struct {
+ version uint32
+}
+
+func NewErrorVersion(version uint32) *ErrorVersion {
+ return &ErrorVersion{
+ version: version,
+ }
+}
+
+func (e *ErrorVersion) Error() string {
+ return fmt.Sprintf("Unknown sFlow version %v (supported v5)", e.version)
+}
+
+func DecodeCounterRecord(header *RecordHeader, payload *bytes.Buffer) (CounterRecord, error) {
+ counterRecord := CounterRecord{
+ Header: *header,
+ }
+ switch (*header).DataFormat {
+ case 1:
+ ifCounters := IfCounters{}
+ err := utils.BinaryDecoder(payload,
+ &ifCounters.IfIndex,
+ &ifCounters.IfType,
+ &ifCounters.IfSpeed,
+ &ifCounters.IfDirection,
+ &ifCounters.IfStatus,
+ &ifCounters.IfInOctets,
+ &ifCounters.IfInUcastPkts,
+ &ifCounters.IfInMulticastPkts,
+ &ifCounters.IfInBroadcastPkts,
+ &ifCounters.IfInDiscards,
+ &ifCounters.IfInErrors,
+ &ifCounters.IfInUnknownProtos,
+ &ifCounters.IfOutOctets,
+ &ifCounters.IfOutUcastPkts,
+ &ifCounters.IfOutMulticastPkts,
+ &ifCounters.IfOutBroadcastPkts,
+ &ifCounters.IfOutDiscards,
+ &ifCounters.IfOutErrors,
+ &ifCounters.IfPromiscuousMode,
+ )
+ if err != nil {
+ return counterRecord, err
+ }
+ counterRecord.Data = ifCounters
+ case 2:
+ ethernetCounters := EthernetCounters{}
+ err := utils.BinaryDecoder(payload,
+ ðernetCounters.Dot3StatsAlignmentErrors,
+ ðernetCounters.Dot3StatsFCSErrors,
+ ðernetCounters.Dot3StatsSingleCollisionFrames,
+ ðernetCounters.Dot3StatsMultipleCollisionFrames,
+ ðernetCounters.Dot3StatsSQETestErrors,
+ ðernetCounters.Dot3StatsDeferredTransmissions,
+ ðernetCounters.Dot3StatsLateCollisions,
+ ðernetCounters.Dot3StatsExcessiveCollisions,
+ ðernetCounters.Dot3StatsInternalMacTransmitErrors,
+ ðernetCounters.Dot3StatsCarrierSenseErrors,
+ ðernetCounters.Dot3StatsFrameTooLongs,
+ ðernetCounters.Dot3StatsInternalMacReceiveErrors,
+ ðernetCounters.Dot3StatsSymbolErrors,
+ )
+ if err != nil {
+ return counterRecord, err
+ }
+ counterRecord.Data = ethernetCounters
+ default:
+ counterRecord.Data = &FlowRecordRaw{
+ Data: payload.Next(int(header.Length)),
+ }
+ }
+
+ return counterRecord, nil
+}
+
+func DecodeIP(payload *bytes.Buffer) (uint32, []byte, error) {
+ var ipVersion uint32
+ err := utils.BinaryDecoder(payload, &ipVersion)
+ if err != nil {
+ return 0, nil, err
+ }
+ var ip []byte
+ if ipVersion == 1 {
+ ip = make([]byte, 4)
+ } else if ipVersion == 2 {
+ ip = make([]byte, 16)
+ } else {
+ return ipVersion, ip, NewErrorIPVersion(ipVersion)
+ }
+ if payload.Len() >= len(ip) {
+ err := utils.BinaryDecoder(payload, ip)
+ if err != nil {
+ return 0, nil, err
+ }
+ } else {
+ return ipVersion, ip, NewErrorDecodingSFlow(fmt.Sprintf("Not enough data: %v, needs %v.", payload.Len(), len(ip)))
+ }
+ return ipVersion, ip, nil
+}
+
+func DecodeFlowRecord(header *RecordHeader, payload *bytes.Buffer) (FlowRecord, error) {
+ flowRecord := FlowRecord{
+ Header: *header,
+ }
+ switch (*header).DataFormat {
+ case FORMAT_EXT_SWITCH:
+ extendedSwitch := ExtendedSwitch{}
+ err := utils.BinaryDecoder(payload, &extendedSwitch.SrcVlan, &extendedSwitch.SrcPriority, &extendedSwitch.DstVlan, &extendedSwitch.DstPriority)
+ if err != nil {
+ return flowRecord, err
+ }
+ flowRecord.Data = extendedSwitch
+ case FORMAT_RAW_PKT:
+ sampledHeader := SampledHeader{}
+ err := utils.BinaryDecoder(payload, &sampledHeader.Protocol, &sampledHeader.FrameLength, &sampledHeader.Stripped, &sampledHeader.OriginalLength)
+ if err != nil {
+ return flowRecord, err
+ }
+ sampledHeader.HeaderData = payload.Bytes()
+ flowRecord.Data = sampledHeader
+ case FORMAT_IPV4:
+ sampledIPBase := SampledIP_Base{
+ SrcIP: make([]byte, 4),
+ DstIP: make([]byte, 4),
+ }
+ err := utils.BinaryDecoder(payload, &sampledIPBase.Length, &sampledIPBase.Protocol, sampledIPBase.SrcIP, sampledIPBase.DstIP, &sampledIPBase.SrcPort, &sampledIPBase.DstPort, &sampledIPBase.TcpFlags)
+ if err != nil {
+ return flowRecord, err
+ }
+ sampledIPv4 := SampledIPv4{
+ Base: sampledIPBase,
+ }
+ err = utils.BinaryDecoder(payload, &(sampledIPv4.Tos))
+ if err != nil {
+ return flowRecord, err
+ }
+ flowRecord.Data = sampledIPv4
+ case FORMAT_IPV6:
+ sampledIPBase := SampledIP_Base{
+ SrcIP: make([]byte, 16),
+ DstIP: make([]byte, 16),
+ }
+ err := utils.BinaryDecoder(payload, &sampledIPBase.Length, &sampledIPBase.Protocol, sampledIPBase.SrcIP, sampledIPBase.DstIP, &sampledIPBase.SrcPort, &sampledIPBase.DstPort, &sampledIPBase.TcpFlags)
+ if err != nil {
+ return flowRecord, err
+ }
+ sampledIPv6 := SampledIPv6{
+ Base: sampledIPBase,
+ }
+ err = utils.BinaryDecoder(payload, &sampledIPv6.Priority)
+ if err != nil {
+ return flowRecord, err
+ }
+ flowRecord.Data = sampledIPv6
+ case FORMAT_EXT_ROUTER:
+ extendedRouter := ExtendedRouter{}
+
+ ipVersion, ip, err := DecodeIP(payload)
+ if err != nil {
+ return flowRecord, err
+ }
+ extendedRouter.NextHopIPVersion = ipVersion
+ extendedRouter.NextHop = ip
+ err = utils.BinaryDecoder(payload, &extendedRouter.SrcMaskLen, &extendedRouter.DstMaskLen)
+ if err != nil {
+ return flowRecord, err
+ }
+ flowRecord.Data = extendedRouter
+ case FORMAT_EXT_GATEWAY:
+ extendedGateway := ExtendedGateway{}
+ ipVersion, ip, err := DecodeIP(payload)
+ if err != nil {
+ return flowRecord, err
+ }
+ extendedGateway.NextHopIPVersion = ipVersion
+ extendedGateway.NextHop = ip
+ err = utils.BinaryDecoder(payload, &extendedGateway.AS, &extendedGateway.SrcAS, &extendedGateway.SrcPeerAS,
+ &extendedGateway.ASDestinations)
+ if err != nil {
+ return flowRecord, err
+ }
+ var asPath []uint32
+ if extendedGateway.ASDestinations != 0 {
+ err := utils.BinaryDecoder(payload, &extendedGateway.ASPathType, &extendedGateway.ASPathLength)
+ if err != nil {
+ return flowRecord, err
+ }
+ if int(extendedGateway.ASPathLength) > payload.Len()-4 {
+ return flowRecord, errors.New(fmt.Sprintf("Invalid AS path length: %v.", extendedGateway.ASPathLength))
+ }
+
+ if extendedGateway.ASPathLength > MAX_ATTRS {
+ return flowRecord, fmt.Errorf("AS path too large (%d > %d) in record", extendedGateway.ASPathLength, MAX_ATTRS)
+ }
+
+ asPath = make([]uint32, extendedGateway.ASPathLength)
+ if len(asPath) > 0 {
+ err = utils.BinaryDecoder(payload, asPath)
+ if err != nil {
+ return flowRecord, err
+ }
+ }
+ }
+ extendedGateway.ASPath = asPath
+
+ err = utils.BinaryDecoder(payload, &extendedGateway.CommunitiesLength)
+ if err != nil {
+ return flowRecord, err
+ }
+ if extendedGateway.CommunitiesLength > MAX_ATTRS {
+ return flowRecord, fmt.Errorf("Communities list too large (%d > %d) in record", extendedGateway.CommunitiesLength, MAX_ATTRS)
+ }
+
+ if int(extendedGateway.CommunitiesLength) > payload.Len()-4 {
+ return flowRecord, errors.New(fmt.Sprintf("Invalid Communities length: %v.", extendedGateway.ASPathLength))
+ }
+ communities := make([]uint32, extendedGateway.CommunitiesLength)
+ if len(communities) > 0 {
+ err = utils.BinaryDecoder(payload, communities)
+ if err != nil {
+ return flowRecord, err
+ }
+ }
+ err = utils.BinaryDecoder(payload, &extendedGateway.LocalPref)
+ if err != nil {
+ return flowRecord, err
+ }
+ extendedGateway.Communities = communities
+
+ flowRecord.Data = extendedGateway
+ default:
+ //return flowRecord, errors.New(fmt.Sprintf("Unknown data format %v.", (*header).DataFormat))
+ flowRecord.Data = &FlowRecordRaw{
+ Data: payload.Next(int(header.Length)),
+ }
+ }
+ return flowRecord, nil
+}
+
+func DecodeSample(header *SampleHeader, payload *bytes.Buffer) (interface{}, error) {
+ format := header.Format
+ var sample interface{}
+
+ err := utils.BinaryDecoder(payload, &header.SampleSequenceNumber)
+ if err != nil {
+ return sample, err
+ }
+ if format == FORMAT_RAW_PKT || format == FORMAT_ETH {
+ var sourceId uint32
+ err = utils.BinaryDecoder(payload, &sourceId)
+ if err != nil {
+ return sample, err
+ }
+
+ (*header).SourceIdType = sourceId >> 24
+ (*header).SourceIdValue = sourceId & 0x00ffffff
+ } else if format == FORMAT_IPV4 || format == FORMAT_IPV6 {
+ err = utils.BinaryDecoder(payload, &header.SourceIdType, &header.SourceIdValue)
+ if err != nil {
+ return sample, err
+ }
+ } else {
+ return nil, NewErrorDataFormat(format)
+ }
+
+ var recordsCount uint32
+ var flowSample FlowSample
+ var counterSample CounterSample
+ var expandedFlowSample ExpandedFlowSample
+ if format == FORMAT_RAW_PKT {
+ flowSample = FlowSample{
+ Header: *header,
+ }
+ err = utils.BinaryDecoder(payload, &flowSample.SamplingRate, &flowSample.SamplePool,
+ &flowSample.Drops, &flowSample.Input, &flowSample.Output, &flowSample.FlowRecordsCount)
+ if err != nil {
+ return sample, err
+ }
+ recordsCount = flowSample.FlowRecordsCount
+
+ if recordsCount > MAX_RECORDS {
+ return flowSample, fmt.Errorf("Too many records (%d > %d) in packet", recordsCount, MAX_RECORDS)
+ }
+
+ flowSample.Records = make([]FlowRecord, recordsCount)
+ sample = flowSample
+ } else if format == FORMAT_ETH || format == FORMAT_IPV6 {
+ err = utils.BinaryDecoder(payload, &recordsCount)
+ if err != nil {
+ return sample, err
+ }
+ counterSample = CounterSample{
+ Header: *header,
+ CounterRecordsCount: recordsCount,
+ }
+
+ if recordsCount > MAX_RECORDS {
+ return flowSample, fmt.Errorf("Too many records (%d > %d) in packet", recordsCount, MAX_RECORDS)
+ }
+ counterSample.Records = make([]CounterRecord, recordsCount)
+ sample = counterSample
+ } else if format == FORMAT_IPV4 {
+ expandedFlowSample = ExpandedFlowSample{
+ Header: *header,
+ }
+ err = utils.BinaryDecoder(payload, &expandedFlowSample.SamplingRate, &expandedFlowSample.SamplePool,
+ &expandedFlowSample.Drops, &expandedFlowSample.InputIfFormat, &expandedFlowSample.InputIfValue,
+ &expandedFlowSample.OutputIfFormat, &expandedFlowSample.OutputIfValue, &expandedFlowSample.FlowRecordsCount)
+ if err != nil {
+ return sample, err
+ }
+ recordsCount = expandedFlowSample.FlowRecordsCount
+
+ if recordsCount > MAX_RECORDS {
+ return flowSample, fmt.Errorf("Too many records (%d > %d) in packet", recordsCount, MAX_RECORDS)
+ }
+ expandedFlowSample.Records = make([]FlowRecord, recordsCount)
+ sample = expandedFlowSample
+ }
+ for i := 0; i < int(recordsCount) && payload.Len() >= 8; i++ {
+ recordHeader := RecordHeader{}
+ err = utils.BinaryDecoder(payload, &recordHeader.DataFormat, &recordHeader.Length)
+ if err != nil {
+ return sample, err
+ }
+ if int(recordHeader.Length) > payload.Len() {
+ break
+ }
+ recordReader := bytes.NewBuffer(payload.Next(int(recordHeader.Length)))
+ if format == FORMAT_RAW_PKT || format == FORMAT_IPV4 {
+ record, err := DecodeFlowRecord(&recordHeader, recordReader)
+ if err != nil {
+ continue
+ }
+ if format == FORMAT_RAW_PKT {
+ flowSample.Records[i] = record
+ } else if format == FORMAT_IPV4 {
+ expandedFlowSample.Records[i] = record
+ }
+ } else if format == FORMAT_ETH || format == FORMAT_IPV6 {
+ record, err := DecodeCounterRecord(&recordHeader, recordReader)
+ if err != nil {
+ continue
+ }
+ counterSample.Records[i] = record
+ }
+ }
+ return sample, nil
+}
+
+func DecodeMessage(payload *bytes.Buffer) (interface{}, error) {
+ var version uint32
+ err := utils.BinaryDecoder(payload, &version)
+ if err != nil {
+ return nil, err
+ }
+ packetV5 := Packet{}
+ if version == 5 {
+ packetV5.Version = version
+ err = utils.BinaryDecoder(payload, &(packetV5.IPVersion))
+ if err != nil {
+ return packetV5, err
+ }
+ var ip []byte
+ if packetV5.IPVersion == 1 {
+ ip = make([]byte, 4)
+ err = utils.BinaryDecoder(payload, ip)
+ if err != nil {
+ return packetV5, err
+ }
+ } else if packetV5.IPVersion == 2 {
+ ip = make([]byte, 16)
+ err = utils.BinaryDecoder(payload, ip)
+ if err != nil {
+ return packetV5, err
+ }
+ } else {
+ return nil, NewErrorIPVersion(packetV5.IPVersion)
+ }
+
+ packetV5.AgentIP = ip
+ err = utils.BinaryDecoder(payload, &packetV5.SubAgentId, &packetV5.SequenceNumber, &packetV5.Uptime, &packetV5.SamplesCount)
+ if err != nil {
+ return packetV5, err
+ }
+
+ if packetV5.SamplesCount > MAX_SAMPLES {
+ return nil, fmt.Errorf("Too many samples (%d > %d) in packet", packetV5.SamplesCount, MAX_SAMPLES)
+ }
+
+ packetV5.Samples = make([]interface{}, int(packetV5.SamplesCount))
+ for i := 0; i < int(packetV5.SamplesCount) && payload.Len() >= 8; i++ {
+ header := SampleHeader{}
+ err = utils.BinaryDecoder(payload, &(header.Format), &(header.Length))
+ if err != nil {
+ return packetV5, err
+ }
+ if int(header.Length) > payload.Len() {
+ break
+ }
+ sampleReader := bytes.NewBuffer(payload.Next(int(header.Length)))
+
+ sample, err := DecodeSample(&header, sampleReader)
+ if err != nil {
+ continue
+ } else {
+ packetV5.Samples[i] = sample
+ }
+ }
+
+ return packetV5, nil
+ } else {
+ return nil, NewErrorVersion(version)
+ }
+}
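// Minimal sketch of driving DecodeMessage above and walking the returned
// samples (assumed usage, not part of the vendored sources). The raw bytes
// here are a hypothetical stand-in carrying an unsupported version number,
// just to show the error path; real callers feed the sFlow UDP payload.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/netsampler/goflow2/decoders/sflow"
)

// countFlowSamples type-switches over Packet.Samples the same way the
// producer does, keeping only flow-type samples.
func countFlowSamples(p sflow.Packet) int {
	n := 0
	for _, s := range p.Samples {
		switch s.(type) {
		case sflow.FlowSample, sflow.ExpandedFlowSample:
			n++
		}
	}
	return n
}

func main() {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, uint32(4)) // not version 5

	msg, err := sflow.DecodeMessage(buf)
	if err != nil {
		fmt.Println("decode failed:", err) // "Unknown sFlow version 4 (supported v5)"
		return
	}
	if p, ok := msg.(sflow.Packet); ok {
		fmt.Println("flow samples:", countFlowSamples(p))
	}
}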
diff --git a/vendor/github.com/netsampler/goflow2/decoders/utils/utils.go b/vendor/github.com/netsampler/goflow2/decoders/utils/utils.go
new file mode 100644
index 000000000..9c8e597f5
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/decoders/utils/utils.go
@@ -0,0 +1,128 @@
+package utils
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "reflect"
+)
+
+type BytesBuffer interface {
+ io.Reader
+ Next(int) []byte
+}
+
+func BinaryDecoder(payload *bytes.Buffer, dests ...interface{}) error {
+ for _, dest := range dests {
+ err := BinaryRead(payload, binary.BigEndian, dest)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+func BinaryRead(payload BytesBuffer, order binary.ByteOrder, data any) error {
+ // Fast path for basic types and slices.
+ if n := intDataSize(data); n != 0 {
+ bs := payload.Next(n)
+ if len(bs) < n {
+ return io.ErrUnexpectedEOF
+ }
+ switch data := data.(type) {
+ case *bool:
+ *data = bs[0] != 0
+ case *int8:
+ *data = int8(bs[0])
+ case *uint8:
+ *data = bs[0]
+ case *int16:
+ *data = int16(order.Uint16(bs))
+ case *uint16:
+ *data = order.Uint16(bs)
+ case *int32:
+ *data = int32(order.Uint32(bs))
+ case *uint32:
+ *data = order.Uint32(bs)
+ case *int64:
+ *data = int64(order.Uint64(bs))
+ case *uint64:
+ *data = order.Uint64(bs)
+ case []bool:
+ for i, x := range bs { // Easier to loop over the input for 8-bit values.
+ data[i] = x != 0
+ }
+ case []int8:
+ for i, x := range bs {
+ data[i] = int8(x)
+ }
+ case []uint8:
+ copy(data, bs)
+ case []int16:
+ for i := range data {
+ data[i] = int16(order.Uint16(bs[2*i:]))
+ }
+ case []uint16:
+ for i := range data {
+ data[i] = order.Uint16(bs[2*i:])
+ }
+ case []int32:
+ for i := range data {
+ data[i] = int32(order.Uint32(bs[4*i:]))
+ }
+ case []uint32:
+ for i := range data {
+ data[i] = order.Uint32(bs[4*i:])
+ }
+ case []int64:
+ for i := range data {
+ data[i] = int64(order.Uint64(bs[8*i:]))
+ }
+ case []uint64:
+ for i := range data {
+ data[i] = order.Uint64(bs[8*i:])
+ }
+ default:
+ n = 0 // fast path doesn't apply
+ }
+ if n != 0 {
+ return nil
+ }
+ }
+
+ return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String())
+}
+
+// intDataSize returns the size of the data required to represent the data when encoded.
+// It returns zero if the type cannot be implemented by the fast path in Read or Write.
+func intDataSize(data any) int {
+ switch data := data.(type) {
+ case bool, int8, uint8, *bool, *int8, *uint8:
+ return 1
+ case []bool:
+ return len(data)
+ case []int8:
+ return len(data)
+ case []uint8:
+ return len(data)
+ case int16, uint16, *int16, *uint16:
+ return 2
+ case []int16:
+ return 2 * len(data)
+ case []uint16:
+ return 2 * len(data)
+ case int32, uint32, *int32, *uint32:
+ return 4
+ case []int32:
+ return 4 * len(data)
+ case []uint32:
+ return 4 * len(data)
+ case int64, uint64, *int64, *uint64:
+ return 8
+ case []int64:
+ return 8 * len(data)
+ case []uint64:
+ return 8 * len(data)
+ }
+ return 0
+}
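// Small sketch of the fast-path decoder above (assumed usage): BinaryDecoder
// pulls big-endian values out of a bytes.Buffer into the provided pointers,
// returning io.ErrUnexpectedEOF when the buffer runs short.
package main

import (
	"bytes"
	"fmt"

	"github.com/netsampler/goflow2/decoders/utils"
)

func main() {
	// Hypothetical payload: a uint16 followed by a uint32, both big-endian.
	payload := bytes.NewBuffer([]byte{0x00, 0x05, 0x00, 0x00, 0x00, 0x2A})

	var version uint16
	var count uint32
	if err := utils.BinaryDecoder(payload, &version, &count); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(version, count) // 5 42
}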
diff --git a/vendor/github.com/netsampler/goflow2/format/common/hash.go b/vendor/github.com/netsampler/goflow2/format/common/hash.go
new file mode 100644
index 000000000..1d9018603
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/format/common/hash.go
@@ -0,0 +1,56 @@
+package common
+
+import (
+ "flag"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+var (
+ fieldsVar string
+ fields []string // Hashing fields
+
+ hashDeclared bool
+ hashDeclaredLock = &sync.Mutex{}
+)
+
+func HashFlag() {
+ hashDeclaredLock.Lock()
+ defer hashDeclaredLock.Unlock()
+
+ if hashDeclared {
+ return
+ }
+ hashDeclared = true
+ flag.StringVar(&fieldsVar, "format.hash", "SamplerAddress", "List of fields to do hashing, separated by commas")
+
+}
+
+func ManualHashInit() error {
+ fields = strings.Split(fieldsVar, ",")
+ return nil
+}
+
+func HashProtoLocal(msg interface{}) string {
+ return HashProto(fields, msg)
+}
+
+func HashProto(fields []string, msg interface{}) string {
+ var keyStr string
+
+ if msg != nil {
+ vfm := reflect.ValueOf(msg)
+ vfm = reflect.Indirect(vfm)
+
+ for _, kf := range fields {
+ fieldValue := vfm.FieldByName(kf)
+ if fieldValue.IsValid() {
+ keyStr += fmt.Sprintf("%v-", fieldValue)
+ }
+ }
+ }
+
+ return keyStr
+}
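// Brief sketch of HashProto above (assumed usage): it reflects over the given
// struct and concatenates the named fields into a key string, e.g. for
// partitioning output. The fakeFlow struct is a hypothetical stand-in for the
// generated FlowMessage.
package main

import (
	"fmt"

	"github.com/netsampler/goflow2/format/common"
)

type fakeFlow struct {
	SamplerAddress []byte
	SrcAs          uint32
}

func main() {
	msg := fakeFlow{SamplerAddress: []byte{10, 0, 0, 1}, SrcAs: 65001}
	key := common.HashProto([]string{"SamplerAddress", "SrcAs"}, msg)
	fmt.Println(key) // "[10 0 0 1]-65001-"
}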
diff --git a/vendor/github.com/netsampler/goflow2/format/common/selector.go b/vendor/github.com/netsampler/goflow2/format/common/selector.go
new file mode 100644
index 000000000..531c716fe
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/format/common/selector.go
@@ -0,0 +1,36 @@
+package common
+
+import (
+ "flag"
+ "strings"
+ "sync"
+)
+
+var (
+ selectorVar string
+ selector []string // Hashing fields
+ selectorTag string // Hashing fields
+
+ selectorDeclared bool
+ selectorDeclaredLock = &sync.Mutex{}
+)
+
+func SelectorFlag() {
+ selectorDeclaredLock.Lock()
+ defer selectorDeclaredLock.Unlock()
+
+ if selectorDeclared {
+ return
+ }
+ selectorDeclared = true
+ flag.StringVar(&selectorVar, "format.selector", "", "List of fields to do keep in output")
+ flag.StringVar(&selectorTag, "format.tag", "", "Use format tag")
+}
+
+func ManualSelectorInit() error {
+ if selectorVar == "" {
+ return nil
+ }
+ selector = strings.Split(selectorVar, ",")
+ return nil
+}
diff --git a/vendor/github.com/netsampler/goflow2/format/common/text.go b/vendor/github.com/netsampler/goflow2/format/common/text.go
new file mode 100644
index 000000000..6e009c583
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/format/common/text.go
@@ -0,0 +1,247 @@
+package common
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+ "reflect"
+ "strings"
+)
+
+const (
+ FORMAT_TYPE_UNKNOWN = iota
+ FORMAT_TYPE_STRING_FUNC
+ FORMAT_TYPE_STRING
+ FORMAT_TYPE_INTEGER
+ FORMAT_TYPE_IP
+ FORMAT_TYPE_MAC
+ FORMAT_TYPE_BYTES
+)
+
+var (
+ EtypeName = map[uint32]string{
+ 0x806: "ARP",
+ 0x800: "IPv4",
+ 0x86dd: "IPv6",
+ }
+ ProtoName = map[uint32]string{
+ 1: "ICMP",
+ 6: "TCP",
+ 17: "UDP",
+ 58: "ICMPv6",
+ 132: "SCTP",
+ }
+ IcmpTypeName = map[uint32]string{
+ 0: "EchoReply",
+ 3: "DestinationUnreachable",
+ 8: "Echo",
+ 9: "RouterAdvertisement",
+ 10: "RouterSolicitation",
+ 11: "TimeExceeded",
+ }
+ Icmp6TypeName = map[uint32]string{
+ 1: "DestinationUnreachable",
+ 2: "PacketTooBig",
+ 3: "TimeExceeded",
+ 128: "EchoRequest",
+ 129: "EchoReply",
+ 133: "RouterSolicitation",
+ 134: "RouterAdvertisement",
+ }
+
+ TextFields = map[string]int{
+ "Type": FORMAT_TYPE_STRING_FUNC,
+ "SamplerAddress": FORMAT_TYPE_IP,
+ "SrcAddr": FORMAT_TYPE_IP,
+ "DstAddr": FORMAT_TYPE_IP,
+ "SrcMac": FORMAT_TYPE_MAC,
+ "DstMac": FORMAT_TYPE_MAC,
+ "NextHop": FORMAT_TYPE_IP,
+ "MPLSLabelIP": FORMAT_TYPE_IP,
+ }
+
+ RenderExtras = map[string]RenderExtraFunction{
+ "EtypeName": RenderExtraFunctionEtypeName,
+ "ProtoName": RenderExtraFunctionProtoName,
+ "IcmpName": RenderExtraFunctionIcmpName,
+ }
+)
+
+/*
+func AddTextField(name string, jtype int) {
+ TextFields = append(TextFields, name)
+ TextFieldsTypes = append(TextFieldsTypes, jtype)
+}*/
+
+type RenderExtraFunction func(interface{}) string
+
+func RenderExtraFetchNumbers(msg interface{}, fields []string) []uint64 {
+ vfm := reflect.ValueOf(msg)
+ vfm = reflect.Indirect(vfm)
+
+ values := make([]uint64, len(fields))
+ for i, kf := range fields {
+ fieldValue := vfm.FieldByName(kf)
+ if fieldValue.IsValid() {
+ values[i] = fieldValue.Uint()
+ }
+ }
+
+ return values
+}
+
+func RenderExtraFunctionEtypeName(msg interface{}) string {
+ num := RenderExtraFetchNumbers(msg, []string{"Etype"})
+ return EtypeName[uint32(num[0])]
+}
+
+func RenderExtraFunctionProtoName(msg interface{}) string {
+ num := RenderExtraFetchNumbers(msg, []string{"Proto"})
+ return ProtoName[uint32(num[0])]
+}
+func RenderExtraFunctionIcmpName(msg interface{}) string {
+ num := RenderExtraFetchNumbers(msg, []string{"Proto", "IcmpCode", "IcmpType"})
+ return IcmpCodeType(uint32(num[0]), uint32(num[1]), uint32(num[2]))
+}
+
+func IcmpCodeType(proto, icmpCode, icmpType uint32) string {
+ if proto == 1 {
+ return IcmpTypeName[icmpType]
+ } else if proto == 58 {
+ return Icmp6TypeName[icmpType]
+ }
+ return ""
+}
+
+func RenderIP(addr []byte) string {
+ if addr == nil || (len(addr) != 4 && len(addr) != 16) {
+ return ""
+ }
+
+ return net.IP(addr).String()
+}
+
+func FormatMessageReflectText(msg interface{}, ext string) string {
+ return FormatMessageReflectCustom(msg, ext, "", " ", "=", false)
+}
+
+func FormatMessageReflectJSON(msg interface{}, ext string) string {
+ return fmt.Sprintf("{%s}", FormatMessageReflectCustom(msg, ext, "\"", ",", ":", true))
+}
+
+func ExtractTag(name, original string, tag reflect.StructTag) string {
+ lookup, ok := tag.Lookup(name)
+ if !ok {
+ return original
+ }
+ before, _, _ := strings.Cut(lookup, ",")
+ return before
+}
+
+func FormatMessageReflectCustom(msg interface{}, ext, quotes, sep, sign string, null bool) string {
+ customSelector := selector
+ reMap := make(map[string]string)
+
+ vfm := reflect.ValueOf(msg)
+ vfm = reflect.Indirect(vfm)
+ vft := vfm.Type()
+
+ if len(customSelector) == 0 || selectorTag != "" {
+ /*
+ // we would need proto v2
+ msgR := msg.ProtoReflect()
+ customSelector = make([]string, msgR.Fields().Len())
+ for i := 0; i flowpb.FlowMessage.FlowType
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_pb_flow_proto_init() }
+func file_pb_flow_proto_init() {
+ if File_pb_flow_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_flow_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FlowMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_flow_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_flow_proto_goTypes,
+ DependencyIndexes: file_pb_flow_proto_depIdxs,
+ EnumInfos: file_pb_flow_proto_enumTypes,
+ MessageInfos: file_pb_flow_proto_msgTypes,
+ }.Build()
+ File_pb_flow_proto = out.File
+ file_pb_flow_proto_rawDesc = nil
+ file_pb_flow_proto_goTypes = nil
+ file_pb_flow_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/netsampler/goflow2/pb/flow.proto b/vendor/github.com/netsampler/goflow2/pb/flow.proto
new file mode 100644
index 000000000..1cbaae8d5
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/pb/flow.proto
@@ -0,0 +1,131 @@
+syntax = "proto3";
+package flowpb;
+option go_package = "github.com/netsampler/goflow2/pb;flowpb";
+
+message FlowMessage {
+
+ enum FlowType {
+ FLOWUNKNOWN = 0;
+ SFLOW_5 = 1;
+ NETFLOW_V5 = 2;
+ NETFLOW_V9 = 3;
+ IPFIX = 4;
+ }
+ FlowType type = 1;
+
+ uint64 time_received = 2;
+ uint32 sequence_num = 4;
+ uint64 sampling_rate = 3;
+
+ uint32 flow_direction = 42;
+
+ // Sampler information
+ bytes sampler_address = 11;
+
+ // Found inside packet
+ uint64 time_flow_start = 38;
+ uint64 time_flow_end = 5;
+ uint64 time_flow_start_ms = 63;
+ uint64 time_flow_end_ms = 64;
+
+ // Size of the sampled packet
+ uint64 bytes = 9;
+ uint64 packets = 10;
+
+ // Source/destination addresses
+ bytes src_addr = 6;
+ bytes dst_addr = 7;
+
+ // Layer 3 protocol (IPv4/IPv6/ARP/MPLS...)
+ uint32 etype = 30;
+
+ // Layer 4 protocol
+ uint32 proto = 20;
+
+ // Ports for UDP and TCP
+ uint32 src_port = 21;
+ uint32 dst_port = 22;
+
+ // Interfaces
+ uint32 in_if = 18;
+ uint32 out_if = 19;
+
+ // Ethernet information
+ uint64 src_mac = 27;
+ uint64 dst_mac = 28;
+
+ // Vlan
+ uint32 src_vlan = 33;
+ uint32 dst_vlan = 34;
+ // 802.1q VLAN in sampled packet
+ uint32 vlan_id = 29;
+
+ // VRF
+ uint32 ingress_vrf_id = 39;
+ uint32 egress_vrf_id = 40;
+
+ // IP and TCP special flags
+ uint32 ip_tos = 23;
+ uint32 forwarding_status = 24;
+ uint32 ip_ttl = 25;
+ uint32 tcp_flags = 26;
+ uint32 icmp_type = 31;
+ uint32 icmp_code = 32;
+ uint32 ipv6_flow_label = 37;
+ // Fragments (IPv4/IPv6)
+ uint32 fragment_id = 35;
+ uint32 fragment_offset = 36;
+ uint32 bi_flow_direction = 41;
+
+ // Autonomous system information
+ uint32 src_as = 14;
+ uint32 dst_as = 15;
+
+ bytes next_hop = 12;
+ uint32 next_hop_as = 13;
+
+ // Prefix size
+ uint32 src_net = 16;
+ uint32 dst_net = 17;
+
+ // BGP information
+ bytes bgp_next_hop = 100;
+ repeated uint32 bgp_communities = 101;
+ repeated uint32 as_path = 102;
+
+ // MPLS information
+ bool has_mpls = 53;
+ uint32 mpls_count = 54;
+ uint32 mpls_1_ttl = 55; // First TTL
+ uint32 mpls_1_label = 56; // First Label
+ uint32 mpls_2_ttl = 57; // Second TTL
+ uint32 mpls_2_label = 58; // Second Label
+ uint32 mpls_3_ttl = 59; // Third TTL
+ uint32 mpls_3_label = 60; // Third Label
+ uint32 mpls_last_ttl = 61; // Last TTL
+ uint32 mpls_last_label = 62; // Last Label
+ bytes mpls_label_ip = 65; // MPLS TOP Label IP
+
+ uint32 observation_domain_id = 70;
+ uint32 observation_point_id = 71;
+
+ // Custom fields: start after ID 1000:
+ // uint32 my_custom_field = 1000;
+
+
+ // Custom allocations
+ uint64 custom_integer_1 = 1001;
+ uint64 custom_integer_2 = 1002;
+ uint64 custom_integer_3 = 1003;
+ uint64 custom_integer_4 = 1004;
+ uint64 custom_integer_5 = 1005;
+
+ bytes custom_bytes_1 = 1011;
+ bytes custom_bytes_2 = 1012;
+ bytes custom_bytes_3 = 1013;
+ bytes custom_bytes_4 = 1014;
+ bytes custom_bytes_5 = 1015;
+
+ repeated uint32 custom_list_1 = 1021;
+
+}
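// Minimal sketch (assumed usage) of populating the Go message generated from
// the proto schema above, using the same field names the producer code below
// relies on; the values are hypothetical.
package main

import (
	"fmt"

	flowmessage "github.com/netsampler/goflow2/pb"
)

func main() {
	msg := &flowmessage.FlowMessage{
		Type:    flowmessage.FlowMessage_NETFLOW_V9,
		SrcAddr: []byte{10, 0, 0, 1},
		DstAddr: []byte{10, 0, 0, 2},
		Proto:   6, // TCP
		SrcPort: 443,
		DstPort: 51234,
		Bytes:   1500,
		Packets: 1,
	}
	fmt.Printf("type=%d proto=%d bytes=%d\n", msg.Type, msg.Proto, msg.Bytes) // type=3 proto=6 bytes=1500
}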
diff --git a/vendor/github.com/netsampler/goflow2/producer/producer_nf.go b/vendor/github.com/netsampler/goflow2/producer/producer_nf.go
new file mode 100644
index 000000000..d14936553
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/producer/producer_nf.go
@@ -0,0 +1,683 @@
+package producer
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/netsampler/goflow2/decoders/netflow"
+ "github.com/netsampler/goflow2/decoders/utils"
+ flowmessage "github.com/netsampler/goflow2/pb"
+)
+
+type SamplingRateSystem interface {
+ GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error)
+ AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32)
+}
+
+type basicSamplingRateSystem struct {
+ sampling map[uint16]map[uint32]uint32
+ samplinglock *sync.RWMutex
+}
+
+func CreateSamplingSystem() SamplingRateSystem {
+ ts := &basicSamplingRateSystem{
+ sampling: make(map[uint16]map[uint32]uint32),
+ samplinglock: &sync.RWMutex{},
+ }
+ return ts
+}
+
+func (s *basicSamplingRateSystem) AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) {
+ s.samplinglock.Lock()
+ defer s.samplinglock.Unlock()
+ _, exists := s.sampling[version]
+ if !exists {
+ s.sampling[version] = make(map[uint32]uint32)
+ }
+ s.sampling[version][obsDomainId] = samplingRate
+}
+
+func (s *basicSamplingRateSystem) GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error) {
+ s.samplinglock.RLock()
+ defer s.samplinglock.RUnlock()
+ samplingVersion, okver := s.sampling[version]
+ if okver {
+ samplingRate, okid := samplingVersion[obsDomainId]
+ if okid {
+ return samplingRate, nil
+ }
+ return 0, errors.New("") // TBC
+ }
+ return 0, errors.New("") // TBC
+}
+
+type SingleSamplingRateSystem struct {
+ Sampling uint32
+}
+
+func (s *SingleSamplingRateSystem) AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) {
+}
+
+func (s *SingleSamplingRateSystem) GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error) {
+ return s.Sampling, nil
+}
+
+func NetFlowLookFor(dataFields []netflow.DataField, typeId uint16) (bool, interface{}) {
+ for _, dataField := range dataFields {
+ if dataField.Type == typeId {
+ return true, dataField.Value
+ }
+ }
+ return false, nil
+}
+
+func NetFlowPopulate(dataFields []netflow.DataField, typeId uint16, addr interface{}) bool {
+ exists, value := NetFlowLookFor(dataFields, typeId)
+ if exists && value != nil {
+ valueBytes, ok := value.([]byte)
+ valueReader := bytes.NewBuffer(valueBytes)
+ if ok {
+ switch addrt := addr.(type) {
+ case *(net.IP):
+ *addrt = valueBytes
+ case *(time.Time):
+ t := uint64(0)
+ utils.BinaryRead(valueReader, binary.BigEndian, &t)
+ t64 := int64(t / 1000)
+ *addrt = time.Unix(t64, 0)
+ default:
+ utils.BinaryRead(valueReader, binary.BigEndian, addr)
+ }
+ }
+ }
+ return exists
+}
+
+func WriteUDecoded(o uint64, out interface{}) error {
+ switch t := out.(type) {
+ case *byte:
+ *t = byte(o)
+ case *uint16:
+ *t = uint16(o)
+ case *uint32:
+ *t = uint32(o)
+ case *uint64:
+ *t = o
+ default:
+ return errors.New("The parameter is not a pointer to a byte/uint16/uint32/uint64 structure")
+ }
+ return nil
+}
+
+func WriteDecoded(o int64, out interface{}) error {
+ switch t := out.(type) {
+ case *int8:
+ *t = int8(o)
+ case *int16:
+ *t = int16(o)
+ case *int32:
+ *t = int32(o)
+ case *int64:
+ *t = o
+ default:
+ return errors.New("The parameter is not a pointer to a int8/int16/int32/int64 structure")
+ }
+ return nil
+}
+
+func DecodeUNumber(b []byte, out interface{}) error {
+ var o uint64
+ l := len(b)
+ switch l {
+ case 1:
+ o = uint64(b[0])
+ case 2:
+ o = uint64(binary.BigEndian.Uint16(b))
+ case 4:
+ o = uint64(binary.BigEndian.Uint32(b))
+ case 8:
+ o = binary.BigEndian.Uint64(b)
+ default:
+ if l < 8 {
+ var iter uint
+ for i := range b {
+ o |= uint64(b[i]) << uint(8*(uint(l)-iter-1))
+ iter++
+ }
+ } else {
+ return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l))
+ }
+ }
+ return WriteUDecoded(o, out)
+}
+
+func DecodeUNumberLE(b []byte, out interface{}) error {
+ var o uint64
+ l := len(b)
+ switch l {
+ case 1:
+ o = uint64(b[0])
+ case 2:
+ o = uint64(binary.LittleEndian.Uint16(b))
+ case 4:
+ o = uint64(binary.LittleEndian.Uint32(b))
+ case 8:
+ o = binary.LittleEndian.Uint64(b)
+ default:
+ if l < 8 {
+ var iter uint
+ for i := range b {
+ o |= uint64(b[i]) << uint(8*(iter))
+ iter++
+ }
+ } else {
+ return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l))
+ }
+ }
+ return WriteUDecoded(o, out)
+}
+
+func DecodeNumber(b []byte, out interface{}) error {
+ var o int64
+ l := len(b)
+ switch l {
+ case 1:
+ o = int64(int8(b[0]))
+ case 2:
+ o = int64(int16(binary.BigEndian.Uint16(b)))
+ case 4:
+ o = int64(int32(binary.BigEndian.Uint32(b)))
+ case 8:
+ o = int64(binary.BigEndian.Uint64(b))
+ default:
+ if l < 8 {
+ var iter int
+ for i := range b {
+ o |= int64(b[i]) << int(8*(int(l)-iter-1))
+ iter++
+ }
+ } else {
+ return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l))
+ }
+ }
+ return WriteDecoded(o, out)
+}
+
+func DecodeNumberLE(b []byte, out interface{}) error {
+ var o int64
+ l := len(b)
+ switch l {
+ case 1:
+ o = int64(int8(b[0]))
+ case 2:
+ o = int64(int16(binary.LittleEndian.Uint16(b)))
+ case 4:
+ o = int64(int32(binary.LittleEndian.Uint32(b)))
+ case 8:
+ o = int64(binary.LittleEndian.Uint64(b))
+ default:
+ if l < 8 {
+ var iter int
+ for i := range b {
+ o |= int64(b[i]) << int(8*(iter))
+ iter++
+ }
+ } else {
+ return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l))
+ }
+ }
+ return WriteDecoded(o, out)
+}
+
+func allZeroes(v []byte) bool {
+ for _, b := range v {
+ if b != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func addrReplaceCheck(dstAddr *[]byte, v []byte, eType *uint32, ipv6 bool) {
+ if (len(*dstAddr) == 0 && len(v) > 0) ||
+ (len(*dstAddr) != 0 && len(v) > 0 && !allZeroes(v)) {
+ *dstAddr = v
+
+ if ipv6 {
+ *eType = 0x86dd
+ } else {
+ *eType = 0x800
+ }
+
+ }
+}
+
+func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, record []netflow.DataField, mapperNetFlow *NetFlowMapper, mapperSFlow *SFlowMapper) *flowmessage.FlowMessage {
+ flowMessage := &flowmessage.FlowMessage{}
+ var time uint64
+
+ if version == 9 {
+ flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V9
+ } else if version == 10 {
+ flowMessage.Type = flowmessage.FlowMessage_IPFIX
+ }
+
+ for i := range record {
+ df := record[i]
+
+ v, ok := df.Value.([]byte)
+ if !ok {
+ continue
+ }
+
+ MapCustomNetFlow(flowMessage, df, mapperNetFlow)
+
+ if df.PenProvided {
+ continue
+ }
+
+ switch df.Type {
+
+ case netflow.IPFIX_FIELD_observationPointId:
+ DecodeUNumber(v, &(flowMessage.ObservationPointId))
+
+ // Statistics
+ case netflow.NFV9_FIELD_IN_BYTES:
+ DecodeUNumber(v, &(flowMessage.Bytes))
+ case netflow.NFV9_FIELD_IN_PKTS:
+ DecodeUNumber(v, &(flowMessage.Packets))
+ case netflow.NFV9_FIELD_OUT_BYTES:
+ DecodeUNumber(v, &(flowMessage.Bytes))
+ case netflow.NFV9_FIELD_OUT_PKTS:
+ DecodeUNumber(v, &(flowMessage.Packets))
+
+ // L4
+ case netflow.NFV9_FIELD_L4_SRC_PORT:
+ DecodeUNumber(v, &(flowMessage.SrcPort))
+ case netflow.NFV9_FIELD_L4_DST_PORT:
+ DecodeUNumber(v, &(flowMessage.DstPort))
+ case netflow.NFV9_FIELD_PROTOCOL:
+ DecodeUNumber(v, &(flowMessage.Proto))
+
+ // Network
+ case netflow.NFV9_FIELD_SRC_AS:
+ DecodeUNumber(v, &(flowMessage.SrcAs))
+ case netflow.NFV9_FIELD_DST_AS:
+ DecodeUNumber(v, &(flowMessage.DstAs))
+
+ // Interfaces
+ case netflow.NFV9_FIELD_INPUT_SNMP:
+ DecodeUNumber(v, &(flowMessage.InIf))
+ case netflow.NFV9_FIELD_OUTPUT_SNMP:
+ DecodeUNumber(v, &(flowMessage.OutIf))
+
+ case netflow.NFV9_FIELD_FORWARDING_STATUS:
+ DecodeUNumber(v, &(flowMessage.ForwardingStatus))
+ case netflow.NFV9_FIELD_SRC_TOS:
+ DecodeUNumber(v, &(flowMessage.IpTos))
+ case netflow.NFV9_FIELD_TCP_FLAGS:
+ DecodeUNumber(v, &(flowMessage.TcpFlags))
+ case netflow.NFV9_FIELD_MIN_TTL:
+ DecodeUNumber(v, &(flowMessage.IpTtl))
+
+ // IP
+ case netflow.NFV9_FIELD_IP_PROTOCOL_VERSION:
+ if len(v) > 0 {
+ if v[0] == 4 {
+ flowMessage.Etype = 0x800
+ } else if v[0] == 6 {
+ flowMessage.Etype = 0x86dd
+ }
+ }
+
+ case netflow.NFV9_FIELD_IPV4_SRC_ADDR:
+ addrReplaceCheck(&(flowMessage.SrcAddr), v, &(flowMessage.Etype), false)
+
+ case netflow.NFV9_FIELD_IPV4_DST_ADDR:
+ addrReplaceCheck(&(flowMessage.DstAddr), v, &(flowMessage.Etype), false)
+
+ case netflow.NFV9_FIELD_SRC_MASK:
+ DecodeUNumber(v, &(flowMessage.SrcNet))
+ case netflow.NFV9_FIELD_DST_MASK:
+ DecodeUNumber(v, &(flowMessage.DstNet))
+
+ case netflow.NFV9_FIELD_IPV6_SRC_ADDR:
+ addrReplaceCheck(&(flowMessage.SrcAddr), v, &(flowMessage.Etype), true)
+
+ case netflow.NFV9_FIELD_IPV6_DST_ADDR:
+ addrReplaceCheck(&(flowMessage.DstAddr), v, &(flowMessage.Etype), true)
+
+ case netflow.NFV9_FIELD_IPV6_SRC_MASK:
+ DecodeUNumber(v, &(flowMessage.SrcNet))
+ case netflow.NFV9_FIELD_IPV6_DST_MASK:
+ DecodeUNumber(v, &(flowMessage.DstNet))
+
+ case netflow.NFV9_FIELD_IPV4_NEXT_HOP:
+ flowMessage.NextHop = v
+ case netflow.NFV9_FIELD_BGP_IPV4_NEXT_HOP:
+ flowMessage.BgpNextHop = v
+
+ case netflow.NFV9_FIELD_IPV6_NEXT_HOP:
+ flowMessage.NextHop = v
+ case netflow.NFV9_FIELD_BGP_IPV6_NEXT_HOP:
+ flowMessage.BgpNextHop = v
+
+ // ICMP
+ case netflow.NFV9_FIELD_ICMP_TYPE:
+ var icmpTypeCode uint16
+ DecodeUNumber(v, &icmpTypeCode)
+ flowMessage.IcmpType = uint32(icmpTypeCode >> 8)
+ flowMessage.IcmpCode = uint32(icmpTypeCode & 0xff)
+ case netflow.IPFIX_FIELD_icmpTypeCodeIPv6:
+ var icmpTypeCode uint16
+ DecodeUNumber(v, &icmpTypeCode)
+ flowMessage.IcmpType = uint32(icmpTypeCode >> 8)
+ flowMessage.IcmpCode = uint32(icmpTypeCode & 0xff)
+ case netflow.IPFIX_FIELD_icmpTypeIPv4:
+ DecodeUNumber(v, &(flowMessage.IcmpType))
+ case netflow.IPFIX_FIELD_icmpTypeIPv6:
+ DecodeUNumber(v, &(flowMessage.IcmpType))
+ case netflow.IPFIX_FIELD_icmpCodeIPv4:
+ DecodeUNumber(v, &(flowMessage.IcmpCode))
+ case netflow.IPFIX_FIELD_icmpCodeIPv6:
+ DecodeUNumber(v, &(flowMessage.IcmpCode))
+
+ // Mac
+ case netflow.NFV9_FIELD_IN_SRC_MAC:
+ DecodeUNumber(v, &(flowMessage.SrcMac))
+ case netflow.NFV9_FIELD_IN_DST_MAC:
+ DecodeUNumber(v, &(flowMessage.DstMac))
+ case netflow.NFV9_FIELD_OUT_SRC_MAC:
+ DecodeUNumber(v, &(flowMessage.SrcMac))
+ case netflow.NFV9_FIELD_OUT_DST_MAC:
+ DecodeUNumber(v, &(flowMessage.DstMac))
+
+ case netflow.NFV9_FIELD_SRC_VLAN:
+ DecodeUNumber(v, &(flowMessage.VlanId))
+ DecodeUNumber(v, &(flowMessage.SrcVlan))
+ case netflow.NFV9_FIELD_DST_VLAN:
+ DecodeUNumber(v, &(flowMessage.DstVlan))
+
+ case netflow.IPFIX_FIELD_ingressVRFID:
+ DecodeUNumber(v, &(flowMessage.IngressVrfId))
+ case netflow.IPFIX_FIELD_egressVRFID:
+ DecodeUNumber(v, &(flowMessage.EgressVrfId))
+
+ case netflow.NFV9_FIELD_IPV4_IDENT:
+ DecodeUNumber(v, &(flowMessage.FragmentId))
+ case netflow.NFV9_FIELD_FRAGMENT_OFFSET:
+ var fragOffset uint32
+ DecodeUNumber(v, &fragOffset)
+ flowMessage.FragmentOffset |= fragOffset
+ case netflow.IPFIX_FIELD_fragmentFlags:
+ var ipFlags uint32
+ DecodeUNumber(v, &ipFlags)
+ flowMessage.FragmentOffset |= ipFlags
+ case netflow.NFV9_FIELD_IPV6_FLOW_LABEL:
+ DecodeUNumber(v, &(flowMessage.Ipv6FlowLabel))
+
+ case netflow.IPFIX_FIELD_biflowDirection:
+ DecodeUNumber(v, &(flowMessage.BiFlowDirection))
+
+ case netflow.NFV9_FIELD_DIRECTION:
+ DecodeUNumber(v, &(flowMessage.FlowDirection))
+
+ // MPLS
+ case netflow.IPFIX_FIELD_mplsTopLabelStackSection:
+ var mplsLabel uint32
+ DecodeUNumber(v, &mplsLabel)
+ flowMessage.Mpls_1Label = uint32(mplsLabel >> 4)
+ flowMessage.HasMpls = true
+ case netflow.IPFIX_FIELD_mplsLabelStackSection2:
+ var mplsLabel uint32
+ DecodeUNumber(v, &mplsLabel)
+ flowMessage.Mpls_2Label = uint32(mplsLabel >> 4)
+ case netflow.IPFIX_FIELD_mplsLabelStackSection3:
+ var mplsLabel uint32
+ DecodeUNumber(v, &mplsLabel)
+ flowMessage.Mpls_3Label = uint32(mplsLabel >> 4)
+ case netflow.IPFIX_FIELD_mplsTopLabelIPv4Address:
+ flowMessage.MplsLabelIp = v
+ case netflow.IPFIX_FIELD_mplsTopLabelIPv6Address:
+ flowMessage.MplsLabelIp = v
+
+ default:
+ if version == 9 {
+ // NetFlow v9 time works with a differential based on router's uptime
+ switch df.Type {
+ case netflow.NFV9_FIELD_FIRST_SWITCHED:
+ var timeFirstSwitched uint32
+ DecodeUNumber(v, &timeFirstSwitched)
+ timeDiff := (uptime - timeFirstSwitched)
+ flowMessage.TimeFlowStart = uint64(baseTime - timeDiff/1000)
+ flowMessage.TimeFlowStartMs = uint64(baseTime)*1000 - uint64(timeDiff)
+ case netflow.NFV9_FIELD_LAST_SWITCHED:
+ var timeLastSwitched uint32
+ DecodeUNumber(v, &timeLastSwitched)
+ timeDiff := (uptime - timeLastSwitched)
+ flowMessage.TimeFlowEnd = uint64(baseTime - timeDiff/1000)
+ flowMessage.TimeFlowEndMs = uint64(baseTime)*1000 - uint64(timeDiff)
+ }
+ } else if version == 10 {
+ switch df.Type {
+ case netflow.IPFIX_FIELD_flowStartSeconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowStart = time
+ flowMessage.TimeFlowStartMs = time * 1000
+ case netflow.IPFIX_FIELD_flowStartMilliseconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowStart = time / 1000
+ flowMessage.TimeFlowStartMs = time
+ case netflow.IPFIX_FIELD_flowStartMicroseconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowStart = time / 1000000
+ flowMessage.TimeFlowStartMs = time / 1000
+ case netflow.IPFIX_FIELD_flowStartNanoseconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowStart = time / 1000000000
+ flowMessage.TimeFlowStartMs = time / 1000000
+ case netflow.IPFIX_FIELD_flowEndSeconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowEnd = time
+ flowMessage.TimeFlowEndMs = time * 1000
+ case netflow.IPFIX_FIELD_flowEndMilliseconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowEnd = time / 1000
+ flowMessage.TimeFlowEndMs = time
+ case netflow.IPFIX_FIELD_flowEndMicroseconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowEnd = time / 1000000
+ flowMessage.TimeFlowEndMs = time / 1000
+ case netflow.IPFIX_FIELD_flowEndNanoseconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowEnd = time / 1000000000
+ flowMessage.TimeFlowEndMs = time / 1000000
+ case netflow.IPFIX_FIELD_flowStartDeltaMicroseconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowStart = uint64(baseTime) - time/1000000
+ flowMessage.TimeFlowStartMs = uint64(baseTime)*1000 - time/1000
+ case netflow.IPFIX_FIELD_flowEndDeltaMicroseconds:
+ DecodeUNumber(v, &time)
+ flowMessage.TimeFlowEnd = uint64(baseTime) - time/1000000
+ flowMessage.TimeFlowEndMs = uint64(baseTime)*1000 - time/1000
+ // RFC7133
+ case netflow.IPFIX_FIELD_dataLinkFrameSize:
+ DecodeUNumber(v, &(flowMessage.Bytes))
+ flowMessage.Packets = 1
+ case netflow.IPFIX_FIELD_dataLinkFrameSection:
+ ParseEthernetHeader(flowMessage, v, mapperSFlow)
+ flowMessage.Packets = 1
+ if flowMessage.Bytes == 0 {
+ flowMessage.Bytes = uint64(len(v))
+ }
+ }
+ }
+ }
+
+ }
+
+ return flowMessage
+}
+
+func SearchNetFlowDataSetsRecords(version uint16, baseTime uint32, uptime uint32, dataRecords []netflow.DataRecord, mapperNetFlow *NetFlowMapper, mapperSFlow *SFlowMapper) []*flowmessage.FlowMessage {
+ var flowMessageSet []*flowmessage.FlowMessage
+ for _, record := range dataRecords {
+ fmsg := ConvertNetFlowDataSet(version, baseTime, uptime, record.Values, mapperNetFlow, mapperSFlow)
+ if fmsg != nil {
+ flowMessageSet = append(flowMessageSet, fmsg)
+ }
+ }
+ return flowMessageSet
+}
+
+func SearchNetFlowDataSets(version uint16, baseTime uint32, uptime uint32, dataFlowSet []netflow.DataFlowSet, mapperNetFlow *NetFlowMapper, mapperSFlow *SFlowMapper) []*flowmessage.FlowMessage {
+ var flowMessageSet []*flowmessage.FlowMessage
+ for _, dataFlowSetItem := range dataFlowSet {
+ fmsg := SearchNetFlowDataSetsRecords(version, baseTime, uptime, dataFlowSetItem.Records, mapperNetFlow, mapperSFlow)
+ if fmsg != nil {
+ flowMessageSet = append(flowMessageSet, fmsg...)
+ }
+ }
+ return flowMessageSet
+}
+
+func SearchNetFlowOptionDataSets(dataFlowSet []netflow.OptionsDataFlowSet) (uint32, bool) {
+ var samplingRate uint32
+ var found bool
+ for _, dataFlowSetItem := range dataFlowSet {
+ for _, record := range dataFlowSetItem.Records {
+ b := NetFlowPopulate(record.OptionsValues, 305, &samplingRate)
+ if b {
+ return samplingRate, b
+ }
+ b = NetFlowPopulate(record.OptionsValues, 50, &samplingRate)
+ if b {
+ return samplingRate, b
+ }
+ b = NetFlowPopulate(record.OptionsValues, 34, &samplingRate)
+ if b {
+ return samplingRate, b
+ }
+ }
+ }
+ return samplingRate, found
+}
+
+func SplitNetFlowSets(packetNFv9 netflow.NFv9Packet) ([]netflow.DataFlowSet, []netflow.TemplateFlowSet, []netflow.NFv9OptionsTemplateFlowSet, []netflow.OptionsDataFlowSet) {
+ var dataFlowSet []netflow.DataFlowSet
+ var templatesFlowSet []netflow.TemplateFlowSet
+ var optionsTemplatesFlowSet []netflow.NFv9OptionsTemplateFlowSet
+ var optionsDataFlowSet []netflow.OptionsDataFlowSet
+ for _, flowSet := range packetNFv9.FlowSets {
+ switch tFlowSet := flowSet.(type) {
+ case netflow.TemplateFlowSet:
+ templatesFlowSet = append(templatesFlowSet, tFlowSet)
+ case netflow.NFv9OptionsTemplateFlowSet:
+ optionsTemplatesFlowSet = append(optionsTemplatesFlowSet, tFlowSet)
+ case netflow.DataFlowSet:
+ dataFlowSet = append(dataFlowSet, tFlowSet)
+ case netflow.OptionsDataFlowSet:
+ optionsDataFlowSet = append(optionsDataFlowSet, tFlowSet)
+ }
+ }
+ return dataFlowSet, templatesFlowSet, optionsTemplatesFlowSet, optionsDataFlowSet
+}
+
+func SplitIPFIXSets(packetIPFIX netflow.IPFIXPacket) ([]netflow.DataFlowSet, []netflow.TemplateFlowSet, []netflow.IPFIXOptionsTemplateFlowSet, []netflow.OptionsDataFlowSet) {
+ var dataFlowSet []netflow.DataFlowSet
+ var templatesFlowSet []netflow.TemplateFlowSet
+ var optionsTemplatesFlowSet []netflow.IPFIXOptionsTemplateFlowSet
+ var optionsDataFlowSet []netflow.OptionsDataFlowSet
+ for _, flowSet := range packetIPFIX.FlowSets {
+ switch tFlowSet := flowSet.(type) {
+ case netflow.TemplateFlowSet:
+ templatesFlowSet = append(templatesFlowSet, tFlowSet)
+ case netflow.IPFIXOptionsTemplateFlowSet:
+ optionsTemplatesFlowSet = append(optionsTemplatesFlowSet, tFlowSet)
+ case netflow.DataFlowSet:
+ dataFlowSet = append(dataFlowSet, tFlowSet)
+ case netflow.OptionsDataFlowSet:
+ optionsDataFlowSet = append(optionsDataFlowSet, tFlowSet)
+ }
+ }
+ return dataFlowSet, templatesFlowSet, optionsTemplatesFlowSet, optionsDataFlowSet
+}
+
+func ProcessMessageNetFlow(msgDec interface{}, samplingRateSys SamplingRateSystem) ([]*flowmessage.FlowMessage, error) {
+ return ProcessMessageNetFlowConfig(msgDec, samplingRateSys, nil)
+}
+
+// Convert a NetFlow datastructure to a FlowMessage protobuf
+// Does not put sampling rate
+func ProcessMessageNetFlowConfig(msgDec interface{}, samplingRateSys SamplingRateSystem, config *ProducerConfigMapped) ([]*flowmessage.FlowMessage, error) {
+ seqnum := uint32(0)
+ var baseTime uint32
+ var uptime uint32
+
+ var flowMessageSet []*flowmessage.FlowMessage
+
+ switch msgDecConv := msgDec.(type) {
+ case netflow.NFv9Packet:
+ dataFlowSet, _, _, optionDataFlowSet := SplitNetFlowSets(msgDecConv)
+
+ seqnum = msgDecConv.SequenceNumber
+ baseTime = msgDecConv.UnixSeconds
+ uptime = msgDecConv.SystemUptime
+ obsDomainId := msgDecConv.SourceId
+
+ var cfg *NetFlowMapper
+ if config != nil {
+ cfg = config.NetFlowV9
+ }
+ flowMessageSet = SearchNetFlowDataSets(9, baseTime, uptime, dataFlowSet, cfg, nil)
+ samplingRate, found := SearchNetFlowOptionDataSets(optionDataFlowSet)
+ if samplingRateSys != nil {
+ if found {
+ samplingRateSys.AddSamplingRate(9, obsDomainId, samplingRate)
+ } else {
+ samplingRate, _ = samplingRateSys.GetSamplingRate(9, obsDomainId)
+ }
+ }
+ for _, fmsg := range flowMessageSet {
+ fmsg.SequenceNum = seqnum
+ fmsg.SamplingRate = uint64(samplingRate)
+ }
+ case netflow.IPFIXPacket:
+ dataFlowSet, _, _, optionDataFlowSet := SplitIPFIXSets(msgDecConv)
+
+ seqnum = msgDecConv.SequenceNumber
+ baseTime = msgDecConv.ExportTime
+ obsDomainId := msgDecConv.ObservationDomainId
+
+ var cfgIpfix *NetFlowMapper
+ var cfgSflow *SFlowMapper
+ if config != nil {
+ cfgIpfix = config.IPFIX
+ cfgSflow = config.SFlow
+ }
+ flowMessageSet = SearchNetFlowDataSets(10, baseTime, uptime, dataFlowSet, cfgIpfix, cfgSflow)
+
+ samplingRate, found := SearchNetFlowOptionDataSets(optionDataFlowSet)
+ if samplingRateSys != nil {
+ if found {
+ samplingRateSys.AddSamplingRate(10, obsDomainId, samplingRate)
+ } else {
+ samplingRate, _ = samplingRateSys.GetSamplingRate(10, obsDomainId)
+ }
+ }
+ for _, fmsg := range flowMessageSet {
+ fmsg.SequenceNum = seqnum
+ fmsg.SamplingRate = uint64(samplingRate)
+ fmsg.ObservationDomainId = obsDomainId
+ }
+ default:
+ return flowMessageSet, errors.New("Bad NetFlow/IPFIX version")
+ }
+
+ return flowMessageSet, nil
+}
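// Short sketch of the variable-width helper above (assumed usage):
// DecodeUNumber accepts 1/2/4/8-byte big-endian values directly and assembles
// odd widths (here 3 bytes, as for MPLS label stack sections) byte by byte
// before writing into the target integer.
package main

import (
	"fmt"

	"github.com/netsampler/goflow2/producer"
)

func main() {
	var label uint32
	if err := producer.DecodeUNumber([]byte{0x01, 0x02, 0x03}, &label); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("0x%06X\n", label) // 0x010203
}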
diff --git a/vendor/github.com/netsampler/goflow2/producer/producer_nflegacy.go b/vendor/github.com/netsampler/goflow2/producer/producer_nflegacy.go
new file mode 100644
index 000000000..8acb3eeaa
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/producer/producer_nflegacy.go
@@ -0,0 +1,81 @@
+package producer
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+
+ "github.com/netsampler/goflow2/decoders/netflowlegacy"
+ flowmessage "github.com/netsampler/goflow2/pb"
+)
+
+func ConvertNetFlowLegacyRecord(baseTime uint32, uptime uint32, record netflowlegacy.RecordsNetFlowV5) *flowmessage.FlowMessage {
+ flowMessage := &flowmessage.FlowMessage{}
+
+ flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V5
+
+ timeDiffFirst := (uptime - record.First)
+ timeDiffLast := (uptime - record.Last)
+ flowMessage.TimeFlowStart = uint64(baseTime - timeDiffFirst/1000)
+ flowMessage.TimeFlowStartMs = uint64(baseTime)*1000 - uint64(timeDiffFirst)
+ flowMessage.TimeFlowEnd = uint64(baseTime - timeDiffLast/1000)
+ flowMessage.TimeFlowEndMs = uint64(baseTime)*1000 - uint64(timeDiffLast)
+
+ v := make(net.IP, 4)
+ binary.BigEndian.PutUint32(v, record.NextHop)
+ flowMessage.NextHop = v
+ v = make(net.IP, 4)
+ binary.BigEndian.PutUint32(v, record.SrcAddr)
+ flowMessage.SrcAddr = v
+ v = make(net.IP, 4)
+ binary.BigEndian.PutUint32(v, record.DstAddr)
+ flowMessage.DstAddr = v
+
+ flowMessage.Etype = 0x800
+ flowMessage.SrcAs = uint32(record.SrcAS)
+ flowMessage.DstAs = uint32(record.DstAS)
+ flowMessage.SrcNet = uint32(record.SrcMask)
+ flowMessage.DstNet = uint32(record.DstMask)
+ flowMessage.Proto = uint32(record.Proto)
+ flowMessage.TcpFlags = uint32(record.TCPFlags)
+ flowMessage.IpTos = uint32(record.Tos)
+ flowMessage.InIf = uint32(record.Input)
+ flowMessage.OutIf = uint32(record.Output)
+ flowMessage.SrcPort = uint32(record.SrcPort)
+ flowMessage.DstPort = uint32(record.DstPort)
+ flowMessage.Packets = uint64(record.DPkts)
+ flowMessage.Bytes = uint64(record.DOctets)
+
+ return flowMessage
+}
+
+func SearchNetFlowLegacyRecords(baseTime uint32, uptime uint32, dataRecords []netflowlegacy.RecordsNetFlowV5) []*flowmessage.FlowMessage {
+ var flowMessageSet []*flowmessage.FlowMessage
+ for _, record := range dataRecords {
+ fmsg := ConvertNetFlowLegacyRecord(baseTime, uptime, record)
+ if fmsg != nil {
+ flowMessageSet = append(flowMessageSet, fmsg)
+ }
+ }
+ return flowMessageSet
+}
+
+func ProcessMessageNetFlowLegacy(msgDec interface{}) ([]*flowmessage.FlowMessage, error) {
+ switch packet := msgDec.(type) {
+ case netflowlegacy.PacketNetFlowV5:
+ seqnum := packet.FlowSequence
+ samplingRate := packet.SamplingInterval
+ baseTime := packet.UnixSecs
+ uptime := packet.SysUptime
+
+ flowMessageSet := SearchNetFlowLegacyRecords(baseTime, uptime, packet.Records)
+ for _, fmsg := range flowMessageSet {
+ fmsg.SequenceNum = seqnum
+ fmsg.SamplingRate = uint64(samplingRate)
+ }
+
+ return flowMessageSet, nil
+ default:
+ return []*flowmessage.FlowMessage{}, errors.New("Bad NetFlow v5 version")
+ }
+}
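// Compact sketch of the NetFlow v5 timestamp math used in
// ConvertNetFlowLegacyRecord above (values are hypothetical): First/Last are
// router-uptime offsets in milliseconds, so the absolute flow start is the
// export time minus how long before the export the flow began.
package main

import "fmt"

func main() {
	var baseTime uint32 = 1_700_000_000 // hypothetical UnixSecs from the v5 header
	var uptime uint32 = 90_000          // hypothetical SysUptime at export, ms
	var first uint32 = 30_000           // hypothetical record.First, ms

	timeDiff := uptime - first
	start := uint64(baseTime - timeDiff/1000)
	startMs := uint64(baseTime)*1000 - uint64(timeDiff)

	fmt.Println(start, startMs) // 1699999940 1699999940000
}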
diff --git a/vendor/github.com/netsampler/goflow2/producer/producer_sf.go b/vendor/github.com/netsampler/goflow2/producer/producer_sf.go
new file mode 100644
index 000000000..ab949144b
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/producer/producer_sf.go
@@ -0,0 +1,349 @@
+package producer
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+
+ "github.com/netsampler/goflow2/decoders/sflow"
+ flowmessage "github.com/netsampler/goflow2/pb"
+)
+
+func GetSFlowFlowSamples(packet *sflow.Packet) []interface{} {
+ var flowSamples []interface{}
+ for _, sample := range packet.Samples {
+ switch sample.(type) {
+ case sflow.FlowSample:
+ flowSamples = append(flowSamples, sample)
+ case sflow.ExpandedFlowSample:
+ flowSamples = append(flowSamples, sample)
+ }
+ }
+ return flowSamples
+}
+
+func ParseSampledHeader(flowMessage *flowmessage.FlowMessage, sampledHeader *sflow.SampledHeader) error {
+ return ParseSampledHeaderConfig(flowMessage, sampledHeader, nil)
+}
+
+func ParseEthernetHeader(flowMessage *flowmessage.FlowMessage, data []byte, config *SFlowMapper) {
+ var hasMpls bool
+ var countMpls uint32
+ var firstLabelMpls uint32
+ var firstTtlMpls uint8
+ var secondLabelMpls uint32
+ var secondTtlMpls uint8
+ var thirdLabelMpls uint32
+ var thirdTtlMpls uint8
+ var lastLabelMpls uint32
+ var lastTtlMpls uint8
+
+ var nextHeader byte
+ var tcpflags byte
+ srcIP := net.IP{}
+ dstIP := net.IP{}
+ offset := 14
+
+ var srcMac uint64
+ var dstMac uint64
+
+ var tos byte
+ var ttl byte
+ var identification uint16
+ var fragOffset uint16
+ var flowLabel uint32
+
+ var srcPort uint16
+ var dstPort uint16
+
+ for _, configLayer := range GetSFlowConfigLayer(config, 0) {
+ extracted := GetBytes(data, configLayer.Offset, configLayer.Length)
+ MapCustom(flowMessage, extracted, configLayer.Destination, configLayer.Endian)
+ }
+
+ etherType := data[12:14]
+
+ dstMac = binary.BigEndian.Uint64(append([]byte{0, 0}, data[0:6]...))
+ srcMac = binary.BigEndian.Uint64(append([]byte{0, 0}, data[6:12]...))
+ (*flowMessage).SrcMac = srcMac
+ (*flowMessage).DstMac = dstMac
+
+ encap := true
+ iterations := 0
+ for encap && iterations <= 1 {
+ encap = false
+
+ if etherType[0] == 0x81 && etherType[1] == 0x0 { // VLAN 802.1Q
+ (*flowMessage).VlanId = uint32(binary.BigEndian.Uint16(data[14:16]))
+ offset += 4
+ etherType = data[16:18]
+ }
+
+ if etherType[0] == 0x88 && etherType[1] == 0x47 { // MPLS
+ iterateMpls := true
+ hasMpls = true
+ for iterateMpls {
+ if len(data) < offset+5 {
+ iterateMpls = false
+ break
+ }
+ label := binary.BigEndian.Uint32(append([]byte{0}, data[offset:offset+3]...)) >> 4
+ //exp := data[offset+2] > 1
+ bottom := data[offset+2] & 1
+ mplsTtl := data[offset+3]
+ offset += 4
+
+ if bottom == 1 || label <= 15 || offset > len(data) {
+ if data[offset]&0xf0>>4 == 4 {
+ etherType = []byte{0x8, 0x0}
+ } else if data[offset]&0xf0>>4 == 6 {
+ etherType = []byte{0x86, 0xdd}
+ }
+ iterateMpls = false
+ }
+
+ if countMpls == 0 {
+ firstLabelMpls = label
+ firstTtlMpls = mplsTtl
+ } else if countMpls == 1 {
+ secondLabelMpls = label
+ secondTtlMpls = mplsTtl
+ } else if countMpls == 2 {
+ thirdLabelMpls = label
+ thirdTtlMpls = mplsTtl
+ } else {
+ lastLabelMpls = label
+ lastTtlMpls = mplsTtl
+ }
+ countMpls++
+ }
+ }
+
+ for _, configLayer := range GetSFlowConfigLayer(config, 3) {
+ extracted := GetBytes(data, offset*8+configLayer.Offset, configLayer.Length)
+ MapCustom(flowMessage, extracted, configLayer.Destination, configLayer.Endian)
+ }
+
+ if etherType[0] == 0x8 && etherType[1] == 0x0 { // IPv4
+ if len(data) >= offset+20 {
+ nextHeader = data[offset+9]
+ srcIP = data[offset+12 : offset+16]
+ dstIP = data[offset+16 : offset+20]
+ tos = data[offset+1]
+ ttl = data[offset+8]
+
+ identification = binary.BigEndian.Uint16(data[offset+4 : offset+6])
+ fragOffset = binary.BigEndian.Uint16(data[offset+6:offset+8]) & 8191
+
+ offset += 20
+ }
+ } else if etherType[0] == 0x86 && etherType[1] == 0xdd { // IPv6
+ if len(data) >= offset+40 {
+ nextHeader = data[offset+6]
+ srcIP = data[offset+8 : offset+24]
+ dstIP = data[offset+24 : offset+40]
+
+ tostmp := uint32(binary.BigEndian.Uint16(data[offset : offset+2]))
+ tos = uint8(tostmp & 0x0ff0 >> 4)
+ ttl = data[offset+7]
+
+ flowLabel = binary.BigEndian.Uint32(data[offset : offset+4])
+
+ offset += 40
+
+ }
+ } else if etherType[0] == 0x8 && etherType[1] == 0x6 { // ARP
+ } /*else {
+ return errors.New(fmt.Sprintf("Unknown EtherType: %v\n", etherType))
+ } */
+
+ for _, configLayer := range GetSFlowConfigLayer(config, 4) {
+ extracted := GetBytes(data, offset*8+configLayer.Offset, configLayer.Length)
+ MapCustom(flowMessage, extracted, configLayer.Destination, configLayer.Endian)
+ }
+
+ appOffset := 0
+ if len(data) >= offset+4 && (nextHeader == 17 || nextHeader == 6) && fragOffset&8191 == 0 {
+ srcPort = binary.BigEndian.Uint16(data[offset+0 : offset+2])
+ dstPort = binary.BigEndian.Uint16(data[offset+2 : offset+4])
+ }
+
+ if nextHeader == 17 {
+ appOffset = 8
+ }
+
+ if len(data) > offset+13 && nextHeader == 6 {
+ tcpflags = data[offset+13]
+
+ appOffset = int(data[13]>>4) * 4
+ }
+
+ // ICMP and ICMPv6
+ if len(data) >= offset+2 && (nextHeader == 1 || nextHeader == 58) {
+ (*flowMessage).IcmpType = uint32(data[offset+0])
+ (*flowMessage).IcmpCode = uint32(data[offset+1])
+ }
+
+ if appOffset > 0 {
+ for _, configLayer := range GetSFlowConfigLayer(config, 7) {
+ extracted := GetBytes(data, (offset+appOffset)*8+configLayer.Offset, configLayer.Length)
+ MapCustom(flowMessage, extracted, configLayer.Destination, configLayer.Endian)
+ }
+ }
+
+ iterations++
+ }
+
+ (*flowMessage).HasMpls = hasMpls
+ (*flowMessage).MplsCount = countMpls
+ (*flowMessage).Mpls_1Label = firstLabelMpls
+ (*flowMessage).Mpls_1Ttl = uint32(firstTtlMpls)
+ (*flowMessage).Mpls_2Label = secondLabelMpls
+ (*flowMessage).Mpls_2Ttl = uint32(secondTtlMpls)
+ (*flowMessage).Mpls_3Label = thirdLabelMpls
+ (*flowMessage).Mpls_3Ttl = uint32(thirdTtlMpls)
+ (*flowMessage).MplsLastLabel = lastLabelMpls
+ (*flowMessage).MplsLastTtl = uint32(lastTtlMpls)
+
+ (*flowMessage).Etype = uint32(binary.BigEndian.Uint16(etherType[0:2]))
+ (*flowMessage).Ipv6FlowLabel = flowLabel & 0xFFFFF
+
+ (*flowMessage).SrcPort = uint32(srcPort)
+ (*flowMessage).DstPort = uint32(dstPort)
+
+ (*flowMessage).SrcAddr = srcIP
+ (*flowMessage).DstAddr = dstIP
+ (*flowMessage).Proto = uint32(nextHeader)
+ (*flowMessage).IpTos = uint32(tos)
+ (*flowMessage).IpTtl = uint32(ttl)
+ (*flowMessage).TcpFlags = uint32(tcpflags)
+
+ (*flowMessage).FragmentId = uint32(identification)
+ (*flowMessage).FragmentOffset = uint32(fragOffset)
+}
+
+func ParseSampledHeaderConfig(flowMessage *flowmessage.FlowMessage, sampledHeader *sflow.SampledHeader, config *SFlowMapper) error {
+ data := (*sampledHeader).HeaderData
+ switch (*sampledHeader).Protocol {
+ case 1: // Ethernet
+ ParseEthernetHeader(flowMessage, data, config)
+ }
+ return nil
+}
+
+func SearchSFlowSamples(samples []interface{}) []*flowmessage.FlowMessage {
+ return SearchSFlowSamplesConfig(samples, nil)
+}
+
+func SearchSFlowSamplesConfig(samples []interface{}, config *SFlowMapper) []*flowmessage.FlowMessage {
+ var flowMessageSet []*flowmessage.FlowMessage
+
+ for _, flowSample := range samples {
+ var records []sflow.FlowRecord
+
+ flowMessage := &flowmessage.FlowMessage{}
+ flowMessage.Type = flowmessage.FlowMessage_SFLOW_5
+
+ switch flowSample := flowSample.(type) {
+ case sflow.FlowSample:
+ records = flowSample.Records
+ flowMessage.SamplingRate = uint64(flowSample.SamplingRate)
+ flowMessage.InIf = flowSample.Input
+ flowMessage.OutIf = flowSample.Output
+ case sflow.ExpandedFlowSample:
+ records = flowSample.Records
+ flowMessage.SamplingRate = uint64(flowSample.SamplingRate)
+ flowMessage.InIf = flowSample.InputIfValue
+ flowMessage.OutIf = flowSample.OutputIfValue
+ }
+
+ ipNh := net.IP{}
+ ipSrc := net.IP{}
+ ipDst := net.IP{}
+ flowMessage.Packets = 1
+ for _, record := range records {
+ switch recordData := record.Data.(type) {
+ case sflow.SampledHeader:
+ flowMessage.Bytes = uint64(recordData.FrameLength)
+ ParseSampledHeaderConfig(flowMessage, &recordData, config)
+ case sflow.SampledIPv4:
+ ipSrc = recordData.Base.SrcIP
+ ipDst = recordData.Base.DstIP
+ flowMessage.SrcAddr = ipSrc
+ flowMessage.DstAddr = ipDst
+ flowMessage.Bytes = uint64(recordData.Base.Length)
+ flowMessage.Proto = recordData.Base.Protocol
+ flowMessage.SrcPort = recordData.Base.SrcPort
+ flowMessage.DstPort = recordData.Base.DstPort
+ flowMessage.IpTos = recordData.Tos
+ flowMessage.Etype = 0x800
+ case sflow.SampledIPv6:
+ ipSrc = recordData.Base.SrcIP
+ ipDst = recordData.Base.DstIP
+ flowMessage.SrcAddr = ipSrc
+ flowMessage.DstAddr = ipDst
+ flowMessage.Bytes = uint64(recordData.Base.Length)
+ flowMessage.Proto = recordData.Base.Protocol
+ flowMessage.SrcPort = recordData.Base.SrcPort
+ flowMessage.DstPort = recordData.Base.DstPort
+ flowMessage.IpTos = recordData.Priority
+ flowMessage.Etype = 0x86dd
+ case sflow.ExtendedRouter:
+ ipNh = recordData.NextHop
+ flowMessage.NextHop = ipNh
+ flowMessage.SrcNet = recordData.SrcMaskLen
+ flowMessage.DstNet = recordData.DstMaskLen
+ case sflow.ExtendedGateway:
+ ipNh = recordData.NextHop
+ flowMessage.BgpNextHop = ipNh
+ flowMessage.BgpCommunities = recordData.Communities
+ flowMessage.AsPath = recordData.ASPath
+ if len(recordData.ASPath) > 0 {
+ flowMessage.DstAs = recordData.ASPath[len(recordData.ASPath)-1]
+ flowMessage.NextHopAs = recordData.ASPath[0]
+ } else {
+ flowMessage.DstAs = recordData.AS
+ }
+ if recordData.SrcAS > 0 {
+ flowMessage.SrcAs = recordData.SrcAS
+ } else {
+ flowMessage.SrcAs = recordData.AS
+ }
+ case sflow.ExtendedSwitch:
+ flowMessage.SrcVlan = recordData.SrcVlan
+ flowMessage.DstVlan = recordData.DstVlan
+ }
+ }
+ flowMessageSet = append(flowMessageSet, flowMessage)
+ }
+ return flowMessageSet
+}
+
+func ProcessMessageSFlow(msgDec interface{}) ([]*flowmessage.FlowMessage, error) {
+ return ProcessMessageSFlowConfig(msgDec, nil)
+}
+
+func ProcessMessageSFlowConfig(msgDec interface{}, config *ProducerConfigMapped) ([]*flowmessage.FlowMessage, error) {
+ switch packet := msgDec.(type) {
+ case sflow.Packet:
+ seqnum := packet.SequenceNumber
+ var agent net.IP
+ agent = packet.AgentIP
+
+ var cfg *SFlowMapper
+ if config != nil {
+ cfg = config.SFlow
+ }
+
+ flowSamples := GetSFlowFlowSamples(&packet)
+ flowMessageSet := SearchSFlowSamplesConfig(flowSamples, cfg)
+ for _, fmsg := range flowMessageSet {
+ fmsg.SamplerAddress = agent
+ fmsg.SequenceNum = seqnum
+ }
+
+ return flowMessageSet, nil
+ default:
+ return []*flowmessage.FlowMessage{}, errors.New("Bad sFlow version")
+ }
+}
diff --git a/vendor/github.com/netsampler/goflow2/producer/reflect.go b/vendor/github.com/netsampler/goflow2/producer/reflect.go
new file mode 100644
index 000000000..91a2a415c
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/producer/reflect.go
@@ -0,0 +1,233 @@
+package producer
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/netsampler/goflow2/decoders/netflow"
+ flowmessage "github.com/netsampler/goflow2/pb"
+)
+
+type EndianType string
+
+var (
+ BigEndian EndianType = "big"
+ LittleEndian EndianType = "little"
+)
+
+func GetBytes(d []byte, offset int, length int) []byte {
+ if length == 0 {
+ return nil
+ }
+ leftBytes := offset / 8
+ rightBytes := (offset + length) / 8
+ if (offset+length)%8 != 0 {
+ rightBytes += 1
+ }
+ if leftBytes >= len(d) {
+ return nil
+ }
+ if rightBytes > len(d) {
+ rightBytes = len(d)
+ }
+ chunk := make([]byte, rightBytes-leftBytes)
+
+ offsetMod8 := (offset % 8)
+ shiftAnd := byte(0xff >> (8 - offsetMod8))
+
+ var shifted byte
+ for i := range chunk {
+ j := len(chunk) - 1 - i
+ cur := d[j+leftBytes]
+ chunk[j] = (cur << offsetMod8) | shifted
+ shifted = shiftAnd & cur
+ }
+ last := len(chunk) - 1
+ shiftAndLast := byte(0xff << ((8 - ((offset + length) % 8)) % 8))
+ chunk[last] = chunk[last] & shiftAndLast
+ return chunk
+}
+
+func IsUInt(k reflect.Kind) bool {
+ return k == reflect.Uint8 || k == reflect.Uint16 || k == reflect.Uint32 || k == reflect.Uint64
+}
+
+func IsInt(k reflect.Kind) bool {
+ return k == reflect.Int8 || k == reflect.Int16 || k == reflect.Int32 || k == reflect.Int64
+}
+
+func MapCustomNetFlow(flowMessage *flowmessage.FlowMessage, df netflow.DataField, mapper *NetFlowMapper) {
+ if mapper == nil {
+ return
+ }
+ mapped, ok := mapper.Map(df)
+ if ok {
+ v := df.Value.([]byte)
+ MapCustom(flowMessage, v, mapped.Destination, mapped.Endian)
+ }
+}
+
+func MapCustom(flowMessage *flowmessage.FlowMessage, v []byte, destination string, endianness EndianType) {
+ vfm := reflect.ValueOf(flowMessage)
+ vfm = reflect.Indirect(vfm)
+
+ fieldValue := vfm.FieldByName(destination)
+
+ if fieldValue.IsValid() {
+ typeDest := fieldValue.Type()
+ fieldValueAddr := fieldValue.Addr()
+
+ if typeDest.Kind() == reflect.Slice {
+
+ if typeDest.Elem().Kind() == reflect.Uint8 {
+ fieldValue.SetBytes(v)
+ } else {
+ item := reflect.New(typeDest.Elem())
+
+ if IsUInt(typeDest.Elem().Kind()) {
+ if endianness == LittleEndian {
+ DecodeUNumberLE(v, item.Interface())
+ } else {
+ DecodeUNumber(v, item.Interface())
+ }
+ } else if IsInt(typeDest.Elem().Kind()) {
+ if endianness == LittleEndian {
+ DecodeUNumberLE(v, item.Interface())
+ } else {
+ DecodeUNumber(v, item.Interface())
+ }
+ }
+
+ itemi := reflect.Indirect(item)
+ tmpFieldValue := reflect.Append(fieldValue, itemi)
+ fieldValue.Set(tmpFieldValue)
+ }
+
+ } else if fieldValueAddr.IsValid() && IsUInt(typeDest.Kind()) {
+ if endianness == LittleEndian {
+ DecodeUNumberLE(v, fieldValueAddr.Interface())
+ } else {
+ DecodeUNumber(v, fieldValueAddr.Interface())
+ }
+ } else if fieldValueAddr.IsValid() && IsInt(typeDest.Kind()) {
+ if endianness == LittleEndian {
+ DecodeUNumberLE(v, fieldValueAddr.Interface())
+ } else {
+ DecodeUNumber(v, fieldValueAddr.Interface())
+ }
+ }
+ }
+}
+
+type NetFlowMapField struct {
+ PenProvided bool `json:"penprovided" yaml:"penprovided"`
+ Type uint16 `json:"field" yaml:"field"`
+ Pen uint32 `json:"pen" yaml:"pen"`
+
+ Destination string `json:"destination" yaml:"destination"`
+ Endian EndianType `json:"endianness" yaml:"endianness"`
+ //DestinationLength uint8 `json:"dlen"` // could be used if populating a slice of uint16 that aren't in protobuf
+}
+
+type IPFIXProducerConfig struct {
+ Mapping []NetFlowMapField `json:"mapping"`
+ //PacketMapping []SFlowMapField `json:"packet-mapping"` // for embedded frames: use sFlow configuration
+}
+
+type NetFlowV9ProducerConfig struct {
+ Mapping []NetFlowMapField `json:"mapping"`
+}
+
+type SFlowMapField struct {
+ Layer int `json:"layer"`
+ Offset int `json:"offset"` // offset in bits
+ Length int `json:"length"` // length in bits
+
+ Destination string `json:"destination" yaml:"destination"`
+ Endian EndianType `json:"endianness" yaml:"endianness"`
+ //DestinationLength uint8 `json:"dlen"`
+}
+
+type SFlowProducerConfig struct {
+ Mapping []SFlowMapField `json:"mapping"`
+}
+
+type ProducerConfig struct {
+ IPFIX IPFIXProducerConfig `json:"ipfix"`
+ NetFlowV9 NetFlowV9ProducerConfig `json:"netflowv9"`
+ SFlow SFlowProducerConfig `json:"sflow"` // also used for IPFIX data frames
+
+ // should do a rename map list for when printing
+}
+
+type DataMap struct {
+ Destination string
+ Endian EndianType
+}
+
+type NetFlowMapper struct {
+ data map[string]DataMap // maps field to destination
+}
+
+func (m *NetFlowMapper) Map(field netflow.DataField) (DataMap, bool) {
+ mapped, found := m.data[fmt.Sprintf("%v-%d-%d", field.PenProvided, field.Pen, field.Type)]
+ return mapped, found
+}
+
+func MapFieldsNetFlow(fields []NetFlowMapField) *NetFlowMapper {
+ ret := make(map[string]DataMap)
+ for _, field := range fields {
+ ret[fmt.Sprintf("%v-%d-%d", field.PenProvided, field.Pen, field.Type)] = DataMap{Destination: field.Destination, Endian: field.Endian}
+ }
+ return &NetFlowMapper{ret}
+}
+
+type DataMapLayer struct {
+ Offset int
+ Length int
+ Destination string
+ Endian EndianType
+}
+
+type SFlowMapper struct {
+ data map[int][]DataMapLayer // map layer to list of offsets
+}
+
+func GetSFlowConfigLayer(m *SFlowMapper, layer int) []DataMapLayer {
+ if m == nil {
+ return nil
+ }
+ return m.data[layer]
+}
+
+func MapFieldsSFlow(fields []SFlowMapField) *SFlowMapper {
+ ret := make(map[int][]DataMapLayer)
+ for _, field := range fields {
+ retLayerEntry := DataMapLayer{
+ Offset: field.Offset,
+ Length: field.Length,
+ Destination: field.Destination,
+ Endian: field.Endian,
+ }
+ retLayer := ret[field.Layer]
+ retLayer = append(retLayer, retLayerEntry)
+ ret[field.Layer] = retLayer
+ }
+ return &SFlowMapper{ret}
+}
+
+type ProducerConfigMapped struct {
+ IPFIX *NetFlowMapper `json:"ipfix"`
+ NetFlowV9 *NetFlowMapper `json:"netflowv9"`
+ SFlow *SFlowMapper `json:"sflow"`
+}
+
+func NewProducerConfigMapped(config *ProducerConfig) *ProducerConfigMapped {
+ newCfg := &ProducerConfigMapped{}
+ if config != nil {
+ newCfg.IPFIX = MapFieldsNetFlow(config.IPFIX.Mapping)
+ newCfg.NetFlowV9 = MapFieldsNetFlow(config.NetFlowV9.Mapping)
+ newCfg.SFlow = MapFieldsSFlow(config.SFlow.Mapping)
+ }
+ return newCfg
+}
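
Editorial note (not part of the vendored file): a hedged sketch of how the mapping layer above is wired together. The field number 12 and the DstAddr destination are illustrative choices, not mandated by the vendored code:

package example

import (
	"github.com/netsampler/goflow2/producer"
)

// buildMappedConfig compiles a custom NetFlow v9 field mapping: records with
// field type 12 are copied into the DstAddr member of the protobuf message.
func buildMappedConfig() *producer.ProducerConfigMapped {
	cfg := &producer.ProducerConfig{
		NetFlowV9: producer.NetFlowV9ProducerConfig{
			Mapping: []producer.NetFlowMapField{
				{Type: 12, Destination: "DstAddr", Endian: producer.BigEndian},
			},
		},
	}
	// NewProducerConfigMapped pre-computes the lookup tables that
	// MapCustomNetFlow and GetSFlowConfigLayer consult while decoding.
	return producer.NewProducerConfigMapped(cfg)
}
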
diff --git a/vendor/github.com/netsampler/goflow2/transport/transport.go b/vendor/github.com/netsampler/goflow2/transport/transport.go
new file mode 100644
index 000000000..11e9c6786
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/transport/transport.go
@@ -0,0 +1,68 @@
+package transport
+
+import (
+ "context"
+ "fmt"
+ "sync"
+)
+
+var (
+ transportDrivers = make(map[string]TransportDriver)
+ lock = &sync.RWMutex{}
+)
+
+type TransportDriver interface {
+ Prepare() error // Prepare driver (eg: flag registration)
+ Init(context.Context) error // Initialize driver (eg: start connections, open files...)
+ Close(context.Context) error // Close driver (eg: close connections and files...)
+ Send(key, data []byte) error // Send a formatted message
+}
+
+type TransportInterface interface {
+ Send(key, data []byte) error
+}
+
+type Transport struct {
+ driver TransportDriver
+}
+
+func (t *Transport) Close(ctx context.Context) {
+ t.driver.Close(ctx)
+}
+func (t *Transport) Send(key, data []byte) error {
+ return t.driver.Send(key, data)
+}
+
+func RegisterTransportDriver(name string, t TransportDriver) {
+ lock.Lock()
+ transportDrivers[name] = t
+ lock.Unlock()
+
+ if err := t.Prepare(); err != nil {
+ panic(err)
+ }
+}
+
+func FindTransport(ctx context.Context, name string) (*Transport, error) {
+ lock.RLock()
+ t, ok := transportDrivers[name]
+ lock.RUnlock()
+ if !ok {
+ return nil, fmt.Errorf("Transport %s not found", name)
+ }
+
+ err := t.Init(ctx)
+ return &Transport{t}, err
+}
+
+func GetTransports() []string {
+ lock.RLock()
+ defer lock.RUnlock()
+ t := make([]string, len(transportDrivers))
+ var i int
+ for k, _ := range transportDrivers {
+ t[i] = k
+ i++
+ }
+ return t
+}
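
Editorial note (not part of the vendored file): the registry above is designed around init-time registration. A sketch of a hypothetical driver; the "stdout" name and the stdoutDriver type are illustrative:

package stdouttransport

import (
	"context"
	"fmt"

	"github.com/netsampler/goflow2/transport"
)

type stdoutDriver struct{}

func (d *stdoutDriver) Prepare() error              { return nil } // no flags to register
func (d *stdoutDriver) Init(context.Context) error  { return nil } // nothing to open
func (d *stdoutDriver) Close(context.Context) error { return nil } // nothing to release

// Send prints each formatted message; the key is ignored here.
func (d *stdoutDriver) Send(key, data []byte) error {
	_, err := fmt.Println(string(data))
	return err
}

func init() {
	// After registration, transport.FindTransport(ctx, "stdout") calls Init
	// and hands back a ready-to-use *transport.Transport.
	transport.RegisterTransportDriver("stdout", &stdoutDriver{})
}
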
diff --git a/vendor/github.com/netsampler/goflow2/utils/metrics.go b/vendor/github.com/netsampler/goflow2/utils/metrics.go
new file mode 100644
index 000000000..eb3f23158
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/utils/metrics.go
@@ -0,0 +1,171 @@
+package utils
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ MetricTrafficBytes = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_traffic_bytes",
+ Help: "Bytes received by the application.",
+ },
+ []string{"remote_ip", "local_ip", "local_port", "type"},
+ )
+ MetricTrafficPackets = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_traffic_packets",
+ Help: "Packets received by the application.",
+ },
+ []string{"remote_ip", "local_ip", "local_port", "type"},
+ )
+ MetricPacketSizeSum = prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "flow_traffic_summary_size_bytes",
+ Help: "Summary of packet size.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"remote_ip", "local_ip", "local_port", "type"},
+ )
+ DecoderStats = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_decoder_count",
+ Help: "Decoder processed count.",
+ },
+ []string{"worker", "name"},
+ )
+ DecoderErrors = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_decoder_error_count",
+ Help: "Decoder processed error count.",
+ },
+ []string{"worker", "name"},
+ )
+ DecoderTime = prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "flow_summary_decoding_time_us",
+ Help: "Decoding time summary.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"name"},
+ )
+ DecoderProcessTime = prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "flow_summary_processing_time_us",
+ Help: "Processing time summary.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"name"},
+ )
+ NetFlowStats = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_nf_count",
+ Help: "NetFlows processed.",
+ },
+ []string{"router", "version"},
+ )
+ NetFlowErrors = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_nf_errors_count",
+ Help: "NetFlows processed errors.",
+ },
+ []string{"router", "error"},
+ )
+ NetFlowSetRecordsStatsSum = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_nf_flowset_records_sum",
+ Help: "NetFlows FlowSets sum of records.",
+ },
+ []string{"router", "version", "type"}, // data-template, data, opts...
+ )
+ NetFlowSetStatsSum = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_nf_flowset_sum",
+ Help: "NetFlows FlowSets sum.",
+ },
+ []string{"router", "version", "type"}, // data-template, data, opts...
+ )
+ NetFlowTimeStatsSum = prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "flow_process_nf_delay_summary_seconds",
+ Help: "NetFlows time difference between time of flow and processing.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"router", "version"},
+ )
+ NetFlowTemplatesStats = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_nf_templates_count",
+ Help: "NetFlows Template count.",
+ },
+ []string{"router", "version", "obs_domain_id", "template_id", "type"}, // options/template
+ )
+ SFlowStats = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_sf_count",
+ Help: "sFlows processed.",
+ },
+ []string{"router", "agent", "version"},
+ )
+ SFlowErrors = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_sf_errors_count",
+ Help: "sFlows processed errors.",
+ },
+ []string{"router", "error"},
+ )
+ SFlowSampleStatsSum = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_sf_samples_sum",
+ Help: "SFlows samples sum.",
+ },
+ []string{"router", "agent", "version", "type"}, // counter, flow, expanded...
+ )
+ SFlowSampleRecordsStatsSum = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "flow_process_sf_samples_records_sum",
+ Help: "SFlows samples sum of records.",
+ },
+ []string{"router", "agent", "version", "type"}, // data-template, data, opts...
+ )
+)
+
+func init() {
+ prometheus.MustRegister(MetricTrafficBytes)
+ prometheus.MustRegister(MetricTrafficPackets)
+ prometheus.MustRegister(MetricPacketSizeSum)
+
+ prometheus.MustRegister(DecoderStats)
+ prometheus.MustRegister(DecoderErrors)
+ prometheus.MustRegister(DecoderTime)
+ prometheus.MustRegister(DecoderProcessTime)
+
+ prometheus.MustRegister(NetFlowStats)
+ prometheus.MustRegister(NetFlowErrors)
+ prometheus.MustRegister(NetFlowSetRecordsStatsSum)
+ prometheus.MustRegister(NetFlowSetStatsSum)
+ prometheus.MustRegister(NetFlowTimeStatsSum)
+ prometheus.MustRegister(NetFlowTemplatesStats)
+
+ prometheus.MustRegister(SFlowStats)
+ prometheus.MustRegister(SFlowErrors)
+ prometheus.MustRegister(SFlowSampleStatsSum)
+ prometheus.MustRegister(SFlowSampleRecordsStatsSum)
+}
+
+func DefaultAccountCallback(name string, id int, start, end time.Time) {
+ DecoderProcessTime.With(
+ prometheus.Labels{
+ "name": name,
+ }).
+ Observe(float64((end.Sub(start)).Nanoseconds()) / 1000)
+ DecoderStats.With(
+ prometheus.Labels{
+ "worker": strconv.Itoa(id),
+ "name": name,
+ }).
+ Inc()
+}
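
Editorial note (not part of the vendored file): all of these collectors register with the default Prometheus registry via init, so a consumer only has to serve it. A minimal sketch, assuming the standard promhttp handler from prometheus/client_golang; the listen address is illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	// Importing the package for its side effects registers the flow_* metrics.
	_ "github.com/netsampler/goflow2/utils"
)

func main() {
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}
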
diff --git a/vendor/github.com/netsampler/goflow2/utils/netflow.go b/vendor/github.com/netsampler/goflow2/utils/netflow.go
new file mode 100644
index 000000000..0923ee3b7
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/utils/netflow.go
@@ -0,0 +1,377 @@
+package utils
+
+import (
+ "bytes"
+ "context"
+ "sync"
+ "time"
+
+ "github.com/netsampler/goflow2/decoders/netflow"
+ "github.com/netsampler/goflow2/decoders/netflow/templates"
+ "github.com/netsampler/goflow2/format"
+ flowmessage "github.com/netsampler/goflow2/pb"
+ "github.com/netsampler/goflow2/producer"
+ "github.com/netsampler/goflow2/transport"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+/*
+type TemplateSystem struct {
+ key string
+ templates *netflow.BasicTemplateSystem
+}
+
+func (s *TemplateSystem) AddTemplate(version uint16, obsDomainId uint32, template interface{}) {
+ s.templates.AddTemplate(version, obsDomainId, template)
+
+ typeStr := "options_template"
+ var templateId uint16
+ switch templateIdConv := template.(type) {
+ case netflow.IPFIXOptionsTemplateRecord:
+ templateId = templateIdConv.TemplateId
+ case netflow.NFv9OptionsTemplateRecord:
+ templateId = templateIdConv.TemplateId
+ case netflow.TemplateRecord:
+ templateId = templateIdConv.TemplateId
+ typeStr = "template"
+ }
+ NetFlowTemplatesStats.With(
+ prometheus.Labels{
+ "router": s.key,
+ "version": strconv.Itoa(int(version)),
+ "obs_domain_id": strconv.Itoa(int(obsDomainId)),
+ "template_id": strconv.Itoa(int(templateId)),
+ "type": typeStr,
+ }).
+ Inc()
+}
+
+func (s *TemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) {
+ return s.templates.GetTemplate(version, obsDomainId, templateId)
+}
+*/
+
+type StateNetFlow struct {
+ stopper
+
+ Format format.FormatInterface
+ Transport transport.TransportInterface
+ Logger Logger
+ /*templateslock *sync.RWMutex
+ templates map[string]*TemplateSystem*/
+
+ samplinglock *sync.RWMutex
+ sampling map[string]producer.SamplingRateSystem
+
+ Config *producer.ProducerConfig
+ configMapped *producer.ProducerConfigMapped
+
+ TemplateSystem templates.TemplateInterface
+
+ ctx context.Context
+}
+
+func NewStateNetFlow() *StateNetFlow {
+ return &StateNetFlow{
+ ctx: context.Background(),
+ samplinglock: &sync.RWMutex{},
+ sampling: make(map[string]producer.SamplingRateSystem),
+ }
+}
+
+func (s *StateNetFlow) DecodeFlow(msg interface{}) error {
+ pkt := msg.(BaseMessage)
+ buf := bytes.NewBuffer(pkt.Payload)
+
+ key := pkt.Src.String()
+ samplerAddress := pkt.Src
+ if samplerAddress.To4() != nil {
+ samplerAddress = samplerAddress.To4()
+ }
+
+ s.samplinglock.RLock()
+ sampling, ok := s.sampling[key]
+ s.samplinglock.RUnlock()
+ if !ok {
+ sampling = producer.CreateSamplingSystem()
+ s.samplinglock.Lock()
+ s.sampling[key] = sampling
+ s.samplinglock.Unlock()
+ }
+
+ ts := uint64(time.Now().UTC().Unix())
+ if pkt.SetTime {
+ ts = uint64(pkt.RecvTime.UTC().Unix())
+ }
+
+ timeTrackStart := time.Now()
+ msgDec, err := netflow.DecodeMessageContext(s.ctx, buf, key, netflow.TemplateWrapper{s.ctx, key, s.TemplateSystem})
+ if err != nil {
+ switch err.(type) {
+ case *netflow.ErrorTemplateNotFound:
+ NetFlowErrors.With(
+ prometheus.Labels{
+ "router": key,
+ "error": "template_not_found",
+ }).
+ Inc()
+ default:
+ NetFlowErrors.With(
+ prometheus.Labels{
+ "router": key,
+ "error": "error_decoding",
+ }).
+ Inc()
+ }
+ return err
+ }
+
+ var flowMessageSet []*flowmessage.FlowMessage
+
+ switch msgDecConv := msgDec.(type) {
+ case netflow.NFv9Packet:
+ NetFlowStats.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ }).
+ Inc()
+
+ for _, fs := range msgDecConv.FlowSets {
+ switch fsConv := fs.(type) {
+ case netflow.TemplateFlowSet:
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ "type": "TemplateFlowSet",
+ }).
+ Inc()
+
+ NetFlowSetRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ "type": "TemplateFlowSet",
+ }).
+ Add(float64(len(fsConv.Records)))
+
+ case netflow.NFv9OptionsTemplateFlowSet:
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ "type": "OptionsTemplateFlowSet",
+ }).
+ Inc()
+
+ NetFlowSetRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ "type": "OptionsTemplateFlowSet",
+ }).
+ Add(float64(len(fsConv.Records)))
+
+ case netflow.OptionsDataFlowSet:
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ "type": "OptionsDataFlowSet",
+ }).
+ Inc()
+
+ NetFlowSetRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ "type": "OptionsDataFlowSet",
+ }).
+ Add(float64(len(fsConv.Records)))
+ case netflow.DataFlowSet:
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ "type": "DataFlowSet",
+ }).
+ Inc()
+
+ NetFlowSetRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ "type": "DataFlowSet",
+ }).
+ Add(float64(len(fsConv.Records)))
+ }
+ }
+ flowMessageSet, err = producer.ProcessMessageNetFlowConfig(msgDecConv, sampling, s.configMapped)
+
+ for _, fmsg := range flowMessageSet {
+ fmsg.TimeReceived = ts
+ fmsg.SamplerAddress = samplerAddress
+ timeDiff := fmsg.TimeReceived - fmsg.TimeFlowEnd
+ NetFlowTimeStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "9",
+ }).
+ Observe(float64(timeDiff))
+ }
+ case netflow.IPFIXPacket:
+ NetFlowStats.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ }).
+ Inc()
+
+ for _, fs := range msgDecConv.FlowSets {
+ switch fsConv := fs.(type) {
+ case netflow.TemplateFlowSet:
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ "type": "TemplateFlowSet",
+ }).
+ Inc()
+
+ NetFlowSetRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ "type": "TemplateFlowSet",
+ }).
+ Add(float64(len(fsConv.Records)))
+
+ case netflow.IPFIXOptionsTemplateFlowSet:
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ "type": "OptionsTemplateFlowSet",
+ }).
+ Inc()
+
+ NetFlowSetRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ "type": "OptionsTemplateFlowSet",
+ }).
+ Add(float64(len(fsConv.Records)))
+
+ case netflow.OptionsDataFlowSet:
+
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ "type": "OptionsDataFlowSet",
+ }).
+ Inc()
+
+ NetFlowSetRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ "type": "OptionsDataFlowSet",
+ }).
+ Add(float64(len(fsConv.Records)))
+
+ case netflow.DataFlowSet:
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ "type": "DataFlowSet",
+ }).
+ Inc()
+
+ NetFlowSetRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ "type": "DataFlowSet",
+ }).
+ Add(float64(len(fsConv.Records)))
+ }
+ }
+ flowMessageSet, err = producer.ProcessMessageNetFlowConfig(msgDecConv, sampling, s.configMapped)
+
+ for _, fmsg := range flowMessageSet {
+ fmsg.TimeReceived = ts
+ fmsg.SamplerAddress = samplerAddress
+ timeDiff := fmsg.TimeReceived - fmsg.TimeFlowEnd
+ NetFlowTimeStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "10",
+ }).
+ Observe(float64(timeDiff))
+ }
+ }
+
+ timeTrackStop := time.Now()
+ DecoderTime.With(
+ prometheus.Labels{
+ "name": "NetFlow",
+ }).
+ Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000)
+
+ for _, fmsg := range flowMessageSet {
+ if s.Format != nil {
+ key, data, err := s.Format.Format(fmsg)
+
+ if err != nil && s.Logger != nil {
+ s.Logger.Error(err)
+ }
+ if err == nil && s.Transport != nil {
+ err = s.Transport.Send(key, data)
+ if err != nil {
+ s.Logger.Error(err)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+/*
+func (s *StateNetFlow) ServeHTTPTemplates(w http.ResponseWriter, r *http.Request) {
+ tmp := make(map[string]map[uint16]map[uint32]map[uint16]interface{})
+ s.templateslock.RLock()
+ for key, templatesrouterstr := range s.templates {
+ templatesrouter := templatesrouterstr.templates.GetTemplates()
+ tmp[key] = templatesrouter
+ }
+ s.templateslock.RUnlock()
+ enc := json.NewEncoder(w)
+ enc.Encode(tmp)
+}
+
+func (s *StateNetFlow) InitTemplates() {
+ s.templates = make(map[string]*TemplateSystem)
+ s.templateslock = &sync.RWMutex{}
+ s.sampling = make(map[string]producer.SamplingRateSystem)
+ s.samplinglock = &sync.RWMutex{}
+}*/
+
+func (s *StateNetFlow) initConfig() {
+ s.configMapped = producer.NewProducerConfigMapped(s.Config)
+}
+
+func (s *StateNetFlow) FlowRoutine(workers int, addr string, port int, reuseport bool) error {
+ if err := s.start(); err != nil {
+ return err
+ }
+ //s.InitTemplates()
+ s.initConfig()
+ return UDPStoppableRoutine(s.stopCh, "NetFlow", s.DecodeFlow, workers, addr, port, reuseport, s.Logger)
+}
+
+// FlowRoutineCtx?
diff --git a/vendor/github.com/netsampler/goflow2/utils/nflegacy.go b/vendor/github.com/netsampler/goflow2/utils/nflegacy.go
new file mode 100644
index 000000000..dcfc36dbe
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/utils/nflegacy.go
@@ -0,0 +1,111 @@
+package utils
+
+import (
+ "bytes"
+ "time"
+
+ "github.com/netsampler/goflow2/decoders/netflowlegacy"
+ "github.com/netsampler/goflow2/format"
+ flowmessage "github.com/netsampler/goflow2/pb"
+ "github.com/netsampler/goflow2/producer"
+ "github.com/netsampler/goflow2/transport"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type StateNFLegacy struct {
+ stopper
+
+ Format format.FormatInterface
+ Transport transport.TransportInterface
+ Logger Logger
+}
+
+func NewStateNFLegacy() *StateNFLegacy {
+ return &StateNFLegacy{}
+}
+
+func (s *StateNFLegacy) DecodeFlow(msg interface{}) error {
+ pkt := msg.(BaseMessage)
+ buf := bytes.NewBuffer(pkt.Payload)
+ key := pkt.Src.String()
+ samplerAddress := pkt.Src
+ if samplerAddress.To4() != nil {
+ samplerAddress = samplerAddress.To4()
+ }
+
+ ts := uint64(time.Now().UTC().Unix())
+ if pkt.SetTime {
+ ts = uint64(pkt.RecvTime.UTC().Unix())
+ }
+
+ timeTrackStart := time.Now()
+ msgDec, err := netflowlegacy.DecodeMessage(buf)
+
+ if err != nil {
+ switch err.(type) {
+ case *netflowlegacy.ErrorVersion:
+ NetFlowErrors.With(
+ prometheus.Labels{
+ "router": key,
+ "error": "error_version",
+ }).
+ Inc()
+ }
+ return err
+ }
+
+ switch msgDecConv := msgDec.(type) {
+ case netflowlegacy.PacketNetFlowV5:
+ NetFlowStats.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "5",
+ }).
+ Inc()
+ NetFlowSetStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "version": "5",
+ "type": "DataFlowSet",
+ }).
+ Add(float64(msgDecConv.Count))
+ }
+
+ var flowMessageSet []*flowmessage.FlowMessage
+ flowMessageSet, err = producer.ProcessMessageNetFlowLegacy(msgDec)
+
+ timeTrackStop := time.Now()
+ DecoderTime.With(
+ prometheus.Labels{
+ "name": "NetFlowV5",
+ }).
+ Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000)
+
+ for _, fmsg := range flowMessageSet {
+ fmsg.TimeReceived = ts
+ fmsg.SamplerAddress = samplerAddress
+
+ if s.Format != nil {
+ key, data, err := s.Format.Format(fmsg)
+
+ if err != nil && s.Logger != nil {
+ s.Logger.Error(err)
+ }
+ if err == nil && s.Transport != nil {
+ err = s.Transport.Send(key, data)
+ if err != nil {
+ s.Logger.Error(err)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *StateNFLegacy) FlowRoutine(workers int, addr string, port int, reuseport bool) error {
+ if err := s.start(); err != nil {
+ return err
+ }
+ return UDPStoppableRoutine(s.stopCh, "NetFlowV5", s.DecodeFlow, workers, addr, port, reuseport, s.Logger)
+}
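
Editorial note (not part of the vendored file): the state can be run as a standalone NetFlow v5 listener. A hedged sketch; port 2055 is illustrative, and Format, Transport, and Logger are left nil, so decoded flows are only counted in the metrics, not emitted:

package main

import (
	"log"
	"time"

	"github.com/netsampler/goflow2/utils"
)

func main() {
	s := utils.NewStateNFLegacy()
	go func() {
		// FlowRoutine blocks: one worker, UDP 0.0.0.0:2055, no SO_REUSEPORT.
		if err := s.FlowRoutine(1, "0.0.0.0", 2055, false); err != nil {
			log.Println(err)
		}
	}()
	time.Sleep(time.Minute) // illustrative lifetime
	s.Shutdown()            // closes the stop channel to signal the routine to stop
}
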
diff --git a/vendor/github.com/netsampler/goflow2/utils/sflow.go b/vendor/github.com/netsampler/goflow2/utils/sflow.go
new file mode 100644
index 000000000..27223bcc0
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/utils/sflow.go
@@ -0,0 +1,170 @@
+package utils
+
+import (
+ "bytes"
+ "net"
+ "time"
+
+ "github.com/netsampler/goflow2/decoders/sflow"
+ "github.com/netsampler/goflow2/format"
+ flowmessage "github.com/netsampler/goflow2/pb"
+ "github.com/netsampler/goflow2/producer"
+ "github.com/netsampler/goflow2/transport"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type StateSFlow struct {
+ stopper
+
+ Format format.FormatInterface
+ Transport transport.TransportInterface
+ Logger Logger
+
+ Config *producer.ProducerConfig
+ configMapped *producer.ProducerConfigMapped
+}
+
+func NewStateSFlow() *StateSFlow {
+ return &StateSFlow{}
+}
+
+func (s *StateSFlow) DecodeFlow(msg interface{}) error {
+ pkt := msg.(BaseMessage)
+ buf := bytes.NewBuffer(pkt.Payload)
+ key := pkt.Src.String()
+
+ ts := uint64(time.Now().UTC().Unix())
+ if pkt.SetTime {
+ ts = uint64(pkt.RecvTime.UTC().Unix())
+ }
+
+ timeTrackStart := time.Now()
+ msgDec, err := sflow.DecodeMessage(buf)
+
+ if err != nil {
+ switch err.(type) {
+ case *sflow.ErrorVersion:
+ SFlowErrors.With(
+ prometheus.Labels{
+ "router": key,
+ "error": "error_version",
+ }).
+ Inc()
+ case *sflow.ErrorIPVersion:
+ SFlowErrors.With(
+ prometheus.Labels{
+ "router": key,
+ "error": "error_ip_version",
+ }).
+ Inc()
+ case *sflow.ErrorDataFormat:
+ SFlowErrors.With(
+ prometheus.Labels{
+ "router": key,
+ "error": "error_data_format",
+ }).
+ Inc()
+ default:
+ SFlowErrors.With(
+ prometheus.Labels{
+ "router": key,
+ "error": "error_decoding",
+ }).
+ Inc()
+ }
+ return err
+ }
+
+ switch msgDecConv := msgDec.(type) {
+ case sflow.Packet:
+ agentStr := net.IP(msgDecConv.AgentIP).String()
+ SFlowStats.With(
+ prometheus.Labels{
+ "router": key,
+ "agent": agentStr,
+ "version": "5",
+ }).
+ Inc()
+
+ for _, samples := range msgDecConv.Samples {
+ typeStr := "unknown"
+ countRec := 0
+ switch samplesConv := samples.(type) {
+ case sflow.FlowSample:
+ typeStr = "FlowSample"
+ countRec = len(samplesConv.Records)
+ case sflow.CounterSample:
+ typeStr = "CounterSample"
+ if samplesConv.Header.Format == 4 {
+ typeStr = "Expanded" + typeStr
+ }
+ countRec = len(samplesConv.Records)
+ case sflow.ExpandedFlowSample:
+ typeStr = "ExpandedFlowSample"
+ countRec = len(samplesConv.Records)
+ }
+ SFlowSampleStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "agent": agentStr,
+ "version": "5",
+ "type": typeStr,
+ }).
+ Inc()
+
+ SFlowSampleRecordsStatsSum.With(
+ prometheus.Labels{
+ "router": key,
+ "agent": agentStr,
+ "version": "5",
+ "type": typeStr,
+ }).
+ Add(float64(countRec))
+ }
+
+ }
+
+ var flowMessageSet []*flowmessage.FlowMessage
+ flowMessageSet, err = producer.ProcessMessageSFlowConfig(msgDec, s.configMapped)
+
+ timeTrackStop := time.Now()
+ DecoderTime.With(
+ prometheus.Labels{
+ "name": "sFlow",
+ }).
+ Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000)
+
+ for _, fmsg := range flowMessageSet {
+ fmsg.TimeReceived = ts
+ fmsg.TimeFlowStart = ts
+ fmsg.TimeFlowEnd = ts
+
+ if s.Format != nil {
+ key, data, err := s.Format.Format(fmsg)
+
+ if err != nil && s.Logger != nil {
+ s.Logger.Error(err)
+ }
+ if err == nil && s.Transport != nil {
+ err = s.Transport.Send(key, data)
+ if err != nil {
+ s.Logger.Error(err)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *StateSFlow) initConfig() {
+ s.configMapped = producer.NewProducerConfigMapped(s.Config)
+}
+
+func (s *StateSFlow) FlowRoutine(workers int, addr string, port int, reuseport bool) error {
+ if err := s.start(); err != nil {
+ return err
+ }
+ s.initConfig()
+ return UDPStoppableRoutine(s.stopCh, "sFlow", s.DecodeFlow, workers, addr, port, reuseport, s.Logger)
+}
diff --git a/vendor/github.com/netsampler/goflow2/utils/stopper.go b/vendor/github.com/netsampler/goflow2/utils/stopper.go
new file mode 100644
index 000000000..153b1bd17
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/utils/stopper.go
@@ -0,0 +1,33 @@
+package utils
+
+import (
+ "errors"
+)
+
+// ErrAlreadyStarted is returned when you try to start a flow routine twice
+var ErrAlreadyStarted = errors.New("the routine is already started")
+
+// stopper mechanism, common for all the flow routines
+type stopper struct {
+ stopCh chan struct{}
+}
+
+func (s *stopper) start() error {
+ if s.stopCh != nil {
+ return ErrAlreadyStarted
+ }
+ s.stopCh = make(chan struct{})
+ return nil
+}
+
+func (s *stopper) Shutdown() {
+ if s.stopCh != nil {
+ select {
+ case <-s.stopCh:
+ default:
+ close(s.stopCh)
+ }
+
+ s.stopCh = nil
+ }
+}
diff --git a/vendor/github.com/netsampler/goflow2/utils/utils.go b/vendor/github.com/netsampler/goflow2/utils/utils.go
new file mode 100644
index 000000000..2aab45a10
--- /dev/null
+++ b/vendor/github.com/netsampler/goflow2/utils/utils.go
@@ -0,0 +1,234 @@
+package utils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ reuseport "github.com/libp2p/go-reuseport"
+ decoder "github.com/netsampler/goflow2/decoders"
+ "github.com/netsampler/goflow2/decoders/netflow"
+ flowmessage "github.com/netsampler/goflow2/pb"
+ "github.com/netsampler/goflow2/producer"
+ "github.com/prometheus/client_golang/prometheus"
+ "gopkg.in/yaml.v2"
+)
+
+type ProducerConfig *producer.ProducerConfig
+
+func LoadMapping(f io.Reader) (ProducerConfig, error) {
+ config := &producer.ProducerConfig{}
+ dec := yaml.NewDecoder(f)
+ err := dec.Decode(config)
+ return config, err
+}
+
+func GetServiceAddresses(srv string) (addrs []string, err error) {
+ _, srvs, err := net.LookupSRV("", "", srv)
+ if err != nil {
+ return nil, errors.New(fmt.Sprintf("Service discovery: %v\n", err))
+ }
+ for _, srv := range srvs {
+ addrs = append(addrs, net.JoinHostPort(srv.Target, strconv.Itoa(int(srv.Port))))
+ }
+ return addrs, nil
+}
+
+type Logger interface {
+ Printf(string, ...interface{})
+ Errorf(string, ...interface{})
+ Warnf(string, ...interface{})
+ Warn(...interface{})
+ Error(...interface{})
+ Debug(...interface{})
+ Debugf(string, ...interface{})
+ Infof(string, ...interface{})
+ Fatalf(string, ...interface{})
+}
+
+type BaseMessage struct {
+ Src net.IP
+ Port int
+ Payload []byte
+
+ SetTime bool
+ RecvTime time.Time
+}
+
+type Transport interface {
+ Send([]*flowmessage.FlowMessage)
+}
+
+type Formatter interface {
+ Format([]*flowmessage.FlowMessage)
+}
+
+/*
+type DefaultLogTransport struct {
+}
+
+ func (s *DefaultLogTransport) Publish(msgs []*flowmessage.FlowMessage) {
+ for _, msg := range msgs {
+ fmt.Printf("%v\n", FlowMessageToString(msg))
+ }
+ }
+
+type DefaultJSONTransport struct {
+}
+
+ func (s *DefaultJSONTransport) Publish(msgs []*flowmessage.FlowMessage) {
+ for _, msg := range msgs {
+ fmt.Printf("%v\n", FlowMessageToJSON(msg))
+ }
+ }
+*/
+type DefaultErrorCallback struct {
+ Logger Logger
+}
+
+func (cb *DefaultErrorCallback) Callback(name string, id int, start, end time.Time, err error) {
+ if _, ok := err.(*netflow.ErrorTemplateNotFound); ok {
+ return
+ }
+ if cb.Logger != nil {
+ cb.Logger.Errorf("Error from: %v (%v) duration: %v. %v", name, id, end.Sub(start), err)
+ }
+}
+
+func UDPRoutine(name string, decodeFunc decoder.DecoderFunc, workers int, addr string, port int, sockReuse bool, logger Logger) error {
+ return UDPStoppableRoutine(make(chan struct{}), name, decodeFunc, workers, addr, port, sockReuse, logger)
+}
+
+// UDPStoppableRoutine runs a UDPRoutine that can be stopped by closing the stopCh passed as argument
+func UDPStoppableRoutine(stopCh <-chan struct{}, name string, decodeFunc decoder.DecoderFunc, workers int, addr string, port int, sockReuse bool, logger Logger) error {
+ ecb := DefaultErrorCallback{
+ Logger: logger,
+ }
+
+ decoderParams := decoder.DecoderParams{
+ DecoderFunc: decodeFunc,
+ DoneCallback: DefaultAccountCallback,
+ ErrorCallback: ecb.Callback,
+ }
+
+ processor := decoder.CreateProcessor(workers, decoderParams, name)
+ processor.Start()
+
+ addrUDP := net.UDPAddr{
+ IP: net.ParseIP(addr),
+ Port: port,
+ }
+
+ var udpconn *net.UDPConn
+ var err error
+
+ if sockReuse {
+ pconn, err := reuseport.ListenPacket("udp", addrUDP.String())
+ if err != nil {
+ return err
+ }
+ defer pconn.Close()
+ var ok bool
+ udpconn, ok = pconn.(*net.UDPConn)
+ if !ok {
+ return err
+ }
+ } else {
+ udpconn, err = net.ListenUDP("udp", &addrUDP)
+ if err != nil {
+ return err
+ }
+ defer udpconn.Close()
+ }
+
+ payload := make([]byte, 9000)
+
+ localIP := addrUDP.IP.String()
+ if addrUDP.IP == nil {
+ localIP = ""
+ }
+
+ type udpData struct {
+ size int
+ pktAddr *net.UDPAddr
+ payload []byte
+ }
+
+ udpDataCh := make(chan udpData)
+ defer close(udpDataCh)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ u := udpData{}
+ u.size, u.pktAddr, _ = udpconn.ReadFromUDP(payload)
+ if u.size == 0 { // Ignore 0 byte packets.
+ continue
+ }
+ u.payload = make([]byte, u.size)
+ copy(u.payload, payload[0:u.size])
+ select {
+ case <-stopCh:
+ return
+ default:
+ udpDataCh <- u
+ }
+ }
+ }()
+ func() {
+ for {
+ select {
+ case u := <-udpDataCh:
+ process(u.size, u.payload, u.pktAddr, processor, localIP, addrUDP, name)
+ case <-stopCh:
+ return
+ }
+ }
+ }()
+
+ for _ = range udpDataCh {
+ // drain
+ }
+ wg.Wait()
+ return nil
+}
+
+func process(size int, payload []byte, pktAddr *net.UDPAddr, processor decoder.Processor, localIP string, addrUDP net.UDPAddr, name string) {
+ baseMessage := BaseMessage{
+ Src: pktAddr.IP,
+ Port: pktAddr.Port,
+ Payload: payload,
+ }
+ processor.ProcessMessage(baseMessage)
+
+ MetricTrafficBytes.With(
+ prometheus.Labels{
+ "remote_ip": pktAddr.IP.String(),
+ "local_ip": localIP,
+ "local_port": strconv.Itoa(addrUDP.Port),
+ "type": name,
+ }).
+ Add(float64(size))
+ MetricTrafficPackets.With(
+ prometheus.Labels{
+ "remote_ip": pktAddr.IP.String(),
+ "local_ip": localIP,
+ "local_port": strconv.Itoa(addrUDP.Port),
+ "type": name,
+ }).
+ Inc()
+ MetricPacketSizeSum.With(
+ prometheus.Labels{
+ "remote_ip": pktAddr.IP.String(),
+ "local_ip": localIP,
+ "local_port": strconv.Itoa(addrUDP.Port),
+ "type": name,
+ }).
+ Observe(float64(size))
+}
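
Editorial note (not part of the vendored file): the mapping file consumed by LoadMapping uses the yaml/json tags declared in producer/reflect.go. A hedged sketch; the sample field number and destination are illustrative:

package main

import (
	"log"
	"strings"

	"github.com/netsampler/goflow2/utils"
)

const sampleMapping = `
ipfix:
  mapping:
    - field: 12
      destination: DstAddr
`

func main() {
	cfg, err := utils.LoadMapping(strings.NewReader(sampleMapping))
	if err != nil {
		log.Fatal(err)
	}
	// cfg is a *producer.ProducerConfig behind the named ProducerConfig pointer type.
	log.Printf("loaded %d IPFIX mapping entries", len(cfg.IPFIX.Mapping))
}
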
diff --git a/vendor/github.com/ovn-org/libovsdb/LICENSE b/vendor/github.com/ovn-org/libovsdb/LICENSE
new file mode 100644
index 000000000..e06d20818
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/ovn-org/libovsdb/NOTICE b/vendor/github.com/ovn-org/libovsdb/NOTICE
new file mode 100644
index 000000000..156dcf39f
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/NOTICE
@@ -0,0 +1,13 @@
+libovsdb
+
+Copyright 2014-2015 Socketplane Inc.
+Copyright 2015-2018 Docker Inc.
+
+This software consists of voluntary contributions made by many individuals. For
+exact contribution history, see the commit history.
+
+Modifications Copyright 2018-2019 eBay Inc.
+
+This software contains modifications developed by eBay Inc. and voluntary contributions
+from other individuals in a fork maintained at https://github.com/eBay/libovsdb
+For details on these contributions, please consult the git history.
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/cache.go b/vendor/github.com/ovn-org/libovsdb/cache/cache.go
new file mode 100644
index 000000000..0b1e09e72
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/cache/cache.go
@@ -0,0 +1,1284 @@
+package cache
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/gob"
+ "encoding/hex"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/libovsdb/updates"
+)
+
+const (
+ updateEvent = "update"
+ addEvent = "add"
+ deleteEvent = "delete"
+ bufferSize = 65536
+ columnDelimiter = ","
+ keyDelimiter = "|"
+)
+
+// ErrCacheInconsistent is an error that can occur when an operation
+// would cause the cache to be inconsistent
+type ErrCacheInconsistent struct {
+ details string
+}
+
+// Error implements the error interface
+func (e *ErrCacheInconsistent) Error() string {
+ msg := "cache inconsistent"
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// NewErrCacheInconsistent creates an ErrCacheInconsistent with the given details
+func NewErrCacheInconsistent(details string) *ErrCacheInconsistent {
+ return &ErrCacheInconsistent{
+ details: details,
+ }
+}
+
+// ErrIndexExists is returned when an item in the database cannot be inserted due to existing indexes
+type ErrIndexExists struct {
+ Table string
+ Value interface{}
+ Index string
+ New string
+ Existing []string
+}
+
+func (e *ErrIndexExists) Error() string {
+ return fmt.Sprintf("cannot insert %s in the %s table. item %s has identical indexes. index: %s, value: %v", e.New, e.Table, e.Existing, e.Index, e.Value)
+}
+
+// NewIndexExistsError creates an ErrIndexExists for the given table, value and index
+func NewIndexExistsError(table string, value interface{}, index string, new string, existing []string) *ErrIndexExists {
+ return &ErrIndexExists{
+ table, value, index, new, existing,
+ }
+}
+
+// map of unique values to uuids
+type valueToUUIDs map[interface{}]uuidset
+
+// map of column name(s) to unique values, to UUIDs
+type columnToValue map[index]valueToUUIDs
+
+// index is the type used to implement multiple cache indexes
+type index string
+
+// indexType is the type of index
+type indexType uint
+
+const (
+ schemaIndexType indexType = iota
+ clientIndexType
+)
+
+// indexSpec contains details about an index
+type indexSpec struct {
+ index index
+ columns []model.ColumnKey
+ indexType indexType
+}
+
+func (s indexSpec) isClientIndex() bool {
+ return s.indexType == clientIndexType
+}
+
+func (s indexSpec) isSchemaIndex() bool {
+ return s.indexType == schemaIndexType
+}
+
+// newIndexFromColumns builds an index from a list of columns
+func newIndexFromColumns(columns ...string) index {
+ sort.Strings(columns)
+ return index(strings.Join(columns, columnDelimiter))
+}
+
+// newIndexFromColumnKeys builds an index from a list of column keys
+func newIndexFromColumnKeys(columnsKeys ...model.ColumnKey) index {
+ // RFC 7047 says that Indexes is a [<column-set>] and "Each <column-set> is a set of
+ // columns whose values, taken together within any given row, must be
+ // unique within the table". We'll store the column names, separated by comma,
+ // as we'll assume (the RFC is not clear) that comma isn't valid in a <column> name.
+ columns := make([]string, 0, len(columnsKeys))
+ columnsMap := map[string]struct{}{}
+ for _, columnKey := range columnsKeys {
+ var column string
+ if columnKey.Key != nil {
+ column = fmt.Sprintf("%s%s%v", columnKey.Column, keyDelimiter, columnKey.Key)
+ } else {
+ column = columnKey.Column
+ }
+ if _, found := columnsMap[column]; !found {
+ columns = append(columns, column)
+ columnsMap[column] = struct{}{}
+ }
+ }
+ return newIndexFromColumns(columns...)
+}
+
+// newColumnKeysFromColumns builds a list of column keys from a list of columns
+func newColumnKeysFromColumns(columns ...string) []model.ColumnKey {
+ columnKeys := make([]model.ColumnKey, len(columns))
+ for i, column := range columns {
+ columnKeys[i] = model.ColumnKey{Column: column}
+ }
+ return columnKeys
+}
+
+// RowCache is a collection of Models hashed by UUID
+type RowCache struct {
+ name string
+ dbModel model.DatabaseModel
+ dataType reflect.Type
+ cache map[string]model.Model
+ indexSpecs []indexSpec
+ indexes columnToValue
+ mutex sync.RWMutex
+}
+
+// rowByUUID returns one model from the cache by UUID. Caller must hold the row
+// cache lock.
+func (r *RowCache) rowByUUID(uuid string) model.Model {
+ if row, ok := r.cache[uuid]; ok {
+ return model.Clone(row)
+ }
+ return nil
+}
+
+// Row returns one model from the cache by UUID
+func (r *RowCache) Row(uuid string) model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ return r.rowByUUID(uuid)
+}
+
+// HasRow reports whether a row with the given UUID exists in the cache
+func (r *RowCache) HasRow(uuid string) bool {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ _, found := r.cache[uuid]
+ return found
+}
+
+// rowsByModels searches the cache to find all rows matching any of the provided
+// models, either by UUID or indexes. An error is returned if the model schema
+// has no UUID field, or if the provided models are not all the same type.
+func (r *RowCache) rowsByModels(models []model.Model, useClientIndexes bool) (map[string]model.Model, error) {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+
+ results := make(map[string]model.Model, len(models))
+ for _, m := range models {
+ if reflect.TypeOf(m) != r.dataType {
+ return nil, fmt.Errorf("model type %s didn't match expected row type %s", reflect.TypeOf(m), r.dataType)
+ }
+ info, _ := r.dbModel.NewModelInfo(m)
+ field, err := info.FieldByColumn("_uuid")
+ if err != nil {
+ return nil, err
+ }
+ if uuid := field.(string); uuid != "" {
+ if _, ok := results[uuid]; !ok {
+ if row := r.rowByUUID(uuid); row != nil {
+ results[uuid] = row
+ continue
+ }
+ }
+ }
+
+ // indexSpecs are ordered, schema indexes go first, then client indexes
+ for _, indexSpec := range r.indexSpecs {
+ if indexSpec.isClientIndex() && !useClientIndexes {
+ // Given the ordered indexSpecs, we can break here if we reach the
+ // first client index
+ break
+ }
+ val, err := valueFromIndex(info, indexSpec.columns)
+ if err != nil {
+ continue
+ }
+ vals := r.indexes[indexSpec.index]
+ if uuids, ok := vals[val]; ok {
+ for uuid := range uuids {
+ if _, ok := results[uuid]; !ok {
+ results[uuid] = r.rowByUUID(uuid)
+ }
+ }
+ // Break after handling the first found index
+ // to ensure we preserve index order preference
+ break
+ }
+ }
+ }
+ if len(results) == 0 {
+ return nil, nil
+ }
+ return results, nil
+}
+
+// RowByModel searches the cache by UUID and schema indexes. UUID search is
+// performed first. Then schema indexes are evaluated in turn by the same order
+// with which they are defined in the schema. The model for the first matching
+// index is returned along with its UUID. An empty string and nil is returned if
+// no Model is found.
+func (r *RowCache) RowByModel(m model.Model) (string, model.Model, error) {
+ models, err := r.rowsByModels([]model.Model{m}, false)
+ if err != nil {
+ return "", nil, err
+ }
+ for uuid, model := range models {
+ return uuid, model, nil
+ }
+ return "", nil, nil
+}
+
+// RowsByModels searches the cache by UUID, schema indexes and client indexes.
+// UUID search is performed first. Schema indexes are evaluated next in turn by
+// the same order with which they are defined in the schema. Finally, client
+// indexes are evaluated in turn by the same order with which they are defined
+// in the client DB model. The models for the first matching index are returned,
+// which might be more than 1 if they were found through a client index since in
+// that case uniqueness is not enforced. Nil is returned if no Model is found.
+func (r *RowCache) RowsByModels(models []model.Model) (map[string]model.Model, error) {
+ return r.rowsByModels(models, true)
+}
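+
+// For illustration only, a hedged sketch of a lookup through RowsByModels
+// (rc is a *RowCache; the Bridge model and its "name" schema index are
+// hypothetical and depend on the client DB model in use):
+//
+//	rows, err := rc.RowsByModels([]model.Model{&Bridge{Name: "br-int"}})
+//	// rows maps the UUIDs of the matching cached rows to deep copies of them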
+
+// Create writes the provided content to the cache
+func (r *RowCache) Create(uuid string, m model.Model, checkIndexes bool) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if _, ok := r.cache[uuid]; ok {
+ return NewErrCacheInconsistent(fmt.Sprintf("cannot create row %s as it already exists", uuid))
+ }
+ if reflect.TypeOf(m) != r.dataType {
+ return fmt.Errorf("expected data of type %s, but got %s", r.dataType.String(), reflect.TypeOf(m).String())
+ }
+ info, err := r.dbModel.NewModelInfo(m)
+ if err != nil {
+ return err
+ }
+ addIndexes := r.newIndexes()
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ val, err := valueFromIndex(info, indexSpec.columns)
+ if err != nil {
+ return err
+ }
+
+ uuidset := newUUIDSet(uuid)
+
+ vals := r.indexes[index]
+ existing := vals[val]
+ if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) {
+ return NewIndexExistsError(r.name, val, string(index), uuid, existing.list())
+ }
+
+ addIndexes[index][val] = uuidset
+ }
+
+ // write indexes
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ for k, v := range addIndexes[index] {
+ if indexSpec.isSchemaIndex() {
+ r.indexes[index][k] = v
+ } else {
+ r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v)
+ }
+ }
+ }
+
+ r.cache[uuid] = model.Clone(m)
+ return nil
+}
+
+// Update updates the content in the cache and returns the original (pre-update) model
+func (r *RowCache) Update(uuid string, m model.Model, checkIndexes bool) (model.Model, error) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if _, ok := r.cache[uuid]; !ok {
+ return nil, NewErrCacheInconsistent(fmt.Sprintf("cannot update row %s as it does not exist in the cache", uuid))
+ }
+ oldRow := model.Clone(r.cache[uuid])
+ oldInfo, err := r.dbModel.NewModelInfo(oldRow)
+ if err != nil {
+ return nil, err
+ }
+ newInfo, err := r.dbModel.NewModelInfo(m)
+ if err != nil {
+ return nil, err
+ }
+
+ addIndexes := r.newIndexes()
+ removeIndexes := r.newIndexes()
+ var errs []error
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ var err error
+ oldVal, err := valueFromIndex(oldInfo, indexSpec.columns)
+ if err != nil {
+ return nil, err
+ }
+ newVal, err := valueFromIndex(newInfo, indexSpec.columns)
+ if err != nil {
+ return nil, err
+ }
+
+ // if old and new values are the same, don't worry
+ if oldVal == newVal {
+ continue
+ }
+ // old and new values are NOT the same
+
+ uuidset := newUUIDSet(uuid)
+
+ // check that there are no conflicts
+ vals := r.indexes[index]
+ existing := vals[newVal]
+ if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) {
+ errs = append(errs, NewIndexExistsError(
+ r.name,
+ newVal,
+ string(index),
+ uuid,
+ existing.list(),
+ ))
+ }
+
+ addIndexes[index][newVal] = uuidset
+ removeIndexes[index][oldVal] = uuidset
+ }
+ if len(errs) > 0 {
+ return nil, fmt.Errorf("%+v", errs)
+ }
+
+ // write indexes
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ for k, v := range addIndexes[index] {
+ if indexSpec.isSchemaIndex() {
+ r.indexes[index][k] = v
+ } else {
+ r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v)
+ }
+ }
+ for k, v := range removeIndexes[index] {
+ if indexSpec.isSchemaIndex() || substractUUIDSet(r.indexes[index][k], v).empty() {
+ delete(r.indexes[index], k)
+ }
+ }
+ }
+
+ r.cache[uuid] = model.Clone(m)
+ return oldRow, nil
+}
+
+// IndexExists checks if any of the schema indexes of the provided model is
+// already in the cache under a different UUID.
+func (r *RowCache) IndexExists(row model.Model) error {
+ info, err := r.dbModel.NewModelInfo(row)
+ if err != nil {
+ return err
+ }
+ field, err := info.FieldByColumn("_uuid")
+ if err != nil {
+ return nil
+ }
+ uuid := field.(string)
+ for _, indexSpec := range r.indexSpecs {
+ if !indexSpec.isSchemaIndex() {
+ // Given the ordered indexSpecs, we can break here if we reach the
+ // first non schema index
+ break
+ }
+ index := indexSpec.index
+ val, err := valueFromIndex(info, indexSpec.columns)
+ if err != nil {
+ continue
+ }
+ vals := r.indexes[index]
+ existing := vals[val]
+ if !existing.empty() && !existing.equals(newUUIDSet(uuid)) {
+ return NewIndexExistsError(
+ r.name,
+ val,
+ string(index),
+ uuid,
+ existing.list(),
+ )
+ }
+ }
+ return nil
+}
+
+// Delete deletes a row from the cache
+func (r *RowCache) Delete(uuid string) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if _, ok := r.cache[uuid]; !ok {
+ return NewErrCacheInconsistent(fmt.Sprintf("cannot delete row %s as it does not exist in the cache", uuid))
+ }
+ oldRow := r.cache[uuid]
+ oldInfo, err := r.dbModel.NewModelInfo(oldRow)
+ if err != nil {
+ return err
+ }
+
+ removeIndexes := r.newIndexes()
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ oldVal, err := valueFromIndex(oldInfo, indexSpec.columns)
+ if err != nil {
+ return err
+ }
+
+ removeIndexes[index][oldVal] = newUUIDSet(uuid)
+ }
+
+ // write indexes
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ for k, v := range removeIndexes[index] {
+ // only remove the index if it is pointing to this uuid
+ // otherwise we can cause a consistency issue if we've processed
+ // updates out of order
+ if substractUUIDSet(r.indexes[index][k], v).empty() {
+ delete(r.indexes[index], k)
+ }
+ }
+ }
+
+ delete(r.cache, uuid)
+ return nil
+}
+
+// Rows returns a copy of all Rows in the Cache
+func (r *RowCache) Rows() map[string]model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ result := make(map[string]model.Model)
+ for k, v := range r.cache {
+ result[k] = model.Clone(v)
+ }
+ return result
+}
+
+// RowsShallow returns a cloned map of all Rows in the cache, but does not
+// clone the underlying objects. Therefore, the objects returned are READ ONLY.
+// This is, however, thread safe, as the cached objects are cloned before being updated
+// when modifications come in.
+func (r *RowCache) RowsShallow() map[string]model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+
+ result := make(map[string]model.Model, len(r.cache))
+ for k, v := range r.cache {
+ result[k] = v
+ }
+ return result
+}
+
+// uuidsByConditionsAsIndexes checks possible indexes that can be built with a
+// subset of the provided conditions and returns the uuids for the models that
+// match that subset of conditions. If no conditions could be used as indexes,
+// returns nil. Note that this method does not necessarily match all the
+// provided conditions. Thus the caller is required to evaluate all the
+// conditions against the returned candidates. This is only useful to obtain, as
+// quickly as possible, via indexes, a reduced list of candidate models that might
+// match all conditions, which should be better than just evaluating all
+// conditions against all rows of a table.
+//
+//nolint:gocyclo // warns overall function is complex but ignores inner functions
+func (r *RowCache) uuidsByConditionsAsIndexes(conditions []ovsdb.Condition, nativeValues []interface{}) (uuidset, error) {
+ type indexableCondition struct {
+ column string
+ keys []interface{}
+ nativeValue interface{}
+ }
+
+ // build an indexable condition, more appropriate for our processing, from
+ // an ovsdb condition. Only equality based conditions can be used as indexes
+ // (or `includes` conditions on map values).
+ toIndexableCondition := func(condition ovsdb.Condition, nativeValue interface{}) *indexableCondition {
+ if condition.Column == "_uuid" {
+ return nil
+ }
+ if condition.Function != ovsdb.ConditionEqual && condition.Function != ovsdb.ConditionIncludes {
+ return nil
+ }
+ v := reflect.ValueOf(nativeValue)
+ if !v.IsValid() {
+ return nil
+ }
+ isSet := v.Kind() == reflect.Slice || v.Kind() == reflect.Array
+ if condition.Function == ovsdb.ConditionIncludes && isSet {
+ return nil
+ }
+ keys := []interface{}{}
+ if v.Kind() == reflect.Map && condition.Function == ovsdb.ConditionIncludes {
+ for _, key := range v.MapKeys() {
+ keys = append(keys, key.Interface())
+ }
+ }
+ return &indexableCondition{
+ column: condition.Column,
+ keys: keys,
+ nativeValue: nativeValue,
+ }
+ }
+
+ // for any given set of conditions, we need to check if an index uses the
+ // same fields as the conditions
+ indexMatchesConditions := func(spec indexSpec, conditions []*indexableCondition) bool {
+ columnKeys := []model.ColumnKey{}
+ for _, condition := range conditions {
+ if len(condition.keys) == 0 {
+ columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column})
+ continue
+ }
+ for _, key := range condition.keys {
+ columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column, Key: key})
+ }
+ }
+ index := newIndexFromColumnKeys(columnKeys...)
+ return index == spec.index
+ }
+
+ // for a specific set of conditions, check if an index can be built from
+ // them and return the associated UUIDs
+ evaluateConditionSetAsIndex := func(conditions []*indexableCondition) (uuidset, error) {
+ // build a model with the values from the conditions
+ m, err := r.dbModel.NewModel(r.name)
+ if err != nil {
+ return nil, err
+ }
+ info, err := r.dbModel.NewModelInfo(m)
+ if err != nil {
+ return nil, err
+ }
+ for _, conditions := range conditions {
+ err := info.SetField(conditions.column, conditions.nativeValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, spec := range r.indexSpecs {
+ if !indexMatchesConditions(spec, conditions) {
+ continue
+ }
+ // if we have an index for those conditions, calculate the index
+ // value. The models mapped to that value match the conditions.
+ v, err := valueFromIndex(info, spec.columns)
+ if err != nil {
+ return nil, err
+ }
+ if v != nil {
+ uuids := r.indexes[spec.index][v]
+ if uuids == nil {
+ // this set of conditions was represented by an index but
+ // had no matches, return an empty set
+ uuids = uuidset{}
+ }
+ return uuids, nil
+ }
+ }
+ return nil, nil
+ }
+
+ // set of uuids that match the conditions as we evaluate them
+ var matching uuidset
+
+ // attempt to evaluate a set of conditions via indexes and intersect the
+ // results against matches of previous sets
+ intersectUUIDsFromConditionSet := func(indexableConditions []*indexableCondition) (bool, error) {
+ uuids, err := evaluateConditionSetAsIndex(indexableConditions)
+ if err != nil {
+ return true, err
+ }
+ if matching == nil {
+ matching = uuids
+ } else if uuids != nil {
+ matching = intersectUUIDSets(matching, uuids)
+ }
+ if matching != nil && len(matching) <= 1 {
+ // if we had no matches or a single match, no point in continuing
+ // searching for additional indexes. If we had a single match, it's
+ // cheaper to just evaluate all conditions on it.
+ return true, nil
+ }
+ return false, nil
+ }
+
+ // First, filter out conditions that cannot be matched against indexes. With
+ // the remaining conditions build all possible subsets (the power set of all
+ // conditions) and for any subset that is an index, intersect the obtained
+ // uuids with the ones obtained from previous subsets
+ matchUUIDsFromConditionsPowerSet := func() error {
+ ps := [][]*indexableCondition{}
+ // prime the power set with a first empty subset
+ ps = append(ps, []*indexableCondition{})
+ for i, condition := range conditions {
+ nativeValue := nativeValues[i]
+ iCondition := toIndexableCondition(condition, nativeValue)
+ // this is not a condition we can use as an index, skip it
+ if iCondition == nil {
+ continue
+ }
+ // the power set is built by appending the subsets that result from
+ // adding each item to each of the previous subsets
+ ss := make([][]*indexableCondition, len(ps))
+ for j := range ss {
+ ss[j] = make([]*indexableCondition, len(ps[j]), len(ps[j])+1)
+ copy(ss[j], ps[j])
+ ss[j] = append(ss[j], iCondition)
+ // as we add them to the power set, attempt to evaluate this
+ // subset of conditions as indexes
+ stop, err := intersectUUIDsFromConditionSet(ss[j])
+ if stop || err != nil {
+ return err
+ }
+ }
+ ps = append(ps, ss...)
+ }
+ return nil
+ }
+
+ // finally
+ err := matchUUIDsFromConditionsPowerSet()
+ return matching, err
+}
+
+// RowsByCondition searches models in the cache that match all conditions
+func (r *RowCache) RowsByCondition(conditions []ovsdb.Condition) (map[string]model.Model, error) {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ results := make(map[string]model.Model)
+ schema := r.dbModel.Schema.Table(r.name)
+
+ // an empty set of conditions matches all rows
+ if len(conditions) == 0 {
+ for uuid := range r.cache {
+ results[uuid] = r.rowByUUID(uuid)
+ }
+ return results, nil
+ }
+
+ // one pass to obtain the native values
+ nativeValues := make([]interface{}, 0, len(conditions))
+ for _, condition := range conditions {
+ tSchema := schema.Column(condition.Column)
+ nativeValue, err := ovsdb.OvsToNative(tSchema, condition.Value)
+ if err != nil {
+ return nil, err
+ }
+ nativeValues = append(nativeValues, nativeValue)
+ }
+
+ // obtain all possible matches using conditions as indexes
+ matching, err := r.uuidsByConditionsAsIndexes(conditions, nativeValues)
+ if err != nil {
+ return nil, err
+ }
+
+ // From the matches obtained with indexes, which might not have used all
+ // conditions, continue trimming down the list by explicitly evaluating the
+ // conditions.
+ for i, condition := range conditions {
+ matchingCondition := uuidset{}
+
+ if condition.Column == "_uuid" && (condition.Function == ovsdb.ConditionEqual || condition.Function == ovsdb.ConditionIncludes) {
+ uuid, ok := nativeValues[i].(string)
+ if !ok {
+ panic(fmt.Sprintf("%+v is not a uuid", nativeValues[i]))
+ }
+ if _, found := r.cache[uuid]; found {
+ matchingCondition.add(uuid)
+ }
+ } else {
+ matchCondition := func(uuid string) error {
+ row := r.cache[uuid]
+ info, err := r.dbModel.NewModelInfo(row)
+ if err != nil {
+ return err
+ }
+ value, err := info.FieldByColumn(condition.Column)
+ if err != nil {
+ return err
+ }
+ ok, err := condition.Function.Evaluate(value, nativeValues[i])
+ if err != nil {
+ return err
+ }
+ if ok {
+ matchingCondition.add(uuid)
+ }
+ return nil
+ }
+ if matching != nil {
+ // we just need to consider rows that matched previous
+ // conditions
+ for uuid := range matching {
+ err = matchCondition(uuid)
+ if err != nil {
+ return nil, err
+ }
+ }
+ } else {
+ // If this is the first condition we are able to check, just run
+ // it against the whole table
+ for uuid := range r.cache {
+ err = matchCondition(uuid)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+ if matching == nil {
+ matching = matchingCondition
+ } else {
+ matching = intersectUUIDSets(matching, matchingCondition)
+ }
+ if matching.empty() {
+ // no models match the conditions checked up to now, no need to
+ // check remaining conditions
+ break
+ }
+ }
+
+ for uuid := range matching {
+ results[uuid] = r.rowByUUID(uuid)
+ }
+
+ return results, nil
+}
+
+// Len returns the length of the cache
+func (r *RowCache) Len() int {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ return len(r.cache)
+}
+
+func (r *RowCache) Index(columns ...string) (map[interface{}][]string, error) {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ spec := newIndexFromColumns(columns...)
+ index, ok := r.indexes[spec]
+ if !ok {
+ return nil, fmt.Errorf("%v is not an index", columns)
+ }
+ dbIndex := make(map[interface{}][]string, len(index))
+ for k, v := range index {
+ dbIndex[k] = v.list()
+ }
+ return dbIndex, nil
+}
+
+// EventHandler can handle events when the contents of the cache change
+type EventHandler interface {
+ OnAdd(table string, model model.Model)
+ OnUpdate(table string, old model.Model, new model.Model)
+ OnDelete(table string, model model.Model)
+}
+
+// EventHandlerFuncs is a wrapper for the EventHandler interface
+// It allows a caller to only implement the functions they need
+type EventHandlerFuncs struct {
+ AddFunc func(table string, model model.Model)
+ UpdateFunc func(table string, old model.Model, new model.Model)
+ DeleteFunc func(table string, model model.Model)
+}
+
+// OnAdd calls AddFunc if it is not nil
+func (e *EventHandlerFuncs) OnAdd(table string, model model.Model) {
+ if e.AddFunc != nil {
+ e.AddFunc(table, model)
+ }
+}
+
+// OnUpdate calls UpdateFunc if it is not nil
+func (e *EventHandlerFuncs) OnUpdate(table string, old, new model.Model) {
+ if e.UpdateFunc != nil {
+ e.UpdateFunc(table, old, new)
+ }
+}
+
+// OnDelete calls DeleteFunc if it is not nil
+func (e *EventHandlerFuncs) OnDelete(table string, row model.Model) {
+ if e.DeleteFunc != nil {
+ e.DeleteFunc(table, row)
+ }
+}
+
+// TableCache contains a collection of RowCaches, hashed by name,
+// and an array of EventHandlers that respond to cache updates
+// It implements the ovsdb.NotificationHandler interface so it may
+// handle update notifications
+type TableCache struct {
+ cache map[string]*RowCache
+ eventProcessor *eventProcessor
+ dbModel model.DatabaseModel
+ ovsdb.NotificationHandler
+ mutex sync.RWMutex
+ logger *logr.Logger
+}
+
+// Data is the type for data that can be prepopulated in the cache
+type Data map[string]map[string]model.Model
+
+// NewTableCache creates a new TableCache
+func NewTableCache(dbModel model.DatabaseModel, data Data, logger *logr.Logger) (*TableCache, error) {
+ if !dbModel.Valid() {
+ return nil, fmt.Errorf("tablecache without valid databasemodel cannot be populated")
+ }
+ if logger == nil {
+ l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("cache")
+ logger = &l
+ } else {
+ l := logger.WithName("cache")
+ logger = &l
+ }
+ eventProcessor := newEventProcessor(bufferSize, logger)
+ cache := make(map[string]*RowCache)
+ tableTypes := dbModel.Types()
+ for name := range dbModel.Schema.Tables {
+ cache[name] = newRowCache(name, dbModel, tableTypes[name])
+ }
+ for table, rowData := range data {
+ if _, ok := dbModel.Schema.Tables[table]; !ok {
+ return nil, fmt.Errorf("table %s is not in schema", table)
+ }
+ rowCache := cache[table]
+ for uuid, row := range rowData {
+ if err := rowCache.Create(uuid, row, true); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return &TableCache{
+ cache: cache,
+ eventProcessor: eventProcessor,
+ dbModel: dbModel,
+ mutex: sync.RWMutex{},
+ logger: logger,
+ }, nil
+}
+
+// Mapper returns the mapper
+func (t *TableCache) Mapper() mapper.Mapper {
+ return t.dbModel.Mapper
+}
+
+// DatabaseModel returns the DatabaseModel
+func (t *TableCache) DatabaseModel() model.DatabaseModel {
+ return t.dbModel
+}
+
+// Table returns a Table from the cache with a given name
+func (t *TableCache) Table(name string) *RowCache {
+ t.mutex.RLock()
+ defer t.mutex.RUnlock()
+ if table, ok := t.cache[name]; ok {
+ return table
+ }
+ return nil
+}
+
+// Tables returns a list of table names that are in the cache
+func (t *TableCache) Tables() []string {
+ t.mutex.RLock()
+ defer t.mutex.RUnlock()
+ var result []string
+ for k := range t.cache {
+ result = append(result, k)
+ }
+ return result
+}
+
+// Update implements the update method of the NotificationHandler interface.
+// It applies the received table updates to the cache and emits the
+// corresponding events.
+func (t *TableCache) Update(context interface{}, tableUpdates ovsdb.TableUpdates) error {
+ if len(tableUpdates) == 0 {
+ return nil
+ }
+ if err := t.Populate(tableUpdates); err != nil {
+ t.logger.Error(err, "during libovsdb cache populate")
+ return err
+ }
+ return nil
+}
+
+// Update2 implements the update2 method of the NotificationHandler interface.
+// It applies the received table updates to the cache and emits the
+// corresponding events.
+func (t *TableCache) Update2(context interface{}, tableUpdates ovsdb.TableUpdates2) error {
+ if len(tableUpdates) == 0 {
+ return nil
+ }
+ if err := t.Populate2(tableUpdates); err != nil {
+ t.logger.Error(err, "during libovsdb cache populate2")
+ return err
+ }
+ return nil
+}
+
+// Locked implements the locked method of the NotificationHandler interface
+func (t *TableCache) Locked([]interface{}) {
+}
+
+// Stolen implements the stolen method of the NotificationHandler interface
+func (t *TableCache) Stolen([]interface{}) {
+}
+
+// Echo implements the echo method of the NotificationHandler interface
+func (t *TableCache) Echo([]interface{}) {
+}
+
+// Disconnected implements the disconnected method of the NotificationHandler interface
+func (t *TableCache) Disconnected() {
+}
+
+// Populate adds data to the cache and places an event on the channel
+func (t *TableCache) Populate(tableUpdates ovsdb.TableUpdates) error {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ for table := range t.dbModel.Types() {
+ tu, ok := tableUpdates[table]
+ if !ok {
+ continue
+ }
+ tCache := t.cache[table]
+ for uuid, row := range tu {
+ t.logger.V(5).Info("processing update", "table", table, "uuid", uuid)
+ update := updates.ModelUpdates{}
+ current := tCache.cache[uuid]
+ err := update.AddRowUpdate(t.dbModel, table, uuid, current, *row)
+ if err != nil {
+ return err
+ }
+ err = t.ApplyCacheUpdate(update)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Populate2 adds data to the cache and places an event on the channel
+func (t *TableCache) Populate2(tableUpdates ovsdb.TableUpdates2) error {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ for table := range t.dbModel.Types() {
+ tu, ok := tableUpdates[table]
+ if !ok {
+ continue
+ }
+ tCache := t.cache[table]
+ for uuid, row := range tu {
+ t.logger.V(5).Info("processing update", "table", table, "uuid", uuid)
+ update := updates.ModelUpdates{}
+ current := tCache.cache[uuid]
+ if row.Initial == nil && row.Insert == nil && current == nil {
+ return NewErrCacheInconsistent(fmt.Sprintf("row with uuid %s does not exist", uuid))
+ }
+ err := update.AddRowUpdate2(t.dbModel, table, uuid, current, *row)
+ if err != nil {
+ return err
+ }
+ err = t.ApplyCacheUpdate(update)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Purge drops all data in the cache and reinitializes it using the
+// provided database model
+func (t *TableCache) Purge(dbModel model.DatabaseModel) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.dbModel = dbModel
+ tableTypes := t.dbModel.Types()
+ for name := range t.dbModel.Schema.Tables {
+ t.cache[name] = newRowCache(name, t.dbModel, tableTypes[name])
+ }
+}
+
+// AddEventHandler registers the supplied EventHandler to receive cache events
+func (t *TableCache) AddEventHandler(handler EventHandler) {
+ t.eventProcessor.AddEventHandler(handler)
+}
+
+// Run starts the event processing loop.
+// It blocks until the stop channel is closed.
+func (t *TableCache) Run(stopCh <-chan struct{}) {
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ t.eventProcessor.Run(stopCh)
+ }()
+ wg.Wait()
+}
+
+// newRowCache creates a new, empty RowCache for the given table name,
+// database model and data type
+func newRowCache(name string, dbModel model.DatabaseModel, dataType reflect.Type) *RowCache {
+ schemaIndexes := dbModel.Schema.Table(name).Indexes
+ clientIndexes := dbModel.Client().Indexes(name)
+
+ r := &RowCache{
+ name: name,
+ dbModel: dbModel,
+ indexSpecs: make([]indexSpec, 0, len(schemaIndexes)+len(clientIndexes)),
+ dataType: dataType,
+ cache: make(map[string]model.Model),
+ mutex: sync.RWMutex{},
+ }
+
+ // respect the order of indexes: add schema indexes first, then client
+ // indexes
+ indexes := map[index]indexSpec{}
+ for _, columns := range schemaIndexes {
+ columnKeys := newColumnKeysFromColumns(columns...)
+ index := newIndexFromColumnKeys(columnKeys...)
+ spec := indexSpec{index: index, columns: columnKeys, indexType: schemaIndexType}
+ r.indexSpecs = append(r.indexSpecs, spec)
+ indexes[index] = spec
+ }
+ for _, clientIndex := range clientIndexes {
+ columnKeys := clientIndex.Columns
+ index := newIndexFromColumnKeys(columnKeys...)
+ // if this is already a DB index, ignore
+ if _, ok := indexes[index]; ok {
+ continue
+ }
+ spec := indexSpec{index: index, columns: columnKeys, indexType: clientIndexType}
+ r.indexSpecs = append(r.indexSpecs, spec)
+ indexes[index] = spec
+ }
+
+ r.indexes = r.newIndexes()
+ return r
+}
+
+func (r *RowCache) newIndexes() columnToValue {
+ c := make(columnToValue)
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ c[index] = make(valueToUUIDs)
+ }
+ return c
+}
+
+// event encapsulates a cache event
+type event struct {
+ eventType string
+ table string
+ old model.Model
+ new model.Model
+}
+
+// eventProcessor handles the queueing and processing of cache events
+type eventProcessor struct {
+ events chan *event
+ // handlersMutex locks the handlers array when we add a handler or dispatch events
+ // we don't need a RWMutex in this case as we only have one thread reading and the write
+ // volume is very low (i.e only when AddEventHandler is called)
+ handlersMutex sync.Mutex
+ handlers []EventHandler
+ logger *logr.Logger
+}
+
+func newEventProcessor(capacity int, logger *logr.Logger) *eventProcessor {
+ return &eventProcessor{
+ events: make(chan *event, capacity),
+ handlers: []EventHandler{},
+ logger: logger,
+ }
+}
+
+// AddEventHandler registers the supplied EventHandler with the eventProcessor
+// EventHandlers MUST process events quickly, for example, pushing them to a queue
+// to be processed by the client. Long-running handler functions adversely affect
+// other handlers and MAY cause loss of data if the channel buffer is full
+func (e *eventProcessor) AddEventHandler(handler EventHandler) {
+ e.handlersMutex.Lock()
+ defer e.handlersMutex.Unlock()
+ e.handlers = append(e.handlers, handler)
+}
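+
+// For illustration only, a hedged sketch of a fast handler that hands events
+// off to a client-owned channel (tc is a *TableCache; myQueue is hypothetical):
+//
+//	tc.AddEventHandler(&EventHandlerFuncs{
+//		UpdateFunc: func(table string, old, new model.Model) {
+//			select {
+//			case myQueue <- new:
+//			default: // drop or log if the client queue is full
+//			}
+//		},
+//	})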
+
+// AddEvent writes an event to the channel
+func (e *eventProcessor) AddEvent(eventType string, table string, old model.Model, new model.Model) {
+ // We don't need to check for error here since there
+ // is only a single writer. RPC is run in blocking mode
+ event := event{
+ eventType: eventType,
+ table: table,
+ old: old,
+ new: new,
+ }
+ select {
+ case e.events <- &event:
+ // noop
+ return
+ default:
+ e.logger.V(0).Info("dropping event because event buffer is full")
+ }
+}
+
+// Run runs the eventProcessor loop.
+// It blocks until the stopCh is closed, waiting for events to arrive on the
+// event channel. Once an event is received, it is dispatched to each
+// registered handler
+func (e *eventProcessor) Run(stopCh <-chan struct{}) {
+ for {
+ select {
+ case <-stopCh:
+ return
+ case event := <-e.events:
+ e.handlersMutex.Lock()
+ for _, handler := range e.handlers {
+ switch event.eventType {
+ case addEvent:
+ handler.OnAdd(event.table, event.new)
+ case updateEvent:
+ handler.OnUpdate(event.table, event.old, event.new)
+ case deleteEvent:
+ handler.OnDelete(event.table, event.old)
+ }
+ }
+ e.handlersMutex.Unlock()
+ }
+ }
+}
+
+type cacheUpdate interface {
+ GetUpdatedTables() []string
+ ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error
+}
+
+// ApplyCacheUpdate applies the given update to the cache and emits the
+// corresponding add/update/delete events
+func (t *TableCache) ApplyCacheUpdate(update cacheUpdate) error {
+ tables := update.GetUpdatedTables()
+ for _, table := range tables {
+ tCache := t.cache[table]
+ err := update.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error {
+ switch {
+ case old == nil && new != nil:
+ t.logger.V(5).Info("inserting model", "table", table, "uuid", uuid, "model", new)
+ err := tCache.Create(uuid, new, false)
+ if err != nil {
+ return err
+ }
+ t.eventProcessor.AddEvent(addEvent, table, nil, new)
+ case old != nil && new != nil:
+ t.logger.V(5).Info("updating model", "table", table, "uuid", uuid, "old", old, "new", new)
+ _, err := tCache.Update(uuid, new, false)
+ if err != nil {
+ return err
+ }
+ t.eventProcessor.AddEvent(updateEvent, table, old, new)
+ case new == nil:
+ t.logger.V(5).Info("deleting model", "table", table, "uuid", uuid, "model", old)
+ err := tCache.Delete(uuid)
+ if err != nil {
+ return err
+ }
+ t.eventProcessor.AddEvent(deleteEvent, table, old, nil)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func valueFromIndex(info *mapper.Info, columnKeys []model.ColumnKey) (interface{}, error) {
+ if len(columnKeys) > 1 {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ for _, columnKey := range columnKeys {
+ val, err := valueFromColumnKey(info, columnKey)
+ if err != nil {
+ return "", err
+ }
+ // if object is nil don't try to encode it
+ value := reflect.ValueOf(val)
+ if value.Kind() == reflect.Invalid {
+ continue
+ }
+ // if object is a nil pointer don't try to encode it
+ if value.Kind() == reflect.Pointer && value.IsNil() {
+ continue
+ }
+ err = enc.Encode(val)
+ if err != nil {
+ return "", err
+ }
+ }
+ h := sha256.New()
+ val := hex.EncodeToString(h.Sum(buf.Bytes()))
+ return val, nil
+ }
+ val, err := valueFromColumnKey(info, columnKeys[0])
+ if err != nil {
+ return "", err
+ }
+ return val, err
+}
+
+func valueFromColumnKey(info *mapper.Info, columnKey model.ColumnKey) (interface{}, error) {
+ val, err := info.FieldByColumn(columnKey.Column)
+ if err != nil {
+ return nil, err
+ }
+ if columnKey.Key != nil {
+ val, err = valueFromMap(val, columnKey.Key)
+ if err != nil {
+ return "", fmt.Errorf("can't get key value from map: %v", err)
+ }
+ }
+ // if the value is a non-nil pointer to an optional value, dereference it
+ v := reflect.ValueOf(val)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ val = v.Elem().Interface()
+ }
+ return val, err
+}
+
+func valueFromMap(aMap interface{}, key interface{}) (interface{}, error) {
+ m := reflect.ValueOf(aMap)
+ if m.Kind() != reflect.Map {
+ return nil, fmt.Errorf("expected map but got %s", m.Kind())
+ }
+ v := m.MapIndex(reflect.ValueOf(key))
+ if !v.IsValid() {
+ // return the zero value for the map value type
+ return reflect.Indirect(reflect.New(m.Type().Elem())).Interface(), nil
+ }
+
+ return v.Interface(), nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/doc.go b/vendor/github.com/ovn-org/libovsdb/cache/doc.go
new file mode 100644
index 000000000..3b176f277
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/cache/doc.go
@@ -0,0 +1,16 @@
+/*
+Package cache provides a cache of model.Model elements that can be used in an OVSDB client or server.
+
+The cache can be accessed using a simple API:
+
+ cache.Table("Open_vSwitch").Row("<ovs-uuid>")
+
+It implements the ovsdb.NotificationHandler interface
+such that it can be populated automatically by
+update notifications
+
+It also contains an eventProcessor where callers
+may register functions that will get called on
+every Add/Update/Delete event.
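+
+For example, a minimal sketch of wiring a handler into a TableCache (dbModel
+and stopCh are assumed to be created elsewhere):
+
+ tc, _ := NewTableCache(dbModel, nil, nil)
+ tc.AddEventHandler(&EventHandlerFuncs{
+  AddFunc: func(table string, m model.Model) {
+   // react to rows added to the cache
+  },
+ })
+ go tc.Run(stopCh)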
+*/
+package cache
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go b/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go
new file mode 100644
index 000000000..f7c139737
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go
@@ -0,0 +1,101 @@
+package cache
+
+type void struct{}
+type uuidset map[string]void
+
+func newUUIDSet(uuids ...string) uuidset {
+ s := uuidset{}
+ for _, uuid := range uuids {
+ s[uuid] = void{}
+ }
+ return s
+}
+
+func (s uuidset) add(uuid string) {
+ s[uuid] = void{}
+}
+
+func (s uuidset) remove(uuid string) {
+ delete(s, uuid)
+}
+
+func (s uuidset) has(uuid string) bool {
+ _, ok := s[uuid]
+ return ok
+}
+
+func (s uuidset) equals(o uuidset) bool {
+ if len(s) != len(o) {
+ return false
+ }
+ for uuid := range s {
+ if !o.has(uuid) {
+ return false
+ }
+ }
+ return true
+}
+
+func (s uuidset) getAny() string {
+ for k := range s {
+ return k
+ }
+ return ""
+}
+
+func (s uuidset) list() []string {
+ uuids := make([]string, 0, len(s))
+ for uuid := range s {
+ uuids = append(uuids, uuid)
+ }
+ return uuids
+}
+
+func (s uuidset) empty() bool {
+ return len(s) == 0
+}
+
+func addUUIDSet(s1, s2 uuidset) uuidset {
+ if len(s2) == 0 {
+ return s1
+ }
+ if s1 == nil {
+ s1 = uuidset{}
+ }
+ for uuid := range s2 {
+ s1.add(uuid)
+ }
+ return s1
+}
+
+func substractUUIDSet(s1, s2 uuidset) uuidset {
+ if len(s1) == 0 || len(s2) == 0 {
+ return s1
+ }
+ for uuid := range s2 {
+ s1.remove(uuid)
+ }
+ return s1
+}
+
+func intersectUUIDSets(s1, s2 uuidset) uuidset {
+ if len(s1) == 0 || len(s2) == 0 {
+ return nil
+ }
+ var big uuidset
+ var small uuidset
+ if len(s1) > len(s2) {
+ big = s1
+ small = s2
+ } else {
+ big = s2
+ small = s1
+ }
+ f := uuidset{}
+ for uuid := range small {
+ if big.has(uuid) {
+ f.add(uuid)
+ }
+ }
+ return f
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/api.go b/vendor/github.com/ovn-org/libovsdb/client/api.go
new file mode 100644
index 000000000..497758944
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/api.go
@@ -0,0 +1,593 @@
+package client
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/go-logr/logr"
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// API defines basic operations to interact with the database
+type API interface {
+ // List populates a slice of Model objects based on their type
+ // The function parameter must be a pointer to a slice of Models
+ // Models can be structs or pointers to structs
+ // If the slice is nil, the entire cache will be copied into the slice
+ // If it has a capacity != 0, only 'capacity' elements will be filled in
+ List(ctx context.Context, result interface{}) error
+
+ // Create a Conditional API from a Function that is used to filter cached data
+ // The function must accept a Model implementation and return a boolean. E.g:
+ // ConditionFromFunc(func(l *LogicalSwitch) bool { return l.Enabled })
+ WhereCache(predicate interface{}) ConditionalAPI
+
+ // Create a ConditionalAPI from a Model's index data, where operations
+ // apply to elements that match the values provided in one or more
+ // model.Models according to the indexes. All provided Models must be
+ // the same type or an error will be generated when operations are
+ // performed on the ConditionalAPI.
+ Where(...model.Model) ConditionalAPI
+
+ // WhereAny creates a ConditionalAPI from a list of Conditions where
+ // operations apply to elements that match any (e.g., logical OR) of the
+ // conditions.
+ WhereAny(model.Model, ...model.Condition) ConditionalAPI
+
+ // WhereAll creates a ConditionalAPI from a list of Conditions where
+ // operations apply to elements that match all (e.g., logical AND) of the
+ // conditions.
+ WhereAll(model.Model, ...model.Condition) ConditionalAPI
+
+ // Get retrieves a model from the cache
+ // The way the object will be fetched depends on the data contained in the
+ // provided model and the indexes defined in the associated schema
+ // For more complex ways of searching for elements in the cache, the
+ // preferred way is Where({condition}).List()
+ Get(context.Context, model.Model) error
+
+ // Create returns the operation needed to add the model(s) to the Database
+ // Only fields with non-default values will be added to the transaction. If
+ // the field associated with column "_uuid" has some content other than a
+ // UUID, it will be treated as named-uuid
+ Create(...model.Model) ([]ovsdb.Operation, error)
+}
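+
+// For illustration only, a hedged sketch of the read-oriented calls (c, ctx and
+// the LogicalSwitch model type are illustrative, not part of this package):
+//
+//	var switches []*LogicalSwitch
+//	err := c.List(ctx, &switches)
+//
+//	ls := &LogicalSwitch{Name: "sw0"}
+//	err = c.Get(ctx, ls)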
+
+// ConditionalAPI is an interface used to perform operations that require / use Conditions
+type ConditionalAPI interface {
+ // List uses the condition to search the cache and populates
+ // the slice of Model objects based on their type
+ List(ctx context.Context, result interface{}) error
+
+ // Mutate returns the operations needed to perform the mutation specified
+ // by the model and the list of Mutation objects
+ // Depending on the Condition, it might return one or many operations
+ Mutate(model.Model, ...model.Mutation) ([]ovsdb.Operation, error)
+
+ // Update returns the operations needed to update any number of rows according
+ // to the data in the given model.
+ // By default, all the non-default values contained in model will be updated.
+ // Optional fields can be passed (pointer to fields in the model) to select
+ // the fields to be updated
+ Update(model.Model, ...interface{}) ([]ovsdb.Operation, error)
+
+ // Delete returns the Operations needed to delete the models selected via the condition
+ Delete() ([]ovsdb.Operation, error)
+
+ // Wait returns the operations needed to perform the wait specified
+ // by the until condition, timeout, row and columns based on provided parameters.
+ Wait(ovsdb.WaitCondition, *int, model.Model, ...interface{}) ([]ovsdb.Operation, error)
+}
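+
+// For illustration only, a hedged sketch of building operations through a
+// ConditionalAPI (c and LogicalSwitch are illustrative, as above):
+//
+//	ls := &LogicalSwitch{Name: "sw0"}
+//	ops, err := c.Where(ls).Delete()
+//	// ops can then be submitted in a transaction by the client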
+
+// ErrWrongType is used to report that the user-provided parameter has the wrong type
+type ErrWrongType struct {
+ inputType reflect.Type
+ reason string
+}
+
+func (e *ErrWrongType) Error() string {
+ return fmt.Sprintf("Wrong parameter type (%s): %s", e.inputType, e.reason)
+}
+
+// ErrNotFound is returned when the object or table is not found in the cache
+var ErrNotFound = errors.New("object not found")
+
+// api struct implements both API and ConditionalAPI
+// Where() can be used to create a ConditionalAPI
+type api struct {
+ cache *cache.TableCache
+ cond Conditional
+ logger *logr.Logger
+}
+
+// List populates a slice of Models given as parameter based on the configured Condition
+func (a api) List(ctx context.Context, result interface{}) error {
+ resultPtr := reflect.ValueOf(result)
+ if resultPtr.Type().Kind() != reflect.Ptr {
+ return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"}
+ }
+
+ resultVal := reflect.Indirect(resultPtr)
+ if resultVal.Type().Kind() != reflect.Slice {
+ return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"}
+ }
+
+ // List accepts a slice of Models that can be either structs or pointers to
+ // structs
+ var appendValue func(reflect.Value)
+ var m model.Model
+ if resultVal.Type().Elem().Kind() == reflect.Ptr {
+ m = reflect.New(resultVal.Type().Elem().Elem()).Interface()
+ appendValue = func(v reflect.Value) {
+ resultVal.Set(reflect.Append(resultVal, v))
+ }
+ } else {
+ m = reflect.New(resultVal.Type().Elem()).Interface()
+ appendValue = func(v reflect.Value) {
+ resultVal.Set(reflect.Append(resultVal, reflect.Indirect(v)))
+ }
+ }
+
+ table, err := a.getTableFromModel(m)
+ if err != nil {
+ return err
+ }
+
+ if a.cond != nil && a.cond.Table() != table {
+ return &ErrWrongType{resultPtr.Type(),
+ fmt.Sprintf("Table derived from input type (%s) does not match Table from Condition (%s)", table, a.cond.Table())}
+ }
+
+ tableCache := a.cache.Table(table)
+ if tableCache == nil {
+ return ErrNotFound
+ }
+
+ var rows map[string]model.Model
+ if a.cond != nil {
+ rows, err = a.cond.Matches()
+ if err != nil {
+ return err
+ }
+ } else {
+ rows = tableCache.Rows()
+ }
+ // If given a nil slice, fill it with the entire contents of the cache table;
+ // if not, fill it only up to its capacity.
+ if resultVal.IsNil() || resultVal.Cap() == 0 {
+ resultVal.Set(reflect.MakeSlice(resultVal.Type(), 0, len(rows)))
+ }
+ i := resultVal.Len()
+ maxCap := resultVal.Cap()
+
+ for _, row := range rows {
+ if i >= maxCap {
+ break
+ }
+ appendValue(reflect.ValueOf(row))
+ i++
+ }
+
+ return nil
+}
+
+// Where returns a conditionalAPI based on model indexes. All provided models
+// must be the same type.
+func (a api) Where(models ...model.Model) ConditionalAPI {
+ return newConditionalAPI(a.cache, a.conditionFromModels(models), a.logger)
+}
+
+// WhereAny returns a conditionalAPI based on a Condition list that matches any
+// of the conditions individually
+func (a api) WhereAny(m model.Model, cond ...model.Condition) ConditionalAPI {
+ return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(false, m, cond...), a.logger)
+}
+
+// WhereAll returns a conditionalAPI based on a Condition list that matches all
+// of the conditions together
+func (a api) WhereAll(m model.Model, cond ...model.Condition) ConditionalAPI {
+ return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(true, m, cond...), a.logger)
+}
+
+// WhereCache returns a conditionalAPI based on a predicate
+func (a api) WhereCache(predicate interface{}) ConditionalAPI {
+ return newConditionalAPI(a.cache, a.conditionFromFunc(predicate), a.logger)
+}
+
+// Conditional interface implementation
+// conditionFromFunc returns a Conditional from a predicate function
+func (a api) conditionFromFunc(predicate interface{}) Conditional {
+ table, err := a.getTableFromFunc(predicate)
+ if err != nil {
+ return newErrorConditional(err)
+ }
+
+ condition, err := newPredicateConditional(table, a.cache, predicate)
+ if err != nil {
+ return newErrorConditional(err)
+ }
+ return condition
+}
+
+// conditionFromModels returns a Conditional from one or more models.
+func (a api) conditionFromModels(models []model.Model) Conditional {
+ if len(models) == 0 {
+ return newErrorConditional(fmt.Errorf("at least one model required"))
+ }
+ tableName, err := a.getTableFromModel(models[0])
+ if tableName == "" {
+ return newErrorConditional(err)
+ }
+ conditional, err := newEqualityConditional(tableName, a.cache, models)
+ if err != nil {
+ return newErrorConditional(err)
+ }
+ return conditional
+}
+
+// conditionFromExplicitConditions returns a Conditional from a model and a set
+// of explicit conditions. If matchAll is true, then models that match all the given
+// conditions are selected by the Conditional. If matchAll is false, then any model
+// that matches one of the conditions is selected.
+func (a api) conditionFromExplicitConditions(matchAll bool, m model.Model, cond ...model.Condition) Conditional {
+ if len(cond) == 0 {
+ return newErrorConditional(fmt.Errorf("at least one condition is required"))
+ }
+ tableName, err := a.getTableFromModel(m)
+ if tableName == "" {
+ return newErrorConditional(err)
+ }
+ conditional, err := newExplicitConditional(tableName, a.cache, matchAll, m, cond...)
+ if err != nil {
+ return newErrorConditional(err)
+ }
+ return conditional
+}
+
+// Get is a generic Get function capable of returning (through a provided pointer)
+// an instance of any row in the cache.
+// 'm' must be a pointer to a Model that exists in the ClientDBModel
+//
+// The way the cache is searched depends on the fields already populated in 'result'
+// Any table index (including _uuid) will be used for comparison
+func (a api) Get(ctx context.Context, m model.Model) error {
+ table, err := a.getTableFromModel(m)
+ if err != nil {
+ return err
+ }
+
+ tableCache := a.cache.Table(table)
+ if tableCache == nil {
+ return ErrNotFound
+ }
+
+ _, found, err := tableCache.RowByModel(m)
+ if err != nil {
+ return err
+ } else if found == nil {
+ return ErrNotFound
+ }
+
+ model.CloneInto(found, m)
+
+ return nil
+}
+
+// Create is a generic function capable of creating any row in the DB
+// A valid Model (pointer to object) must be provided.
+func (a api) Create(models ...model.Model) ([]ovsdb.Operation, error) {
+ var operations []ovsdb.Operation
+
+ for _, model := range models {
+ var realUUID, namedUUID string
+ var err error
+
+ tableName, err := a.getTableFromModel(model)
+ if err != nil {
+ return nil, err
+ }
+
+ // Read _uuid field, and use it as named-uuid
+ info, err := a.cache.DatabaseModel().NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+ if uuid, err := info.FieldByColumn("_uuid"); err == nil {
+ tmpUUID := uuid.(string)
+ if ovsdb.IsNamedUUID(tmpUUID) {
+ namedUUID = tmpUUID
+ } else if ovsdb.IsValidUUID(tmpUUID) {
+ realUUID = tmpUUID
+ }
+ } else {
+ return nil, err
+ }
+
+ row, err := a.cache.Mapper().NewRow(info)
+ if err != nil {
+ return nil, err
+ }
+ // UUID is given in the operation, not the object
+ delete(row, "_uuid")
+
+ operations = append(operations, ovsdb.Operation{
+ Op: ovsdb.OperationInsert,
+ Table: tableName,
+ Row: row,
+ UUID: realUUID,
+ UUIDName: namedUUID,
+ })
+ }
+ return operations, nil
+}
+
+// Mutate returns the operations needed to transform one Model into another
+func (a api) Mutate(model model.Model, mutationObjs ...model.Mutation) ([]ovsdb.Operation, error) {
+ var mutations []ovsdb.Mutation
+ var operations []ovsdb.Operation
+
+ if len(mutationObjs) < 1 {
+ return nil, fmt.Errorf("at least one Mutation must be provided")
+ }
+
+ tableName := a.cache.DatabaseModel().FindTable(reflect.ValueOf(model).Type())
+ if tableName == "" {
+ return nil, fmt.Errorf("table not found for object")
+ }
+ table := a.cache.Mapper().Schema.Table(tableName)
+ if table == nil {
+ return nil, fmt.Errorf("schema error: table not found in Database Model for type %s", reflect.TypeOf(model))
+ }
+
+ conditions, err := a.cond.Generate()
+ if err != nil {
+ return nil, err
+ }
+
+ info, err := a.cache.DatabaseModel().NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, mobj := range mutationObjs {
+ col, err := info.ColumnByPtr(mobj.Field)
+ if err != nil {
+ return nil, err
+ }
+
+ mutation, err := a.cache.Mapper().NewMutation(info, col, mobj.Mutator, mobj.Value)
+ if err != nil {
+ return nil, err
+ }
+ mutations = append(mutations, *mutation)
+ }
+ for _, condition := range conditions {
+ operations = append(operations,
+ ovsdb.Operation{
+ Op: ovsdb.OperationMutate,
+ Table: tableName,
+ Mutations: mutations,
+ Where: condition,
+ },
+ )
+ }
+
+ return operations, nil
+}
+
+// Update is a generic function capable of updating any mutable field in any row in the database
+// Additional fields can be passed (variadic opts) to indicate fields to be updated
+// All immutable fields will be ignored
+func (a api) Update(model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) {
+ var operations []ovsdb.Operation
+ table, err := a.getTableFromModel(model)
+ if err != nil {
+ return nil, err
+ }
+ tableSchema := a.cache.Mapper().Schema.Table(table)
+ info, err := a.cache.DatabaseModel().NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(fields) > 0 {
+ for _, f := range fields {
+ colName, err := info.ColumnByPtr(f)
+ if err != nil {
+ return nil, err
+ }
+ if !tableSchema.Columns[colName].Mutable() {
+ return nil, fmt.Errorf("unable to update field %s of table %s as it is not mutable", colName, table)
+ }
+ }
+ }
+
+ conditions, err := a.cond.Generate()
+ if err != nil {
+ return nil, err
+ }
+
+ row, err := a.cache.Mapper().NewRow(info, fields...)
+ if err != nil {
+ return nil, err
+ }
+
+ for colName, column := range tableSchema.Columns {
+ if !column.Mutable() {
+ a.logger.V(2).Info("removing immutable field", "name", colName)
+ delete(row, colName)
+ }
+ }
+ delete(row, "_uuid")
+
+ if len(row) == 0 {
+ return nil, fmt.Errorf("attempted to update using an empty row. please check that all fields you wish to update are mutable")
+ }
+
+ for _, condition := range conditions {
+ operations = append(operations,
+ ovsdb.Operation{
+ Op: ovsdb.OperationUpdate,
+ Table: table,
+ Row: row,
+ Where: condition,
+ },
+ )
+ }
+ return operations, nil
+}
+
+// Delete returns the Operation needed to delete the selected models from the database
+func (a api) Delete() ([]ovsdb.Operation, error) {
+ var operations []ovsdb.Operation
+ conditions, err := a.cond.Generate()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, condition := range conditions {
+ operations = append(operations,
+ ovsdb.Operation{
+ Op: ovsdb.OperationDelete,
+ Table: a.cond.Table(),
+ Where: condition,
+ },
+ )
+ }
+
+ return operations, nil
+}
+
+func (a api) Wait(untilConFun ovsdb.WaitCondition, timeout *int, model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) {
+ var operations []ovsdb.Operation
+
+ /*
+ Ref: https://datatracker.ietf.org/doc/html/rfc7047.txt#section-5.2.6
+
+ lb := &nbdb.LoadBalancer{}
+ condition := model.Condition{
+ Field: &lb.Name,
+ Function: ovsdb.ConditionEqual,
+ Value: "lbName",
+ }
+ timeout0 := 0
+ client.Where(lb, condition).Wait(
+ ovsdb.WaitConditionNotEqual, // Until
+ &timeout0, // Timeout
+ &lb, // Row (and Table)
+ &lb.Name, // Cols (aka fields)
+ )
+ */
+
+ conditions, err := a.cond.Generate()
+ if err != nil {
+ return nil, err
+ }
+
+ table, err := a.getTableFromModel(model)
+ if err != nil {
+ return nil, err
+ }
+
+ info, err := a.cache.DatabaseModel().NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+
+ var columnNames []string
+ if len(fields) > 0 {
+ columnNames = make([]string, 0, len(fields))
+ for _, f := range fields {
+ colName, err := info.ColumnByPtr(f)
+ if err != nil {
+ return nil, err
+ }
+ columnNames = append(columnNames, colName)
+ }
+ }
+
+ row, err := a.cache.Mapper().NewRow(info, fields...)
+ if err != nil {
+ return nil, err
+ }
+ rows := []ovsdb.Row{row}
+
+ for _, condition := range conditions {
+ operation := ovsdb.Operation{
+ Op: ovsdb.OperationWait,
+ Table: table,
+ Where: condition,
+ Until: string(untilConFun),
+ Columns: columnNames,
+ Rows: rows,
+ }
+
+ if timeout != nil {
+ operation.Timeout = timeout
+ }
+
+ operations = append(operations, operation)
+ }
+
+ return operations, nil
+}
+
+// getTableFromModel returns the table name from a Model object after performing
+// type verifications on the model
+func (a api) getTableFromModel(m interface{}) (string, error) {
+ if _, ok := m.(model.Model); !ok {
+ return "", &ErrWrongType{reflect.TypeOf(m), "Type does not implement Model interface"}
+ }
+ table := a.cache.DatabaseModel().FindTable(reflect.TypeOf(m))
+ if table == "" {
+ return "", &ErrWrongType{reflect.TypeOf(m), "Model not found in Database Model"}
+ }
+ return table, nil
+}
+
+// getTableFromFunc returns the table name from the predicate after performing
+// type verifications
+func (a api) getTableFromFunc(predicate interface{}) (string, error) {
+ predType := reflect.TypeOf(predicate)
+ if predType == nil || predType.Kind() != reflect.Func {
+ return "", &ErrWrongType{predType, "Expected function"}
+ }
+ if predType.NumIn() != 1 || predType.NumOut() != 1 || predType.Out(0).Kind() != reflect.Bool {
+ return "", &ErrWrongType{predType, "Expected func(Model) bool"}
+ }
+
+ modelInterface := reflect.TypeOf((*model.Model)(nil)).Elem()
+ modelType := predType.In(0)
+ if !modelType.Implements(modelInterface) {
+ return "", &ErrWrongType{predType,
+ fmt.Sprintf("Type %s does not implement Model interface", modelType.String())}
+ }
+
+ table := a.cache.DatabaseModel().FindTable(modelType)
+ if table == "" {
+ return "", &ErrWrongType{predType,
+ fmt.Sprintf("Model %s not found in Database Model", modelType.String())}
+ }
+ return table, nil
+}
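+
+// For illustration only (assumed names, not part of this package): the predicate
+// shape verified above is a function that takes a single concrete Model pointer and
+// returns a bool, as used with WhereCache:
+//
+//	ovs.WhereCache(func(ls *MyLogicalSwitch) bool {
+//		return strings.HasPrefix(ls.Name, "ext-")
+//	})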
+
+// newAPI returns a new API to interact with the database
+func newAPI(cache *cache.TableCache, logger *logr.Logger) API {
+ return api{
+ cache: cache,
+ logger: logger,
+ }
+}
+
+// newConditionalAPI returns a new ConditionalAPI to interact with the database
+func newConditionalAPI(cache *cache.TableCache, cond Conditional, logger *logr.Logger) ConditionalAPI {
+ return api{
+ cache: cache,
+ cond: cond,
+ logger: logger,
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go b/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go
new file mode 100644
index 000000000..36ea476e0
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go
@@ -0,0 +1,167 @@
+package client
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/stretchr/testify/assert"
+)
+
+var apiTestSchema = []byte(`{
+ "name": "OVN_Northbound",
+ "version": "5.31.0",
+ "cksum": "2352750632 28701",
+ "tables": {
+ "Logical_Switch": {
+ "columns": {
+ "name": {"type": "string"},
+ "ports": {"type": {"key": {"type": "uuid",
+ "refTable": "Logical_Switch_Port",
+ "refType": "strong"},
+ "min": 0,
+ "max": "unlimited"}},
+ "acls": {"type": {"key": {"type": "uuid",
+ "refTable": "ACL",
+ "refType": "strong"},
+ "min": 0,
+ "max": "unlimited"}},
+ "qos_rules": {"type": {"key": {"type": "uuid",
+ "refTable": "QoS",
+ "refType": "strong"},
+ "min": 0,
+ "max": "unlimited"}},
+ "load_balancer": {"type": {"key": {"type": "uuid",
+ "refTable": "Load_Balancer",
+ "refType": "weak"},
+ "min": 0,
+ "max": "unlimited"}},
+ "dns_records": {"type": {"key": {"type": "uuid",
+ "refTable": "DNS",
+ "refType": "weak"},
+ "min": 0,
+ "max": "unlimited"}},
+ "other_config": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}},
+ "external_ids": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}},
+ "forwarding_groups": {
+ "type": {"key": {"type": "uuid",
+ "refTable": "Forwarding_Group",
+ "refType": "strong"},
+ "min": 0, "max": "unlimited"}}},
+ "isRoot": true},
+ "Logical_Switch_Port": {
+ "columns": {
+ "name": {"type": "string"},
+ "type": {"type": "string"},
+ "options": {
+ "type": {"key": "string",
+ "value": "string",
+ "min": 0,
+ "max": "unlimited"}},
+ "parent_name": {"type": {"key": "string", "min": 0, "max": 1}},
+ "tag_request": {
+ "type": {"key": {"type": "integer",
+ "minInteger": 0,
+ "maxInteger": 4095},
+ "min": 0, "max": 1}},
+ "tag": {
+ "type": {"key": {"type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4095},
+ "min": 0, "max": 1}},
+ "addresses": {"type": {"key": "string",
+ "min": 0,
+ "max": "unlimited"}},
+ "dynamic_addresses": {"type": {"key": "string",
+ "min": 0,
+ "max": 1}},
+ "port_security": {"type": {"key": "string",
+ "min": 0,
+ "max": "unlimited"}},
+ "up": {"type": {"key": "boolean", "min": 0, "max": 1}},
+ "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}},
+ "dhcpv4_options": {"type": {"key": {"type": "uuid",
+ "refTable": "DHCP_Options",
+ "refType": "weak"},
+ "min": 0,
+ "max": 1}},
+ "dhcpv6_options": {"type": {"key": {"type": "uuid",
+ "refTable": "DHCP_Options",
+ "refType": "weak"},
+ "min": 0,
+ "max": 1}},
+ "ha_chassis_group": {
+ "type": {"key": {"type": "uuid",
+ "refTable": "HA_Chassis_Group",
+ "refType": "strong"},
+ "min": 0,
+ "max": 1}},
+ "external_ids": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}}},
+ "indexes": [["name"]],
+ "isRoot": false}
+ }
+ }`)
+
+type testLogicalSwitch struct {
+ UUID string `ovsdb:"_uuid"`
+ Ports []string `ovsdb:"ports"`
+ ExternalIds map[string]string `ovsdb:"external_ids"`
+ Name string `ovsdb:"name"`
+ QosRules []string `ovsdb:"qos_rules"`
+ LoadBalancer []string `ovsdb:"load_balancer"`
+ DNSRecords []string `ovsdb:"dns_records"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+ ForwardingGroups []string `ovsdb:"forwarding_groups"`
+ Acls []string `ovsdb:"acls"`
+}
+
+// Table returns the table name. It's part of the Model interface
+func (*testLogicalSwitch) Table() string {
+ return "Logical_Switch"
+}
+
+// testLogicalSwitchPort struct defines an object in the Logical_Switch_Port table
+type testLogicalSwitchPort struct {
+ UUID string `ovsdb:"_uuid"`
+ Up *bool `ovsdb:"up"`
+ Dhcpv4Options *string `ovsdb:"dhcpv4_options"`
+ Name string `ovsdb:"name"`
+ DynamicAddresses *string `ovsdb:"dynamic_addresses"`
+ HaChassisGroup *string `ovsdb:"ha_chassis_group"`
+ Options map[string]string `ovsdb:"options"`
+ Enabled *bool `ovsdb:"enabled"`
+ Addresses []string `ovsdb:"addresses"`
+ Dhcpv6Options *string `ovsdb:"dhcpv6_options"`
+ TagRequest *int `ovsdb:"tag_request"`
+ Tag *int `ovsdb:"tag"`
+ PortSecurity []string `ovsdb:"port_security"`
+ ExternalIds map[string]string `ovsdb:"external_ids"`
+ Type string `ovsdb:"type"`
+ ParentName *string `ovsdb:"parent_name"`
+}
+
+// Table returns the table name. It's part of the Model interface
+func (*testLogicalSwitchPort) Table() string {
+ return "Logical_Switch_Port"
+}
+
+func apiTestCache(t testing.TB, data map[string]map[string]model.Model) *cache.TableCache {
+ var schema ovsdb.DatabaseSchema
+ err := json.Unmarshal(apiTestSchema, &schema)
+ assert.Nil(t, err)
+ db, err := model.NewClientDBModel("OVN_Northbound", map[string]model.Model{"Logical_Switch": &testLogicalSwitch{}, "Logical_Switch_Port": &testLogicalSwitchPort{}})
+ assert.Nil(t, err)
+ dbModel, errs := model.NewDatabaseModel(schema, db)
+ assert.Empty(t, errs)
+ cache, err := cache.NewTableCache(dbModel, data, nil)
+ assert.Nil(t, err)
+ return cache
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/client.go b/vendor/github.com/ovn-org/libovsdb/client/client.go
new file mode 100644
index 000000000..10ea757ec
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/client.go
@@ -0,0 +1,1480 @@
+package client
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "net/url"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/cenkalti/rpc2"
+ "github.com/cenkalti/rpc2/jsonrpc"
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/libovsdb/ovsdb/serverdb"
+)
+
+// Constants defined for libovsdb
+const (
+ SSL = "ssl"
+ TCP = "tcp"
+ UNIX = "unix"
+)
+
+const serverDB = "_Server"
+
+// ErrNotConnected is an error returned when the client is not connected
+var ErrNotConnected = errors.New("not connected")
+
+// ErrAlreadyConnected is an error returned when the client is already connected
+var ErrAlreadyConnected = errors.New("already connected")
+
+// ErrUnsupportedRPC is an error returned when an unsupported RPC method is called
+var ErrUnsupportedRPC = errors.New("unsupported rpc")
+
+// Client represents an OVSDB Client Connection
+// It provides all the necessary functionality to Connect to a server,
+// perform transactions, and build your own replica of the database with
+// Monitor or MonitorAll. It also provides a Cache that is populated from OVSDB
+// update notifications.
+type Client interface {
+ Connect(context.Context) error
+ Disconnect()
+ Close()
+ Schema() ovsdb.DatabaseSchema
+ Cache() *cache.TableCache
+ UpdateEndpoints([]string)
+ SetOption(Option) error
+ Connected() bool
+ DisconnectNotify() chan struct{}
+ Echo(context.Context) error
+ Transact(context.Context, ...ovsdb.Operation) ([]ovsdb.OperationResult, error)
+ Monitor(context.Context, *Monitor) (MonitorCookie, error)
+ MonitorAll(context.Context) (MonitorCookie, error)
+ MonitorCancel(ctx context.Context, cookie MonitorCookie) error
+ NewMonitor(...MonitorOption) *Monitor
+ CurrentEndpoint() string
+ API
+}
+
+type bufferedUpdate struct {
+ updates *ovsdb.TableUpdates
+ updates2 *ovsdb.TableUpdates2
+ lastTxnID string
+}
+
+type epInfo struct {
+ address string
+ serverID string
+}
+
+// ovsdbClient is an OVSDB client
+type ovsdbClient struct {
+ options *options
+ metrics metrics
+ connected bool
+ rpcClient *rpc2.Client
+ rpcMutex sync.RWMutex
+ // endpoints contains all possible endpoints; the first element is
+ // the active endpoint if connected=true
+ endpoints []*epInfo
+
+ // The name of the "primary" database - that is to say, the DB
+ // that the user expects to interact with.
+ primaryDBName string
+ databases map[string]*database
+
+ errorCh chan error
+ stopCh chan struct{}
+ disconnect chan struct{}
+ shutdown bool
+ shutdownMutex sync.Mutex
+
+ handlerShutdown *sync.WaitGroup
+
+ trafficSeen chan struct{}
+
+ logger *logr.Logger
+}
+
+// database is everything needed to map between go types and an ovsdb Database
+type database struct {
+ // model encapsulates the database schema and model of the database we're connecting to
+ model model.DatabaseModel
+ // modelMutex protects model from being replaced (via reconnect) while in use
+ modelMutex sync.RWMutex
+
+ // cache is used to store the updates for monitored tables
+ cache *cache.TableCache
+ // cacheMutex protects cache from being replaced (via reconnect) while in use
+ cacheMutex sync.RWMutex
+
+ api API
+
+ // any ongoing monitors, so we can re-create them if we disconnect
+ monitors map[string]*Monitor
+ monitorsMutex sync.Mutex
+
+ // tracks any outstanding updates while waiting for a monitor response
+ deferUpdates bool
+ deferredUpdates []*bufferedUpdate
+}
+
+// NewOVSDBClient creates a new OVSDB Client with the provided
+// database model. The client can be configured using one or more Option(s),
+// like WithTLSConfig. If no WithEndpoint option is supplied, the default of
+// unix:/var/run/openvswitch/ovsdb.sock is used
+func NewOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (Client, error) {
+ return newOVSDBClient(clientDBModel, opts...)
+}
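+
+// A minimal usage sketch (assumptions: a previously built clientDBModel, a reachable
+// endpoint, and WithEndpoint being one of this package's Option constructors; error
+// handling omitted for brevity):
+//
+//	ovs, _ := NewOVSDBClient(clientDBModel, WithEndpoint("tcp:127.0.0.1:6641"))
+//	_ = ovs.Connect(context.Background())
+//	_, _ = ovs.MonitorAll(context.Background())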
+
+// newOVSDBClient creates a new ovsdbClient
+func newOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (*ovsdbClient, error) {
+ ovs := &ovsdbClient{
+ primaryDBName: clientDBModel.Name(),
+ databases: map[string]*database{
+ clientDBModel.Name(): {
+ model: model.NewPartialDatabaseModel(clientDBModel),
+ monitors: make(map[string]*Monitor),
+ deferUpdates: true,
+ deferredUpdates: make([]*bufferedUpdate, 0),
+ },
+ },
+ errorCh: make(chan error),
+ handlerShutdown: &sync.WaitGroup{},
+ disconnect: make(chan struct{}),
+ }
+ var err error
+ ovs.options, err = newOptions(opts...)
+ if err != nil {
+ return nil, err
+ }
+ for _, address := range ovs.options.endpoints {
+ ovs.endpoints = append(ovs.endpoints, &epInfo{address: address})
+ }
+
+ if ovs.options.logger == nil {
+ // create a new logger to log to stdout
+ l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("libovsdb").WithValues(
+ "database", ovs.primaryDBName,
+ )
+ stdr.SetVerbosity(5)
+ ovs.logger = &l
+ } else {
+ // add the "database" value to the structured logger
+ // to make it easier to tell between different DBs (e.g. ovn nbdb vs. sbdb)
+ l := ovs.options.logger.WithValues(
+ "database", ovs.primaryDBName,
+ )
+ ovs.logger = &l
+ }
+ ovs.metrics.init(clientDBModel.Name(), ovs.options.metricNamespace, ovs.options.metricSubsystem)
+ ovs.registerMetrics()
+
+ // if we should only connect to the leader, then add the special "_Server" database as well
+ if ovs.options.leaderOnly {
+ sm, err := serverdb.FullDatabaseModel()
+ if err != nil {
+ return nil, fmt.Errorf("could not initialize model _Server: %w", err)
+ }
+ ovs.databases[serverDB] = &database{
+ model: model.NewPartialDatabaseModel(sm),
+ monitors: make(map[string]*Monitor),
+ }
+ }
+
+ return ovs, nil
+}
+
+// Connect opens a connection to an OVSDB Server using the
+// endpoint provided when the Client was created.
+// The connection can be configured using one or more Option(s), like WithTLSConfig
+// If no WithEndpoint option is supplied, the default of unix:/var/run/openvswitch/ovsdb.sock is used
+func (o *ovsdbClient) Connect(ctx context.Context) error {
+ if err := o.connect(ctx, false); err != nil {
+ if err == ErrAlreadyConnected {
+ return nil
+ }
+ return err
+ }
+ if o.options.leaderOnly {
+ if err := o.watchForLeaderChange(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// moveEndpointFirst moves the endpoint at index i to the front of the
+// endpoints slice, indicating it is the active endpoint
+func (o *ovsdbClient) moveEndpointFirst(i int) {
+ firstEp := o.endpoints[i]
+ othereps := append(o.endpoints[:i], o.endpoints[i+1:]...)
+ o.endpoints = append([]*epInfo{firstEp}, othereps...)
+}
+
+// moveEndpointLast moves the requested endpoint to the end of the list
+func (o *ovsdbClient) moveEndpointLast(i int) {
+ lastEp := o.endpoints[i]
+ othereps := append(o.endpoints[:i], o.endpoints[i+1:]...)
+ o.endpoints = append(othereps, lastEp)
+}
+
+func (o *ovsdbClient) resetRPCClient() {
+ if o.rpcClient != nil {
+ o.rpcClient.Close()
+ o.rpcClient = nil
+ }
+}
+
+func (o *ovsdbClient) connect(ctx context.Context, reconnect bool) error {
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ if o.rpcClient != nil {
+ return ErrAlreadyConnected
+ }
+
+ connected := false
+ connectErrors := []error{}
+ for i, endpoint := range o.endpoints {
+ u, err := url.Parse(endpoint.address)
+ if err != nil {
+ return err
+ }
+ if sid, err := o.tryEndpoint(ctx, u); err != nil {
+ o.resetRPCClient()
+ connectErrors = append(connectErrors,
+ fmt.Errorf("failed to connect to %s: %w", endpoint.address, err))
+ continue
+ } else {
+ o.logger.V(3).Info("successfully connected", "endpoint", endpoint.address, "sid", sid)
+ endpoint.serverID = sid
+ o.moveEndpointFirst(i)
+ connected = true
+ break
+ }
+ }
+
+ if !connected {
+ if len(connectErrors) == 1 {
+ return connectErrors[0]
+ }
+ var combined []string
+ for _, e := range connectErrors {
+ combined = append(combined, e.Error())
+ }
+
+ return fmt.Errorf("unable to connect to any endpoints: %s", strings.Join(combined, ". "))
+ }
+
+ // if we're reconnecting, re-start all the monitors
+ if reconnect {
+ o.logger.V(3).Info("reconnected - restarting monitors")
+ for dbName, db := range o.databases {
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+
+ // Purge entire cache if no monitors exist to update dynamically
+ if len(db.monitors) == 0 {
+ db.cache.Purge(db.model)
+ continue
+ }
+
+ // Restart all monitors; each monitor will handle purging
+ // the cache if necessary
+ for id, request := range db.monitors {
+ err := o.monitor(ctx, MonitorCookie{DatabaseName: dbName, ID: id}, true, request)
+ if err != nil {
+ o.resetRPCClient()
+ return err
+ }
+ }
+ }
+ }
+
+ go o.handleDisconnectNotification()
+ if o.options.inactivityTimeout > 0 {
+ o.handlerShutdown.Add(1)
+ go o.handleInactivityProbes()
+ }
+ for _, db := range o.databases {
+ o.handlerShutdown.Add(1)
+ eventStopChan := make(chan struct{})
+ go o.handleClientErrors(eventStopChan)
+ o.handlerShutdown.Add(1)
+ go func(db *database) {
+ defer o.handlerShutdown.Done()
+ db.cache.Run(o.stopCh)
+ close(eventStopChan)
+ }(db)
+ }
+
+ o.connected = true
+ return nil
+}
+
+// tryEndpoint connects to a single database endpoint. Returns the
+// server ID (if clustered) on success, or an error.
+func (o *ovsdbClient) tryEndpoint(ctx context.Context, u *url.URL) (string, error) {
+ o.logger.V(3).Info("trying to connect", "endpoint", fmt.Sprintf("%v", u))
+ var dialer net.Dialer
+ var err error
+ var c net.Conn
+
+ switch u.Scheme {
+ case UNIX:
+ c, err = dialer.DialContext(ctx, u.Scheme, u.Path)
+ case TCP:
+ c, err = dialer.DialContext(ctx, u.Scheme, u.Opaque)
+ case SSL:
+ dialer := tls.Dialer{
+ Config: o.options.tlsConfig,
+ }
+ c, err = dialer.DialContext(ctx, "tcp", u.Opaque)
+ default:
+ err = fmt.Errorf("unknown network protocol %s", u.Scheme)
+ }
+ if err != nil {
+ return "", fmt.Errorf("failed to open connection: %w", err)
+ }
+
+ o.createRPC2Client(c)
+
+ serverDBNames, err := o.listDbs(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ // for every requested database, ensure the DB exists in the server and
+ // that the schema matches what we expect.
+ for dbName, db := range o.databases {
+ // check the server has what we want
+ found := false
+ for _, name := range serverDBNames {
+ if name == dbName {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return "", fmt.Errorf("target database %s not found", dbName)
+ }
+
+ // load and validate the schema
+ schema, err := o.getSchema(ctx, dbName)
+ if err != nil {
+ return "", err
+ }
+
+ db.modelMutex.Lock()
+ var errors []error
+ db.model, errors = model.NewDatabaseModel(schema, db.model.Client())
+ db.modelMutex.Unlock()
+ if len(errors) > 0 {
+ var combined []string
+ for _, err := range errors {
+ combined = append(combined, err.Error())
+ }
+ return "", fmt.Errorf("database %s validation error (%d): %s",
+ dbName, len(errors), strings.Join(combined, ". "))
+ }
+
+ db.cacheMutex.Lock()
+ if db.cache == nil {
+ db.cache, err = cache.NewTableCache(db.model, nil, o.logger)
+ if err != nil {
+ db.cacheMutex.Unlock()
+ return "", err
+ }
+ db.api = newAPI(db.cache, o.logger)
+ }
+ db.cacheMutex.Unlock()
+ }
+
+ // check that this is the leader
+ var sid string
+ if o.options.leaderOnly {
+ var leader bool
+ leader, sid, err = o.isEndpointLeader(ctx)
+ if err != nil {
+ return "", err
+ }
+ if !leader {
+ return "", fmt.Errorf("endpoint is not leader")
+ }
+ }
+ return sid, nil
+}
+
+// createRPC2Client creates an rpcClient using the provided connection
+// It is also responsible for setting up goroutines for client-side event handling
+// Should only be called when the mutex is held
+func (o *ovsdbClient) createRPC2Client(conn net.Conn) {
+ o.stopCh = make(chan struct{})
+ if o.options.inactivityTimeout > 0 {
+ o.trafficSeen = make(chan struct{})
+ }
+ o.rpcClient = rpc2.NewClientWithCodec(jsonrpc.NewJSONCodec(conn))
+ o.rpcClient.SetBlocking(true)
+ o.rpcClient.Handle("echo", func(_ *rpc2.Client, args []interface{}, reply *[]interface{}) error {
+ return o.echo(args, reply)
+ })
+ o.rpcClient.Handle("update", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error {
+ return o.update(args, reply)
+ })
+ o.rpcClient.Handle("update2", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error {
+ return o.update2(args, reply)
+ })
+ o.rpcClient.Handle("update3", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error {
+ return o.update3(args, reply)
+ })
+ go o.rpcClient.Run()
+}
+
+// isEndpointLeader returns true if the currently connected endpoint is leader,
+// otherwise false or an error. If the currently connected endpoint is the leader
+// and the database is clustered, also returns the database's Server ID.
+// Assumes rpcMutex is held.
+func (o *ovsdbClient) isEndpointLeader(ctx context.Context) (bool, string, error) {
+ op := ovsdb.Operation{
+ Op: ovsdb.OperationSelect,
+ Table: "Database",
+ Columns: []string{"name", "model", "leader", "sid"},
+ }
+ results, err := o.transact(ctx, serverDB, true, op)
+ if err != nil {
+ return false, "", fmt.Errorf("could not check if server was leader: %w", err)
+ }
+ // for now, if no rows are returned, just accept this server
+ if len(results) != 1 {
+ return true, "", nil
+ }
+ result := results[0]
+ if len(result.Rows) == 0 {
+ return true, "", nil
+ }
+
+ for _, row := range result.Rows {
+ dbName, ok := row["name"].(string)
+ if !ok {
+ return false, "", fmt.Errorf("could not parse name")
+ }
+ if dbName != o.primaryDBName {
+ continue
+ }
+
+ model, ok := row["model"].(string)
+ if !ok {
+ return false, "", fmt.Errorf("could not parse model")
+ }
+
+ // the database reports whether or not it is part of a cluster via the
+ // "model" column. If it's not clustered, it is by definition leader.
+ if model != serverdb.DatabaseModelClustered {
+ return true, "", nil
+ }
+
+ // Clustered database must have a Server ID
+ sid, ok := row["sid"].(ovsdb.UUID)
+ if !ok {
+ return false, "", fmt.Errorf("could not parse server id")
+ }
+
+ leader, ok := row["leader"].(bool)
+ if !ok {
+ return false, "", fmt.Errorf("could not parse leader")
+ }
+
+ return leader, sid.GoUUID, nil
+ }
+
+ // Extremely unlikely: there is no _Server row for the desired DB (which we made sure existed)
+ // for now, just continue
+ o.logger.V(3).Info("Couldn't find a row in _Server for our database. Continuing without leader detection", "database", o.primaryDBName)
+ return true, "", nil
+}
+
+func (o *ovsdbClient) primaryDB() *database {
+ return o.databases[o.primaryDBName]
+}
+
+// Schema returns the DatabaseSchema that is being used by the client
+// it will be nil until a connection has been established
+func (o *ovsdbClient) Schema() ovsdb.DatabaseSchema {
+ db := o.primaryDB()
+ db.modelMutex.RLock()
+ defer db.modelMutex.RUnlock()
+ return db.model.Schema
+}
+
+// Cache returns the TableCache that is populated from
+// ovsdb update notifications. It will be nil until a connection
+// has been established, and empty unless you call Monitor
+func (o *ovsdbClient) Cache() *cache.TableCache {
+ db := o.primaryDB()
+ db.cacheMutex.RLock()
+ defer db.cacheMutex.RUnlock()
+ return db.cache
+}
+
+// UpdateEndpoints sets client endpoints
+// It is intended to be called at runtime
+func (o *ovsdbClient) UpdateEndpoints(endpoints []string) {
+ o.logger.V(3).Info("update endpoints", "endpoints", endpoints)
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ if len(endpoints) == 0 {
+ endpoints = []string{defaultUnixEndpoint}
+ }
+ o.options.endpoints = endpoints
+ originEps := o.endpoints[:]
+ var newEps []*epInfo
+ activeIdx := -1
+ for i, address := range o.options.endpoints {
+ var serverID string
+ for j, origin := range originEps {
+ if address == origin.address {
+ if j == 0 {
+ activeIdx = i
+ }
+ serverID = origin.serverID
+ break
+ }
+ }
+ newEps = append(newEps, &epInfo{address: address, serverID: serverID})
+ }
+ o.endpoints = newEps
+ if activeIdx > 0 {
+ o.moveEndpointFirst(activeIdx)
+ } else if activeIdx == -1 {
+ o._disconnect()
+ }
+}
+
+// SetOption sets a new value for an option.
+// It may only be called when the client is not connected
+func (o *ovsdbClient) SetOption(opt Option) error {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ if o.rpcClient != nil {
+ return fmt.Errorf("cannot set option when client is connected")
+ }
+ return opt(o.options)
+}
+
+// Connected returns whether or not the client is currently connected to the server
+func (o *ovsdbClient) Connected() bool {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ return o.connected
+}
+
+func (o *ovsdbClient) CurrentEndpoint() string {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ if o.rpcClient == nil {
+ return ""
+ }
+ return o.endpoints[0].address
+}
+
+// DisconnectNotify returns a channel which will notify the caller when the
+// server has disconnected
+func (o *ovsdbClient) DisconnectNotify() chan struct{} {
+ return o.disconnect
+}
+
+// RFC 7047 : Section 4.1.11 : Echo
+func (o *ovsdbClient) echo(args []interface{}, reply *[]interface{}) error {
+ *reply = args
+ return nil
+}
+
+// RFC 7047 : Update Notification Section 4.1.6
+// params is an array of length 2: [json-value, table-updates]
+// - json-value: the arbitrary json-value passed when creating the Monitor, i.e. the "cookie"
+// - table-updates: map of table name to table-update. Table-update is a map of uuid to (old, new) row pairs
+func (o *ovsdbClient) update(params []json.RawMessage, reply *[]interface{}) error {
+ cookie := MonitorCookie{}
+ *reply = []interface{}{}
+ if len(params) > 2 {
+ return fmt.Errorf("update requires exactly 2 args")
+ }
+ err := json.Unmarshal(params[0], &cookie)
+ if err != nil {
+ return err
+ }
+ var updates ovsdb.TableUpdates
+ err = json.Unmarshal(params[1], &updates)
+ if err != nil {
+ return err
+ }
+ db := o.databases[cookie.DatabaseName]
+ if db == nil {
+ return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName)
+ }
+ o.metrics.numUpdates.WithLabelValues(cookie.DatabaseName).Inc()
+ for tableName := range updates {
+ o.metrics.numTableUpdates.WithLabelValues(cookie.DatabaseName, tableName).Inc()
+ }
+
+ db.cacheMutex.Lock()
+ if db.deferUpdates {
+ db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{&updates, nil, ""})
+ db.cacheMutex.Unlock()
+ return nil
+ }
+ db.cacheMutex.Unlock()
+
+ // Update the local DB cache with the tableUpdates
+ db.cacheMutex.RLock()
+ err = db.cache.Update(cookie.ID, updates)
+ db.cacheMutex.RUnlock()
+
+ if err != nil {
+ o.errorCh <- err
+ }
+
+ return err
+}
+
+// update2 handling from ovsdb-server.7
+func (o *ovsdbClient) update2(params []json.RawMessage, reply *[]interface{}) error {
+ cookie := MonitorCookie{}
+ *reply = []interface{}{}
+ if len(params) > 2 {
+ return fmt.Errorf("update2 requires exactly 2 args")
+ }
+ err := json.Unmarshal(params[0], &cookie)
+ if err != nil {
+ return err
+ }
+ var updates ovsdb.TableUpdates2
+ err = json.Unmarshal(params[1], &updates)
+ if err != nil {
+ return err
+ }
+ db := o.databases[cookie.DatabaseName]
+ if db == nil {
+ return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName)
+ }
+
+ db.cacheMutex.Lock()
+ if db.deferUpdates {
+ db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, ""})
+ db.cacheMutex.Unlock()
+ return nil
+ }
+ db.cacheMutex.Unlock()
+
+ // Update the local DB cache with the tableUpdates
+ db.cacheMutex.RLock()
+ err = db.cache.Update2(cookie, updates)
+ db.cacheMutex.RUnlock()
+
+ if err != nil {
+ o.errorCh <- err
+ }
+
+ return err
+}
+
+// update3 handling from ovsdb-server.7
+func (o *ovsdbClient) update3(params []json.RawMessage, reply *[]interface{}) error {
+ cookie := MonitorCookie{}
+ *reply = []interface{}{}
+ if len(params) > 3 {
+ return fmt.Errorf("update requires exactly 3 args")
+ }
+ err := json.Unmarshal(params[0], &cookie)
+ if err != nil {
+ return err
+ }
+ var lastTransactionID string
+ err = json.Unmarshal(params[1], &lastTransactionID)
+ if err != nil {
+ return err
+ }
+ var updates ovsdb.TableUpdates2
+ err = json.Unmarshal(params[2], &updates)
+ if err != nil {
+ return err
+ }
+
+ db := o.databases[cookie.DatabaseName]
+ if db == nil {
+ return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName)
+ }
+
+ db.cacheMutex.Lock()
+ if db.deferUpdates {
+ db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, lastTransactionID})
+ db.cacheMutex.Unlock()
+ return nil
+ }
+ db.cacheMutex.Unlock()
+
+ // Update the local DB cache with the tableUpdates
+ db.cacheMutex.RLock()
+ err = db.cache.Update2(cookie, updates)
+ db.cacheMutex.RUnlock()
+
+ if err == nil {
+ db.monitorsMutex.Lock()
+ mon := db.monitors[cookie.ID]
+ mon.LastTransactionID = lastTransactionID
+ db.monitorsMutex.Unlock()
+ }
+
+ return err
+}
+
+// getSchema returns the schema in use for the provided database name
+// RFC 7047 : get_schema
+// Should only be called when mutex is held
+func (o *ovsdbClient) getSchema(ctx context.Context, dbName string) (ovsdb.DatabaseSchema, error) {
+ args := ovsdb.NewGetSchemaArgs(dbName)
+ var reply ovsdb.DatabaseSchema
+ err := o.rpcClient.CallWithContext(ctx, "get_schema", args, &reply)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return ovsdb.DatabaseSchema{}, ErrNotConnected
+ }
+ return ovsdb.DatabaseSchema{}, err
+ }
+ return reply, err
+}
+
+// listDbs returns the list of databases on the server
+// RFC 7047 : list_dbs
+// Should only be called when mutex is held
+func (o *ovsdbClient) listDbs(ctx context.Context) ([]string, error) {
+ var dbs []string
+ err := o.rpcClient.CallWithContext(ctx, "list_dbs", nil, &dbs)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return nil, ErrNotConnected
+ }
+ return nil, fmt.Errorf("listdbs failure - %v", err)
+ }
+ return dbs, err
+}
+
+// logFromContext returns a Logger from ctx or return the default logger
+func (o *ovsdbClient) logFromContext(ctx context.Context) *logr.Logger {
+ if logger, err := logr.FromContext(ctx); err == nil {
+ return &logger
+ }
+ return o.logger
+}
+
+// Transact performs the provided Operations on the database
+// RFC 7047 : transact
+func (o *ovsdbClient) Transact(ctx context.Context, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) {
+ logger := o.logFromContext(ctx)
+ o.rpcMutex.RLock()
+ if o.rpcClient == nil || !o.connected {
+ o.rpcMutex.RUnlock()
+ if o.options.reconnect {
+ logger.V(5).Info("blocking transaction until reconnected", "operations",
+ fmt.Sprintf("%+v", operation))
+ ticker := time.NewTicker(50 * time.Millisecond)
+ defer ticker.Stop()
+ ReconnectWaitLoop:
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, fmt.Errorf("%w: while awaiting reconnection", ctx.Err())
+ case <-ticker.C:
+ o.rpcMutex.RLock()
+ if o.rpcClient != nil && o.connected {
+ break ReconnectWaitLoop
+ }
+ o.rpcMutex.RUnlock()
+ }
+ }
+ } else {
+ return nil, ErrNotConnected
+ }
+ }
+ defer o.rpcMutex.RUnlock()
+ return o.transact(ctx, o.primaryDBName, false, operation...)
+}
+
+func (o *ovsdbClient) transact(ctx context.Context, dbName string, skipChWrite bool, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) {
+ logger := o.logFromContext(ctx)
+ var reply []ovsdb.OperationResult
+ db := o.databases[dbName]
+ db.modelMutex.RLock()
+ schema := o.databases[dbName].model.Schema
+ db.modelMutex.RUnlock()
+ if reflect.DeepEqual(schema, ovsdb.DatabaseSchema{}) {
+ return nil, fmt.Errorf("cannot transact to database %s: schema unknown", dbName)
+ }
+ if ok := schema.ValidateOperations(operation...); !ok {
+ return nil, fmt.Errorf("validation failed for the operation")
+ }
+
+ args := ovsdb.NewTransactArgs(dbName, operation...)
+ if o.rpcClient == nil {
+ return nil, ErrNotConnected
+ }
+ dbgLogger := logger.WithValues("database", dbName).V(4)
+ if dbgLogger.Enabled() {
+ dbgLogger.Info("transacting operations", "operations", fmt.Sprintf("%+v", operation))
+ }
+ err := o.rpcClient.CallWithContext(ctx, "transact", args, &reply)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return nil, ErrNotConnected
+ }
+ return nil, err
+ }
+
+ if !skipChWrite && o.trafficSeen != nil {
+ o.trafficSeen <- struct{}{}
+ }
+ return reply, nil
+}
+
+// MonitorAll is a convenience method to monitor every table/column
+func (o *ovsdbClient) MonitorAll(ctx context.Context) (MonitorCookie, error) {
+ m := newMonitor()
+ for name := range o.primaryDB().model.Types() {
+ m.Tables = append(m.Tables, TableMonitor{Table: name})
+ }
+ return o.Monitor(ctx, m)
+}
+
+// MonitorCancel will request cancellation of a previously issued monitor request
+// RFC 7047 : monitor_cancel
+func (o *ovsdbClient) MonitorCancel(ctx context.Context, cookie MonitorCookie) error {
+ var reply ovsdb.OperationResult
+ args := ovsdb.NewMonitorCancelArgs(cookie)
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ if o.rpcClient == nil {
+ return ErrNotConnected
+ }
+ err := o.rpcClient.CallWithContext(ctx, "monitor_cancel", args, &reply)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return ErrNotConnected
+ }
+ return err
+ }
+ if reply.Error != "" {
+ return fmt.Errorf("error while executing transaction: %s", reply.Error)
+ }
+ o.primaryDB().monitorsMutex.Lock()
+ defer o.primaryDB().monitorsMutex.Unlock()
+ delete(o.primaryDB().monitors, cookie.ID)
+ o.metrics.numMonitors.Dec()
+ return nil
+}
+
+// Monitor will provide updates for a given table/column
+// and populate the cache with them. Subsequent updates will be processed
+// by the Update Notifications
+// RFC 7047 : monitor
+func (o *ovsdbClient) Monitor(ctx context.Context, monitor *Monitor) (MonitorCookie, error) {
+ cookie := newMonitorCookie(o.primaryDBName)
+ db := o.databases[o.primaryDBName]
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+ return cookie, o.monitor(ctx, cookie, false, monitor)
+}
+
+// If fields is provided, the request will be constrained to the provided columns
+// If no fields are provided, all columns will be used
+func newMonitorRequest(data *mapper.Info, fields []string, conditions []ovsdb.Condition) (*ovsdb.MonitorRequest, error) {
+ var columns []string
+ if len(fields) > 0 {
+ columns = append(columns, fields...)
+ } else {
+ for c := range data.Metadata.TableSchema.Columns {
+ columns = append(columns, c)
+ }
+ }
+ return &ovsdb.MonitorRequest{Columns: columns, Where: conditions, Select: ovsdb.NewDefaultMonitorSelect()}, nil
+}
+
+// monitor must only be called with a lock on monitorsMutex
+//
+//gocyclo:ignore
+func (o *ovsdbClient) monitor(ctx context.Context, cookie MonitorCookie, reconnecting bool, monitor *Monitor) error {
+ // if we're reconnecting, we already hold the rpcMutex
+ if !reconnecting {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ }
+ if o.rpcClient == nil {
+ return ErrNotConnected
+ }
+ if len(monitor.Errors) != 0 {
+ var errString []string
+ for _, err := range monitor.Errors {
+ errString = append(errString, err.Error())
+ }
+ return fmt.Errorf(strings.Join(errString, ". "))
+ }
+ if len(monitor.Tables) == 0 {
+ return fmt.Errorf("at least one table should be monitored")
+ }
+ dbName := cookie.DatabaseName
+ db := o.databases[dbName]
+ db.modelMutex.RLock()
+ typeMap := db.model.Types()
+ requests := make(map[string]ovsdb.MonitorRequest)
+ for _, o := range monitor.Tables {
+ _, ok := typeMap[o.Table]
+ if !ok {
+ return fmt.Errorf("type for table %s does not exist in model", o.Table)
+ }
+ model, err := db.model.NewModel(o.Table)
+ if err != nil {
+ return err
+ }
+ info, err := db.model.NewModelInfo(model)
+ if err != nil {
+ return err
+ }
+ request, err := newMonitorRequest(info, o.Fields, o.Conditions)
+ if err != nil {
+ return err
+ }
+ requests[o.Table] = *request
+ }
+ db.modelMutex.RUnlock()
+
+ var args []interface{}
+ if monitor.Method == ovsdb.ConditionalMonitorSinceRPC {
+ // If we are reconnecting a CondSince monitor that is the only
+ // monitor, then we can use its LastTransactionID since it is
+ // valid (because we're reconnecting) and we can safely keep
+ // the cache intact (because it's the only monitor).
+ transactionID := emptyUUID
+ if reconnecting && len(db.monitors) == 1 {
+ transactionID = monitor.LastTransactionID
+ }
+ args = ovsdb.NewMonitorCondSinceArgs(dbName, cookie, requests, transactionID)
+ } else {
+ args = ovsdb.NewMonitorArgs(dbName, cookie, requests)
+ }
+ var err error
+ var tableUpdates interface{}
+
+ var lastTransactionFound bool
+ switch monitor.Method {
+ case ovsdb.MonitorRPC:
+ var reply ovsdb.TableUpdates
+ err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply)
+ tableUpdates = reply
+ case ovsdb.ConditionalMonitorRPC:
+ var reply ovsdb.TableUpdates2
+ err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply)
+ tableUpdates = reply
+ case ovsdb.ConditionalMonitorSinceRPC:
+ var reply ovsdb.MonitorCondSinceReply
+ err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply)
+ if err == nil && reply.Found {
+ monitor.LastTransactionID = reply.LastTransactionID
+ lastTransactionFound = true
+ }
+ tableUpdates = reply.Updates
+ default:
+ return fmt.Errorf("unsupported monitor method: %v", monitor.Method)
+ }
+
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return ErrNotConnected
+ }
+ if err.Error() == "unknown method" {
+ if monitor.Method == ovsdb.ConditionalMonitorSinceRPC {
+ o.logger.V(3).Error(err, "method monitor_cond_since not supported, falling back to monitor_cond")
+ monitor.Method = ovsdb.ConditionalMonitorRPC
+ return o.monitor(ctx, cookie, reconnecting, monitor)
+ }
+ if monitor.Method == ovsdb.ConditionalMonitorRPC {
+ o.logger.V(3).Error(err, "method monitor_cond not supported, falling back to monitor")
+ monitor.Method = ovsdb.MonitorRPC
+ return o.monitor(ctx, cookie, reconnecting, monitor)
+ }
+ }
+ return err
+ }
+
+ if !reconnecting {
+ db.monitors[cookie.ID] = monitor
+ o.metrics.numMonitors.Inc()
+ }
+
+ db.cacheMutex.Lock()
+ defer db.cacheMutex.Unlock()
+
+ // On reconnect, purge the cache _unless_ the only monitor is a
+ // MonitorCondSince one, whose LastTransactionID was known to the
+ // server. In this case the reply contains only updates to the existing
+ // cache data, while otherwise it includes complete DB data so we must
+ // purge to get rid of old rows.
+ if reconnecting && (len(db.monitors) > 1 || !lastTransactionFound) {
+ db.cache.Purge(db.model)
+ }
+
+ if monitor.Method == ovsdb.MonitorRPC {
+ u := tableUpdates.(ovsdb.TableUpdates)
+ err = db.cache.Populate(u)
+ } else {
+ u := tableUpdates.(ovsdb.TableUpdates2)
+ err = db.cache.Populate2(u)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ // populate any deferred updates
+ db.deferUpdates = false
+ for _, update := range db.deferredUpdates {
+ if update.updates != nil {
+ if err = db.cache.Populate(*update.updates); err != nil {
+ return err
+ }
+ }
+
+ if update.updates2 != nil {
+ if err = db.cache.Populate2(*update.updates2); err != nil {
+ return err
+ }
+ }
+ if len(update.lastTxnID) > 0 {
+ db.monitors[cookie.ID].LastTransactionID = update.lastTxnID
+ }
+ }
+ // clear deferred updates for next time
+ db.deferredUpdates = make([]*bufferedUpdate, 0)
+
+ return err
+}
+
+// Echo tests the liveness of the OVSDB connection
+func (o *ovsdbClient) Echo(ctx context.Context) error {
+ args := ovsdb.NewEchoArgs()
+ var reply []interface{}
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ if o.rpcClient == nil {
+ return ErrNotConnected
+ }
+ err := o.rpcClient.CallWithContext(ctx, "echo", args, &reply)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return ErrNotConnected
+ }
+ }
+ if !reflect.DeepEqual(args, reply) {
+ return fmt.Errorf("incorrect server response: %v, %v", args, reply)
+ }
+ return nil
+}
+
+// watchForLeaderChange will trigger a reconnect if the connected endpoint
+// ever loses leadership
+func (o *ovsdbClient) watchForLeaderChange() error {
+ updates := make(chan model.Model)
+ o.databases[serverDB].cache.AddEventHandler(&cache.EventHandlerFuncs{
+ UpdateFunc: func(table string, _, new model.Model) {
+ if table == "Database" {
+ updates <- new
+ }
+ },
+ })
+
+ m := newMonitor()
+ // NOTE: _Server does not support monitor_cond_since
+ m.Method = ovsdb.ConditionalMonitorRPC
+ m.Tables = []TableMonitor{{Table: "Database"}}
+ db := o.databases[serverDB]
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+ err := o.monitor(context.Background(), newMonitorCookie(serverDB), false, m)
+ if err != nil {
+ return err
+ }
+
+ go func() {
+ for m := range updates {
+ dbInfo, ok := m.(*serverdb.Database)
+ if !ok {
+ continue
+ }
+
+ // Ignore the dbInfo for _Server
+ if dbInfo.Name != o.primaryDBName {
+ continue
+ }
+
+ // Only handle leadership changes for clustered databases
+ if dbInfo.Model != serverdb.DatabaseModelClustered {
+ continue
+ }
+
+ // Clustered database servers must have a valid Server ID
+ var sid string
+ if dbInfo.Sid != nil {
+ sid = *dbInfo.Sid
+ }
+ if sid == "" {
+ o.logger.V(3).Info("clustered database update contained invalid server ID")
+ continue
+ }
+
+ o.rpcMutex.Lock()
+ if !dbInfo.Leader && o.connected {
+ activeEndpoint := o.endpoints[0]
+ if sid == activeEndpoint.serverID {
+ o.logger.V(3).Info("endpoint lost leader, reconnecting",
+ "endpoint", activeEndpoint.address, "sid", sid)
+ // don't immediately reconnect to the active endpoint since it's no longer leader
+ o.moveEndpointLast(0)
+ o._disconnect()
+ } else {
+ o.logger.V(3).Info("endpoint lost leader but had unexpected server ID",
+ "endpoint", activeEndpoint.address,
+ "expected", activeEndpoint.serverID, "found", sid)
+ }
+ }
+ o.rpcMutex.Unlock()
+ }
+ }()
+ return nil
+}
+
+func (o *ovsdbClient) handleClientErrors(stopCh <-chan struct{}) {
+ defer o.handlerShutdown.Done()
+ var errColumnNotFound *mapper.ErrColumnNotFound
+ var errCacheInconsistent *cache.ErrCacheInconsistent
+ var errIndexExists *cache.ErrIndexExists
+ for {
+ select {
+ case <-stopCh:
+ return
+ case err := <-o.errorCh:
+ if errors.As(err, &errColumnNotFound) {
+ o.logger.V(3).Error(err, "error updating cache, DB schema may be newer than client!")
+ } else if errors.As(err, &errCacheInconsistent) || errors.As(err, &errIndexExists) {
+ // trigger a reconnect, which will purge the cache
+ // hopefully a rebuild will fix any inconsistency
+ o.logger.V(3).Error(err, "triggering reconnect to rebuild cache")
+ // for rebuilding cache with mon_cond_since (not yet fully supported in libovsdb) we
+ // need to reset the last txn ID
+ for _, db := range o.databases {
+ db.monitorsMutex.Lock()
+ for _, mon := range db.monitors {
+ mon.LastTransactionID = emptyUUID
+ }
+ db.monitorsMutex.Unlock()
+ }
+ o.Disconnect()
+ } else {
+ o.logger.V(3).Error(err, "error updating cache")
+ }
+ }
+ }
+}
+
+func (o *ovsdbClient) sendEcho(args []interface{}, reply *[]interface{}) *rpc2.Call {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ if o.rpcClient == nil {
+ return nil
+ }
+ return o.rpcClient.Go("echo", args, reply, make(chan *rpc2.Call, 1))
+}
+
+func (o *ovsdbClient) handleInactivityProbes() {
+ defer o.handlerShutdown.Done()
+ echoReplied := make(chan string)
+ var lastEcho string
+ stopCh := o.stopCh
+ trafficSeen := o.trafficSeen
+ for {
+ select {
+ case <-stopCh:
+ return
+ case <-trafficSeen:
+ // We got some traffic from the server, restart our timer
+ case ts := <-echoReplied:
+ // Got a response from the server, check it against lastEcho; if same clear lastEcho; if not same Disconnect()
+ if ts != lastEcho {
+ o.Disconnect()
+ return
+ }
+ lastEcho = ""
+ case <-time.After(o.options.inactivityTimeout):
+ // If there's a lastEcho already, then we didn't get a server reply, disconnect
+ if lastEcho != "" {
+ o.Disconnect()
+ return
+ }
+ // Otherwise send an echo
+ thisEcho := fmt.Sprintf("%d", time.Now().UnixMicro())
+ args := []interface{}{"libovsdb echo", thisEcho}
+ var reply []interface{}
+ // Can't use o.Echo() because it blocks; we need the Call object direct from o.rpcClient.Go()
+ call := o.sendEcho(args, &reply)
+ if call == nil {
+ o.Disconnect()
+ return
+ }
+ lastEcho = thisEcho
+ go func() {
+ // Wait for the echo reply
+ select {
+ case <-stopCh:
+ return
+ case <-call.Done:
+ if call.Error != nil {
+ // RPC timeout; disconnect
+ o.logger.V(3).Error(call.Error, "server echo reply error")
+ o.Disconnect()
+ } else if !reflect.DeepEqual(args, reply) {
+ o.logger.V(3).Info("warning: incorrect server echo reply",
+ "expected", args, "reply", reply)
+ o.Disconnect()
+ } else {
+ // Otherwise stuff thisEcho into the echoReplied channel
+ echoReplied <- thisEcho
+ }
+ }
+ }()
+ }
+ }
+}
+
+func (o *ovsdbClient) handleDisconnectNotification() {
+ <-o.rpcClient.DisconnectNotify()
+ // close the stopCh, which will stop the cache event processor
+ close(o.stopCh)
+ if o.trafficSeen != nil {
+ close(o.trafficSeen)
+ }
+ o.metrics.numDisconnects.Inc()
+ // wait for client related handlers to shutdown
+ o.handlerShutdown.Wait()
+ o.rpcMutex.Lock()
+ if o.options.reconnect && !o.shutdown {
+ o.rpcClient = nil
+ o.rpcMutex.Unlock()
+ suppressionCounter := 1
+ connect := func() error {
+ // need to ensure deferredUpdates is cleared on every reconnect attempt
+ for _, db := range o.databases {
+ db.cacheMutex.Lock()
+ db.deferredUpdates = make([]*bufferedUpdate, 0)
+ db.deferUpdates = true
+ db.cacheMutex.Unlock()
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), o.options.timeout)
+ defer cancel()
+ err := o.connect(ctx, true)
+ if err != nil {
+ if suppressionCounter < 5 {
+ o.logger.V(2).Error(err, "failed to reconnect")
+ } else if suppressionCounter == 5 {
+ o.logger.V(2).Error(err, "reconnect has failed 5 times, suppressing logging "+
+ "for future attempts")
+ }
+ }
+ suppressionCounter++
+ return err
+ }
+ o.logger.V(3).Info("connection lost, reconnecting", "endpoint", o.endpoints[0].address)
+ err := backoff.Retry(connect, o.options.backoff)
+ if err != nil {
+ // TODO: We should look at passing this back to the
+ // caller to handle
+ panic(err)
+ }
+ // this goroutine finishes, and is replaced with a new one (from Connect)
+ return
+ }
+
+ // clear connection state
+ o.rpcClient = nil
+ o.rpcMutex.Unlock()
+
+ for _, db := range o.databases {
+ db.cacheMutex.Lock()
+ defer db.cacheMutex.Unlock()
+ db.cache = nil
+ // need to defer updates if/when we reconnect and clear any stale updates
+ db.deferUpdates = true
+ db.deferredUpdates = make([]*bufferedUpdate, 0)
+
+ db.modelMutex.Lock()
+ defer db.modelMutex.Unlock()
+ db.model = model.NewPartialDatabaseModel(db.model.Client())
+
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+ db.monitors = make(map[string]*Monitor)
+ }
+ o.metrics.numMonitors.Set(0)
+
+ o.shutdownMutex.Lock()
+ defer o.shutdownMutex.Unlock()
+ o.shutdown = false
+
+ select {
+ case o.disconnect <- struct{}{}:
+ // sent disconnect notification to client
+ default:
+ // client is not listening to the channel
+ }
+}
+
+// _disconnect will close the connection to the OVSDB server
+// If the client was created with WithReconnect then the client
+// will reconnect afterwards. Assumes rpcMutex is held.
+func (o *ovsdbClient) _disconnect() {
+ o.connected = false
+ if o.rpcClient == nil {
+ return
+ }
+ o.rpcClient.Close()
+}
+
+// Disconnect will close the connection to the OVSDB server
+// If the client was created with WithReconnect then the client
+// will reconnect afterwards
+func (o *ovsdbClient) Disconnect() {
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ o._disconnect()
+}
+
+// Close will close the connection to the OVSDB server
+// It will remove all stored state ready for the next connection
+// Even if the client was created with WithReconnect, it will not reconnect afterwards
+func (o *ovsdbClient) Close() {
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ o.connected = false
+ if o.rpcClient == nil {
+ return
+ }
+ o.shutdownMutex.Lock()
+ defer o.shutdownMutex.Unlock()
+ o.shutdown = true
+ o.rpcClient.Close()
+}
+
+// Ensures the cache is consistent by evaluating that the client is connected
+// and the monitor is fully setup, with the cache populated. Caller must hold
+// the database's cache mutex for reading.
+func isCacheConsistent(db *database) bool {
+ // This works because when a client is disconnected the deferUpdates variable
+ // will be set to true. deferUpdates is also protected by the db.cacheMutex.
+ // When the client reconnects and then re-establishes the monitor; the final step
+ // is to process all deferred updates, set deferUpdates back to false, and unlock cacheMutex
+ return !db.deferUpdates
+}
+
+// best effort to ensure cache is in a good state for reading. RLocks the
+// database's cache before returning; caller must always unlock.
+func waitForCacheConsistent(ctx context.Context, db *database, logger *logr.Logger, dbName string) {
+ if !hasMonitors(db) {
+ db.cacheMutex.RLock()
+ return
+ }
+ // Check immediately as a fastpath
+ db.cacheMutex.RLock()
+ if isCacheConsistent(db) {
+ return
+ }
+ db.cacheMutex.RUnlock()
+
+ ticker := time.NewTicker(50 * time.Millisecond)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ logger.V(3).Info("warning: unable to ensure cache consistency for reading",
+ "database", dbName)
+ db.cacheMutex.RLock()
+ return
+ case <-ticker.C:
+ db.cacheMutex.RLock()
+ if isCacheConsistent(db) {
+ return
+ }
+ db.cacheMutex.RUnlock()
+ }
+ }
+}
+
+func hasMonitors(db *database) bool {
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+ return len(db.monitors) > 0
+}
+
+// Client API interface wrapper functions
+// We add this wrapper to allow users to access the API directly on the
+// client object
+
+// Get implements the API interface's Get function
+func (o *ovsdbClient) Get(ctx context.Context, model model.Model) error {
+ primaryDB := o.primaryDB()
+ waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName)
+ defer primaryDB.cacheMutex.RUnlock()
+ return primaryDB.api.Get(ctx, model)
+}
+
+// Create implements the API interface's Create function
+func (o *ovsdbClient) Create(models ...model.Model) ([]ovsdb.Operation, error) {
+ return o.primaryDB().api.Create(models...)
+}
+
+// List implements the API interface's List function
+func (o *ovsdbClient) List(ctx context.Context, result interface{}) error {
+ primaryDB := o.primaryDB()
+ waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName)
+ defer primaryDB.cacheMutex.RUnlock()
+ return primaryDB.api.List(ctx, result)
+}
+
+// Where implements the API interface's Where function
+func (o *ovsdbClient) Where(models ...model.Model) ConditionalAPI {
+ return o.primaryDB().api.Where(models...)
+}
+
+// WhereAny implements the API interface's WhereAny function
+func (o *ovsdbClient) WhereAny(m model.Model, conditions ...model.Condition) ConditionalAPI {
+ return o.primaryDB().api.WhereAny(m, conditions...)
+}
+
+// WhereAll implements the API interface's WhereAll function
+func (o *ovsdbClient) WhereAll(m model.Model, conditions ...model.Condition) ConditionalAPI {
+ return o.primaryDB().api.WhereAll(m, conditions...)
+}
+
+// WhereCache implements the API interface's WhereCache function
+func (o *ovsdbClient) WhereCache(predicate interface{}) ConditionalAPI {
+ return o.primaryDB().api.WhereCache(predicate)
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/condition.go b/vendor/github.com/ovn-org/libovsdb/client/condition.go
new file mode 100644
index 000000000..1dfabda02
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/condition.go
@@ -0,0 +1,248 @@
+package client
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// Conditional is the interface used by the ConditionalAPI to match on cache objects
+// and generate ovsdb conditions
+type Conditional interface {
+ // Generate returns a list of lists of conditions to be used in Operations
+ // Each element in the (outer) list corresponds to an operation
+ Generate() ([][]ovsdb.Condition, error)
+ // Returns the models that match the conditions
+ Matches() (map[string]model.Model, error)
+ // returns the table that this condition is associated with
+ Table() string
+}
+
+func generateConditionsFromModels(dbModel model.DatabaseModel, models map[string]model.Model) ([][]ovsdb.Condition, error) {
+ anyConditions := make([][]ovsdb.Condition, 0, len(models))
+ for _, model := range models {
+ info, err := dbModel.NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+ allConditions, err := dbModel.Mapper.NewEqualityCondition(info)
+ if err != nil {
+ return nil, err
+ }
+ anyConditions = append(anyConditions, allConditions)
+ }
+ return anyConditions, nil
+}
+
+func generateOvsdbConditionsFromModelConditions(dbModel model.DatabaseModel, info *mapper.Info, conditions []model.Condition, singleOp bool) ([][]ovsdb.Condition, error) {
+ anyConditions := [][]ovsdb.Condition{}
+ if singleOp {
+ anyConditions = append(anyConditions, []ovsdb.Condition{})
+ }
+ for _, condition := range conditions {
+ ovsdbCond, err := dbModel.Mapper.NewCondition(info, condition.Field, condition.Function, condition.Value)
+ if err != nil {
+ return nil, err
+ }
+ allConditions := []ovsdb.Condition{*ovsdbCond}
+ if singleOp {
+ anyConditions[0] = append(anyConditions[0], allConditions...)
+ } else {
+ anyConditions = append(anyConditions, allConditions)
+ }
+ }
+ return anyConditions, nil
+}
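+
+// For illustration: with conditions [c1, c2], singleOp=true yields
+// [][]ovsdb.Condition{{c1, c2}} (a single operation with the conditions ANDed),
+// while singleOp=false yields [][]ovsdb.Condition{{c1}, {c2}} (one operation per
+// condition, effectively ORing them across operations).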
+
+// equalityConditional uses the indexes available in a provided model to find a
+// matching model in the database.
+type equalityConditional struct {
+ tableName string
+ models []model.Model
+ cache *cache.TableCache
+}
+
+func (c *equalityConditional) Table() string {
+ return c.tableName
+}
+
+// Returns the models that match the indexes available through the provided
+// model.
+func (c *equalityConditional) Matches() (map[string]model.Model, error) {
+ tableCache := c.cache.Table(c.tableName)
+ if tableCache == nil {
+ return nil, ErrNotFound
+ }
+ return tableCache.RowsByModels(c.models)
+}
+
+// Generate conditions based on the equality of the first available index. If
+// the index can be matched against a model in the cache, the condition will be
+// based on the UUID of the found model. Otherwise, the conditions will be based
+// on the index.
+func (c *equalityConditional) Generate() ([][]ovsdb.Condition, error) {
+ models, err := c.Matches()
+ if err != nil && err != ErrNotFound {
+ return nil, err
+ }
+ if len(models) == 0 {
+ // no cache hits, generate condition from models we were given
+ modelMap := make(map[string]model.Model, len(c.models))
+ for i, m := range c.models {
+ // generateConditionsFromModels() ignores the map keys
+ // so just use the range index
+ modelMap[fmt.Sprintf("%d", i)] = m
+ }
+ return generateConditionsFromModels(c.cache.DatabaseModel(), modelMap)
+ }
+ return generateConditionsFromModels(c.cache.DatabaseModel(), models)
+}
+
+// NewEqualityCondition creates a new equalityConditional
+func newEqualityConditional(table string, cache *cache.TableCache, models []model.Model) (Conditional, error) {
+ return &equalityConditional{
+ tableName: table,
+ models: models,
+ cache: cache,
+ }, nil
+}
+
+// explicitConditional generates conditions based on the provided Condition list
+type explicitConditional struct {
+ tableName string
+ anyConditions [][]ovsdb.Condition
+ cache *cache.TableCache
+}
+
+func (c *explicitConditional) Table() string {
+ return c.tableName
+}
+
+// Returns the models that match the conditions
+func (c *explicitConditional) Matches() (map[string]model.Model, error) {
+ tableCache := c.cache.Table(c.tableName)
+ if tableCache == nil {
+ return nil, ErrNotFound
+ }
+ found := map[string]model.Model{}
+ for _, allConditions := range c.anyConditions {
+ models, err := tableCache.RowsByCondition(allConditions)
+ if err != nil {
+ return nil, err
+ }
+ for uuid, model := range models {
+ found[uuid] = model
+ }
+ }
+ return found, nil
+}
+
+// Generate returns conditions based on the provided Condition list
+func (c *explicitConditional) Generate() ([][]ovsdb.Condition, error) {
+ models, err := c.Matches()
+ if err != nil && err != ErrNotFound {
+ return nil, err
+ }
+ if len(models) == 0 {
+ // no cache hits, return conditions we were given
+ return c.anyConditions, nil
+ }
+ return generateConditionsFromModels(c.cache.DatabaseModel(), models)
+}
+
+// newExplicitConditional creates a new explicitConditional
+func newExplicitConditional(table string, cache *cache.TableCache, matchAll bool, model model.Model, cond ...model.Condition) (Conditional, error) {
+ dbModel := cache.DatabaseModel()
+ info, err := dbModel.NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+ anyConditions, err := generateOvsdbConditionsFromModelConditions(dbModel, info, cond, matchAll)
+ if err != nil {
+ return nil, err
+ }
+ return &explicitConditional{
+ tableName: table,
+ anyConditions: anyConditions,
+ cache: cache,
+ }, nil
+}
+
+// predicateConditional is a Conditional that calls a provided function pointer
+// to match on models.
+type predicateConditional struct {
+ tableName string
+ predicate interface{}
+ cache *cache.TableCache
+}
+
+// matches returns the result of the execution of the predicate
+// Type verifications are not performed
+// Returns the models that match the conditions
+func (c *predicateConditional) Matches() (map[string]model.Model, error) {
+ tableCache := c.cache.Table(c.tableName)
+ if tableCache == nil {
+ return nil, ErrNotFound
+ }
+ found := map[string]model.Model{}
+ // run the predicate on a shallow copy of the models for speed and only
+ // clone the matches
+ for u, m := range tableCache.RowsShallow() {
+ ret := reflect.ValueOf(c.predicate).Call([]reflect.Value{reflect.ValueOf(m)})
+ if ret[0].Bool() {
+ found[u] = model.Clone(m)
+ }
+ }
+ return found, nil
+}
+
+func (c *predicateConditional) Table() string {
+ return c.tableName
+}
+
+// generate returns a list of conditions that match, by _uuid equality, all the objects that
+// match the predicate
+func (c *predicateConditional) Generate() ([][]ovsdb.Condition, error) {
+ models, err := c.Matches()
+ if err != nil {
+ return nil, err
+ }
+ return generateConditionsFromModels(c.cache.DatabaseModel(), models)
+}
+
+// newPredicateConditional creates a new predicateConditional
+func newPredicateConditional(table string, cache *cache.TableCache, predicate interface{}) (Conditional, error) {
+ return &predicateConditional{
+ tableName: table,
+ predicate: predicate,
+ cache: cache,
+ }, nil
+}
+
+// errorConditional is a conditional that encapsulates an error
+// It is used to delay the reporting of errors from conditional creation to API method call
+type errorConditional struct {
+ err error
+}
+
+func (e *errorConditional) Matches() (map[string]model.Model, error) {
+ return nil, e.err
+}
+
+func (e *errorConditional) Table() string {
+ return ""
+}
+
+func (e *errorConditional) Generate() ([][]ovsdb.Condition, error) {
+ return nil, e.err
+}
+
+func newErrorConditional(err error) Conditional {
+ return &errorConditional{
+ err: fmt.Errorf("conditionerror: %s", err.Error()),
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/config.go b/vendor/github.com/ovn-org/libovsdb/client/config.go
new file mode 100644
index 000000000..a9c00f56a
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/config.go
@@ -0,0 +1,27 @@
+/**
+ * Copyright (c) 2019 eBay Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package client
+
+import (
+ "crypto/tls"
+)
+
+// Config is a structure used in provisioning a connection to ovsdb.
+type Config struct {
+ Addr string
+ TLSConfig *tls.Config
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/doc.go b/vendor/github.com/ovn-org/libovsdb/client/doc.go
new file mode 100644
index 000000000..90e409ee7
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/doc.go
@@ -0,0 +1,164 @@
+/*
+Package client connects to, monitors and interacts with OVSDB servers (RFC7047).
+
+This package uses structs that contain the 'ovsdb' field tag to determine which field maps to
+which column in the database. We refer to pointers to these structs as Models. Example:
+
+ type MyLogicalSwitch struct {
+ UUID string `ovsdb:"_uuid"` // _uuid tag is mandatory
+ Name string `ovsdb:"name"`
+ Ports []string `ovsdb:"ports"`
+ Config map[string]string `ovsdb:"other_config"`
+ }
+
+Based on these Models a Database Model (see ClientDBModel type) is built to represent
+the entire OVSDB:
+
+ clientDBModel, _ := client.NewClientDBModel("OVN_Northbound",
+ map[string]client.Model{
+ "Logical_Switch": &MyLogicalSwitch{},
+ })
+
+
+The ClientDBModel represents the entire Database (or the part of it we're interested in).
+Using it, the libovsdb.client package is able to properly encode and decode OVSDB messages
+and store them in Model instances.
+A client instance is created by simply specifying the connection information and the database model:
+
+ ovs, _ := client.Connect(context.Background(), clientDBModel)
+
+Main API
+
+After creating an OvsdbClient using the Connect() function, we can use a number of CRUD-like functions
+to interact with the database:
+List(), Get(), Create(), Update(), Mutate(), Delete().
+
+The specific database table that the operation targets is automatically determined based on the type
+of the parameter.
+
+In terms of return values, some of these functions, such as Create(), Update(), Mutate() and Delete(),
+interact with the database, so they return a list of ovsdb.Operation objects that can be grouped together
+and passed to client.Transact().
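+
+As a hedged sketch (reusing the MyLogicalSwitch model and the ovs client from above), grouped
+operations can be applied with Transact() and checked with ovsdb.CheckOperationResults():
+
+	ops, _ := ovs.Create(&MyLogicalSwitch{Name: "sw0"})
+	reply, _ := ovs.Transact(context.Background(), ops...)
+	_, err := ovsdb.CheckOperationResults(reply, ops)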
+
+Others, such as List() and Get(), interact with the client's internal cache and are able to
+return Model instances (or a list thereof) directly.
+
+Conditions
+
+Some API functions (Create() and Get()) can be run directly. Others require us to use
+a ConditionalAPI. The ConditionalAPI injects RFC7047 Conditions into ovsdb Operations as well as
+uses the Conditions to search the internal cache.
+
+The ConditionalAPI is created using the Where(), WhereCache() and WhereAll() functions.
+
+Where() accepts a Model (pointer to a struct with ovs tags) and a number of Condition instances.
+Conditions must refer to fields of the provided Model (via pointer to fields). Example:
+
+ ls = &MyLogicalSwitch {}
+ ovs.Where(ls, client.Condition {
+ Field: &ls.Ports,
+ Function: ovsdb.ConditionIncludes,
+ Value: []string{"portUUID"},
+ })
+
+If no client.Condition is provided, the client will use any of the fields that correspond to indexes to
+generate an appropriate condition. Therefore, the following two statements are equivalent:
+
+ ls = &MyLogicalSwitch {UUID:"myUUID"}
+
+ ovs.Where(ls)
+
+ ovs.Where(ls, client.Condition {
+ Field: &ls.UUID,
+ Function: ovsdb.ConditionEqual,
+		Value: "myUUID",
+ })
+
+Where() accepts multiple Condition instances (through variadic arguments).
+If provided, the client will generate multiple operations each matching one condition.
+For example, the following operation will delete all the Logical Switches named "foo" OR that include the port "bar":
+
+ ops, err := ovs.Where(ls,
+ client.Condition {
+			Field: &ls.Name,
+ Function: ovsdb.ConditionEqual,
+ Value: "foo",
+		}, client.Condition {
+			Field: &ls.Ports,
+ Function: ovsdb.ConditionIncludes,
+ Value: "bar",
+ }).Delete()
+
+To create a Condition that matches all of the conditions simultaneously (i.e: AND semantics), use WhereAll().
+
+Where() or WhereAll() evaluate the provided index values or explicit conditions against the cache and generate
+conditions based on the UUIDs of matching models. If no matches are found in the cache, the generated conditions
+will be based on the index or condition fields themselves.
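+
+For illustration only (reusing the MyLogicalSwitch model from above), an AND match could be
+sketched as:
+
+	ops, err := ovs.WhereAll(ls,
+		client.Condition{Field: &ls.Name, Function: ovsdb.ConditionEqual, Value: "foo"},
+		client.Condition{Field: &ls.Ports, Function: ovsdb.ConditionIncludes, Value: []string{"portUUID"}},
+	).Delete()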
+
+A more flexible mechanism to search the cache is available: WhereCache()
+
+WhereCache() accepts a function that takes any Model as argument and returns a boolean.
+It is used to search the cache, so it is commonly used with the List() function. For example:
+
+ lsList := &[]LogicalSwitch{}
+ err := ovs.WhereCache(
+ func(ls *LogicalSwitch) bool {
+ return strings.HasPrefix(ls.Name, "ext_")
+ }).List(lsList)
+
+Server side operations can be executed using WhereCache() conditions but it's not recommended. For each matching
+cache element, an operation will be created matching on the "_uuid" column. The number of operations can be
+quite large depending on the cache size and the provided function. Most likely there is a way to express the
+same condition using Where() or WhereAll() which will be more efficient.
+
+Get
+
+The Get() operation is a simple operation capable of retrieving one Model based on some of its schema indexes. E.g:
+
+ ls := &LogicalSwitch{UUID:"myUUID"}
+ err := ovs.Get(ls)
+	fmt.Printf("Name of the switch is: %s", ls.Name)
+
+List
+
+List() searches the cache and populates a slice of Models. It can be used directly or combined with WhereCache()
+
+ lsList := &[]LogicalSwitch{}
+ err := ovs.List(lsList) // List all elements
+
+ err := ovs.WhereCache(
+ func(ls *LogicalSwitch) bool {
+ return strings.HasPrefix(ls.Name, "ext_")
+ }).List(lsList)
+
+Create
+
+Create returns a list of operations to create the models provided. E.g:
+
+	ops, err := ovs.Create(&LogicalSwitch{Name:"foo"}, &LogicalSwitch{Name:"bar"})
+
+Update
+
+Update returns a list of operations to update the matching rows to match the values of the provided model. E.g:
+
+ ls := &LogicalSwitch{ExternalIDs: map[string]string {"foo": "bar"}}
+	ops, err := ovs.Where(...).Update(ls, &ls.ExternalIDs)
+
+Mutate
+
+Mutate returns a list of operations needed to mutate the matching rows as described by the list of Mutation objects. E.g:
+
+ ls := &LogicalSwitch{}
+	ops, err := ovs.Where(...).Mutate(ls, client.Mutation {
+ Field: &ls.Config,
+ Mutator: ovsdb.MutateOperationInsert,
+ Value: map[string]string{"foo":"bar"},
+ })
+
+Delete
+
+Delete returns a list of operations needed to delete the matching rows. E.g:
+
+ ops, err := ovs.Where(...).Delete()
+
+*/
+package client
diff --git a/vendor/github.com/ovn-org/libovsdb/client/metrics.go b/vendor/github.com/ovn-org/libovsdb/client/metrics.go
new file mode 100644
index 000000000..8c4e5f6f2
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/metrics.go
@@ -0,0 +1,88 @@
+package client
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const libovsdbName = "libovsdb"
+
+type metrics struct {
+ numUpdates *prometheus.CounterVec
+ numTableUpdates *prometheus.CounterVec
+ numDisconnects prometheus.Counter
+ numMonitors prometheus.Gauge
+ registerOnce sync.Once
+}
+
+func (m *metrics) init(modelName string, namespace, subsystem string) {
+ // labels that are the same across all metrics
+ constLabels := prometheus.Labels{"primary_model": modelName}
+
+ if namespace == "" {
+ namespace = libovsdbName
+ subsystem = ""
+ }
+
+ m.numUpdates = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "update_messages_total",
+ Help: "Count of libovsdb monitor update messages processed, partitioned by database",
+ ConstLabels: constLabels,
+ },
+ []string{"database"},
+ )
+
+ m.numTableUpdates = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "table_updates_total",
+ Help: "Count of libovsdb monitor update messages per table",
+ ConstLabels: constLabels,
+ },
+ []string{"database", "table"},
+ )
+
+ m.numDisconnects = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "disconnects_total",
+ Help: "Count of libovsdb disconnects encountered",
+ ConstLabels: constLabels,
+ },
+ )
+
+ m.numMonitors = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "monitors",
+ Help: "Number of running libovsdb ovsdb monitors",
+ ConstLabels: constLabels,
+ },
+ )
+}
+
+func (m *metrics) register(r prometheus.Registerer) {
+ m.registerOnce.Do(func() {
+ r.MustRegister(
+ m.numUpdates,
+ m.numTableUpdates,
+ m.numDisconnects,
+ m.numMonitors,
+ )
+ })
+}
+
+func (o *ovsdbClient) registerMetrics() {
+ if !o.options.shouldRegisterMetrics || o.options.registry == nil {
+ return
+ }
+ o.metrics.register(o.options.registry)
+ o.options.shouldRegisterMetrics = false
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/monitor.go b/vendor/github.com/ovn-org/libovsdb/client/monitor.go
new file mode 100644
index 000000000..4a0270a87
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/monitor.go
@@ -0,0 +1,136 @@
+package client
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+const emptyUUID = "00000000-0000-0000-0000-000000000000"
+
+// Monitor represents a monitor
+type Monitor struct {
+ Method string
+ Tables []TableMonitor
+ Errors []error
+ LastTransactionID string
+}
+
+// newMonitor creates a new *Monitor with default values
+func newMonitor() *Monitor {
+ return &Monitor{
+ Method: ovsdb.ConditionalMonitorSinceRPC,
+ Errors: make([]error, 0),
+ LastTransactionID: emptyUUID,
+ }
+}
+
+// NewMonitor creates a new Monitor with the provided options
+func (o *ovsdbClient) NewMonitor(opts ...MonitorOption) *Monitor {
+ m := newMonitor()
+ for _, opt := range opts {
+ err := opt(o, m)
+ if err != nil {
+ m.Errors = append(m.Errors, err)
+ }
+ }
+ return m
+}
+
+// MonitorOption adds Tables to a Monitor
+type MonitorOption func(o *ovsdbClient, m *Monitor) error
+
+// MonitorCookie is the struct we pass to correlate updates back to their
+// originating Monitor request.
+type MonitorCookie struct {
+ DatabaseName string `json:"databaseName"`
+ ID string `json:"id"`
+}
+
+func newMonitorCookie(dbName string) MonitorCookie {
+ return MonitorCookie{
+ DatabaseName: dbName,
+ ID: uuid.NewString(),
+ }
+}
+
+// TableMonitor is a table to be monitored
+type TableMonitor struct {
+ // Table is the table to be monitored
+ Table string
+ // Conditions are the conditions under which the table should be monitored
+ Conditions []ovsdb.Condition
+ // Fields are the fields in the model to monitor
+ // If none are supplied, all fields will be used
+ Fields []string
+}
+
+func newTableMonitor(o *ovsdbClient, m model.Model, conditions []model.Condition, fields []interface{}) (*TableMonitor, error) {
+ dbModel := o.primaryDB().model
+ tableName := dbModel.FindTable(reflect.TypeOf(m))
+ if tableName == "" {
+ return nil, fmt.Errorf("object of type %s is not part of the ClientDBModel", reflect.TypeOf(m))
+ }
+
+ var columns []string
+ var ovsdbConds []ovsdb.Condition
+
+ if len(fields) == 0 && len(conditions) == 0 {
+ return &TableMonitor{
+ Table: tableName,
+ Conditions: ovsdbConds,
+ Fields: columns,
+ }, nil
+ }
+
+ data, err := dbModel.NewModelInfo(m)
+ if err != nil {
+ return nil, fmt.Errorf("unable to obtain info from model %v: %v", m, err)
+ }
+ for _, f := range fields {
+ column, err := data.ColumnByPtr(f)
+ if err != nil {
+ return nil, fmt.Errorf("unable to obtain column from model %v: %v", data, err)
+ }
+ columns = append(columns, column)
+ }
+ db := o.databases[o.primaryDBName]
+ mmapper := db.model.Mapper
+ for _, modelCond := range conditions {
+ ovsdbCond, err := mmapper.NewCondition(data, modelCond.Field, modelCond.Function, modelCond.Value)
+ if err != nil {
+ return nil, fmt.Errorf("unable to convert condition %v: %v", modelCond, err)
+ }
+ ovsdbConds = append(ovsdbConds, *ovsdbCond)
+ }
+ return &TableMonitor{
+ Table: tableName,
+ Conditions: ovsdbConds,
+ Fields: columns,
+ }, nil
+}
+
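+// WithTable adds a table to the Monitor and optionally restricts it to a subset of
+// columns, given as pointers to fields of the provided model. If no fields are
+// supplied, all columns of the table are monitored.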
+func WithTable(m model.Model, fields ...interface{}) MonitorOption {
+ return func(o *ovsdbClient, monitor *Monitor) error {
+ tableMonitor, err := newTableMonitor(o, m, []model.Condition{}, fields)
+ if err != nil {
+ return err
+ }
+ monitor.Tables = append(monitor.Tables, *tableMonitor)
+ return nil
+ }
+}
+
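+// WithConditionalTable adds a table to the Monitor together with a set of conditions
+// that rows must match to be monitored, and optionally a subset of columns given as
+// pointers to fields of the provided model.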
+func WithConditionalTable(m model.Model, conditions []model.Condition, fields ...interface{}) MonitorOption {
+ return func(o *ovsdbClient, monitor *Monitor) error {
+ tableMonitor, err := newTableMonitor(o, m, conditions, fields)
+ if err != nil {
+ return err
+ }
+ monitor.Tables = append(monitor.Tables, *tableMonitor)
+ return nil
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/options.go b/vendor/github.com/ovn-org/libovsdb/client/options.go
new file mode 100644
index 000000000..81ccffe20
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/options.go
@@ -0,0 +1,164 @@
+package client
+
+import (
+ "crypto/tls"
+ "net/url"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/go-logr/logr"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ defaultTCPEndpoint = "tcp:127.0.0.1:6640"
+ defaultSSLEndpoint = "ssl:127.0.0.1:6640"
+ defaultUnixEndpoint = "unix:/var/run/openvswitch/ovsdb.sock"
+)
+
+type options struct {
+ endpoints []string
+ tlsConfig *tls.Config
+ reconnect bool
+ leaderOnly bool
+ timeout time.Duration
+ backoff backoff.BackOff
+ logger *logr.Logger
+ registry prometheus.Registerer
+ shouldRegisterMetrics bool // in case metrics are changed after-the-fact
+ metricNamespace string // prometheus metric namespace
+ metricSubsystem string // prometheus metric subsystem
+ inactivityTimeout time.Duration
+}
+
+type Option func(o *options) error
+
+func newOptions(opts ...Option) (*options, error) {
+ o := &options{}
+ for _, opt := range opts {
+ if err := opt(o); err != nil {
+ return nil, err
+ }
+ }
+ // if no endpoints are supplied, use the default unix socket
+ if len(o.endpoints) == 0 {
+ o.endpoints = []string{defaultUnixEndpoint}
+ }
+ return o, nil
+}
+
+// WithTLSConfig sets the tls.Config for use by the client
+func WithTLSConfig(cfg *tls.Config) Option {
+ return func(o *options) error {
+ o.tlsConfig = cfg
+ return nil
+ }
+}
+
+// WithEndpoint sets the endpoint to be used by the client
+// It can be used multiple times, and the first endpoint that
+// successfully connects will be used.
+// Endpoints are specified in OVSDB Connection Format
+// For more details, see the ovsdb(7) man page
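+// Illustrative (assumed) endpoint strings: "tcp:127.0.0.1:6640",
+// "ssl:ovsdb.example.com:6640", "unix:/var/run/openvswitch/db.sock".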
+func WithEndpoint(endpoint string) Option {
+ return func(o *options) error {
+ ep, err := url.Parse(endpoint)
+ if err != nil {
+ return err
+ }
+ switch ep.Scheme {
+ case UNIX:
+ if len(ep.Path) == 0 {
+ o.endpoints = append(o.endpoints, defaultUnixEndpoint)
+ return nil
+ }
+ case TCP:
+ if len(ep.Opaque) == 0 {
+ o.endpoints = append(o.endpoints, defaultTCPEndpoint)
+ return nil
+ }
+ case SSL:
+ if len(ep.Opaque) == 0 {
+ o.endpoints = append(o.endpoints, defaultSSLEndpoint)
+ return nil
+ }
+ }
+ o.endpoints = append(o.endpoints, endpoint)
+ return nil
+ }
+}
+
+// WithLeaderOnly tells the client to treat endpoints that are clustered
+// and not the leader as down.
+func WithLeaderOnly(leaderOnly bool) Option {
+ return func(o *options) error {
+ o.leaderOnly = leaderOnly
+ return nil
+ }
+}
+
+// WithReconnect tells the client to automatically reconnect when
+// disconnected. The timeout is used to construct the context on
+// each call to Connect, while backoff dictates the backoff
+// algorithm to use. Using WithReconnect implies that
+// requested transactions will block until the client has fully reconnected,
+// rather than immediately returning an error if there is no connection.
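+//
+// A minimal sketch (NewOVSDBClient from this package and ZeroBackOff from the
+// backoff package are assumed):
+//
+//	ovs, _ := NewOVSDBClient(dbModel,
+//		WithEndpoint("tcp:127.0.0.1:6640"),
+//		WithReconnect(5*time.Second, &backoff.ZeroBackOff{}))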
+func WithReconnect(timeout time.Duration, backoff backoff.BackOff) Option {
+ return func(o *options) error {
+ o.reconnect = true
+ o.timeout = timeout
+ o.backoff = backoff
+ return nil
+ }
+}
+
+// WithInactivityCheck tells the client to send an Echo request to the ovsdb server
+// whenever inactivityTimeout elapses without activity. When the Echo request fails, the
+// client attempts to reconnect to the server. The inactivity check is performed as long
+// as the connection is established.
+// The reconnectTimeout argument is used to construct the context on each call to Connect,
+// while reconnectBackoff dictates the backoff algorithm to use.
+func WithInactivityCheck(inactivityTimeout, reconnectTimeout time.Duration,
+ reconnectBackoff backoff.BackOff) Option {
+ return func(o *options) error {
+ o.reconnect = true
+ o.timeout = reconnectTimeout
+ o.backoff = reconnectBackoff
+ o.inactivityTimeout = inactivityTimeout
+ return nil
+ }
+}
+
+// WithLogger allows setting a specific log sink. Otherwise, the default
+// go log package is used.
+func WithLogger(l *logr.Logger) Option {
+ return func(o *options) error {
+ o.logger = l
+ return nil
+ }
+}
+
+// WithMetricsRegistry allows the user to specify a Prometheus metrics registry.
+// If supplied, the metrics as defined in metrics.go will be registered.
+func WithMetricsRegistry(r prometheus.Registerer) Option {
+ return func(o *options) error {
+ o.registry = r
+ o.shouldRegisterMetrics = (r != nil)
+ return nil
+ }
+}
+
+// WithMetricsRegistryNamespaceSubsystem allows the user to specify a Prometheus metrics registry
+// and Prometheus metric namespace and subsystem of the component utilizing libovsdb.
+// If supplied, the metrics as defined in metrics.go will be registered.
+func WithMetricsRegistryNamespaceSubsystem(r prometheus.Registerer, namespace, subsystem string) Option {
+ if namespace == "" || subsystem == "" {
+ panic("libovsdb function WithMetricsRegistryNamespaceSubsystem arguments 'namespace' and 'subsystem' must not be empty")
+ }
+ return func(o *options) error {
+ o.registry = r
+ o.shouldRegisterMetrics = (r != nil)
+ o.metricNamespace = namespace
+ o.metricSubsystem = subsystem
+ return nil
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/database/database.go b/vendor/github.com/ovn-org/libovsdb/database/database.go
new file mode 100644
index 000000000..12f1222f1
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/database.go
@@ -0,0 +1,33 @@
+package database
+
+import (
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// Database abstracts a database that a server can use to store and transact data
+type Database interface {
+ CreateDatabase(database string, model ovsdb.DatabaseSchema) error
+ Exists(database string) bool
+ NewTransaction(database string) Transaction
+ Commit(database string, id uuid.UUID, update Update) error
+ CheckIndexes(database string, table string, m model.Model) error
+ List(database, table string, conditions ...ovsdb.Condition) (map[string]model.Model, error)
+ Get(database, table string, uuid string) (model.Model, error)
+ GetReferences(database, table, row string) (References, error)
+}
+
+// Transaction abstracts a database transaction that can generate database
+// updates
+type Transaction interface {
+ Transact(operations ...ovsdb.Operation) ([]*ovsdb.OperationResult, Update)
+}
+
+// Update abstracts an update that can be committed to a database
+type Update interface {
+ GetUpdatedTables() []string
+ ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error
+ ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error
+ ForReferenceUpdates(do func(references References) error) error
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/database/doc.go b/vendor/github.com/ovn-org/libovsdb/database/doc.go
new file mode 100644
index 000000000..c0a858c20
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/doc.go
@@ -0,0 +1,5 @@
+/*
+Package database collects database related types, interfaces and
+implementations.
+*/
+package database
diff --git a/vendor/github.com/ovn-org/libovsdb/database/references.go b/vendor/github.com/ovn-org/libovsdb/database/references.go
new file mode 100644
index 000000000..d8181a7a5
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/references.go
@@ -0,0 +1,71 @@
+package database
+
+// References tracks the references to rows from other rows at specific
+// locations in the schema.
+type References map[ReferenceSpec]Reference
+
+// ReferenceSpec specifies details about where in the schema a reference occurs.
+type ReferenceSpec struct {
+ // ToTable is the table of the row to which the reference is made
+ ToTable string
+
+ // FromTable is the table of the row from which the reference is made
+ FromTable string
+
+ // FromColumn is the column of the row from which the reference is made
+ FromColumn string
+
+ // FromValue flags if the reference is made on a map key or map value when
+ // the column is a map
+ FromValue bool
+}
+
+// Reference maps the UUIDs of rows to which the reference is made to the
+// rows it is made from
+type Reference map[string][]string
+
+// GetReferences gets references to a row
+func (rs References) GetReferences(table, uuid string) References {
+ refs := References{}
+ for spec, values := range rs {
+ if spec.ToTable != table {
+ continue
+ }
+ if _, ok := values[uuid]; ok {
+ refs[spec] = Reference{uuid: values[uuid]}
+ }
+ }
+ return refs
+}
+
+// UpdateReferences updates the references with the provided ones. Dangling
+// references, that is, the references of rows that are no longer referenced
+// from anywhere, are cleaned up.
+func (rs References) UpdateReferences(other References) {
+ for spec, otherRefs := range other {
+ for to, from := range otherRefs {
+ rs.updateReference(spec, to, from)
+ }
+ }
+}
+
+// updateReference updates the references to a row at a specific location in the
+// schema
+func (rs References) updateReference(spec ReferenceSpec, to string, from []string) {
+ thisRefs, ok := rs[spec]
+ if !ok && len(from) > 0 {
+ // add references from a previously untracked location
+ rs[spec] = Reference{to: from}
+ return
+ }
+ if len(from) > 0 {
+ // replace references to this row at this specific location
+ thisRefs[to] = from
+ return
+ }
+ // otherwise remove previously tracked references
+ delete(thisRefs, to)
+ if len(thisRefs) == 0 {
+ delete(rs, spec)
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/mapper/info.go b/vendor/github.com/ovn-org/libovsdb/mapper/info.go
new file mode 100644
index 000000000..8ac436c79
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/mapper/info.go
@@ -0,0 +1,179 @@
+package mapper
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// ErrColumnNotFound is an error that can occur when the column does not exist for a table
+type ErrColumnNotFound struct {
+ column string
+ table string
+}
+
+// Error implements the error interface
+func (e *ErrColumnNotFound) Error() string {
+ return fmt.Sprintf("column: %s not found in table: %s", e.column, e.table)
+}
+
+func NewErrColumnNotFound(column, table string) *ErrColumnNotFound {
+ return &ErrColumnNotFound{
+ column: column,
+ table: table,
+ }
+}
+
+// Info is a struct that wraps an object with its metadata
+type Info struct {
+ // FieldName indexed by column
+	// Obj is the native object being wrapped
+ Metadata Metadata
+}
+
+// Metadata represents the information needed to know how to map OVSDB columns into an object's fields
+type Metadata struct {
+ Fields map[string]string // Map of ColumnName -> FieldName
+ TableSchema *ovsdb.TableSchema // TableSchema associated
+ TableName string // Table name
+}
+
+// FieldByColumn returns the field value that corresponds to a column
+func (i *Info) FieldByColumn(column string) (interface{}, error) {
+ fieldName, ok := i.Metadata.Fields[column]
+ if !ok {
+ return nil, NewErrColumnNotFound(column, i.Metadata.TableName)
+ }
+ return reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName).Interface(), nil
+}
+
+// hasColumn returns whether a column is mapped to a field in the object
+func (i *Info) hasColumn(column string) bool {
+ _, ok := i.Metadata.Fields[column]
+ return ok
+}
+
+// SetField sets the field in the column to the specified value
+func (i *Info) SetField(column string, value interface{}) error {
+ fieldName, ok := i.Metadata.Fields[column]
+ if !ok {
+ return fmt.Errorf("SetField: column %s not found in orm info", column)
+ }
+ fieldValue := reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName)
+
+ if !fieldValue.Type().AssignableTo(reflect.TypeOf(value)) {
+ return fmt.Errorf("column %s: native value %v (%s) is not assignable to field %s (%s)",
+ column, value, reflect.TypeOf(value), fieldName, fieldValue.Type())
+ }
+ fieldValue.Set(reflect.ValueOf(value))
+ return nil
+}
+
+// ColumnByPtr returns the column name that corresponds to the field by the field's pointer
+func (i *Info) ColumnByPtr(fieldPtr interface{}) (string, error) {
+ fieldPtrVal := reflect.ValueOf(fieldPtr)
+ if fieldPtrVal.Kind() != reflect.Ptr {
+ return "", ovsdb.NewErrWrongType("ColumnByPointer", "pointer to a field in the struct", fieldPtr)
+ }
+ offset := fieldPtrVal.Pointer() - reflect.ValueOf(i.Obj).Pointer()
+ objType := reflect.TypeOf(i.Obj).Elem()
+ for j := 0; j < objType.NumField(); j++ {
+ if objType.Field(j).Offset == offset {
+ column := objType.Field(j).Tag.Get("ovsdb")
+ if _, ok := i.Metadata.Fields[column]; !ok {
+ return "", fmt.Errorf("field does not have orm column information")
+ }
+ return column, nil
+ }
+ }
+ return "", fmt.Errorf("field pointer does not correspond to orm struct")
+}
+
+// getValidIndexes inspects the object and returns a list of indexes (sets of columns) for which
+// the object has non-default values
+func (i *Info) getValidIndexes() ([][]string, error) {
+ var validIndexes [][]string
+ var possibleIndexes [][]string
+
+ possibleIndexes = append(possibleIndexes, []string{"_uuid"})
+ possibleIndexes = append(possibleIndexes, i.Metadata.TableSchema.Indexes...)
+
+ // Iterate through indexes and validate them
+OUTER:
+ for _, idx := range possibleIndexes {
+ for _, col := range idx {
+ if !i.hasColumn(col) {
+ continue OUTER
+ }
+ columnSchema := i.Metadata.TableSchema.Column(col)
+ if columnSchema == nil {
+ continue OUTER
+ }
+ field, err := i.FieldByColumn(col)
+ if err != nil {
+ return nil, err
+ }
+ if !reflect.ValueOf(field).IsValid() || ovsdb.IsDefaultValue(columnSchema, field) {
+ continue OUTER
+ }
+ }
+ validIndexes = append(validIndexes, idx)
+ }
+ return validIndexes, nil
+}
+
+// NewInfo creates an Info structure around an object based on a given table schema
+func NewInfo(tableName string, table *ovsdb.TableSchema, obj interface{}) (*Info, error) {
+ objPtrVal := reflect.ValueOf(obj)
+ if objPtrVal.Type().Kind() != reflect.Ptr {
+ return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj)
+ }
+ objVal := reflect.Indirect(objPtrVal)
+ if objVal.Kind() != reflect.Struct {
+ return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj)
+ }
+ objType := objVal.Type()
+
+ fields := make(map[string]string, objType.NumField())
+ for i := 0; i < objType.NumField(); i++ {
+ field := objType.Field(i)
+ colName := field.Tag.Get("ovsdb")
+ if colName == "" {
+ // Untagged fields are ignored
+ continue
+ }
+ column := table.Column(colName)
+ if column == nil {
+ return nil, &ErrMapper{
+ objType: objType.String(),
+ field: field.Name,
+ fieldType: field.Type.String(),
+ fieldTag: colName,
+ reason: "Column does not exist in schema",
+ }
+ }
+
+ // Perform schema-based type checking
+ expType := ovsdb.NativeType(column)
+ if expType != field.Type {
+ return nil, &ErrMapper{
+ objType: objType.String(),
+ field: field.Name,
+ fieldType: field.Type.String(),
+ fieldTag: colName,
+ reason: fmt.Sprintf("Wrong type, column expects %s", expType),
+ }
+ }
+ fields[colName] = field.Name
+ }
+
+ return &Info{
+ Obj: obj,
+ Metadata: Metadata{
+ Fields: fields,
+ TableSchema: table,
+ TableName: tableName,
+ },
+ }, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go b/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go
new file mode 100644
index 000000000..5ca7a412b
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go
@@ -0,0 +1,317 @@
+package mapper
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// Mapper offers functions to interact with libovsdb through user-provided native structs.
+// The way to specify what field of the struct goes
+// to what column in the database is through a field tag.
+// The tag used is "ovsdb" and has the following structure
+// 'ovsdb:"${COLUMN_NAME}"'
+// where COLUMN_NAME is the name of the column and must match the schema
+//
+// Example:
+// type MyObj struct {
+// Name string `ovsdb:"name"`
+// }
+type Mapper struct {
+ Schema ovsdb.DatabaseSchema
+}
+
+// ErrMapper describes an error in an Mapper type
+type ErrMapper struct {
+ objType string
+ field string
+ fieldType string
+ fieldTag string
+ reason string
+}
+
+func (e *ErrMapper) Error() string {
+ return fmt.Sprintf("Mapper Error. Object type %s contains field %s (%s) ovs tag %s: %s",
+ e.objType, e.field, e.fieldType, e.fieldTag, e.reason)
+}
+
+// NewMapper returns a new mapper
+func NewMapper(schema ovsdb.DatabaseSchema) Mapper {
+ return Mapper{
+ Schema: schema,
+ }
+}
+
+// GetRowData transforms a Row to a struct based on its tags
+// The result object must be given as pointer to an object with the right tags
+func (m Mapper) GetRowData(row *ovsdb.Row, result *Info) error {
+ if row == nil {
+ return nil
+ }
+ return m.getData(*row, result)
+}
+
+// getData transforms a map[string]interface{} containing OvS types (e.g: a ResultRow
+// has this format) to an orm struct
+// The result object must be given as pointer to an object with the right tags
+func (m Mapper) getData(ovsData ovsdb.Row, result *Info) error {
+ for name, column := range result.Metadata.TableSchema.Columns {
+ if !result.hasColumn(name) {
+ // If provided struct does not have a field to hold this value, skip it
+ continue
+ }
+
+ ovsElem, ok := ovsData[name]
+ if !ok {
+ // Ignore missing columns
+ continue
+ }
+
+ nativeElem, err := ovsdb.OvsToNative(column, ovsElem)
+ if err != nil {
+ return fmt.Errorf("table %s, column %s: failed to extract native element: %s",
+ result.Metadata.TableName, name, err.Error())
+ }
+
+ if err := result.SetField(name, nativeElem); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewRow transforms an orm struct to a map[string] interface{} that can be used as libovsdb.Row
+// By default, default or null values are skipped. This behavior can be modified by specifying
+// a list of fields (pointers to fields in the struct) to be added to the row
+func (m Mapper) NewRow(data *Info, fields ...interface{}) (ovsdb.Row, error) {
+ columns := make(map[string]*ovsdb.ColumnSchema)
+ for k, v := range data.Metadata.TableSchema.Columns {
+ columns[k] = v
+ }
+ columns["_uuid"] = &ovsdb.UUIDColumn
+ ovsRow := make(map[string]interface{}, len(columns))
+ for name, column := range columns {
+ nativeElem, err := data.FieldByColumn(name)
+ if err != nil {
+ // If provided struct does not have a field to hold this value, skip it
+ continue
+ }
+
+ // add specific fields
+ if len(fields) > 0 {
+ found := false
+ for _, f := range fields {
+ col, err := data.ColumnByPtr(f)
+ if err != nil {
+ return nil, err
+ }
+ if col == name {
+ found = true
+ break
+ }
+ }
+ if !found {
+ continue
+ }
+ }
+ if len(fields) == 0 && ovsdb.IsDefaultValue(column, nativeElem) {
+ continue
+ }
+ ovsElem, err := ovsdb.NativeToOvs(column, nativeElem)
+ if err != nil {
+ return nil, fmt.Errorf("table %s, column %s: failed to generate ovs element. %s", data.Metadata.TableName, name, err.Error())
+ }
+ ovsRow[name] = ovsElem
+ }
+ return ovsRow, nil
+}
+
+// NewEqualityCondition returns a list of equality conditions that match a given object
+// A list of valid columns that shall be used as an index can be provided.
+// If none are provided, we will try to use the object's field that matches the '_uuid' ovsdb tag
+// If it does not exist or is null (""), then we will traverse all of the table indexes and
+// use the first index (list of simultaneously unique columns) for which the provided mapper
+// object has valid data. The order in which they are traversed matches the order defined
+// in the schema.
+// By `valid data` we mean non-default data.
+func (m Mapper) NewEqualityCondition(data *Info, fields ...interface{}) ([]ovsdb.Condition, error) {
+ var conditions []ovsdb.Condition
+ var condIndex [][]string
+
+ // If index is provided, use it. If not, obtain the valid indexes from the mapper info
+ if len(fields) > 0 {
+ providedIndex := []string{}
+ for i := range fields {
+ if col, err := data.ColumnByPtr(fields[i]); err == nil {
+ providedIndex = append(providedIndex, col)
+ } else {
+ return nil, err
+ }
+ }
+ condIndex = append(condIndex, providedIndex)
+ } else {
+ var err error
+ condIndex, err = data.getValidIndexes()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(condIndex) == 0 {
+ return nil, fmt.Errorf("failed to find a valid index")
+ }
+
+ // Pick the first valid index
+ for _, col := range condIndex[0] {
+ field, err := data.FieldByColumn(col)
+ if err != nil {
+ return nil, err
+ }
+
+ column := data.Metadata.TableSchema.Column(col)
+ if column == nil {
+ return nil, fmt.Errorf("column %s not found", col)
+ }
+ ovsVal, err := ovsdb.NativeToOvs(column, field)
+ if err != nil {
+ return nil, err
+ }
+ conditions = append(conditions, ovsdb.NewCondition(col, ovsdb.ConditionEqual, ovsVal))
+ }
+ return conditions, nil
+}
+
+// EqualFields compares two mapped objects.
+// The indexes used for comparison are the _uuid, the table indexes, and the columns that correspond
+// to the mapped fields pointed to by 'fields'. These must be pointers to fields on the first mapped element (i.e.: one)
+func (m Mapper) EqualFields(one, other *Info, fields ...interface{}) (bool, error) {
+ indexes := []string{}
+ for _, f := range fields {
+ col, err := one.ColumnByPtr(f)
+ if err != nil {
+ return false, err
+ }
+ indexes = append(indexes, col)
+ }
+ return m.equalIndexes(one, other, indexes...)
+}
+
+// NewCondition returns a ovsdb.Condition based on the model
+func (m Mapper) NewCondition(data *Info, field interface{}, function ovsdb.ConditionFunction, value interface{}) (*ovsdb.Condition, error) {
+ column, err := data.ColumnByPtr(field)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that the condition is valid
+ columnSchema := data.Metadata.TableSchema.Column(column)
+ if columnSchema == nil {
+ return nil, fmt.Errorf("column %s not found", column)
+ }
+ if err := ovsdb.ValidateCondition(columnSchema, function, value); err != nil {
+ return nil, err
+ }
+
+ ovsValue, err := ovsdb.NativeToOvs(columnSchema, value)
+ if err != nil {
+ return nil, err
+ }
+
+ ovsdbCondition := ovsdb.NewCondition(column, function, ovsValue)
+
+ return &ovsdbCondition, nil
+
+}
+
+// NewMutation creates an RFC7047 mutation object based on an ORM object and the mutation fields (in native format)
+// It takes care of field validation against the column type
+func (m Mapper) NewMutation(data *Info, column string, mutator ovsdb.Mutator, value interface{}) (*ovsdb.Mutation, error) {
+ // Check the column exists in the object
+ if !data.hasColumn(column) {
+ return nil, fmt.Errorf("mutation contains column %s that does not exist in object %v", column, data)
+ }
+ // Check that the mutation is valid
+ columnSchema := data.Metadata.TableSchema.Column(column)
+ if columnSchema == nil {
+ return nil, fmt.Errorf("column %s not found", column)
+ }
+ if err := ovsdb.ValidateMutation(columnSchema, mutator, value); err != nil {
+ return nil, err
+ }
+
+ var ovsValue interface{}
+ var err error
+ // Usually a mutation value is of the same type of the value being mutated
+ // except for delete mutation of maps where it can also be a list of same type of
+ // keys (rfc7047 5.1). Handle this special case here.
+ if mutator == "delete" && columnSchema.Type == ovsdb.TypeMap && reflect.TypeOf(value).Kind() != reflect.Map {
+ // It's OK to cast the value to a list of elements because validation has passed
+ ovsSet, err := ovsdb.NewOvsSet(value)
+ if err != nil {
+ return nil, err
+ }
+ ovsValue = ovsSet
+ } else {
+ ovsValue, err = ovsdb.NativeToOvs(columnSchema, value)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &ovsdb.Mutation{Column: column, Mutator: mutator, Value: ovsValue}, nil
+}
+
+// equalIndexes returns whether both models are equal from the DB point of view
+// Two objects are considered equal if any of the following conditions is true
+// They have a field tagged with column name '_uuid' and their values match
+// For any of the indexes defined in the Table Schema, the values of all of its columns are simultaneously equal
+// (as per RFC7047)
+// The values of all of the optional indexes passed as variadic parameter to this function are equal.
+func (m Mapper) equalIndexes(one, other *Info, indexes ...string) (bool, error) {
+ match := false
+
+ oneIndexes, err := one.getValidIndexes()
+ if err != nil {
+ return false, err
+ }
+
+ otherIndexes, err := other.getValidIndexes()
+ if err != nil {
+ return false, err
+ }
+
+ oneIndexes = append(oneIndexes, indexes)
+ otherIndexes = append(otherIndexes, indexes)
+
+ for _, lidx := range oneIndexes {
+ for _, ridx := range otherIndexes {
+ if reflect.DeepEqual(ridx, lidx) {
+ // All columns in an index must be simultaneously equal
+ for _, col := range lidx {
+ if !one.hasColumn(col) || !other.hasColumn(col) {
+ break
+ }
+ lfield, err := one.FieldByColumn(col)
+ if err != nil {
+ return false, err
+ }
+ rfield, err := other.FieldByColumn(col)
+ if err != nil {
+ return false, err
+ }
+ if reflect.DeepEqual(lfield, rfield) {
+ match = true
+ } else {
+ match = false
+ break
+ }
+ }
+ if match {
+ return true, nil
+ }
+ }
+ }
+ }
+ return false, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/model/client.go b/vendor/github.com/ovn-org/libovsdb/model/client.go
new file mode 100644
index 000000000..5eb686244
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/model/client.go
@@ -0,0 +1,171 @@
+package model
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// ColumnKey addresses a column and optionally a key within a column
+type ColumnKey struct {
+ Column string
+ Key interface{}
+}
+
+// ClientIndex defines a client index by a set of columns
+type ClientIndex struct {
+ Columns []ColumnKey
+}
+
+// ClientDBModel contains the client information needed to build a DatabaseModel
+type ClientDBModel struct {
+ name string
+ types map[string]reflect.Type
+ indexes map[string][]ClientIndex
+}
+
+// newModel returns a new instance of a model for a specific table
+func (db ClientDBModel) newModel(table string) (Model, error) {
+ mtype, ok := db.types[table]
+ if !ok {
+ return nil, fmt.Errorf("table %s not found in database model", string(table))
+ }
+ model := reflect.New(mtype.Elem())
+ return model.Interface().(Model), nil
+}
+
+// Name returns the database name
+func (db ClientDBModel) Name() string {
+ return db.name
+}
+
+// Indexes returns the client indexes for a model
+func (db ClientDBModel) Indexes(table string) []ClientIndex {
+ if len(db.indexes) == 0 {
+ return nil
+ }
+ if _, ok := db.indexes[table]; ok {
+ return copyIndexes(db.indexes)[table]
+ }
+ return nil
+}
+
+// SetIndexes sets the client indexes. Client indexes are optional, similar to
+// schema indexes and are only tracked in the specific client instances that are
+// provided with this client model. A client index may point to multiple models
+// as uniqueness is not enforced. They are defined per table and multiple
+// indexes can be defined for a table. Each index consists of a set of columns.
+// If the column is a map, specific keys of that map can be addressed for the
+// index.
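+//
+// A hedged example with a hypothetical "Bridge" table indexed by its "name" column
+// and by the "ovn" key of its "external_ids" map column:
+//
+//	dbModel.SetIndexes(map[string][]ClientIndex{
+//		"Bridge": {
+//			{Columns: []ColumnKey{{Column: "name"}}},
+//			{Columns: []ColumnKey{{Column: "external_ids", Key: "ovn"}}},
+//		},
+//	})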
+func (db *ClientDBModel) SetIndexes(indexes map[string][]ClientIndex) {
+ db.indexes = copyIndexes(indexes)
+}
+
+// validate validates the ClientDBModel against the input schema
+// Returns all the errors detected
+func (db ClientDBModel) validate(schema ovsdb.DatabaseSchema) []error {
+ var errors []error
+ if db.name != schema.Name {
+ errors = append(errors, fmt.Errorf("database model name (%s) does not match schema (%s)",
+ db.name, schema.Name))
+ }
+
+ infos := make(map[string]*mapper.Info, len(db.types))
+ for tableName := range db.types {
+ tableSchema := schema.Table(tableName)
+ if tableSchema == nil {
+ errors = append(errors, fmt.Errorf("database model contains a model for table %s that does not exist in schema", tableName))
+ continue
+ }
+ model, err := db.newModel(tableName)
+ if err != nil {
+ errors = append(errors, err)
+ continue
+ }
+ info, err := mapper.NewInfo(tableName, tableSchema, model)
+ if err != nil {
+ errors = append(errors, err)
+ continue
+ }
+ infos[tableName] = info
+ }
+
+ for tableName, indexSets := range db.indexes {
+ info, ok := infos[tableName]
+ if !ok {
+ errors = append(errors, fmt.Errorf("database model contains a client index for table %s that does not exist in schema", tableName))
+ continue
+ }
+ for _, indexSet := range indexSets {
+ for _, indexColumn := range indexSet.Columns {
+ f, err := info.FieldByColumn(indexColumn.Column)
+ if err != nil {
+ errors = append(
+ errors,
+ fmt.Errorf("database model contains a client index for column %s that does not exist in table %s",
+ indexColumn.Column,
+ tableName))
+ continue
+ }
+ if indexColumn.Key != nil && reflect.ValueOf(f).Kind() != reflect.Map {
+ errors = append(
+ errors,
+ fmt.Errorf("database model contains a client index for key %s in column %s of table %s that is not a map",
+ indexColumn.Key,
+ indexColumn.Column,
+ tableName))
+ continue
+ }
+ }
+ }
+ }
+ return errors
+}
+
+// NewClientDBModel constructs a ClientDBModel based on a database name and dictionary of models indexed by table name
+func NewClientDBModel(name string, models map[string]Model) (ClientDBModel, error) {
+ types := make(map[string]reflect.Type, len(models))
+ for table, model := range models {
+ modelType := reflect.TypeOf(model)
+ if modelType.Kind() != reflect.Ptr || modelType.Elem().Kind() != reflect.Struct {
+ return ClientDBModel{}, fmt.Errorf("model is expected to be a pointer to struct")
+ }
+ hasUUID := false
+ for i := 0; i < modelType.Elem().NumField(); i++ {
+ if field := modelType.Elem().Field(i); field.Tag.Get("ovsdb") == "_uuid" &&
+ field.Type.Kind() == reflect.String {
+ hasUUID = true
+ break
+ }
+ }
+ if !hasUUID {
+ return ClientDBModel{}, fmt.Errorf("model is expected to have a string field called uuid")
+ }
+
+ types[table] = modelType
+ }
+ return ClientDBModel{
+ types: types,
+ name: name,
+ }, nil
+}
+
+func copyIndexes(src map[string][]ClientIndex) map[string][]ClientIndex {
+ if len(src) == 0 {
+ return nil
+ }
+ dst := make(map[string][]ClientIndex, len(src))
+ for table, indexSets := range src {
+ dst[table] = make([]ClientIndex, 0, len(indexSets))
+ for _, indexSet := range indexSets {
+ indexSetCopy := ClientIndex{
+ Columns: make([]ColumnKey, len(indexSet.Columns)),
+ }
+ copy(indexSetCopy.Columns, indexSet.Columns)
+ dst[table] = append(dst[table], indexSetCopy)
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/model/database.go b/vendor/github.com/ovn-org/libovsdb/model/database.go
new file mode 100644
index 000000000..0857d903f
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/model/database.go
@@ -0,0 +1,118 @@
+package model
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// A DatabaseModel represents libovsdb's metadata about the database.
+// It's the result of combining the client's ClientDBModel and the server's Schema
+type DatabaseModel struct {
+ client ClientDBModel
+ Schema ovsdb.DatabaseSchema
+ Mapper mapper.Mapper
+ metadata map[reflect.Type]mapper.Metadata
+}
+
+// NewDatabaseModel returns a new DatabaseModel
+func NewDatabaseModel(schema ovsdb.DatabaseSchema, client ClientDBModel) (DatabaseModel, []error) {
+ dbModel := &DatabaseModel{
+ Schema: schema,
+ client: client,
+ }
+ errs := client.validate(schema)
+ if len(errs) > 0 {
+ return DatabaseModel{}, errs
+ }
+ dbModel.Mapper = mapper.NewMapper(schema)
+ var metadata map[reflect.Type]mapper.Metadata
+ metadata, errs = generateModelInfo(schema, client.types)
+ if len(errs) > 0 {
+ return DatabaseModel{}, errs
+ }
+ dbModel.metadata = metadata
+ return *dbModel, nil
+}
+
+// NewPartialDatabaseModel returns a DatabaseModel that does not have a schema yet
+func NewPartialDatabaseModel(client ClientDBModel) DatabaseModel {
+ return DatabaseModel{
+ client: client,
+ }
+}
+
+// Valid returns whether the DatabaseModel is fully functional
+func (db DatabaseModel) Valid() bool {
+ return !reflect.DeepEqual(db.Schema, ovsdb.DatabaseSchema{})
+}
+
+// Client returns the DatabaseModel's client dbModel
+func (db DatabaseModel) Client() ClientDBModel {
+ return db.client
+}
+
+// NewModel returns a new instance of a model for a specific table
+func (db DatabaseModel) NewModel(table string) (Model, error) {
+ mtype, ok := db.client.types[table]
+ if !ok {
+ return nil, fmt.Errorf("table %s not found in database model", string(table))
+ }
+ model := reflect.New(mtype.Elem())
+ return model.Interface().(Model), nil
+}
+
+// Types returns the DatabaseModel Types
+// The DatabaseModel Types are a map of reflect.Type indexed by table name
+// Each reflect.Type is a pointer to a struct that contains 'ovsdb' tags
+// as described above. Such a pointer to struct also implements the Model interface
+func (db DatabaseModel) Types() map[string]reflect.Type {
+ return db.client.types
+}
+
+// FindTable returns the string associated with a reflect.Type or ""
+func (db DatabaseModel) FindTable(mType reflect.Type) string {
+ for table, tType := range db.client.types {
+ if tType == mType {
+ return table
+ }
+ }
+ return ""
+}
+
+// generateModelInfo creates metadata objects from all models included in the
+// database and caches them for future re-use
+func generateModelInfo(dbSchema ovsdb.DatabaseSchema, modelTypes map[string]reflect.Type) (map[reflect.Type]mapper.Metadata, []error) {
+ errors := []error{}
+ metadata := make(map[reflect.Type]mapper.Metadata, len(modelTypes))
+ for tableName, tType := range modelTypes {
+ tableSchema := dbSchema.Table(tableName)
+ if tableSchema == nil {
+ errors = append(errors, fmt.Errorf("database Model contains model for table %s which is not present in schema", tableName))
+ continue
+ }
+
+ obj := reflect.New(tType.Elem()).Interface().(Model)
+ info, err := mapper.NewInfo(tableName, tableSchema, obj)
+ if err != nil {
+ errors = append(errors, err)
+ continue
+ }
+ metadata[tType] = info.Metadata
+ }
+ return metadata, errors
+}
+
+// NewModelInfo returns a mapper.Info object based on a provided model
+func (db DatabaseModel) NewModelInfo(obj interface{}) (*mapper.Info, error) {
+ meta, ok := db.metadata[reflect.TypeOf(obj)]
+ if !ok {
+ return nil, ovsdb.NewErrWrongType("NewModelInfo", "type that is part of the DatabaseModel", obj)
+ }
+ return &mapper.Info{
+ Obj: obj,
+ Metadata: meta,
+ }, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/model/model.go b/vendor/github.com/ovn-org/libovsdb/model/model.go
new file mode 100644
index 000000000..c8575f5bf
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/model/model.go
@@ -0,0 +1,130 @@
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// A Model is the base interface used to build Database Models. It is used
+// to express how data from a specific Database Table shall be translated into structs
+// A Model is a struct with at least one (most likely more) field tagged with the 'ovsdb' tag
+// The value of the 'ovsdb' tag must be a valid column name in the OVS Database
+// A field associated with the "_uuid" column is mandatory. The rest of the columns are optional
+// The struct may also have non-tagged fields (which will be ignored by the API calls)
+// The Model interface must be implemented by the pointer to such type
+// Example:
+//type MyLogicalRouter struct {
+// UUID string `ovsdb:"_uuid"`
+// Name string `ovsdb:"name"`
+// ExternalIDs map[string]string `ovsdb:"external_ids"`
+// LoadBalancers []string `ovsdb:"load_balancer"`
+//}
+type Model interface{}
+
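+// CloneableModel is an optional interface a Model can implement to provide its own
+// deep-copy logic; when implemented, Clone and CloneInto use it instead of the
+// default JSON round trip.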
+type CloneableModel interface {
+ CloneModel() Model
+ CloneModelInto(Model)
+}
+
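+// ComparableModel is an optional interface a Model can implement to provide its own
+// equality check; when implemented, Equal uses it instead of reflect.DeepEqual.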
+type ComparableModel interface {
+ EqualsModel(Model) bool
+}
+
+// Clone creates a deep copy of a model
+func Clone(a Model) Model {
+ if cloner, ok := a.(CloneableModel); ok {
+ return cloner.CloneModel()
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(a))
+ b := reflect.New(val.Type()).Interface()
+ aBytes, _ := json.Marshal(a)
+ _ = json.Unmarshal(aBytes, b)
+ return b
+}
+
+// CloneInto deep copies a model into another one
+func CloneInto(src, dst Model) {
+ if cloner, ok := src.(CloneableModel); ok {
+ cloner.CloneModelInto(dst)
+ return
+ }
+
+ aBytes, _ := json.Marshal(src)
+ _ = json.Unmarshal(aBytes, dst)
+}
+
+func Equal(l, r Model) bool {
+ if comparator, ok := l.(ComparableModel); ok {
+ return comparator.EqualsModel(r)
+ }
+
+ return reflect.DeepEqual(l, r)
+}
+
+func modelSetUUID(model Model, uuid string) error {
+ modelVal := reflect.ValueOf(model).Elem()
+ for i := 0; i < modelVal.NumField(); i++ {
+ if field := modelVal.Type().Field(i); field.Tag.Get("ovsdb") == "_uuid" &&
+ field.Type.Kind() == reflect.String {
+ modelVal.Field(i).Set(reflect.ValueOf(uuid))
+ return nil
+ }
+ }
+ return fmt.Errorf("model is expected to have a string field mapped to column _uuid")
+}
+
+// Condition is a model-based representation of an OVSDB Condition
+type Condition struct {
+ // Pointer to the field of the model where the operation applies
+ Field interface{}
+ // Condition function
+ Function ovsdb.ConditionFunction
+ // Value to use in the condition
+ Value interface{}
+}
+
+// Mutation is a model-based representation of an OVSDB Mutation
+type Mutation struct {
+ // Pointer to the field of the model that shall be mutated
+ Field interface{}
+ // String representing the mutator (as per RFC7047)
+ Mutator ovsdb.Mutator
+ // Value to use in the mutation
+ Value interface{}
+}
+
+// CreateModel creates a new Model instance based on an OVSDB Row information
+func CreateModel(dbModel DatabaseModel, tableName string, row *ovsdb.Row, uuid string) (Model, error) {
+ if !dbModel.Valid() {
+ return nil, fmt.Errorf("database model not valid")
+ }
+
+ table := dbModel.Schema.Table(tableName)
+ if table == nil {
+ return nil, fmt.Errorf("table %s not found", tableName)
+ }
+ model, err := dbModel.NewModel(tableName)
+ if err != nil {
+ return nil, err
+ }
+ info, err := dbModel.NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+ err = dbModel.Mapper.GetRowData(row, info)
+ if err != nil {
+ return nil, err
+ }
+
+ if uuid != "" {
+ if err := info.SetField("_uuid", uuid); err != nil {
+ return nil, err
+ }
+ }
+
+ return model, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go
new file mode 100644
index 000000000..aebe2c2d0
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go
@@ -0,0 +1,427 @@
+package ovsdb
+
+import (
+ "fmt"
+ "reflect"
+)
+
+var (
+ intType = reflect.TypeOf(0)
+ realType = reflect.TypeOf(0.0)
+ boolType = reflect.TypeOf(true)
+ strType = reflect.TypeOf("")
+)
+
+// ErrWrongType describes typing error
+type ErrWrongType struct {
+ from string
+ expected string
+ got interface{}
+}
+
+func (e *ErrWrongType) Error() string {
+ return fmt.Sprintf("Wrong Type (%s): expected %s but got %+v (%s)",
+ e.from, e.expected, e.got, reflect.TypeOf(e.got))
+}
+
+// NewErrWrongType creates a new ErrWrongType
+func NewErrWrongType(from, expected string, got interface{}) error {
+ return &ErrWrongType{
+ from: from,
+ expected: expected,
+ got: got,
+ }
+}
+
+// NativeTypeFromAtomic returns the native type that can hold a value of an
+// AtomicType
+func NativeTypeFromAtomic(basicType string) reflect.Type {
+ switch basicType {
+ case TypeInteger:
+ return intType
+ case TypeReal:
+ return realType
+ case TypeBoolean:
+ return boolType
+ case TypeString:
+ return strType
+ case TypeUUID:
+ return strType
+ default:
+		panic(fmt.Sprintf("unknown basic type %s", basicType))
+ }
+}
+
+// NativeType returns the reflect.Type that can hold the value of a column
+// OVS Type to Native Type conversions:
+//
+// OVS sets -> go slices or a go native type depending on the key
+// OVS uuid -> go strings
+// OVS map -> go map
+// OVS enum -> go native type depending on the type of the enum key
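+//
+// For example, a set of strings with min 0 and max 1 maps to *string, an
+// unconstrained set of uuid maps to []string, and a map of string to string
+// maps to map[string]string.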
+func NativeType(column *ColumnSchema) reflect.Type {
+ switch column.Type {
+ case TypeInteger, TypeReal, TypeBoolean, TypeUUID, TypeString:
+ return NativeTypeFromAtomic(column.Type)
+ case TypeEnum:
+ return NativeTypeFromAtomic(column.TypeObj.Key.Type)
+ case TypeMap:
+ keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type)
+ valueType := NativeTypeFromAtomic(column.TypeObj.Value.Type)
+ return reflect.MapOf(keyType, valueType)
+ case TypeSet:
+ keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type)
+ // optional type
+ if column.TypeObj.Min() == 0 && column.TypeObj.Max() == 1 {
+ return reflect.PtrTo(keyType)
+ }
+ // non-optional type with max 1
+ if column.TypeObj.Min() == 1 && column.TypeObj.Max() == 1 {
+ return keyType
+ }
+ return reflect.SliceOf(keyType)
+ default:
+ panic(fmt.Errorf("unknown extended type %s", column.Type))
+ }
+}
+
+// OvsToNativeAtomic returns the native type of the basic ovs type
+func OvsToNativeAtomic(basicType string, ovsElem interface{}) (interface{}, error) {
+ switch basicType {
+ case TypeReal, TypeString, TypeBoolean:
+ naType := NativeTypeFromAtomic(basicType)
+ if reflect.TypeOf(ovsElem) != naType {
+ return nil, NewErrWrongType("OvsToNativeAtomic", naType.String(), ovsElem)
+ }
+ return ovsElem, nil
+ case TypeInteger:
+ naType := NativeTypeFromAtomic(basicType)
+ // Default decoding of numbers is float64, convert them to int
+ if !reflect.TypeOf(ovsElem).ConvertibleTo(naType) {
+ return nil, NewErrWrongType("OvsToNativeAtomic", fmt.Sprintf("Convertible to %s", naType), ovsElem)
+ }
+ return reflect.ValueOf(ovsElem).Convert(naType).Interface(), nil
+ case TypeUUID:
+ uuid, ok := ovsElem.(UUID)
+ if !ok {
+ return nil, NewErrWrongType("OvsToNativeAtomic", "UUID", ovsElem)
+ }
+ return uuid.GoUUID, nil
+ default:
+ panic(fmt.Errorf("unknown atomic type %s", basicType))
+ }
+}
+
+func OvsToNativeSlice(baseType string, ovsElem interface{}) (interface{}, error) {
+ naType := NativeTypeFromAtomic(baseType)
+ var nativeSet reflect.Value
+ switch ovsSet := ovsElem.(type) {
+ case OvsSet:
+ nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, len(ovsSet.GoSet))
+ for _, v := range ovsSet.GoSet {
+ nv, err := OvsToNativeAtomic(baseType, v)
+ if err != nil {
+ return nil, err
+ }
+ nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv))
+ }
+
+ default:
+ nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, 1)
+ nv, err := OvsToNativeAtomic(baseType, ovsElem)
+ if err != nil {
+ return nil, err
+ }
+
+ nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv))
+ }
+ return nativeSet.Interface(), nil
+}
+
+// OvsToNative transforms an ovs type to native one based on the column type information
+func OvsToNative(column *ColumnSchema, ovsElem interface{}) (interface{}, error) {
+ switch column.Type {
+ case TypeReal, TypeString, TypeBoolean, TypeInteger, TypeUUID:
+ return OvsToNativeAtomic(column.Type, ovsElem)
+ case TypeEnum:
+ return OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem)
+ case TypeSet:
+ naType := NativeType(column)
+ // The inner slice is []interface{}
+		// We need to convert it to the real slice type
+ switch naType.Kind() {
+ case reflect.Ptr:
+ switch ovsSet := ovsElem.(type) {
+ case OvsSet:
+ if len(ovsSet.GoSet) > 1 {
+					return nil, fmt.Errorf("expected a slice of len <= 1, but got a slice with %d elements", len(ovsSet.GoSet))
+ }
+ if len(ovsSet.GoSet) == 0 {
+ return reflect.Zero(naType).Interface(), nil
+ }
+ native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsSet.GoSet[0])
+ if err != nil {
+ return nil, err
+ }
+ pv := reflect.New(naType.Elem())
+ pv.Elem().Set(reflect.ValueOf(native))
+ return pv.Interface(), nil
+ default:
+ native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem)
+ if err != nil {
+ return nil, err
+ }
+ pv := reflect.New(naType.Elem())
+ pv.Elem().Set(reflect.ValueOf(native))
+ return pv.Interface(), nil
+ }
+ case reflect.Slice:
+ return OvsToNativeSlice(column.TypeObj.Key.Type, ovsElem)
+ default:
+ return nil, fmt.Errorf("native type was not slice or pointer. got %d", naType.Kind())
+ }
+ case TypeMap:
+ naType := NativeType(column)
+ ovsMap, ok := ovsElem.(OvsMap)
+ if !ok {
+ return nil, NewErrWrongType("OvsToNative", "OvsMap", ovsElem)
+ }
+		// The inner type is map[interface{}]interface{}
+		// We need to convert it to the real map type
+ nativeMap := reflect.MakeMapWithSize(naType, len(ovsMap.GoMap))
+ for k, v := range ovsMap.GoMap {
+ nk, err := OvsToNativeAtomic(column.TypeObj.Key.Type, k)
+ if err != nil {
+ return nil, err
+ }
+ nv, err := OvsToNativeAtomic(column.TypeObj.Value.Type, v)
+ if err != nil {
+ return nil, err
+ }
+ nativeMap.SetMapIndex(reflect.ValueOf(nk), reflect.ValueOf(nv))
+ }
+ return nativeMap.Interface(), nil
+ default:
+ panic(fmt.Sprintf("Unknown Type: %v", column.Type))
+ }
+}
+
+// NativeToOvsAtomic converts an atomic native value into its OVS representation
+func NativeToOvsAtomic(basicType string, nativeElem interface{}) (interface{}, error) {
+ naType := NativeTypeFromAtomic(basicType)
+ if reflect.TypeOf(nativeElem) != naType {
+ return nil, NewErrWrongType("NativeToOvsAtomic", naType.String(), nativeElem)
+ }
+ switch basicType {
+ case TypeUUID:
+ return UUID{GoUUID: nativeElem.(string)}, nil
+ default:
+ return nativeElem, nil
+ }
+}
+
+// NativeToOvs transforms a native value into an OVS one, based on the column type information
+func NativeToOvs(column *ColumnSchema, rawElem interface{}) (interface{}, error) {
+ naType := NativeType(column)
+ if t := reflect.TypeOf(rawElem); t != naType {
+ return nil, NewErrWrongType("NativeToOvs", naType.String(), rawElem)
+ }
+
+ switch column.Type {
+ case TypeInteger, TypeReal, TypeString, TypeBoolean, TypeEnum:
+ return rawElem, nil
+ case TypeUUID:
+ return UUID{GoUUID: rawElem.(string)}, nil
+ case TypeSet:
+ var ovsSet OvsSet
+ if column.TypeObj.Key.Type == TypeUUID {
+ ovsSlice := []interface{}{}
+ if _, ok := rawElem.([]string); ok {
+ for _, v := range rawElem.([]string) {
+ uuid := UUID{GoUUID: v}
+ ovsSlice = append(ovsSlice, uuid)
+ }
+ } else if _, ok := rawElem.(*string); ok {
+ v := rawElem.(*string)
+ if v != nil {
+ uuid := UUID{GoUUID: *v}
+ ovsSlice = append(ovsSlice, uuid)
+ }
+ } else {
+				return nil, fmt.Errorf("uuid slice was neither []string nor *string")
+ }
+ ovsSet = OvsSet{GoSet: ovsSlice}
+
+ } else {
+ var err error
+ ovsSet, err = NewOvsSet(rawElem)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ovsSet, nil
+ case TypeMap:
+ nativeMapVal := reflect.ValueOf(rawElem)
+ ovsMap := make(map[interface{}]interface{}, nativeMapVal.Len())
+ for _, key := range nativeMapVal.MapKeys() {
+ ovsKey, err := NativeToOvsAtomic(column.TypeObj.Key.Type, key.Interface())
+ if err != nil {
+ return nil, err
+ }
+ ovsVal, err := NativeToOvsAtomic(column.TypeObj.Value.Type, nativeMapVal.MapIndex(key).Interface())
+ if err != nil {
+ return nil, err
+ }
+ ovsMap[ovsKey] = ovsVal
+ }
+ return OvsMap{GoMap: ovsMap}, nil
+
+ default:
+ panic(fmt.Sprintf("Unknown Type: %v", column.Type))
+ }
+}
+
+// IsDefaultValue checks if a provided native element corresponds to the default value of its
+// designated column type
+func IsDefaultValue(column *ColumnSchema, nativeElem interface{}) bool {
+ switch column.Type {
+ case TypeEnum:
+ return isDefaultBaseValue(nativeElem, column.TypeObj.Key.Type)
+ default:
+ return isDefaultBaseValue(nativeElem, column.Type)
+ }
+}
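+
+// Illustrative sketch (editorial note): for plain atomic columns the Go zero
+// value counts as the default, and the all-zeros UUID is also treated as a
+// default uuid value:
+//
+//	IsDefaultValue(&ColumnSchema{Type: TypeString}, "")  // true
+//	IsDefaultValue(&ColumnSchema{Type: TypeInteger}, 7)  // false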
+
+// validateMutationAtomic checks whether the mutation is valid for a specific atomic type
+func validateMutationAtomic(atype string, mutator Mutator, value interface{}) error {
+ nType := NativeTypeFromAtomic(atype)
+ if reflect.TypeOf(value) != nType {
+ return NewErrWrongType(fmt.Sprintf("Mutation of atomic type %s", atype), nType.String(), value)
+ }
+
+ switch atype {
+ case TypeUUID, TypeString, TypeBoolean:
+ return fmt.Errorf("atomictype %s does not support mutation", atype)
+ case TypeReal:
+ switch mutator {
+ case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide:
+ return nil
+ default:
+ return fmt.Errorf("wrong mutator for real type %s", mutator)
+ }
+ case TypeInteger:
+ switch mutator {
+ case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo:
+ return nil
+ default:
+ return fmt.Errorf("wrong mutator for integer type: %s", mutator)
+ }
+ default:
+ panic("Unsupported Atomic Type")
+ }
+}
+
+// ValidateMutation checks if the mutation value and mutator string are appropriate
+// for a given column based on the rules specified in RFC7047
+func ValidateMutation(column *ColumnSchema, mutator Mutator, value interface{}) error {
+ if !column.Mutable() {
+ return fmt.Errorf("column is not mutable")
+ }
+ switch column.Type {
+ case TypeSet:
+ switch mutator {
+ case MutateOperationInsert, MutateOperationDelete:
+			// RFC7047 says a <set> may be an <atom> with a single
+			// element. Check if we can store this value in our column
+ if reflect.TypeOf(value).Kind() != reflect.Slice {
+ if NativeType(column) != reflect.SliceOf(reflect.TypeOf(value)) {
+ return NewErrWrongType(fmt.Sprintf("Mutation %s of single value in to column %s", mutator, column),
+ NativeType(column).String(), reflect.SliceOf(reflect.TypeOf(value)).String())
+ }
+ return nil
+ }
+ if NativeType(column) != reflect.TypeOf(value) {
+ return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column),
+ NativeType(column).String(), value)
+ }
+ return nil
+ default:
+ return validateMutationAtomic(column.TypeObj.Key.Type, mutator, value)
+ }
+ case TypeMap:
+ switch mutator {
+ case MutateOperationInsert:
+ // Value must be a map of the same kind
+ if reflect.TypeOf(value) != NativeType(column) {
+ return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column),
+ NativeType(column).String(), value)
+ }
+ return nil
+ case MutateOperationDelete:
+ // Value must be a map of the same kind or a set of keys to delete
+ if reflect.TypeOf(value) != NativeType(column) &&
+ reflect.TypeOf(value) != reflect.SliceOf(NativeTypeFromAtomic(column.TypeObj.Key.Type)) {
+ return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column),
+ "compatible map type", value)
+ }
+ return nil
+
+ default:
+ return fmt.Errorf("wrong mutator for map type: %s", mutator)
+ }
+ case TypeEnum:
+ // RFC does not clarify what to do with enums.
+ return fmt.Errorf("enums do not support mutation")
+ default:
+ return validateMutationAtomic(column.Type, mutator, value)
+ }
+}
+
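+// ValidateCondition checks whether the condition function and the native value
+// are appropriate for the given column, following RFC7047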
+func ValidateCondition(column *ColumnSchema, function ConditionFunction, nativeValue interface{}) error {
+ if NativeType(column) != reflect.TypeOf(nativeValue) {
+ return NewErrWrongType(fmt.Sprintf("Condition for column %s", column),
+ NativeType(column).String(), nativeValue)
+ }
+
+ switch column.Type {
+ case TypeSet, TypeMap, TypeBoolean, TypeString, TypeUUID:
+ switch function {
+ case ConditionEqual, ConditionNotEqual, ConditionIncludes, ConditionExcludes:
+ return nil
+ default:
+ return fmt.Errorf("wrong condition function %s for type: %s", function, column.Type)
+ }
+ case TypeInteger, TypeReal:
+ // All functions are valid
+ return nil
+ default:
+ panic("Unsupported Type")
+ }
+}
+
+func isDefaultBaseValue(elem interface{}, etype ExtendedType) bool {
+ value := reflect.ValueOf(elem)
+ if !value.IsValid() {
+ return true
+ }
+ if reflect.TypeOf(elem).Kind() == reflect.Ptr {
+ return reflect.ValueOf(elem).IsZero()
+ }
+ switch etype {
+ case TypeUUID:
+ return elem.(string) == "00000000-0000-0000-0000-000000000000" || elem.(string) == ""
+ case TypeMap, TypeSet:
+ if value.Kind() == reflect.Array {
+ return value.Len() == 0
+ }
+ return value.IsNil() || value.Len() == 0
+ case TypeString:
+ return elem.(string) == ""
+ case TypeInteger:
+ return elem.(int) == 0
+ case TypeReal:
+ return elem.(float64) == 0
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go
new file mode 100644
index 000000000..783ac0f55
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go
@@ -0,0 +1,223 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+type ConditionFunction string
+type WaitCondition string
+
+const (
+ // ConditionLessThan is the less than condition
+ ConditionLessThan ConditionFunction = "<"
+ // ConditionLessThanOrEqual is the less than or equal condition
+ ConditionLessThanOrEqual ConditionFunction = "<="
+ // ConditionEqual is the equal condition
+ ConditionEqual ConditionFunction = "=="
+ // ConditionNotEqual is the not equal condition
+ ConditionNotEqual ConditionFunction = "!="
+ // ConditionGreaterThan is the greater than condition
+ ConditionGreaterThan ConditionFunction = ">"
+ // ConditionGreaterThanOrEqual is the greater than or equal condition
+ ConditionGreaterThanOrEqual ConditionFunction = ">="
+ // ConditionIncludes is the includes condition
+ ConditionIncludes ConditionFunction = "includes"
+ // ConditionExcludes is the excludes condition
+ ConditionExcludes ConditionFunction = "excludes"
+
+ // WaitConditionEqual is the equal condition
+ WaitConditionEqual WaitCondition = "=="
+ // WaitConditionNotEqual is the not equal condition
+ WaitConditionNotEqual WaitCondition = "!="
+)
+
+// Condition is described in RFC 7047: 5.1
+type Condition struct {
+ Column string
+ Function ConditionFunction
+ Value interface{}
+}
+
+func (c Condition) String() string {
+ return fmt.Sprintf("where column %s %s %v", c.Column, c.Function, c.Value)
+}
+
+// NewCondition returns a new condition
+func NewCondition(column string, function ConditionFunction, value interface{}) Condition {
+ return Condition{
+ Column: column,
+ Function: function,
+ Value: value,
+ }
+}
+
+// MarshalJSON marshals a condition to a 3 element JSON array
+func (c Condition) MarshalJSON() ([]byte, error) {
+ v := []interface{}{c.Column, c.Function, c.Value}
+ return json.Marshal(v)
+}
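+
+// Illustrative sketch (editorial note): a condition serializes to the RFC 7047
+// three-element array form:
+//
+//	b, _ := json.Marshal(NewCondition("name", ConditionEqual, "br-int"))
+//	// string(b) == `["name","==","br-int"]`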
+
+// UnmarshalJSON converts a 3 element JSON array to a Condition
+func (c *Condition) UnmarshalJSON(b []byte) error {
+ var v []interface{}
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+ if len(v) != 3 {
+ return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v))
+ }
+ c.Column = v[0].(string)
+ function := ConditionFunction(v[1].(string))
+ switch function {
+ case ConditionEqual,
+ ConditionNotEqual,
+ ConditionIncludes,
+ ConditionExcludes,
+ ConditionGreaterThan,
+ ConditionGreaterThanOrEqual,
+ ConditionLessThan,
+ ConditionLessThanOrEqual:
+ c.Function = function
+ default:
+ return fmt.Errorf("%s is not a valid function", function)
+ }
+ vv, err := ovsSliceToGoNotation(v[2])
+ if err != nil {
+ return err
+ }
+ c.Value = vv
+ return nil
+}
+
+// Evaluate will evaluate the condition on the two provided values
+// The conditions operate differently depending on the type of
+// the provided values. The behavior is as described in RFC7047
+func (c ConditionFunction) Evaluate(a interface{}, b interface{}) (bool, error) {
+ x := reflect.ValueOf(a)
+ y := reflect.ValueOf(b)
+ if x.Kind() != y.Kind() {
+ return false, fmt.Errorf("comparison between %s and %s not supported", x.Kind(), y.Kind())
+ }
+ switch c {
+ case ConditionEqual:
+ return reflect.DeepEqual(a, b), nil
+ case ConditionNotEqual:
+ return !reflect.DeepEqual(a, b), nil
+ case ConditionIncludes:
+ switch x.Kind() {
+ case reflect.Slice:
+ return sliceContains(x, y), nil
+ case reflect.Map:
+ return mapContains(x, y), nil
+ case reflect.Int, reflect.Float64, reflect.Bool, reflect.String:
+ return reflect.DeepEqual(a, b), nil
+ default:
+ return false, fmt.Errorf("condition not supported on %s", x.Kind())
+ }
+ case ConditionExcludes:
+ switch x.Kind() {
+ case reflect.Slice:
+ return !sliceContains(x, y), nil
+ case reflect.Map:
+ return !mapContains(x, y), nil
+ case reflect.Int, reflect.Float64, reflect.Bool, reflect.String:
+ return !reflect.DeepEqual(a, b), nil
+ default:
+ return false, fmt.Errorf("condition not supported on %s", x.Kind())
+ }
+ case ConditionGreaterThan:
+ switch x.Kind() {
+ case reflect.Int:
+ return x.Int() > y.Int(), nil
+ case reflect.Float64:
+ return x.Float() > y.Float(), nil
+ case reflect.Bool, reflect.String, reflect.Slice, reflect.Map:
+ default:
+ return false, fmt.Errorf("condition not supported on %s", x.Kind())
+ }
+ case ConditionGreaterThanOrEqual:
+ switch x.Kind() {
+ case reflect.Int:
+ return x.Int() >= y.Int(), nil
+ case reflect.Float64:
+ return x.Float() >= y.Float(), nil
+ case reflect.Bool, reflect.String, reflect.Slice, reflect.Map:
+ default:
+ return false, fmt.Errorf("condition not supported on %s", x.Kind())
+ }
+ case ConditionLessThan:
+ switch x.Kind() {
+ case reflect.Int:
+ return x.Int() < y.Int(), nil
+ case reflect.Float64:
+ return x.Float() < y.Float(), nil
+ case reflect.Bool, reflect.String, reflect.Slice, reflect.Map:
+ default:
+ return false, fmt.Errorf("condition not supported on %s", x.Kind())
+ }
+ case ConditionLessThanOrEqual:
+ switch x.Kind() {
+ case reflect.Int:
+ return x.Int() <= y.Int(), nil
+ case reflect.Float64:
+ return x.Float() <= y.Float(), nil
+ case reflect.Bool, reflect.String, reflect.Slice, reflect.Map:
+ default:
+ return false, fmt.Errorf("condition not supported on %s", x.Kind())
+ }
+ default:
+ return false, fmt.Errorf("unsupported condition function %s", c)
+ }
+ // we should never get here
+ return false, fmt.Errorf("unreachable condition")
+}
+
+func sliceContains(x, y reflect.Value) bool {
+ for i := 0; i < y.Len(); i++ {
+ found := false
+ vy := y.Index(i)
+ for j := 0; j < x.Len(); j++ {
+ vx := x.Index(j)
+ if vy.Kind() == reflect.Interface {
+ if vy.Elem() == vx.Elem() {
+ found = true
+ break
+ }
+ } else {
+ if vy.Interface() == vx.Interface() {
+ found = true
+ break
+ }
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
+
+func mapContains(x, y reflect.Value) bool {
+ iter := y.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ v := iter.Value()
+ vx := x.MapIndex(k)
+ if !vx.IsValid() {
+ return false
+ }
+ if v.Kind() != reflect.Interface {
+ if v.Interface() != vx.Interface() {
+ return false
+ }
+ } else {
+ if v.Elem() != vx.Elem() {
+ return false
+ }
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go
new file mode 100644
index 000000000..4a85c541c
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go
@@ -0,0 +1,373 @@
+package ovsdb
+
+import "fmt"
+
+const (
+ referentialIntegrityViolation = "referential integrity violation"
+ constraintViolation = "constraint violation"
+ resourcesExhausted = "resources exhausted"
+ ioError = "I/O error"
+ duplicateUUIDName = "duplicate uuid name"
+ domainError = "domain error"
+ rangeError = "range error"
+ timedOut = "timed out"
+ notSupported = "not supported"
+ aborted = "aborted"
+ notOwner = "not owner"
+)
+
+// errorFromResult returns a specific OVSDB error type from
+// an OperationResult
+func errorFromResult(op *Operation, r OperationResult) OperationError {
+ if r.Error == "" {
+ return nil
+ }
+ switch r.Error {
+ case referentialIntegrityViolation:
+ return &ReferentialIntegrityViolation{r.Details, op}
+ case constraintViolation:
+ return &ConstraintViolation{r.Details, op}
+ case resourcesExhausted:
+ return &ResourcesExhausted{r.Details, op}
+ case ioError:
+ return &IOError{r.Details, op}
+ case duplicateUUIDName:
+ return &DuplicateUUIDName{r.Details, op}
+ case domainError:
+ return &DomainError{r.Details, op}
+ case rangeError:
+ return &RangeError{r.Details, op}
+ case timedOut:
+ return &TimedOut{r.Details, op}
+ case notSupported:
+ return &NotSupported{r.Details, op}
+ case aborted:
+ return &Aborted{r.Details, op}
+ case notOwner:
+ return &NotOwner{r.Details, op}
+ default:
+ return &Error{r.Error, r.Details, op}
+ }
+}
+
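+// ResultFromError maps an error back to an OperationResult, filling the Error
+// and Details fields for the known OVSDB error types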
+func ResultFromError(err error) OperationResult {
+ if err == nil {
+		panic("Program error: passed nil error to ResultFromError")
+ }
+ switch e := err.(type) {
+ case *ReferentialIntegrityViolation:
+ return OperationResult{Error: referentialIntegrityViolation, Details: e.details}
+ case *ConstraintViolation:
+ return OperationResult{Error: constraintViolation, Details: e.details}
+ case *ResourcesExhausted:
+ return OperationResult{Error: resourcesExhausted, Details: e.details}
+ case *IOError:
+ return OperationResult{Error: ioError, Details: e.details}
+ case *DuplicateUUIDName:
+ return OperationResult{Error: duplicateUUIDName, Details: e.details}
+ case *DomainError:
+ return OperationResult{Error: domainError, Details: e.details}
+ case *RangeError:
+ return OperationResult{Error: rangeError, Details: e.details}
+ case *TimedOut:
+ return OperationResult{Error: timedOut, Details: e.details}
+ case *NotSupported:
+ return OperationResult{Error: notSupported, Details: e.details}
+ case *Aborted:
+ return OperationResult{Error: aborted, Details: e.details}
+ case *NotOwner:
+ return OperationResult{Error: notOwner, Details: e.details}
+ default:
+ return OperationResult{Error: e.Error()}
+ }
+}
+
+// CheckOperationResults checks whether the provided operations were a success.
+// If the operations were a success, it will return nil, nil.
+// If the operations failed due to an error committing the transaction, it will
+// return nil, error.
+// Finally, in the case where one or more of the operations in the transaction
+// failed, we return []OperationErrors, error
+// Within []OperationErrors, the OperationErrors.Index() corresponds to the same index in
+// the original Operations struct. You may also perform type assertions against
+// the error so the caller can decide how best to handle it
+func CheckOperationResults(result []OperationResult, ops []Operation) ([]OperationError, error) {
+ // this shouldn't happen, but we'll cover the case to be certain
+ if len(result) < len(ops) {
+ return nil, fmt.Errorf("ovsdb transaction error. %d operations submitted but only %d results received", len(ops), len(result))
+ }
+ var errs []OperationError
+ for i, op := range result {
+ // RFC 7047: if all of the operations succeed, but the results cannot
+ // be committed, then "result" will have one more element than "params",
+		// with the additional element being an <error>.
+ if i >= len(ops) {
+ return errs, errorFromResult(nil, op)
+ }
+ if err := errorFromResult(&ops[i], op); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) > 0 {
+ return errs, fmt.Errorf("%d ovsdb operations failed", len(errs))
+ }
+ return nil, nil
+}
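+
+// Illustrative sketch (editorial note): callers typically inspect both return
+// values; each OperationError can also be type-asserted to a concrete error type.
+// Here, results and ops are the caller's transaction results and operations:
+//
+//	opErrs, err := CheckOperationResults(results, ops)
+//	if err != nil {
+//		for _, oe := range opErrs {
+//			fmt.Println(oe.Error(), oe.Operation())
+//		}
+//	}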
+
+// OperationError represents an error that occurred as part of an
+// OVSDB Operation
+type OperationError interface {
+ error
+ // Operation is a pointer to the operation which caused the error
+ Operation() *Operation
+}
+
+// ReferentialIntegrityViolation is explained in RFC 7047 4.1.3
+type ReferentialIntegrityViolation struct {
+ details string
+ operation *Operation
+}
+
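+// NewReferentialIntegrityViolation returns a new ReferentialIntegrityViolation error with the given details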
+func NewReferentialIntegrityViolation(details string) *ReferentialIntegrityViolation {
+ return &ReferentialIntegrityViolation{details: details}
+}
+
+// Error implements the error interface
+func (e *ReferentialIntegrityViolation) Error() string {
+ msg := referentialIntegrityViolation
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *ReferentialIntegrityViolation) Operation() *Operation {
+ return e.operation
+}
+
+// ConstraintViolation is described in RFC 7047: 4.1.3
+type ConstraintViolation struct {
+ details string
+ operation *Operation
+}
+
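+// NewConstraintViolation returns a new ConstraintViolation error with the given details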
+func NewConstraintViolation(details string) *ConstraintViolation {
+ return &ConstraintViolation{details: details}
+}
+
+// Error implements the error interface
+func (e *ConstraintViolation) Error() string {
+ msg := constraintViolation
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *ConstraintViolation) Operation() *Operation {
+ return e.operation
+}
+
+// ResourcesExhausted is described in RFC 7047: 4.1.3
+type ResourcesExhausted struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *ResourcesExhausted) Error() string {
+ msg := resourcesExhausted
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *ResourcesExhausted) Operation() *Operation {
+ return e.operation
+}
+
+// IOError is described in RFC7047: 4.1.3
+type IOError struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *IOError) Error() string {
+ msg := ioError
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *IOError) Operation() *Operation {
+ return e.operation
+}
+
+// DuplicateUUIDName is described in RFC7047 5.2.1
+type DuplicateUUIDName struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *DuplicateUUIDName) Error() string {
+ msg := duplicateUUIDName
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *DuplicateUUIDName) Operation() *Operation {
+ return e.operation
+}
+
+// DomainError is described in RFC 7047: 5.2.4
+type DomainError struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *DomainError) Error() string {
+ msg := domainError
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *DomainError) Operation() *Operation {
+ return e.operation
+}
+
+// RangeError is described in RFC 7047: 5.2.4
+type RangeError struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *RangeError) Error() string {
+ msg := rangeError
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *RangeError) Operation() *Operation {
+ return e.operation
+}
+
+// TimedOut is described in RFC 7047: 5.2.6
+type TimedOut struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *TimedOut) Error() string {
+ msg := timedOut
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *TimedOut) Operation() *Operation {
+ return e.operation
+}
+
+// NotSupported is described in RFC 7047: 5.2.7
+type NotSupported struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *NotSupported) Error() string {
+ msg := notSupported
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *NotSupported) Operation() *Operation {
+ return e.operation
+}
+
+// Aborted is described in RFC 7047: 5.2.8
+type Aborted struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *Aborted) Error() string {
+ msg := aborted
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *Aborted) Operation() *Operation {
+ return e.operation
+}
+
+// NotOwner is described in RFC 7047: 5.2.9
+type NotOwner struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *NotOwner) Error() string {
+ msg := notOwner
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *NotOwner) Operation() *Operation {
+ return e.operation
+}
+
+// Error is a generic OVSDB Error type that implements the
+// OperationError and error interfaces
+type Error struct {
+ name string
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *Error) Error() string {
+ msg := e.name
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *Error) Operation() *Operation {
+ return e.operation
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go
new file mode 100644
index 000000000..893a9774f
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go
@@ -0,0 +1,92 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// OvsMap is the JSON map structure used for OVSDB
+// RFC 7047 uses the following notation for a map, as JSON doesn't support non-string keys for maps:
+// a 2-element JSON array that represents a database map value. The
+// first element of the array must be the string "map", and the
+// second element must be an array of zero or more <pair>s giving the
+// values in the map. All of the <pair>s must have the same key and
+// value types.
+type OvsMap struct {
+ GoMap map[interface{}]interface{}
+}
+
+// MarshalJSON marshals an OVSDB style Map to a byte array
+func (o OvsMap) MarshalJSON() ([]byte, error) {
+ if len(o.GoMap) > 0 {
+ var ovsMap, innerMap []interface{}
+ ovsMap = append(ovsMap, "map")
+ for key, val := range o.GoMap {
+ var mapSeg []interface{}
+ mapSeg = append(mapSeg, key)
+ mapSeg = append(mapSeg, val)
+ innerMap = append(innerMap, mapSeg)
+ }
+ ovsMap = append(ovsMap, innerMap)
+ return json.Marshal(ovsMap)
+ }
+ return []byte("[\"map\",[]]"), nil
+}
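+
+// Illustrative sketch (editorial note): a one-entry map serializes to the
+// RFC 7047 "map" notation:
+//
+//	m, _ := NewOvsMap(map[string]string{"mtu": "1400"})
+//	b, _ := json.Marshal(m)
+//	// string(b) == `["map",[["mtu","1400"]]]`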
+
+// UnmarshalJSON unmarshals an OVSDB style Map from a byte array
+func (o *OvsMap) UnmarshalJSON(b []byte) (err error) {
+ var oMap []interface{}
+ o.GoMap = make(map[interface{}]interface{})
+ if err := json.Unmarshal(b, &oMap); err == nil && len(oMap) > 1 {
+ innerSlice := oMap[1].([]interface{})
+ for _, val := range innerSlice {
+ f := val.([]interface{})
+ var k interface{}
+ switch f[0].(type) {
+ case []interface{}:
+ vSet := f[0].([]interface{})
+ if len(vSet) != 2 || vSet[0] == "map" {
+ return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)}
+ }
+ goSlice, err := ovsSliceToGoNotation(vSet)
+ if err != nil {
+ return err
+ }
+ k = goSlice
+ default:
+ k = f[0]
+ }
+ switch f[1].(type) {
+ case []interface{}:
+ vSet := f[1].([]interface{})
+ if len(vSet) != 2 || vSet[0] == "map" {
+ return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)}
+ }
+ goSlice, err := ovsSliceToGoNotation(vSet)
+ if err != nil {
+ return err
+ }
+ o.GoMap[k] = goSlice
+ default:
+ o.GoMap[k] = f[1]
+ }
+ }
+ }
+ return err
+}
+
+// NewOvsMap will return an OVSDB style map from a provided Golang Map
+func NewOvsMap(goMap interface{}) (OvsMap, error) {
+ v := reflect.ValueOf(goMap)
+ if v.Kind() != reflect.Map {
+ return OvsMap{}, fmt.Errorf("ovsmap supports only go map types")
+ }
+
+ genMap := make(map[interface{}]interface{})
+ keys := v.MapKeys()
+ for _, key := range keys {
+ genMap[key.Interface()] = v.MapIndex(key).Interface()
+ }
+ return OvsMap{genMap}, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go
new file mode 100644
index 000000000..b97e06285
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go
@@ -0,0 +1,88 @@
+package ovsdb
+
+import "encoding/json"
+
+// MonitorSelect represents a monitor select according to RFC7047
+type MonitorSelect struct {
+ initial *bool
+ insert *bool
+ delete *bool
+ modify *bool
+}
+
+// NewMonitorSelect returns a new MonitorSelect with the provided values
+func NewMonitorSelect(initial, insert, delete, modify bool) *MonitorSelect {
+ return &MonitorSelect{
+ initial: &initial,
+ insert: &insert,
+ delete: &delete,
+ modify: &modify,
+ }
+}
+
+// NewDefaultMonitorSelect returns a new MonitorSelect with default values
+func NewDefaultMonitorSelect() *MonitorSelect {
+ return NewMonitorSelect(true, true, true, true)
+}
+
+// Initial returns whether or not an initial response will be sent
+func (m MonitorSelect) Initial() bool {
+ if m.initial == nil {
+ return true
+ }
+ return *m.initial
+}
+
+// Insert returns whether we will receive updates for inserts
+func (m MonitorSelect) Insert() bool {
+ if m.insert == nil {
+ return true
+ }
+ return *m.insert
+}
+
+// Delete returns whether we will receive updates for deletions
+func (m MonitorSelect) Delete() bool {
+ if m.delete == nil {
+ return true
+ }
+ return *m.delete
+}
+
+// Modify returns whether we will receive updates for modifications
+func (m MonitorSelect) Modify() bool {
+ if m.modify == nil {
+ return true
+ }
+ return *m.modify
+}
+
+type monitorSelect struct {
+ Initial *bool `json:"initial,omitempty"`
+ Insert *bool `json:"insert,omitempty"`
+ Delete *bool `json:"delete,omitempty"`
+ Modify *bool `json:"modify,omitempty"`
+}
+
+func (m MonitorSelect) MarshalJSON() ([]byte, error) {
+ ms := monitorSelect{
+ Initial: m.initial,
+ Insert: m.insert,
+ Delete: m.delete,
+ Modify: m.modify,
+ }
+ return json.Marshal(ms)
+}
+
+func (m *MonitorSelect) UnmarshalJSON(data []byte) error {
+ var ms monitorSelect
+ err := json.Unmarshal(data, &ms)
+ if err != nil {
+ return err
+ }
+ m.initial = ms.Initial
+ m.insert = ms.Insert
+ m.delete = ms.Delete
+ m.modify = ms.Modify
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go
new file mode 100644
index 000000000..dc8b0f6d4
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go
@@ -0,0 +1,87 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type Mutator string
+
+const (
+ // MutateOperationDelete is the delete mutator
+ MutateOperationDelete Mutator = "delete"
+ // MutateOperationInsert is the insert mutator
+ MutateOperationInsert Mutator = "insert"
+ // MutateOperationAdd is the add mutator
+ MutateOperationAdd Mutator = "+="
+ // MutateOperationSubtract is the subtract mutator
+ MutateOperationSubtract Mutator = "-="
+ // MutateOperationMultiply is the multiply mutator
+ MutateOperationMultiply Mutator = "*="
+ // MutateOperationDivide is the divide mutator
+ MutateOperationDivide Mutator = "/="
+ // MutateOperationModulo is the modulo mutator
+ MutateOperationModulo Mutator = "%="
+)
+
+// Mutation is described in RFC 7047: 5.1
+type Mutation struct {
+ Column string
+ Mutator Mutator
+ Value interface{}
+}
+
+// NewMutation returns a new mutation
+func NewMutation(column string, mutator Mutator, value interface{}) *Mutation {
+ return &Mutation{
+ Column: column,
+ Mutator: mutator,
+ Value: value,
+ }
+}
+
+// MarshalJSON marshals a mutation to a 3 element JSON array
+func (m Mutation) MarshalJSON() ([]byte, error) {
+ v := []interface{}{m.Column, m.Mutator, m.Value}
+ return json.Marshal(v)
+}
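+
+// Illustrative sketch (editorial note): a mutation also serializes to a
+// three-element array:
+//
+//	b, _ := json.Marshal(NewMutation("n_packets", MutateOperationAdd, 1))
+//	// string(b) == `["n_packets","+=",1]`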
+
+// UnmarshalJSON converts a 3 element JSON array to a Mutation
+func (m *Mutation) UnmarshalJSON(b []byte) error {
+ var v []interface{}
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+ if len(v) != 3 {
+ return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v))
+ }
+ ok := false
+ m.Column, ok = v[0].(string)
+ if !ok {
+ return fmt.Errorf("expected column name %v to be a valid string", v[0])
+ }
+ mutatorString, ok := v[1].(string)
+ if !ok {
+ return fmt.Errorf("expected mutator %v to be a valid string", v[1])
+ }
+ mutator := Mutator(mutatorString)
+ switch mutator {
+ case MutateOperationDelete,
+ MutateOperationInsert,
+ MutateOperationAdd,
+ MutateOperationSubtract,
+ MutateOperationMultiply,
+ MutateOperationDivide,
+ MutateOperationModulo:
+ m.Mutator = mutator
+ default:
+ return fmt.Errorf("%s is not a valid mutator", mutator)
+ }
+ vv, err := ovsSliceToGoNotation(v[2])
+ if err != nil {
+ return err
+ }
+ m.Value = vv
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go
new file mode 100644
index 000000000..29034ee9d
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go
@@ -0,0 +1,165 @@
+package ovsdb
+
+import (
+ "fmt"
+)
+
+// ExpandNamedUUIDs replaces named UUIDs in columns that contain UUID types
+// throughout the operation. The caller must ensure each input operation has
+// a valid UUID, which may be replaced if a previous operation created a
+// matching named UUID mapping. Returns the updated operations or an error.
+func ExpandNamedUUIDs(ops []Operation, schema *DatabaseSchema) ([]Operation, error) {
+ uuidMap := make(map[string]string)
+
+ // Pass 1: replace the named UUID with a real UUID for each operation and
+ // build the substitution map
+ for i := range ops {
+ op := &ops[i]
+ if op.Op != OperationInsert {
+ // Only Insert operations can specify a Named UUID
+ continue
+ }
+
+ if err := ValidateUUID(op.UUID); err != nil {
+ return nil, fmt.Errorf("operation UUID %q invalid: %v", op.UUID, err)
+ }
+
+ if op.UUIDName != "" {
+ if uuid, ok := uuidMap[op.UUIDName]; ok {
+ if op.UUID != "" && op.UUID != uuid {
+ return nil, fmt.Errorf("named UUID %q maps to UUID %q but found existing UUID %q",
+ op.UUIDName, uuid, op.UUID)
+ }
+ // If there's already a mapping for this named UUID use it
+ op.UUID = uuid
+ } else {
+ uuidMap[op.UUIDName] = op.UUID
+ }
+ op.UUIDName = ""
+ }
+ }
+
+ // Pass 2: replace named UUIDs in operation fields with the real UUID
+ for i := range ops {
+ op := &ops[i]
+ tableSchema := schema.Table(op.Table)
+ if tableSchema == nil {
+ return nil, fmt.Errorf("table %q not found in schema %q", op.Table, schema.Name)
+ }
+
+ for i, condition := range op.Where {
+ newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, condition.Column, condition.Value, uuidMap)
+ if err != nil {
+ return nil, err
+ }
+ op.Where[i].Value = newVal
+ }
+ for i, mutation := range op.Mutations {
+ newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, mutation.Column, mutation.Value, uuidMap)
+ if err != nil {
+ return nil, err
+ }
+ op.Mutations[i].Value = newVal
+ }
+ for _, row := range op.Rows {
+ for k, v := range row {
+ newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap)
+ if err != nil {
+ return nil, err
+ }
+ row[k] = newVal
+ }
+ }
+ for k, v := range op.Row {
+ newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap)
+ if err != nil {
+ return nil, err
+ }
+ op.Row[k] = newVal
+ }
+ }
+
+ return ops, nil
+}
+
+func expandColumnNamedUUIDs(tableSchema *TableSchema, tableName, columnName string, value interface{}, uuidMap map[string]string) (interface{}, error) {
+ column := tableSchema.Column(columnName)
+ if column == nil {
+ return nil, fmt.Errorf("column %q not found in table %q", columnName, tableName)
+ }
+ return expandNamedUUID(column, value, uuidMap), nil
+}
+
+func expandNamedUUID(column *ColumnSchema, value interface{}, namedUUIDs map[string]string) interface{} {
+ var keyType, valType ExtendedType
+
+ switch column.Type {
+ case TypeUUID:
+ keyType = column.Type
+ case TypeSet:
+ keyType = column.TypeObj.Key.Type
+ case TypeMap:
+ keyType = column.TypeObj.Key.Type
+ valType = column.TypeObj.Value.Type
+ }
+
+ if valType == TypeUUID {
+ if m, ok := value.(OvsMap); ok {
+ for k, v := range m.GoMap {
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, k, namedUUIDs); ok {
+ m.GoMap[newUUID] = m.GoMap[k]
+ delete(m.GoMap, k)
+ k = newUUID
+ }
+ if newUUID, ok := expandNamedUUIDAtomic(valType, v, namedUUIDs); ok {
+ m.GoMap[k] = newUUID
+ }
+ }
+ }
+ } else if keyType == TypeUUID {
+ if ovsSet, ok := value.(OvsSet); ok {
+ for i, s := range ovsSet.GoSet {
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok {
+ ovsSet.GoSet[i] = newUUID
+ }
+ }
+ return value
+ } else if strSet, ok := value.([]string); ok {
+ for i, s := range strSet {
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok {
+ strSet[i] = newUUID.(string)
+ }
+ }
+ return value
+ } else if uuidSet, ok := value.([]UUID); ok {
+ for i, s := range uuidSet {
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok {
+ uuidSet[i] = newUUID.(UUID)
+ }
+ }
+ return value
+ }
+
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, value, namedUUIDs); ok {
+ return newUUID
+ }
+ }
+
+ // No expansion required; return original value
+ return value
+}
+
+func expandNamedUUIDAtomic(valueType ExtendedType, value interface{}, namedUUIDs map[string]string) (interface{}, bool) {
+ if valueType == TypeUUID {
+ if uuid, ok := value.(UUID); ok {
+ if newUUID, ok := namedUUIDs[uuid.GoUUID]; ok {
+ return UUID{GoUUID: newUUID}, true
+ }
+ } else if uuid, ok := value.(string); ok {
+ if newUUID, ok := namedUUIDs[uuid]; ok {
+ return newUUID, true
+ }
+ }
+ }
+ return value, false
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go
new file mode 100644
index 000000000..afad87cdc
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go
@@ -0,0 +1,129 @@
+package ovsdb
+
+import (
+ "encoding/json"
+)
+
+const (
+ // OperationInsert is an insert operation
+ OperationInsert = "insert"
+ // OperationSelect is a select operation
+ OperationSelect = "select"
+ // OperationUpdate is an update operation
+ OperationUpdate = "update"
+ // OperationMutate is a mutate operation
+ OperationMutate = "mutate"
+ // OperationDelete is a delete operation
+ OperationDelete = "delete"
+ // OperationWait is a wait operation
+ OperationWait = "wait"
+ // OperationCommit is a commit operation
+ OperationCommit = "commit"
+ // OperationAbort is an abort operation
+ OperationAbort = "abort"
+ // OperationComment is a comment operation
+ OperationComment = "comment"
+ // OperationAssert is an assert operation
+ OperationAssert = "assert"
+)
+
+// Operation represents an operation according to RFC7047 section 5.2
+type Operation struct {
+ Op string `json:"op"`
+ Table string `json:"table,omitempty"`
+ Row Row `json:"row,omitempty"`
+ Rows []Row `json:"rows,omitempty"`
+ Columns []string `json:"columns,omitempty"`
+ Mutations []Mutation `json:"mutations,omitempty"`
+ Timeout *int `json:"timeout,omitempty"`
+ Where []Condition `json:"where,omitempty"`
+ Until string `json:"until,omitempty"`
+ Durable *bool `json:"durable,omitempty"`
+ Comment *string `json:"comment,omitempty"`
+ Lock *string `json:"lock,omitempty"`
+ UUID string `json:"uuid,omitempty"`
+ UUIDName string `json:"uuid-name,omitempty"`
+}
+
+// MarshalJSON marshals 'Operation' to a byte array
+// For 'select' operations, we don't omit the 'Where' field
+// to allow selecting all rows of a table
+func (o Operation) MarshalJSON() ([]byte, error) {
+ type OpAlias Operation
+ switch o.Op {
+ case "select":
+ where := o.Where
+ if where == nil {
+ where = make([]Condition, 0)
+ }
+ return json.Marshal(&struct {
+ Where []Condition `json:"where"`
+ OpAlias
+ }{
+ Where: where,
+ OpAlias: (OpAlias)(o),
+ })
+ default:
+ return json.Marshal(&struct {
+ OpAlias
+ }{
+ OpAlias: (OpAlias)(o),
+ })
+ }
+}
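+
+// Illustrative sketch (editorial note): a select with a nil Where still emits
+// an empty "where" array so that all rows of the table are selected:
+//
+//	b, _ := json.Marshal(Operation{Op: OperationSelect, Table: "Bridge"})
+//	// string(b) == `{"where":[],"op":"select","table":"Bridge"}`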
+
+// MonitorRequests represents a group of monitor requests according to RFC7047
+// We cannot use MonitorRequests by inlining the MonitorRequest Map structure till GoLang issue #6213 makes it.
+// The only option is to go with raw map[string]interface{} option :-( that sucks !
+// Refer to client.go : MonitorAll() function for more details
+type MonitorRequests struct {
+ Requests map[string]MonitorRequest `json:"requests"`
+}
+
+// MonitorRequest represents a monitor request according to RFC7047
+type MonitorRequest struct {
+ Columns []string `json:"columns,omitempty"`
+ Where []Condition `json:"where,omitempty"`
+ Select *MonitorSelect `json:"select,omitempty"`
+}
+
+// TransactResponse represents the response to a Transact Operation
+type TransactResponse struct {
+ Result []OperationResult `json:"result"`
+ Error string `json:"error"`
+}
+
+// OperationResult is the result of an Operation
+type OperationResult struct {
+ Count int `json:"count,omitempty"`
+ Error string `json:"error,omitempty"`
+ Details string `json:"details,omitempty"`
+ UUID UUID `json:"uuid,omitempty"`
+ Rows []Row `json:"rows,omitempty"`
+}
+
+func ovsSliceToGoNotation(val interface{}) (interface{}, error) {
+ switch sl := val.(type) {
+ case []interface{}:
+ bsliced, err := json.Marshal(sl)
+ if err != nil {
+ return nil, err
+ }
+ switch sl[0] {
+ case "uuid", "named-uuid":
+ var uuid UUID
+ err = json.Unmarshal(bsliced, &uuid)
+ return uuid, err
+ case "set":
+ var oSet OvsSet
+ err = json.Unmarshal(bsliced, &oSet)
+ return oSet, err
+ case "map":
+ var oMap OvsMap
+ err = json.Unmarshal(bsliced, &oMap)
+ return oMap, err
+ }
+ return val, nil
+ }
+ return val, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go
new file mode 100644
index 000000000..9a253f74f
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go
@@ -0,0 +1,26 @@
+package ovsdb
+
+import "encoding/json"
+
+// Row is a table Row according to RFC7047
+type Row map[string]interface{}
+
+// UnmarshalJSON unmarshals a byte array to an OVSDB Row
+func (r *Row) UnmarshalJSON(b []byte) (err error) {
+ *r = make(map[string]interface{})
+ var raw map[string]interface{}
+ err = json.Unmarshal(b, &raw)
+ for key, val := range raw {
+ val, err = ovsSliceToGoNotation(val)
+ if err != nil {
+ return err
+ }
+ (*r)[key] = val
+ }
+ return err
+}
+
+// NewRow returns a new empty row
+func NewRow() Row {
+ return Row(make(map[string]interface{}))
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go
new file mode 100644
index 000000000..f1e598005
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go
@@ -0,0 +1,79 @@
+package ovsdb
+
+const (
+ // MonitorRPC is the monitor RPC method
+ MonitorRPC = "monitor"
+	// ConditionalMonitorRPC is the monitor_cond RPC method
+ ConditionalMonitorRPC = "monitor_cond"
+ // ConditionalMonitorSinceRPC is the monitor_cond_since RPC method
+ ConditionalMonitorSinceRPC = "monitor_cond_since"
+)
+
+// NewEchoArgs creates a new set of arguments for an echo RPC
+func NewEchoArgs() []interface{} {
+ return []interface{}{"libovsdb echo"}
+}
+
+// NewGetSchemaArgs creates a new set of arguments for a get_schemas RPC
+func NewGetSchemaArgs(schema string) []interface{} {
+ return []interface{}{schema}
+}
+
+// NewTransactArgs creates a new set of arguments for a transact RPC
+func NewTransactArgs(database string, operations ...Operation) []interface{} {
+ dbSlice := make([]interface{}, 1)
+ dbSlice[0] = database
+
+ opsSlice := make([]interface{}, len(operations))
+ for i, d := range operations {
+ opsSlice[i] = d
+ }
+
+ ops := append(dbSlice, opsSlice...)
+ return ops
+}
+
+// NewCancelArgs creates a new set of arguments for a cancel RPC
+func NewCancelArgs(id interface{}) []interface{} {
+ return []interface{}{id}
+}
+
+// NewMonitorArgs creates a new set of arguments for a monitor RPC
+func NewMonitorArgs(database string, value interface{}, requests map[string]MonitorRequest) []interface{} {
+ return []interface{}{database, value, requests}
+}
+
+// NewMonitorCondSinceArgs creates a new set of arguments for a monitor_cond_since RPC
+func NewMonitorCondSinceArgs(database string, value interface{}, requests map[string]MonitorRequest, lastTransactionID string) []interface{} {
+ return []interface{}{database, value, requests, lastTransactionID}
+}
+
+// NewMonitorCancelArgs creates a new set of arguments for a monitor_cancel RPC
+func NewMonitorCancelArgs(value interface{}) []interface{} {
+ return []interface{}{value}
+}
+
+// NewLockArgs creates a new set of arguments for a lock, steal or unlock RPC
+func NewLockArgs(id interface{}) []interface{} {
+ return []interface{}{id}
+}
+
+// NotificationHandler is the interface that must be implemented to receive notifications
+type NotificationHandler interface {
+ // RFC 7047 section 4.1.6 Update Notification
+ Update(context interface{}, tableUpdates TableUpdates)
+
+ // ovsdb-server.7 update2 notifications
+ Update2(context interface{}, tableUpdates TableUpdates2)
+
+ // RFC 7047 section 4.1.9 Locked Notification
+ Locked([]interface{})
+
+ // RFC 7047 section 4.1.10 Stolen Notification
+ Stolen([]interface{})
+
+ // RFC 7047 section 4.1.11 Echo Notification
+ Echo([]interface{})
+
+ Disconnected()
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go
new file mode 100644
index 000000000..285d1e02a
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go
@@ -0,0 +1,641 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "strings"
+)
+
+// DatabaseSchema is a database schema according to RFC7047
+type DatabaseSchema struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Tables map[string]TableSchema `json:"tables"`
+ allTablesRoot *bool
+}
+
+// UUIDColumn is a static column that represents the _uuid column, common to all tables
+var UUIDColumn = ColumnSchema{
+ Type: TypeUUID,
+}
+
+// Table returns the TableSchema for a given table name
+func (schema DatabaseSchema) Table(tableName string) *TableSchema {
+ if table, ok := schema.Tables[tableName]; ok {
+ return &table
+ }
+ return nil
+}
+
+// IsRoot returns whether a table is part of the root set
+func (schema DatabaseSchema) IsRoot(tableName string) (bool, error) {
+ t := schema.Table(tableName)
+ if t == nil {
+		return false, fmt.Errorf("table %s not in schema", tableName)
+ }
+ if t.IsRoot {
+ return true, nil
+ }
+ // As per RFC7047, for compatibility with schemas created before
+ // "isRoot" was introduced, if "isRoot" is omitted or false in every
+	// <table-schema> in a given <database-schema>, then every table is part
+ // of the root set.
+ if schema.allTablesRoot == nil {
+ allTablesRoot := true
+ for _, tSchema := range schema.Tables {
+ if tSchema.IsRoot {
+ allTablesRoot = false
+ break
+ }
+ }
+ schema.allTablesRoot = &allTablesRoot
+ }
+ return *schema.allTablesRoot, nil
+}
+
+// Print will print the contents of the DatabaseSchema
+func (schema DatabaseSchema) Print(w io.Writer) {
+ fmt.Fprintf(w, "%s, (%s)\n", schema.Name, schema.Version)
+ for table, tableSchema := range schema.Tables {
+ fmt.Fprintf(w, "\t %s", table)
+ if len(tableSchema.Indexes) > 0 {
+ fmt.Fprintf(w, "(%v)\n", tableSchema.Indexes)
+ } else {
+ fmt.Fprintf(w, "\n")
+ }
+ for column, columnSchema := range tableSchema.Columns {
+ fmt.Fprintf(w, "\t\t %s => %s\n", column, columnSchema)
+ }
+ }
+}
+
+// SchemaFromFile returns a DatabaseSchema from a file
+func SchemaFromFile(f *os.File) (DatabaseSchema, error) {
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return DatabaseSchema{}, err
+ }
+ var schema DatabaseSchema
+ err = json.Unmarshal(data, &schema)
+ if err != nil {
+ return DatabaseSchema{}, err
+ }
+ return schema, nil
+}
+
+// ValidateOperations performs basic validation for operations against a DatabaseSchema
+func (schema DatabaseSchema) ValidateOperations(operations ...Operation) bool {
+ for _, op := range operations {
+ switch op.Op {
+ case OperationAbort, OperationAssert, OperationComment, OperationCommit, OperationWait:
+ continue
+ case OperationInsert, OperationSelect, OperationUpdate, OperationMutate, OperationDelete:
+ table, ok := schema.Tables[op.Table]
+ if ok {
+ for column := range op.Row {
+ if _, ok := table.Columns[column]; !ok {
+ if column != "_uuid" && column != "_version" {
+ return false
+ }
+ }
+ }
+ for _, row := range op.Rows {
+ for column := range row {
+ if _, ok := table.Columns[column]; !ok {
+ if column != "_uuid" && column != "_version" {
+ return false
+ }
+ }
+ }
+ }
+ for _, column := range op.Columns {
+ if _, ok := table.Columns[column]; !ok {
+ if column != "_uuid" && column != "_version" {
+ return false
+ }
+ }
+ }
+ } else {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// TableSchema is a table schema according to RFC7047
+type TableSchema struct {
+ Columns map[string]*ColumnSchema `json:"columns"`
+ Indexes [][]string `json:"indexes,omitempty"`
+ IsRoot bool `json:"isRoot,omitempty"`
+}
+
+// Column returns the Column object for a specific column name
+func (t TableSchema) Column(columnName string) *ColumnSchema {
+ if columnName == "_uuid" {
+ return &UUIDColumn
+ }
+ if column, ok := t.Columns[columnName]; ok {
+ return column
+ }
+ return nil
+}
+
+/* RFC7047 defines some atomic types (e.g. integer, string, etc.). However, the Column's type
+can also hold other more complex types such as set, enum and map. The way to determine the type
+depends on internal, not directly marshallable fields. Therefore, in order to simplify the usage
+of this library, we define an ExtendedType that includes all possible column types (including
+atomic fields).
+*/
+
+// ExtendedType includes atomic types as defined in the RFC plus Enum, Map and Set
+type ExtendedType = string
+
+// RefType is used to define the possible RefTypes
+type RefType = string
+
+// Unlimited is declared as a variable because we cannot take the address of an int constant
+var (
+ // Unlimited is used to express unlimited "Max"
+ Unlimited = -1
+)
+
+const (
+ unlimitedString = "unlimited"
+ //Strong RefType
+ Strong RefType = "strong"
+ //Weak RefType
+ Weak RefType = "weak"
+
+ //ExtendedType associated with Atomic Types
+
+ //TypeInteger is equivalent to 'int'
+ TypeInteger ExtendedType = "integer"
+ //TypeReal is equivalent to 'float64'
+ TypeReal ExtendedType = "real"
+ //TypeBoolean is equivalent to 'bool'
+ TypeBoolean ExtendedType = "boolean"
+ //TypeString is equivalent to 'string'
+ TypeString ExtendedType = "string"
+ //TypeUUID is equivalent to 'libovsdb.UUID'
+ TypeUUID ExtendedType = "uuid"
+
+ //Extended Types used to summarize the internal type of the field.
+
+ //TypeEnum is an enumerator of type defined by Key.Type
+ TypeEnum ExtendedType = "enum"
+ //TypeMap is a map whose type depend on Key.Type and Value.Type
+ TypeMap ExtendedType = "map"
+ //TypeSet is a set whose type depend on Key.Type
+ TypeSet ExtendedType = "set"
+)
+
+// BaseType is a base-type structure as per RFC7047
+type BaseType struct {
+ Type string
+ Enum []interface{}
+ minReal *float64
+ maxReal *float64
+ minInteger *int
+ maxInteger *int
+ minLength *int
+ maxLength *int
+ refTable *string
+ refType *RefType
+}
+
+func (b *BaseType) simpleAtomic() bool {
+ return isAtomicType(b.Type) && b.Enum == nil && b.minReal == nil && b.maxReal == nil && b.minInteger == nil && b.maxInteger == nil && b.minLength == nil && b.maxLength == nil && b.refTable == nil && b.refType == nil
+}
+
+// MinReal returns the minimum real value
+// RFC7047 does not define a default, but we assume this to be
+// the smallest non zero value a float64 could hold
+func (b *BaseType) MinReal() (float64, error) {
+ if b.Type != TypeReal {
+ return 0, fmt.Errorf("%s is not a real", b.Type)
+ }
+ if b.minReal != nil {
+ return *b.minReal, nil
+ }
+ return math.SmallestNonzeroFloat64, nil
+}
+
+// MaxReal returns the maximum real value
+// RFC7047 does not define a default, but this would be the maximum
+// value held by a float64
+func (b *BaseType) MaxReal() (float64, error) {
+ if b.Type != TypeReal {
+ return 0, fmt.Errorf("%s is not a real", b.Type)
+ }
+ if b.maxReal != nil {
+ return *b.maxReal, nil
+ }
+ return math.MaxFloat64, nil
+}
+
+// MinInteger returns the minimum integer value
+// RFC7047 specifies the minimum to be -2^63
+func (b *BaseType) MinInteger() (int, error) {
+ if b.Type != TypeInteger {
+ return 0, fmt.Errorf("%s is not an integer", b.Type)
+ }
+ if b.minInteger != nil {
+ return *b.minInteger, nil
+ }
+ return int(math.Pow(-2, 63)), nil
+}
+
+// MaxInteger returns the maximum integer value
+// RFC7047 specifies the maximum to be 2^63-1
+func (b *BaseType) MaxInteger() (int, error) {
+ if b.Type != TypeInteger {
+ return 0, fmt.Errorf("%s is not an integer", b.Type)
+ }
+ if b.maxInteger != nil {
+ return *b.maxInteger, nil
+ }
+ return int(math.Pow(2, 63)) - 1, nil
+}
+
+// MinLength returns the minimum string length
+// RFC7047 doesn't specify a default, but we assume
+// that it must be >= 0
+func (b *BaseType) MinLength() (int, error) {
+ if b.Type != TypeString {
+		return 0, fmt.Errorf("%s is not a string", b.Type)
+ }
+ if b.minLength != nil {
+ return *b.minLength, nil
+ }
+ return 0, nil
+}
+
+// MaxLength returns the maximum string length
+// RFC7047 doesn't specify a default, but we assume
+// that it must be 2^63-1
+func (b *BaseType) MaxLength() (int, error) {
+ if b.Type != TypeString {
+		return 0, fmt.Errorf("%s is not a string", b.Type)
+ }
+ if b.maxLength != nil {
+ return *b.maxLength, nil
+ }
+ return int(math.Pow(2, 63)) - 1, nil
+}
+
+// RefTable returns the table to which a UUID type refers
+// It will return an empty string if not set
+func (b *BaseType) RefTable() (string, error) {
+ if b.Type != TypeUUID {
+ return "", fmt.Errorf("%s is not a uuid", b.Type)
+ }
+ if b.refTable != nil {
+ return *b.refTable, nil
+ }
+ return "", nil
+}
+
+// RefType returns the reference type for a UUID field
+// RFC7047 infers the RefType is strong if omitted
+func (b *BaseType) RefType() (RefType, error) {
+ if b.Type != TypeUUID {
+ return "", fmt.Errorf("%s is not a uuid", b.Type)
+ }
+ if b.refType != nil {
+ return *b.refType, nil
+ }
+ return Strong, nil
+}
+
+// UnmarshalJSON unmarshals a json-formatted base type
+func (b *BaseType) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err == nil {
+ if isAtomicType(s) {
+ b.Type = s
+ } else {
+			return fmt.Errorf("non atomic type %s in <base-type>", s)
+ }
+ return nil
+ }
+ // temporary type to avoid recursive call to unmarshal
+ var bt struct {
+ Type string `json:"type"`
+ Enum interface{} `json:"enum,omitempty"`
+ MinReal *float64 `json:"minReal,omitempty"`
+ MaxReal *float64 `json:"maxReal,omitempty"`
+ MinInteger *int `json:"minInteger,omitempty"`
+ MaxInteger *int `json:"maxInteger,omitempty"`
+ MinLength *int `json:"minLength,omitempty"`
+ MaxLength *int `json:"maxLength,omitempty"`
+ RefTable *string `json:"refTable,omitempty"`
+ RefType *RefType `json:"refType,omitempty"`
+ }
+ err := json.Unmarshal(data, &bt)
+ if err != nil {
+ return err
+ }
+
+ if bt.Enum != nil {
+ // 'enum' is a list or a single element representing a list of exactly one element
+ switch bt.Enum.(type) {
+ case []interface{}:
+ // it's an OvsSet
+ oSet := bt.Enum.([]interface{})
+ innerSet := oSet[1].([]interface{})
+ b.Enum = make([]interface{}, len(innerSet))
+ copy(b.Enum, innerSet)
+ default:
+ b.Enum = []interface{}{bt.Enum}
+ }
+ }
+ b.Type = bt.Type
+ b.minReal = bt.MinReal
+ b.maxReal = bt.MaxReal
+ b.minInteger = bt.MinInteger
+ b.maxInteger = bt.MaxInteger
+	b.minLength = bt.MinLength
+ b.maxLength = bt.MaxLength
+ b.refTable = bt.RefTable
+ b.refType = bt.RefType
+ return nil
+}
+
+// MarshalJSON marshals a base type to JSON
+func (b BaseType) MarshalJSON() ([]byte, error) {
+ j := struct {
+ Type string `json:"type,omitempty"`
+ Enum *OvsSet `json:"enum,omitempty"`
+ MinReal *float64 `json:"minReal,omitempty"`
+ MaxReal *float64 `json:"maxReal,omitempty"`
+ MinInteger *int `json:"minInteger,omitempty"`
+ MaxInteger *int `json:"maxInteger,omitempty"`
+ MinLength *int `json:"minLength,omitempty"`
+ MaxLength *int `json:"maxLength,omitempty"`
+ RefTable *string `json:"refTable,omitempty"`
+ RefType *RefType `json:"refType,omitempty"`
+ }{
+ Type: b.Type,
+ MinReal: b.minReal,
+ MaxReal: b.maxReal,
+ MinInteger: b.minInteger,
+ MaxInteger: b.maxInteger,
+		MinLength:  b.minLength,
+ MaxLength: b.maxLength,
+ RefTable: b.refTable,
+ RefType: b.refType,
+ }
+ if len(b.Enum) > 0 {
+ set, err := NewOvsSet(b.Enum)
+ if err != nil {
+ return nil, err
+ }
+ j.Enum = &set
+ }
+ return json.Marshal(j)
+}
+
+// ColumnType is a type object as per RFC7047
+// "key": required
+// "value": optional
+// "min": optional (default: 1)
+// "max": or "unlimited" optional (default: 1)
+type ColumnType struct {
+ Key *BaseType
+ Value *BaseType
+ min *int
+ max *int
+}
+
+// Max returns the maximum value of a ColumnType. -1 is Unlimited
+func (c *ColumnType) Max() int {
+ if c.max == nil {
+ return 1
+ }
+ return *c.max
+}
+
+// Min returns the minimum value of a ColumnType
+func (c *ColumnType) Min() int {
+ if c.min == nil {
+ return 1
+ }
+ return *c.min
+}
+
+// UnmarshalJSON unmarshals a json-formatted column type
+func (c *ColumnType) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err == nil {
+ if isAtomicType(s) {
+ c.Key = &BaseType{Type: s}
+ } else {
+			return fmt.Errorf("non atomic type %s", s)
+ }
+ return nil
+ }
+ var colType struct {
+ Key *BaseType `json:"key"`
+ Value *BaseType `json:"value"`
+ Min *int `json:"min"`
+ Max interface{} `json:"max"`
+ }
+ err := json.Unmarshal(data, &colType)
+ if err != nil {
+ return err
+ }
+ c.Key = colType.Key
+ c.Value = colType.Value
+ c.min = colType.Min
+ switch v := colType.Max.(type) {
+ case string:
+ if v == unlimitedString {
+ c.max = &Unlimited
+ } else {
+ return fmt.Errorf("unexpected string value in max field")
+ }
+ case float64:
+ i := int(v)
+ c.max = &i
+ default:
+ c.max = nil
+ }
+ return nil
+}
+
+// MarshalJSON marshals a column type to JSON
+func (c ColumnType) MarshalJSON() ([]byte, error) {
+ if c.Value == nil && c.max == nil && c.min == nil && c.Key.simpleAtomic() {
+ return json.Marshal(c.Key.Type)
+ }
+ if c.Max() == Unlimited {
+ colType := struct {
+ Key *BaseType `json:"key"`
+ Value *BaseType `json:"value,omitempty"`
+ Min *int `json:"min,omitempty"`
+ Max string `json:"max,omitempty"`
+ }{
+ Key: c.Key,
+ Value: c.Value,
+ Min: c.min,
+ Max: unlimitedString,
+ }
+ return json.Marshal(&colType)
+ }
+ colType := struct {
+ Key *BaseType `json:"key"`
+ Value *BaseType `json:"value,omitempty"`
+ Min *int `json:"min,omitempty"`
+ Max *int `json:"max,omitempty"`
+ }{
+ Key: c.Key,
+ Value: c.Value,
+ Min: c.min,
+ Max: c.max,
+ }
+ return json.Marshal(&colType)
+}
+
+// ColumnSchema is a column schema according to RFC7047
+type ColumnSchema struct {
+	// According to RFC7047, the "type" field can be either an atomic type
+	// or a ColumnType defined below. To try to simplify the usage, the
+ // json message will be parsed manually and Type will indicate the "extended"
+ // type. Depending on its value, more information may be available in TypeObj.
+ // E.g: If Type == TypeEnum, TypeObj.Key.Enum contains the possible values
+ Type ExtendedType
+ TypeObj *ColumnType
+ ephemeral *bool
+ mutable *bool
+}
+
+// Mutable returns whether a column is mutable
+func (c *ColumnSchema) Mutable() bool {
+ if c.mutable != nil {
+ return *c.mutable
+ }
+ // default true
+ return true
+}
+
+// Ephemeral returns whether a column is ephemeral
+func (c *ColumnSchema) Ephemeral() bool {
+ if c.ephemeral != nil {
+ return *c.ephemeral
+ }
+ // default false
+ return false
+}
+
+// UnmarshalJSON unmarshals a json-formatted column
+func (c *ColumnSchema) UnmarshalJSON(data []byte) error {
+ // ColumnJSON represents the known json values for a Column
+ var colJSON struct {
+ Type *ColumnType `json:"type"`
+ Ephemeral *bool `json:"ephemeral,omitempty"`
+ Mutable *bool `json:"mutable,omitempty"`
+ }
+
+ // Unmarshal known keys
+ if err := json.Unmarshal(data, &colJSON); err != nil {
+ return fmt.Errorf("cannot parse column object %s", err)
+ }
+
+ c.ephemeral = colJSON.Ephemeral
+ c.mutable = colJSON.Mutable
+ c.TypeObj = colJSON.Type
+
+ // Infer the ExtendedType from the TypeObj
+ if c.TypeObj.Value != nil {
+ c.Type = TypeMap
+ } else if c.TypeObj.Min() != 1 || c.TypeObj.Max() != 1 {
+ c.Type = TypeSet
+ } else if len(c.TypeObj.Key.Enum) > 0 {
+ c.Type = TypeEnum
+ } else {
+ c.Type = c.TypeObj.Key.Type
+ }
+ return nil
+}
+
+// MarshalJSON marshals a column schema to JSON
+func (c ColumnSchema) MarshalJSON() ([]byte, error) {
+ type colJSON struct {
+ Type *ColumnType `json:"type"`
+ Ephemeral *bool `json:"ephemeral,omitempty"`
+ Mutable *bool `json:"mutable,omitempty"`
+ }
+ column := colJSON{
+ Type: c.TypeObj,
+ Ephemeral: c.ephemeral,
+ Mutable: c.mutable,
+ }
+ return json.Marshal(column)
+}
+
+// String returns a string representation of the (native) column type
+func (c *ColumnSchema) String() string {
+ var flags []string
+ var flagStr string
+ var typeStr string
+ if c.Ephemeral() {
+ flags = append(flags, "E")
+ }
+ if c.Mutable() {
+ flags = append(flags, "M")
+ }
+ if len(flags) > 0 {
+ flagStr = fmt.Sprintf("[%s]", strings.Join(flags, ","))
+ }
+
+ switch c.Type {
+ case TypeInteger, TypeReal, TypeBoolean, TypeString:
+ typeStr = string(c.Type)
+ case TypeUUID:
+ if c.TypeObj != nil && c.TypeObj.Key != nil {
+ // ignore err as we've already asserted this is a uuid
+ reftable, _ := c.TypeObj.Key.RefTable()
+ reftype := ""
+			if s, err := c.TypeObj.Key.RefType(); err == nil {
+ reftype = s
+ }
+ typeStr = fmt.Sprintf("uuid [%s (%s)]", reftable, reftype)
+ } else {
+ typeStr = "uuid"
+ }
+
+ case TypeEnum:
+ typeStr = fmt.Sprintf("enum (type: %s): %v", c.TypeObj.Key.Type, c.TypeObj.Key.Enum)
+ case TypeMap:
+ typeStr = fmt.Sprintf("[%s]%s", c.TypeObj.Key.Type, c.TypeObj.Value.Type)
+ case TypeSet:
+ var keyStr string
+ if c.TypeObj.Key.Type == TypeUUID {
+ // ignore err as we've already asserted this is a uuid
+ reftable, _ := c.TypeObj.Key.RefTable()
+ reftype, _ := c.TypeObj.Key.RefType()
+ keyStr = fmt.Sprintf(" [%s (%s)]", reftable, reftype)
+ } else {
+ keyStr = string(c.TypeObj.Key.Type)
+ }
+ typeStr = fmt.Sprintf("[]%s (min: %d, max: %d)", keyStr, c.TypeObj.Min(), c.TypeObj.Max())
+ default:
+ panic(fmt.Sprintf("Unsupported type %s", c.Type))
+ }
+
+ return strings.Join([]string{typeStr, flagStr}, " ")
+}
+
+func isAtomicType(atype string) bool {
+ switch atype {
+ case TypeInteger, TypeReal, TypeBoolean, TypeString, TypeUUID:
+ return true
+ default:
+ return false
+ }
+}
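+
+// Editor's sketch (not part of the upstream libovsdb source): how a column
+// definition is parsed and which "extended" type gets inferred. The JSON
+// literal is a made-up column; only ColumnSchema and the UnmarshalJSON methods
+// above are assumed.
+//
+//	var col ColumnSchema
+//	data := []byte(`{"type": {"key": {"type": "string"}, "min": 0, "max": "unlimited"}}`)
+//	if err := json.Unmarshal(data, &col); err == nil {
+//		_ = col.Type             // TypeSet: min/max allow zero or many values
+//		_ = col.TypeObj.Max()    // Unlimited (-1)
+//		_ = col.TypeObj.Key.Type // TypeString
+//	}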
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore
new file mode 100644
index 000000000..33f8bff56
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore
@@ -0,0 +1 @@
+*.ovsschema
\ No newline at end of file
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go
new file mode 100644
index 000000000..274a7164f
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go
@@ -0,0 +1,182 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package serverdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DatabaseTable = "Database"
+
+type (
+ DatabaseModel = string
+)
+
+var (
+ DatabaseModelStandalone DatabaseModel = "standalone"
+ DatabaseModelClustered DatabaseModel = "clustered"
+ DatabaseModelRelay DatabaseModel = "relay"
+)
+
+// Database defines an object in Database table
+type Database struct {
+ UUID string `ovsdb:"_uuid"`
+ Cid *string `ovsdb:"cid"`
+ Connected bool `ovsdb:"connected"`
+ Index *int `ovsdb:"index"`
+ Leader bool `ovsdb:"leader"`
+ Model DatabaseModel `ovsdb:"model"`
+ Name string `ovsdb:"name"`
+ Schema *string `ovsdb:"schema"`
+ Sid *string `ovsdb:"sid"`
+}
+
+func (a *Database) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Database) GetCid() *string {
+ return a.Cid
+}
+
+func copyDatabaseCid(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDatabaseCid(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Database) GetConnected() bool {
+ return a.Connected
+}
+
+func (a *Database) GetIndex() *int {
+ return a.Index
+}
+
+func copyDatabaseIndex(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDatabaseIndex(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Database) GetLeader() bool {
+ return a.Leader
+}
+
+func (a *Database) GetModel() DatabaseModel {
+ return a.Model
+}
+
+func (a *Database) GetName() string {
+ return a.Name
+}
+
+func (a *Database) GetSchema() *string {
+ return a.Schema
+}
+
+func copyDatabaseSchema(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDatabaseSchema(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Database) GetSid() *string {
+ return a.Sid
+}
+
+func copyDatabaseSid(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDatabaseSid(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Database) DeepCopyInto(b *Database) {
+ *b = *a
+ b.Cid = copyDatabaseCid(a.Cid)
+ b.Index = copyDatabaseIndex(a.Index)
+ b.Schema = copyDatabaseSchema(a.Schema)
+ b.Sid = copyDatabaseSid(a.Sid)
+}
+
+func (a *Database) DeepCopy() *Database {
+ b := new(Database)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Database) CloneModelInto(b model.Model) {
+ c := b.(*Database)
+ a.DeepCopyInto(c)
+}
+
+func (a *Database) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Database) Equals(b *Database) bool {
+ return a.UUID == b.UUID &&
+ equalDatabaseCid(a.Cid, b.Cid) &&
+ a.Connected == b.Connected &&
+ equalDatabaseIndex(a.Index, b.Index) &&
+ a.Leader == b.Leader &&
+ a.Model == b.Model &&
+ a.Name == b.Name &&
+ equalDatabaseSchema(a.Schema, b.Schema) &&
+ equalDatabaseSid(a.Sid, b.Sid)
+}
+
+func (a *Database) EqualsModel(b model.Model) bool {
+ c := b.(*Database)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Database{}
+var _ model.ComparableModel = &Database{}
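+
+// Editor's sketch (not part of the generated file): typical use of the
+// generated helpers; the field values are made up.
+//
+//	orig := &Database{UUID: "u1", Name: "OVN_Northbound", Model: DatabaseModelClustered}
+//	clone := orig.DeepCopy()
+//	_ = orig.Equals(clone) // true: every field, including pointer fields, matches
+//	clone.Connected = true
+//	_ = orig.Equals(clone) // false once the copy diverges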
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go
new file mode 100644
index 000000000..5923af60a
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go
@@ -0,0 +1,6 @@
+package serverdb
+
+// server_model is a database model for the special _Server database that all
+// ovsdb instances export. It reports back status of the server process itself.
+
+//go:generate ../../bin/modelgen --extended -p serverdb -o . _server.ovsschema
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go
new file mode 100644
index 000000000..3c117faa2
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go
@@ -0,0 +1,99 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package serverdb
+
+import (
+ "encoding/json"
+
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb
+func FullDatabaseModel() (model.ClientDBModel, error) {
+ return model.NewClientDBModel("_Server", map[string]model.Model{
+ "Database": &Database{},
+ })
+}
+
+var schema = `{
+ "name": "_Server",
+ "version": "1.2.0",
+ "tables": {
+ "Database": {
+ "columns": {
+ "cid": {
+ "type": {
+ "key": {
+ "type": "uuid"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "connected": {
+ "type": "boolean"
+ },
+ "index": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "leader": {
+ "type": "boolean"
+ },
+ "model": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "standalone",
+ "clustered",
+ "relay"
+ ]
+ ]
+ }
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "schema": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "sid": {
+ "type": {
+ "key": {
+ "type": "uuid"
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "isRoot": true
+ }
+ }
+}`
+
+func Schema() ovsdb.DatabaseSchema {
+ var s ovsdb.DatabaseSchema
+ err := json.Unmarshal([]byte(schema), &s)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
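+
+// Editor's sketch (not part of the generated file): obtaining the client
+// database model and the parsed copy of the embedded _Server schema; error
+// handling is elided.
+//
+//	dbModel, _ := FullDatabaseModel() // maps the "Database" table to &Database{}
+//	s := Schema()                     // "_Server" schema, version 1.2.0 (see JSON above)
+//	_, _ = dbModel, s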
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go
new file mode 100644
index 000000000..ae1ec59ae
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go
@@ -0,0 +1,109 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// OvsSet is an OVSDB style set
+// RFC 7047 has a weird (but understandable) notation for a set, described as:
+// either an atom, representing a set with exactly one element, or
+// a 2-element JSON array that represents a database set value. The
+// first element of the array must be the string "set", and the
+// second element must be an array of zero or more atoms giving the
+// values in the set. All of the atoms must have the same type.
+type OvsSet struct {
+ GoSet []interface{}
+}
+
+// NewOvsSet creates a new OVSDB style set from a Go interface (object)
+func NewOvsSet(obj interface{}) (OvsSet, error) {
+ ovsSet := make([]interface{}, 0)
+ var v reflect.Value
+ if reflect.TypeOf(obj).Kind() == reflect.Ptr {
+ v = reflect.ValueOf(obj).Elem()
+ if v.Kind() == reflect.Invalid {
+ // must be a nil pointer, so just return an empty set
+ return OvsSet{ovsSet}, nil
+ }
+ } else {
+ v = reflect.ValueOf(obj)
+ }
+
+ switch v.Kind() {
+ case reflect.Slice:
+ for i := 0; i < v.Len(); i++ {
+ ovsSet = append(ovsSet, v.Index(i).Interface())
+ }
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64, reflect.Bool:
+ ovsSet = append(ovsSet, v.Interface())
+ case reflect.Struct:
+ if v.Type() == reflect.TypeOf(UUID{}) {
+ ovsSet = append(ovsSet, v.Interface())
+ } else {
+ return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types")
+ }
+ default:
+ return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types")
+ }
+ return OvsSet{ovsSet}, nil
+}
+
+// MarshalJSON will marshal an OVSDB style Set into a JSON byte array
+func (o OvsSet) MarshalJSON() ([]byte, error) {
+ switch l := len(o.GoSet); {
+ case l == 1:
+ return json.Marshal(o.GoSet[0])
+ case l > 0:
+ var oSet []interface{}
+ oSet = append(oSet, "set")
+ oSet = append(oSet, o.GoSet)
+ return json.Marshal(oSet)
+ }
+ return []byte("[\"set\",[]]"), nil
+}
+
+// UnmarshalJSON will unmarshal a JSON byte array to an OVSDB style Set
+func (o *OvsSet) UnmarshalJSON(b []byte) (err error) {
+ o.GoSet = make([]interface{}, 0)
+ addToSet := func(o *OvsSet, v interface{}) error {
+ goVal, err := ovsSliceToGoNotation(v)
+ if err == nil {
+ o.GoSet = append(o.GoSet, goVal)
+ }
+ return err
+ }
+
+ var inter interface{}
+ if err = json.Unmarshal(b, &inter); err != nil {
+ return err
+ }
+ switch inter.(type) {
+ case []interface{}:
+ var oSet []interface{}
+ oSet = inter.([]interface{})
+ // it's a single uuid object
+ if len(oSet) == 2 && (oSet[0] == "uuid" || oSet[0] == "named-uuid") {
+ return addToSet(o, UUID{GoUUID: oSet[1].(string)})
+ }
+ if oSet[0] != "set" {
+ // it is a slice, but is not a set
+ return &json.UnmarshalTypeError{Value: reflect.ValueOf(inter).String(), Type: reflect.TypeOf(*o)}
+ }
+ innerSet := oSet[1].([]interface{})
+ for _, val := range innerSet {
+ err := addToSet(o, val)
+ if err != nil {
+ return err
+ }
+ }
+ return err
+ default:
+ // it is a single object
+ return addToSet(o, inter)
+ }
+}
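+
+// Editor's sketch (not part of the upstream source): the wire format produced
+// by MarshalJSON for multi-element and single-element sets, matching the
+// RFC 7047 notation described above.
+//
+//	many, _ := NewOvsSet([]string{"a", "b"})
+//	b, _ := json.Marshal(many) // ["set",["a","b"]]
+//	one, _ := NewOvsSet("a")
+//	b, _ = json.Marshal(one)   // "a" (a one-element set marshals as the bare value)
+//	_ = b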
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go
new file mode 100644
index 000000000..a24ce64ad
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go
@@ -0,0 +1,51 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type MonitorCondSinceReply struct {
+ Found bool
+ LastTransactionID string
+ Updates TableUpdates2
+}
+
+func (m MonitorCondSinceReply) MarshalJSON() ([]byte, error) {
+ v := []interface{}{m.Found, m.LastTransactionID, m.Updates}
+ return json.Marshal(v)
+}
+
+func (m *MonitorCondSinceReply) UnmarshalJSON(b []byte) error {
+ var v []json.RawMessage
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+ if len(v) != 3 {
+ return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v))
+ }
+
+ var found bool
+ err = json.Unmarshal(v[0], &found)
+ if err != nil {
+ return err
+ }
+
+ var lastTransactionID string
+ err = json.Unmarshal(v[1], &lastTransactionID)
+ if err != nil {
+ return err
+ }
+
+ var updates TableUpdates2
+ err = json.Unmarshal(v[2], &updates)
+ if err != nil {
+ return err
+ }
+
+ m.Found = found
+ m.LastTransactionID = lastTransactionID
+ m.Updates = updates
+ return nil
+}
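+
+// Editor's sketch (not part of the upstream source): a monitor_cond_since reply
+// arrives as a 3-element JSON array, which the custom UnmarshalJSON above splits
+// into the struct fields. The transaction id below is a made-up placeholder.
+//
+//	var reply MonitorCondSinceReply
+//	raw := []byte(`[true, "example-last-txn-id", {}]`)
+//	if err := json.Unmarshal(raw, &reply); err == nil {
+//		_ = reply.Found             // true
+//		_ = reply.LastTransactionID // "example-last-txn-id"
+//	}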
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go
new file mode 100644
index 000000000..5a47d0c44
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go
@@ -0,0 +1,35 @@
+package ovsdb
+
+// TableUpdates is an object that maps from a table name to a
+// TableUpdate
+type TableUpdates map[string]TableUpdate
+
+// TableUpdate is an object that maps from the row's UUID to a
+// RowUpdate
+type TableUpdate map[string]*RowUpdate
+
+// RowUpdate represents a row update according to RFC7047
+type RowUpdate struct {
+ New *Row `json:"new,omitempty"`
+ Old *Row `json:"old,omitempty"`
+}
+
+// Insert returns true if this is an update for an insert operation
+func (r RowUpdate) Insert() bool {
+ return r.New != nil && r.Old == nil
+}
+
+// Modify returns true if this is an update for a modify operation
+func (r RowUpdate) Modify() bool {
+ return r.New != nil && r.Old != nil
+}
+
+// Delete returns true if this is an update for a delete operation
+func (r RowUpdate) Delete() bool {
+ return r.New == nil && r.Old != nil
+}
+
+func (r *RowUpdate) FromRowUpdate2(ru2 RowUpdate2) {
+ r.Old = ru2.Old
+ r.New = ru2.New
+}
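+
+// Editor's sketch (not part of the upstream source): the three predicates above
+// classify a RowUpdate by which of Old and New are set.
+//
+//	ins := RowUpdate{New: &Row{}}              // ins.Insert() == true
+//	mod := RowUpdate{Old: &Row{}, New: &Row{}} // mod.Modify() == true
+//	del := RowUpdate{Old: &Row{}}              // del.Delete() == true
+//	_, _, _ = ins, mod, del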
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go
new file mode 100644
index 000000000..a040894c9
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go
@@ -0,0 +1,19 @@
+package ovsdb
+
+// TableUpdates2 is an object that maps from a table name to a
+// TableUpdate2
+type TableUpdates2 map[string]TableUpdate2
+
+// TableUpdate2 is an object that maps from the row's UUID to a
+// RowUpdate2
+type TableUpdate2 map[string]*RowUpdate2
+
+// RowUpdate2 represents a row update according to ovsdb-server.7
+type RowUpdate2 struct {
+ Initial *Row `json:"initial,omitempty"`
+ Insert *Row `json:"insert,omitempty"`
+ Modify *Row `json:"modify,omitempty"`
+ Delete *Row `json:"delete,omitempty"`
+ Old *Row `json:"-"`
+ New *Row `json:"-"`
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go
new file mode 100644
index 000000000..6bc463653
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go
@@ -0,0 +1,59 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+)
+
+var validUUID = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`)
+
+// UUID is a UUID according to RFC7047
+type UUID struct {
+ GoUUID string `json:"uuid"`
+}
+
+// MarshalJSON will marshal an OVSDB style UUID to a JSON encoded byte array
+func (u UUID) MarshalJSON() ([]byte, error) {
+ var uuidSlice []string
+ err := ValidateUUID(u.GoUUID)
+ if err == nil {
+ uuidSlice = []string{"uuid", u.GoUUID}
+ } else {
+ uuidSlice = []string{"named-uuid", u.GoUUID}
+ }
+
+ return json.Marshal(uuidSlice)
+}
+
+// UnmarshalJSON will unmarshal a JSON encoded byte array to an OVSDB style UUID
+func (u *UUID) UnmarshalJSON(b []byte) (err error) {
+ var ovsUUID []string
+	if err = json.Unmarshal(b, &ovsUUID); err == nil {
+ u.GoUUID = ovsUUID[1]
+ }
+ return err
+}
+
+func ValidateUUID(uuid string) error {
+ if len(uuid) != 36 {
+		return fmt.Errorf("uuid is not 36 characters long")
+ }
+
+ if !validUUID.MatchString(uuid) {
+ return fmt.Errorf("uuid does not match regexp")
+ }
+
+ return nil
+}
+
+func IsNamedUUID(uuid string) bool {
+ return len(uuid) > 0 && !validUUID.MatchString(uuid)
+}
+
+func IsValidUUID(uuid string) bool {
+ if err := ValidateUUID(uuid); err != nil {
+ return false
+ }
+ return true
+}
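+
+// Editor's sketch (not part of the upstream source): well-formed UUIDs marshal
+// as ["uuid", ...] while any other string is treated as a named-uuid
+// placeholder. The values are made up.
+//
+//	u1 := UUID{GoUUID: "2f77b348-9768-4866-b761-89d5177ecda1"}
+//	b, _ := json.Marshal(u1) // ["uuid","2f77b348-9768-4866-b761-89d5177ecda1"]
+//	u2 := UUID{GoUUID: "myrow"}
+//	b, _ = json.Marshal(u2)  // ["named-uuid","myrow"]
+//	_ = IsNamedUUID("myrow") // true
+//	_ = b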
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/difference.go b/vendor/github.com/ovn-org/libovsdb/updates/difference.go
new file mode 100644
index 000000000..7ebfe8bb5
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/difference.go
@@ -0,0 +1,209 @@
+package updates
+
+import "reflect"
+
+// difference calculates the difference between value 'a' and value 'b'.
+// This difference is calculated as described in
+// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+// The result is calculated in 'a' in-place and returned unless the
+// difference is 'b' in which case 'b' is returned unmodified. Also returns a
+// boolean indicating if there is an actual difference.
+func difference(a, b interface{}) (interface{}, bool) {
+ return mergeDifference(nil, a, b)
+}
+
+// applyDifference returns the result of applying difference 'd' to value 'v'
+// along with a boolean indicating if 'v' was changed.
+func applyDifference(v, d interface{}) (interface{}, bool) {
+ if d == nil {
+ return v, false
+ }
+ // difference can be applied with the same algorithm used to calculate it
+ // f(x,f(x,y))=y
+ result, changed := difference(v, d)
+ dv := reflect.ValueOf(d)
+ switch dv.Kind() {
+ case reflect.Slice:
+ fallthrough
+ case reflect.Map:
+ // but we need to tweak the interpretation of change for map and slices:
+ // when there is no difference between the value and non-empty delta, it
+ // actually means the value needs to be emptied so there is actually a
+ // change
+ if !changed && dv.Len() > 0 {
+ return result, true
+ }
+ // there are no changes when delta is empty
+ return result, changed && dv.Len() > 0
+ }
+ return result, changed
+}
+
+// mergeDifference, given an original value 'o' and two differences 'a' and 'b',
+// returns a new equivalent difference that, when applied to 'o', has the same
+// result as applying 'a' and 'b' consecutively.
+// If 'o' is nil, returns the difference between 'a' and 'b'.
+// This difference is calculated as described in
+// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+// The result is calculated in 'a' in-place and returned unless the result is
+// 'b' in which case 'b' is returned unmodified. Also returns a boolean
+// indicating if there is an actual difference.
+func mergeDifference(o, a, b interface{}) (interface{}, bool) {
+ kind := reflect.ValueOf(b).Kind()
+ if kind == reflect.Invalid {
+ kind = reflect.ValueOf(a).Kind()
+ }
+ switch kind {
+ case reflect.Invalid:
+ return nil, false
+ case reflect.Slice:
+ // set differences are transitive
+ return setDifference(a, b)
+ case reflect.Map:
+ return mergeMapDifference(o, a, b)
+ case reflect.Array:
+ panic("Not implemented")
+ default:
+ return mergeAtomicDifference(o, a, b)
+ }
+}
+
+// setDifference calculates the difference between set 'a' and set 'b'.
+// This difference is calculated as described in
+// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+// The result is calculated in 'a' in-place and returned unless the difference
+// is 'b' in which case 'b' is returned unmodified. Also returns a boolean
+// indicating if there is an actual difference.
+func setDifference(a, b interface{}) (interface{}, bool) {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ if !av.IsValid() && !bv.IsValid() {
+ return nil, false
+ } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() {
+ return b, bv.Len() != 0
+ } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() {
+ return a, av.Len() != 0
+ }
+
+ // From https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+	// The difference between two sets is all elements that belong to only one
+	// of the sets.
+ difference := make(map[interface{}]struct{}, bv.Len())
+ for i := 0; i < bv.Len(); i++ {
+		// supposedly we are working with comparable atomic types with no
+		// pointers so we can use the values as map keys
+ difference[bv.Index(i).Interface()] = struct{}{}
+ }
+ j := av.Len()
+ for i := 0; i < j; {
+ vv := av.Index(i)
+ vi := vv.Interface()
+ if _, ok := difference[vi]; ok {
+ // this value of 'a' is in 'b', so remove it from 'a'; to do that,
+ // overwrite it with the last value and re-evaluate
+ vv.Set(av.Index(j - 1))
+ // decrease where the last 'a' value is at
+ j--
+ // remove from 'b' values
+ delete(difference, vi)
+ } else {
+ // this value of 'a' is not in 'b', evaluate the next value
+ i++
+ }
+ }
+ // trim the slice to the actual values held
+ av = av.Slice(0, j)
+ for item := range difference {
+ // this value of 'b' is not in 'a', so add it
+ av = reflect.Append(av, reflect.ValueOf(item))
+ }
+
+ if av.Len() == 0 {
+ return reflect.Zero(av.Type()).Interface(), false
+ }
+
+ return av.Interface(), true
+}
+
+// mergeMapDifference, given an original map 'o' and two differences 'a' and
+// 'b', returns a new equivalent difference that, when applied to 'o', has the
+// same result as applying 'a' and 'b' consecutively.
+// If 'o' is nil, returns the difference between 'a' and 'b'.
+// This difference is calculated as described in
+// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+// The result is calculated in 'a' in-place and returned unless the result is
+// 'b' in which case 'b' is returned unmodified.
+// Returns a boolean indicating if there is an actual difference.
+func mergeMapDifference(o, a, b interface{}) (interface{}, bool) {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ if !av.IsValid() && !bv.IsValid() {
+ return nil, false
+ } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() {
+ return b, bv.Len() != 0
+ } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() {
+ return a, av.Len() != 0
+ }
+
+ ov := reflect.ValueOf(o)
+ if !ov.IsValid() {
+ ov = reflect.Zero(av.Type())
+ }
+
+ // From
+ // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+	// The difference between two maps is all key-value pairs whose keys
+	// appear in only one of the maps, plus the key-value pairs whose keys
+	// appear in both maps but with different values. For the latter elements,
+	// the difference includes the value from the new column.
+
+ // We can assume that difference is a transitive operation so we calculate
+ // the difference between 'a' and 'b' but we need to handle exceptions when
+ // the same key is present in all values.
+ for i := bv.MapRange(); i.Next(); {
+ kv := i.Key()
+ bvv := i.Value()
+ avv := av.MapIndex(kv)
+ ovv := ov.MapIndex(kv)
+		// supposedly we are working with comparable types with no pointers so
+ // we can compare directly here
+ switch {
+ case ovv.IsValid() && avv.IsValid() && ovv.Interface() == bvv.Interface():
+ // key is present in the three values
+ // final result would restore key to the original value, delete from 'a'
+ av.SetMapIndex(kv, reflect.Value{})
+ case ovv.IsValid() && avv.IsValid() && avv.Interface() == bvv.Interface():
+ // key is present in the three values
+ // final result would remove key, set in 'a' with 'o' value
+ av.SetMapIndex(kv, ovv)
+ case avv.IsValid() && avv.Interface() == bvv.Interface():
+ // key/value is in 'a' and 'b', delete from 'a'
+ av.SetMapIndex(kv, reflect.Value{})
+ default:
+ // key/value in 'b' is not in 'a', set in 'a' with 'b' value
+ av.SetMapIndex(kv, bvv)
+ }
+ }
+
+ if av.Len() == 0 {
+ return reflect.Zero(av.Type()).Interface(), false
+ }
+
+ return av.Interface(), true
+}
+
+// mergeAtomicDifference, given an original atomic value 'o' and two differences
+// 'a' and 'b', returns a new equivalent difference that, when applied to 'o',
+// has the same result as applying 'a' and 'b' consecutively.
+// If 'o' is nil, returns the difference between 'a' and 'b'.
+// This difference is calculated as described in
+// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+// Returns a boolean indicating if there is an actual difference.
+func mergeAtomicDifference(o, a, b interface{}) (interface{}, bool) {
+ if o != nil {
+ return b, !reflect.DeepEqual(o, b)
+ }
+ return b, !reflect.DeepEqual(a, b)
+}
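+
+// Editor's sketch (not part of the upstream source): these helpers are
+// unexported, so the example only compiles inside this package; the slices are
+// made-up values.
+//
+//	d, changed := difference([]string{"a", "b"}, []string{"b", "c"})
+//	// d == []string{"a", "c"}, changed == true: the result keeps the elements
+//	// that appear in exactly one of the two sets.
+//	v, changed := applyDifference([]string{"a", "b"}, d)
+//	// v == []string{"b", "c"}, changed == true: applying the difference again
+//	// recovers the other operand, i.e. f(x, f(x, y)) == y.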
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/doc.go b/vendor/github.com/ovn-org/libovsdb/updates/doc.go
new file mode 100644
index 000000000..3e6fe18a0
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/doc.go
@@ -0,0 +1,15 @@
+/*
+Package updates provides a utility to perform and aggregate model updates.
+
+As input, it supports OVSDB Operations, RowUpdate or RowUpdate2 notations via
+the corresponding Add methods.
+
+As output, it supports both OVSDB RowUpdate2 as well as model notation via the
+corresponding ForEach iterative methods.
+
+Several updates can be added and will be merged with any previous updates even
+if they are for the same model. If several updates for the same model are
+aggregated, the user is responsible for ensuring that the model provided for the
+update matches the updated model of the previous update.
+*/
+package updates
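+
+// Editor's sketch (not part of the upstream source): it strings together the
+// Add/ForEach calls used elsewhere in this vendor drop (AddOperation,
+// GetUpdatedTables, ForEachRowUpdate); dbModel, table, uuid, m and op are
+// placeholders that the caller must provide.
+//
+//	var updates ModelUpdates
+//	if err := updates.AddOperation(dbModel, table, uuid, m, op); err != nil {
+//		return err
+//	}
+//	for _, t := range updates.GetUpdatedTables() {
+//		_ = updates.ForEachRowUpdate(t, func(uuid string, row ovsdb.RowUpdate2) error {
+//			// inspect or forward each aggregated row update here
+//			return nil
+//		})
+//	}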
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/merge.go b/vendor/github.com/ovn-org/libovsdb/updates/merge.go
new file mode 100644
index 000000000..562f22623
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/merge.go
@@ -0,0 +1,160 @@
+package updates
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+func merge(ts *ovsdb.TableSchema, a, b modelUpdate) (modelUpdate, error) {
+ // handle model update
+ switch {
+ case b.old == nil && b.new == nil:
+ // noop
+ case a.old == nil && a.new == nil:
+ // first op
+ a.old = b.old
+ a.new = b.new
+ case a.new != nil && b.old != nil && b.new != nil:
+ // update after an insert or an update
+ a.new = b.new
+ case b.old != nil && b.new == nil:
+ // a final delete
+ a.new = nil
+ default:
+ return modelUpdate{}, fmt.Errorf("sequence of updates not supported")
+ }
+
+ // handle row update
+ ru2, err := mergeRowUpdate(ts, a.rowUpdate2, b.rowUpdate2)
+ if err != nil {
+ return modelUpdate{}, err
+ }
+ if ru2 == nil {
+ return modelUpdate{}, nil
+ }
+ a.rowUpdate2 = ru2
+
+ return a, nil
+}
+
+func mergeRowUpdate(ts *ovsdb.TableSchema, a, b *rowUpdate2) (*rowUpdate2, error) {
+ switch {
+ case b == nil:
+ // noop
+ case a == nil:
+ // first op
+ a = b
+ case a.Insert != nil && b.Modify != nil:
+ // update after an insert
+ a.New = b.New
+ a.Insert = b.New
+ case a.Modify != nil && b.Modify != nil:
+ // update after update
+ a.New = b.New
+ a.Modify = mergeModifyRow(ts, a.Old, a.Modify, b.Modify)
+ if a.Modify == nil {
+ // we merged two modifications that brought back the row to its
+ // original value which is a no op
+ a = nil
+ }
+ case a.Insert != nil && b.Delete != nil:
+ // delete after insert
+ a = nil
+ case b.Delete != nil:
+ // a final delete
+ a.Initial = nil
+ a.Insert = nil
+ a.Modify = nil
+ a.New = nil
+ a.Delete = b.Delete
+ default:
+ return &rowUpdate2{}, fmt.Errorf("sequence of updates not supported")
+ }
+ return a, nil
+}
+
+// mergeModifyRow merges two modification rows 'a' and 'b' with respect to an
+// original row 'o'. Two modifications that restore the original value cancel
+// each other and won't be included in the result. Returns nil if there are no
+// resulting modifications.
+func mergeModifyRow(ts *ovsdb.TableSchema, o, a, b *ovsdb.Row) *ovsdb.Row {
+ original := *o
+ aMod := *a
+ bMod := *b
+ for k, v := range bMod {
+ if _, ok := aMod[k]; !ok {
+ aMod[k] = v
+ continue
+ }
+
+ var result interface{}
+ var changed bool
+
+ // handle maps or sets first
+ switch v.(type) {
+ // difference only supports set or map values that are comparable with
+ // no pointers. This should be currently fine because the set or map
+ // values should only be non pointer atomic types or the UUID struct.
+ case ovsdb.OvsSet:
+ aSet := aMod[k].(ovsdb.OvsSet)
+ bSet := v.(ovsdb.OvsSet)
+ // handle sets of multiple values, single value sets are handled as
+ // atomic values
+ if ts.Column(k).TypeObj.Max() != 1 {
+				// set difference is a fully transitive operation so we don't
+ // need to do anything special to merge two differences
+ result, changed = setDifference(aSet.GoSet, bSet.GoSet)
+ result = ovsdb.OvsSet{GoSet: result.([]interface{})}
+ }
+ case ovsdb.OvsMap:
+ aMap := aMod[k].(ovsdb.OvsMap)
+ bMap := v.(ovsdb.OvsMap)
+ var originalMap ovsdb.OvsMap
+ if v, ok := original[k]; ok {
+ originalMap = v.(ovsdb.OvsMap)
+ }
+ // map difference is not transitive with respect to the original
+ // value so we have to take the original value into account when
+ // merging
+ result, changed = mergeMapDifference(originalMap.GoMap, aMap.GoMap, bMap.GoMap)
+ result = ovsdb.OvsMap{GoMap: result.(map[interface{}]interface{})}
+ }
+
+ // was neither a map nor a set
+ if result == nil {
+ // atomic difference is not transitive with respect to the original
+ // value so we have to take the original value into account when
+ // merging
+ o := original[k]
+ if o == nil {
+ // assume zero value if original does not have the column
+ o = reflect.Zero(reflect.TypeOf(v)).Interface()
+ }
+ if set, ok := o.(ovsdb.OvsSet); ok {
+ // atomic optional values are cleared out with an empty set
+ // if the original value was also cleared out, use an empty set
+ // instead of a nil set so that mergeAtomicDifference notices
+ // that we are returning to the original value
+ if set.GoSet == nil {
+ set.GoSet = []interface{}{}
+ }
+ o = set
+ }
+ result, changed = mergeAtomicDifference(o, aMod[k], v)
+ }
+
+ if !changed {
+ delete(aMod, k)
+ continue
+ }
+ aMod[k] = result
+ }
+
+ if len(aMod) == 0 {
+ return nil
+ }
+
+ return a
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/mutate.go b/vendor/github.com/ovn-org/libovsdb/updates/mutate.go
new file mode 100644
index 000000000..1d87737fc
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/mutate.go
@@ -0,0 +1,297 @@
+package updates
+
+import (
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+func removeFromSlice(a, b reflect.Value) (reflect.Value, bool) {
+ for i := 0; i < a.Len(); i++ {
+ if a.Index(i).Interface() == b.Interface() {
+ v := reflect.AppendSlice(a.Slice(0, i), a.Slice(i+1, a.Len()))
+ return v, true
+ }
+ }
+ return a, false
+}
+
+func insertToSlice(a, b reflect.Value) (reflect.Value, bool) {
+ for i := 0; i < a.Len(); i++ {
+ if a.Index(i).Interface() == b.Interface() {
+ return a, false
+ }
+ }
+ return reflect.Append(a, b), true
+}
+
+func mutate(current interface{}, mutator ovsdb.Mutator, value interface{}) (interface{}, interface{}) {
+ switch current.(type) {
+ case bool, string:
+ return current, value
+ }
+ switch mutator {
+ case ovsdb.MutateOperationInsert:
+ // for insert, the delta will be the new value added
+ return mutateInsert(current, value)
+ case ovsdb.MutateOperationDelete:
+ return mutateDelete(current, value)
+ case ovsdb.MutateOperationAdd:
+ // for add, the delta is the new value
+ new := mutateAdd(current, value)
+ return new, new
+ case ovsdb.MutateOperationSubtract:
+ // for subtract, the delta is the new value
+ new := mutateSubtract(current, value)
+ return new, new
+ case ovsdb.MutateOperationMultiply:
+ new := mutateMultiply(current, value)
+ return new, new
+ case ovsdb.MutateOperationDivide:
+ new := mutateDivide(current, value)
+ return new, new
+ case ovsdb.MutateOperationModulo:
+ new := mutateModulo(current, value)
+ return new, new
+ }
+ return current, value
+}
+
+func mutateInsert(current, value interface{}) (interface{}, interface{}) {
+ switch current.(type) {
+ case int, float64:
+ return current, current
+ }
+ vc := reflect.ValueOf(current)
+ vv := reflect.ValueOf(value)
+ if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) {
+ v, ok := insertToSlice(vc, vv)
+ var diff interface{}
+ if ok {
+ diff = value
+ }
+ return v.Interface(), diff
+ }
+ if !vc.IsValid() {
+ if vv.IsValid() {
+ return vv.Interface(), vv.Interface()
+ }
+ return nil, nil
+ }
+ if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice {
+ v := vc
+ diff := reflect.Indirect(reflect.New(vv.Type()))
+ for i := 0; i < vv.Len(); i++ {
+ var ok bool
+ v, ok = insertToSlice(v, vv.Index(i))
+ if ok {
+ diff = reflect.Append(diff, vv.Index(i))
+ }
+ }
+ if diff.Len() > 0 {
+ return v.Interface(), diff.Interface()
+ }
+ return v.Interface(), nil
+ }
+ if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map {
+ if vc.IsNil() && vv.Len() > 0 {
+ return value, value
+ }
+ diff := reflect.MakeMap(vc.Type())
+ iter := vv.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ if !vc.MapIndex(k).IsValid() {
+ vc.SetMapIndex(k, iter.Value())
+ diff.SetMapIndex(k, iter.Value())
+ }
+ }
+ if diff.Len() > 0 {
+ return current, diff.Interface()
+ }
+ return current, nil
+ }
+ return current, nil
+}
+
+func mutateDelete(current, value interface{}) (interface{}, interface{}) {
+ switch current.(type) {
+ case int, float64:
+ return current, nil
+ }
+ vc := reflect.ValueOf(current)
+ vv := reflect.ValueOf(value)
+ if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) {
+ v, ok := removeFromSlice(vc, vv)
+ diff := value
+ if !ok {
+ diff = nil
+ }
+ return v.Interface(), diff
+ }
+ if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice {
+ v := vc
+ diff := reflect.Indirect(reflect.New(vv.Type()))
+ for i := 0; i < vv.Len(); i++ {
+ var ok bool
+ v, ok = removeFromSlice(v, vv.Index(i))
+ if ok {
+ diff = reflect.Append(diff, vv.Index(i))
+ }
+ }
+ if diff.Len() > 0 {
+ return v.Interface(), diff.Interface()
+ }
+ return v.Interface(), nil
+ }
+ if vc.Kind() == reflect.Map && vv.Type() == reflect.SliceOf(vc.Type().Key()) {
+ diff := reflect.MakeMap(vc.Type())
+ for i := 0; i < vv.Len(); i++ {
+ if vc.MapIndex(vv.Index(i)).IsValid() {
+ diff.SetMapIndex(vv.Index(i), vc.MapIndex(vv.Index(i)))
+ vc.SetMapIndex(vv.Index(i), reflect.Value{})
+ }
+ }
+ if diff.Len() > 0 {
+ return current, diff.Interface()
+ }
+ return current, nil
+ }
+ if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map {
+ diff := reflect.MakeMap(vc.Type())
+ iter := vv.MapRange()
+ for iter.Next() {
+ vvk := iter.Key()
+ vvv := iter.Value()
+ vcv := vc.MapIndex(vvk)
+ if vcv.IsValid() && reflect.DeepEqual(vcv.Interface(), vvv.Interface()) {
+ diff.SetMapIndex(vvk, vcv)
+ vc.SetMapIndex(vvk, reflect.Value{})
+ }
+ }
+ if diff.Len() > 0 {
+ return current, diff.Interface()
+ }
+ return current, nil
+ }
+ return current, nil
+}
+
+func mutateAdd(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i + v
+ }
+ if i, ok := current.(float64); ok {
+ v := value.(float64)
+ return i + v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j + v
+ }
+ return is
+ }
+ if is, ok := current.([]float64); ok {
+ v := value.(float64)
+ for i, j := range is {
+ is[i] = j + v
+ }
+ return is
+ }
+ return current
+}
+
+func mutateSubtract(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i - v
+ }
+ if i, ok := current.(float64); ok {
+ v := value.(float64)
+ return i - v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j - v
+ }
+ return is
+ }
+ if is, ok := current.([]float64); ok {
+ v := value.(float64)
+ for i, j := range is {
+ is[i] = j - v
+ }
+ return is
+ }
+ return current
+}
+
+func mutateMultiply(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i * v
+ }
+ if i, ok := current.(float64); ok {
+ v := value.(float64)
+ return i * v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j * v
+ }
+ return is
+ }
+ if is, ok := current.([]float64); ok {
+ v := value.(float64)
+ for i, j := range is {
+ is[i] = j * v
+ }
+ return is
+ }
+ return current
+}
+
+func mutateDivide(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i / v
+ }
+ if i, ok := current.(float64); ok {
+ v := value.(float64)
+ return i / v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j / v
+ }
+ return is
+ }
+ if is, ok := current.([]float64); ok {
+ v := value.(float64)
+ for i, j := range is {
+ is[i] = j / v
+ }
+ return is
+ }
+ return current
+}
+
+func mutateModulo(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i % v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j % v
+ }
+ return is
+ }
+ return current
+}
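+
+// Editor's sketch (not part of the upstream source): mutate is unexported, so
+// this only compiles inside the package. It returns the mutated value together
+// with the delta that the mutation actually produced; the values are made up.
+//
+//	v, delta := mutate([]int{1, 2}, ovsdb.MutateOperationInsert, 3)
+//	// v == []int{1, 2, 3}, delta == 3: the element was absent, so it is appended
+//	// and reported as the change.
+//	v, delta = mutate(map[string]string{"a": "1"}, ovsdb.MutateOperationDelete, []string{"a"})
+//	// v == map[string]string{}, delta == map[string]string{"a": "1"}: deleting by
+//	// key removes the entry and reports the removed pair.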
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/references.go b/vendor/github.com/ovn-org/libovsdb/updates/references.go
new file mode 100644
index 000000000..938d02aae
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/references.go
@@ -0,0 +1,797 @@
+package updates
+
+import (
+ "fmt"
+
+ "github.com/ovn-org/libovsdb/database"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// ReferenceProvider should be implemented by a database that tracks references
+type ReferenceProvider interface {
+ // GetReferences provides the references to the provided row
+ GetReferences(database, table, uuid string) (database.References, error)
+ // Get provides the corresponding model
+ Get(database, table string, uuid string) (model.Model, error)
+}
+
+// DatabaseUpdate bundles updates together with the updated
+// reference information
+type DatabaseUpdate struct {
+ ModelUpdates
+ referenceUpdates database.References
+}
+
+func (u DatabaseUpdate) ForReferenceUpdates(do func(references database.References) error) error {
+ refsCopy := database.References{}
+ // since refsCopy is empty, this will just copy everything
+ applyReferenceModifications(refsCopy, u.referenceUpdates)
+ return do(refsCopy)
+}
+
+func NewDatabaseUpdate(updates ModelUpdates, references database.References) DatabaseUpdate {
+ return DatabaseUpdate{
+ ModelUpdates: updates,
+ referenceUpdates: references,
+ }
+}
+
+// ProcessReferences tracks referential integrity for the provided set of
+// updates. It returns an updated set of updates which includes additional
+// updates and updated references as a result of the reference garbage
+// collection described in RFC7047. These additional updates resulting from the
+// reference garbage collection are also returned separately. Any constraint or
+// referential integrity violation is returned as an error.
+func ProcessReferences(dbModel model.DatabaseModel, provider ReferenceProvider, updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) {
+ referenceTracker := newReferenceTracker(dbModel, provider)
+ return referenceTracker.processReferences(updates)
+}
+
+type referenceTracker struct {
+ dbModel model.DatabaseModel
+ provider ReferenceProvider
+
+ // updates that are being processed
+ updates ModelUpdates
+
+ // references are the updated references by the set of updates processed
+ references database.References
+
+ // helper maps to track the rows that we are processing and their tables
+ tracked map[string]string
+ added map[string]string
+ deleted map[string]string
+}
+
+func newReferenceTracker(dbModel model.DatabaseModel, provider ReferenceProvider) *referenceTracker {
+ return &referenceTracker{
+ dbModel: dbModel,
+ provider: provider,
+ }
+}
+
+func (rt *referenceTracker) processReferences(updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) {
+ rt.updates = updates
+ rt.tracked = make(map[string]string)
+ rt.added = make(map[string]string)
+ rt.deleted = make(map[string]string)
+ rt.references = make(database.References)
+
+ referenceUpdates, err := rt.processReferencesLoop(updates)
+ if err != nil {
+ return ModelUpdates{}, ModelUpdates{}, nil, err
+ }
+
+ // merge the updates generated from reference tracking into the main updates
+ err = updates.Merge(rt.dbModel, referenceUpdates)
+ if err != nil {
+ return ModelUpdates{}, ModelUpdates{}, nil, err
+ }
+
+ return updates, referenceUpdates, rt.references, nil
+}
+
+func (rt *referenceTracker) processReferencesLoop(updates ModelUpdates) (ModelUpdates, error) {
+ referenceUpdates := ModelUpdates{}
+
+ // references can be transitive and deleting them can lead to further
+ // references having to be removed so loop until there are no updates to be
+ // made
+ for len(updates.updates) > 0 {
+ // update the references from the updates
+ err := rt.processModelUpdates(updates)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // process strong reference integrity
+ updates, err = rt.processStrongReferences()
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // process weak reference integrity
+ weakUpdates, err := rt.processWeakReferences()
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // merge strong and weak reference updates
+ err = updates.Merge(rt.dbModel, weakUpdates)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // merge updates from this iteration to the overall reference updates
+ err = referenceUpdates.Merge(rt.dbModel, updates)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ return referenceUpdates, nil
+}
+
+// processModelUpdates keeps track of the updated references by a set of updates
+func (rt *referenceTracker) processModelUpdates(updates ModelUpdates) error {
+ tables := updates.GetUpdatedTables()
+ for _, table := range tables {
+ err := updates.ForEachRowUpdate(table, func(uuid string, row ovsdb.RowUpdate2) error {
+ return rt.processRowUpdate(table, uuid, &row)
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// processRowUpdate keeps track of the updated references by a given row update
+func (rt *referenceTracker) processRowUpdate(table, uuid string, row *ovsdb.RowUpdate2) error {
+
+ // getReferencesFromRowModify extracts updated references from the
+ // modifications. Following the same strategy as the modify field of Update2
+ // notification, it will extract a difference, that is, both old removed
+ // references and new added references are extracted. This difference will
+ // then be applied to currently tracked references to come up with the
+ // updated references.
+
+ // For more info on the modify field of Update2 notification and the
+ // strategy used to apply differences, check
+ // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+
+ var updateRefs database.References
+ switch {
+ case row.Delete != nil:
+ rt.deleted[uuid] = table
+ updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Old, row.Old)
+ case row.Modify != nil:
+ updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Modify, row.Old)
+ case row.Insert != nil:
+ if !isRoot(&rt.dbModel, table) {
+ // track rows added that are not part of the root set, we might need
+ // to delete those later
+ rt.added[uuid] = table
+ rt.tracked[uuid] = table
+ }
+ updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Insert, nil)
+ }
+
+ // (lazy) initialize existing references to the same rows from the database
+ for spec, refs := range updateRefs {
+ for to := range refs {
+ err := rt.initReferences(spec.ToTable, to)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // apply the reference modifications to the initialized references
+ applyReferenceModifications(rt.references, updateRefs)
+
+ return nil
+}
+
+// processStrongReferences adds delete operations for rows that are not part of
+// the root set and are no longer strongly referenced. Returns a referential
+// integrity violation if a nonexistent row is strongly referenced or a strongly
+// referenced row has been deleted.
+func (rt *referenceTracker) processStrongReferences() (ModelUpdates, error) {
+ // make sure that we are tracking the references to the deleted rows
+ err := rt.initReferencesOfDeletedRows()
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // track if rows are referenced or not
+ isReferenced := map[string]bool{}
+
+ // go over the updated references
+ for spec, refs := range rt.references {
+
+ // we only care about strong references
+ if !isStrong(&rt.dbModel, spec) {
+ continue
+ }
+
+ for to, from := range refs {
+ // check if the referenced row exists
+ exists, err := rt.rowExists(spec.ToTable, to)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ if !exists {
+ for _, uuid := range from {
+ // strong reference to a row that does not exist
+ return ModelUpdates{}, ovsdb.NewReferentialIntegrityViolation(fmt.Sprintf(
+ "Table %s column %s row %s references nonexistent or deleted row %s in table %s",
+ spec.FromTable, spec.FromColumn, uuid, to, spec.ToTable))
+ }
+ // we deleted the row ourselves on a previous loop
+ continue
+ }
+
+ // track if this row is referenced from this location spec
+ isReferenced[to] = isReferenced[to] || len(from) > 0
+ }
+ }
+
+ // inserted rows that are unreferenced and not part of the root set will
+ // silently be dropped from the updates
+ for uuid := range rt.added {
+ if isReferenced[uuid] {
+ continue
+ }
+ isReferenced[uuid] = false
+ }
+
+ // delete rows that are not referenced
+ updates := ModelUpdates{}
+ for uuid, isReferenced := range isReferenced {
+ if isReferenced {
+ // row is still referenced, ignore
+ continue
+ }
+
+ if rt.deleted[uuid] != "" {
+ // already deleted, ignore
+ continue
+ }
+
+ table := rt.tracked[uuid]
+ if isRoot(&rt.dbModel, table) {
+ // table is part of the root set, ignore
+ continue
+ }
+
+ // delete row that is not part of the root set and is no longer
+ // referenced
+ update, err := rt.deleteRow(table, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ err = updates.Merge(rt.dbModel, update)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ return updates, nil
+}
+
+// processWeakReferences deletes weak references to rows that were deleted.
+// Returns a constraint violation if this results in invalid values
+func (rt *referenceTracker) processWeakReferences() (ModelUpdates, error) {
+ // make sure that we are tracking the references to rows that might have
+ // been deleted as a result of strong reference garbage collection
+ err := rt.initReferencesOfDeletedRows()
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ tables := map[string]string{}
+ originalRows := map[string]ovsdb.Row{}
+ updatedRows := map[string]ovsdb.Row{}
+
+ for spec, refs := range rt.references {
+ // fetch some reference information from the schema
+ extendedType, minLenAllowed, refType, _ := refInfo(&rt.dbModel, spec.FromTable, spec.FromColumn, spec.FromValue)
+ isEmptyAllowed := minLenAllowed == 0
+
+ if refType != ovsdb.Weak {
+ // we only care about weak references
+ continue
+ }
+
+ for to, from := range refs {
+ if len(from) == 0 {
+ // not referenced from anywhere, ignore
+ continue
+ }
+
+ // check if the referenced row exists
+ exists, err := rt.rowExists(spec.ToTable, to)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ if exists {
+ // we only care about rows that have been deleted or otherwise
+ // don't exist
+ continue
+ }
+
+ // generate the updates to remove the references to deleted rows
+ for _, uuid := range from {
+ if _, ok := updatedRows[uuid]; !ok {
+ updatedRows[uuid] = ovsdb.NewRow()
+ }
+
+ if rt.deleted[uuid] != "" {
+ // already deleted, ignore
+ continue
+ }
+
+ // fetch the original rows
+ if originalRows[uuid] == nil {
+ originalRow, err := rt.getRow(spec.FromTable, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ if originalRow == nil {
+ return ModelUpdates{}, fmt.Errorf("reference from non-existent model with uuid %s", uuid)
+ }
+ originalRows[uuid] = *originalRow
+ }
+
+ var becomesLen int
+ switch extendedType {
+ case ovsdb.TypeMap:
+ // a map referencing the row
+					// generate the mutation to remove the entry from the map
+ originalMap := originalRows[uuid][spec.FromColumn].(ovsdb.OvsMap).GoMap
+ var mutationMap map[interface{}]interface{}
+ value, ok := updatedRows[uuid][spec.FromColumn]
+ if !ok {
+ mutationMap = map[interface{}]interface{}{}
+ } else {
+ mutationMap = value.(ovsdb.OvsMap).GoMap
+ }
+ // copy the map entries referencing the row from the original map
+ mutationMap = copyMapKeyValues(originalMap, mutationMap, !spec.FromValue, ovsdb.UUID{GoUUID: to})
+
+ // track the new length of the map
+ if !isEmptyAllowed {
+ becomesLen = len(originalMap) - len(mutationMap)
+ }
+
+ updatedRows[uuid][spec.FromColumn] = ovsdb.OvsMap{GoMap: mutationMap}
+
+ case ovsdb.TypeSet:
+ // a set referencing the row
+					// generate the mutation to remove the entry from the set
+ var mutationSet []interface{}
+ value, ok := updatedRows[uuid][spec.FromColumn]
+ if !ok {
+ mutationSet = []interface{}{}
+ } else {
+ mutationSet = value.(ovsdb.OvsSet).GoSet
+ }
+ mutationSet = append(mutationSet, ovsdb.UUID{GoUUID: to})
+
+ // track the new length of the set
+ if !isEmptyAllowed {
+ originalSet := originalRows[uuid][spec.FromColumn].(ovsdb.OvsSet).GoSet
+ becomesLen = len(originalSet) - len(mutationSet)
+ }
+
+ updatedRows[uuid][spec.FromColumn] = ovsdb.OvsSet{GoSet: mutationSet}
+
+ case ovsdb.TypeUUID:
+ // this is an atomic UUID value that needs to be cleared
+ updatedRows[uuid][spec.FromColumn] = nil
+ becomesLen = 0
+ }
+
+ if becomesLen < minLenAllowed {
+ return ModelUpdates{}, ovsdb.NewConstraintViolation(fmt.Sprintf(
+ "Deletion of a weak reference to a deleted (or never-existing) row from column %s in table %s "+
+ "row %s caused this column to have an invalid length.",
+ spec.FromColumn, spec.FromTable, uuid))
+ }
+
+ // track the table of the row we are going to update
+ tables[uuid] = spec.FromTable
+ }
+ }
+ }
+
+ // process the updates
+ updates := ModelUpdates{}
+ for uuid, rowUpdate := range updatedRows {
+ update, err := rt.updateRow(tables[uuid], uuid, rowUpdate)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ err = updates.Merge(rt.dbModel, update)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ return updates, nil
+}
+
+func copyMapKeyValues(from, to map[interface{}]interface{}, isKey bool, keyValue ovsdb.UUID) map[interface{}]interface{} {
+ if isKey {
+ to[keyValue] = from[keyValue]
+ return to
+ }
+ for key, value := range from {
+ if value.(ovsdb.UUID) == keyValue {
+ to[key] = from[key]
+ }
+ }
+ return to
+}
+
+// initReferences initializes the references to the provided row from the
+// database
+func (rt *referenceTracker) initReferences(table, uuid string) error {
+ if _, ok := rt.tracked[uuid]; ok {
+ // already initialized
+ return nil
+ }
+ existingRefs, err := rt.provider.GetReferences(rt.dbModel.Client().Name(), table, uuid)
+ if err != nil {
+ return err
+ }
+ rt.references.UpdateReferences(existingRefs)
+ rt.tracked[uuid] = table
+ return nil
+}
+
+func (rt *referenceTracker) initReferencesOfDeletedRows() error {
+ for uuid, table := range rt.deleted {
+ err := rt.initReferences(table, uuid)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// deleteRow adds an update to delete the provided row.
+func (rt *referenceTracker) deleteRow(table, uuid string) (ModelUpdates, error) {
+ model, err := rt.getModel(table, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ row, err := rt.getRow(table, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ updates := ModelUpdates{}
+ update := ovsdb.RowUpdate2{Delete: &ovsdb.Row{}, Old: row}
+ err = updates.AddRowUpdate2(rt.dbModel, table, uuid, model, update)
+
+ rt.deleted[uuid] = table
+
+ return updates, err
+}
+
+// updateRow generates updates for the provided row
+func (rt *referenceTracker) updateRow(table, uuid string, row ovsdb.Row) (ModelUpdates, error) {
+ model, err := rt.getModel(table, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // In agreement with processWeakReferences, columns with values are assumed
+ // to be values of sets or maps that need to be mutated for deletion.
+ // Columns with no values are assumed to be atomic optional values that need
+ // to be cleared with an update.
+
+ mutations := make([]ovsdb.Mutation, 0, len(row))
+ update := ovsdb.Row{}
+ for column, value := range row {
+ if value != nil {
+ mutations = append(mutations, *ovsdb.NewMutation(column, ovsdb.MutateOperationDelete, value))
+ continue
+ }
+ update[column] = ovsdb.OvsSet{GoSet: []interface{}{}}
+ }
+
+ updates := ModelUpdates{}
+
+ if len(mutations) > 0 {
+ err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{
+ Op: ovsdb.OperationMutate,
+ Table: table,
+ Mutations: mutations,
+ Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})},
+ })
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ if len(update) > 0 {
+ err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{
+ Op: ovsdb.OperationUpdate,
+ Table: table,
+ Row: update,
+ Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})},
+ })
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ return updates, nil
+}
+
+// getModel gets the model from the updates or the database
+func (rt *referenceTracker) getModel(table, uuid string) (model.Model, error) {
+ if _, deleted := rt.deleted[uuid]; deleted {
+ // model has been deleted
+ return nil, nil
+ }
+ // look for the model in the updates
+ model := rt.updates.GetModel(table, uuid)
+ if model != nil {
+ return model, nil
+ }
+ // look for the model in the database
+ model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid)
+ if err != nil {
+ return nil, err
+ }
+ return model, nil
+}
+
+// getRow gets the row from the updates or the database
+func (rt *referenceTracker) getRow(table, uuid string) (*ovsdb.Row, error) {
+ if _, deleted := rt.deleted[uuid]; deleted {
+ // row has been deleted
+ return nil, nil
+ }
+ // look for the row in the updates
+ row := rt.updates.GetRow(table, uuid)
+ if row != nil {
+ return row, nil
+ }
+ // look for the model in the database and build the row
+ model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid)
+ if err != nil {
+ return nil, err
+ }
+ info, err := rt.dbModel.NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+ newRow, err := rt.dbModel.Mapper.NewRow(info)
+ if err != nil {
+ return nil, err
+ }
+ return &newRow, nil
+}
+
+// rowExists returns whether the row exists either in the updates or the database
+func (rt *referenceTracker) rowExists(table, uuid string) (bool, error) {
+ model, err := rt.getModel(table, uuid)
+ return model != nil, err
+}
+
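+// getReferenceModificationsFromRow collects, column by column, the reference
+// changes implied by a modify row, using the old row where needed to resolve
+// cleared atomic values.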
+func getReferenceModificationsFromRow(dbModel *model.DatabaseModel, table, uuid string, modify, old *ovsdb.Row) database.References {
+ refs := database.References{}
+ for column, value := range *modify {
+ var oldValue interface{}
+ if old != nil {
+ oldValue = (*old)[column]
+ }
+ crefs := getReferenceModificationsFromColumn(dbModel, table, uuid, column, value, oldValue)
+ refs.UpdateReferences(crefs)
+ }
+ return refs
+}
+
+func getReferenceModificationsFromColumn(dbModel *model.DatabaseModel, table, uuid, column string, modify, old interface{}) database.References {
+ switch v := modify.(type) {
+ case ovsdb.UUID:
+ var oldUUID ovsdb.UUID
+ if old != nil {
+ oldUUID = old.(ovsdb.UUID)
+ }
+ return getReferenceModificationsFromAtom(dbModel, table, uuid, column, v, oldUUID)
+ case ovsdb.OvsSet:
+ var oldSet ovsdb.OvsSet
+ if old != nil {
+ oldSet = old.(ovsdb.OvsSet)
+ }
+ return getReferenceModificationsFromSet(dbModel, table, uuid, column, v, oldSet)
+ case ovsdb.OvsMap:
+ return getReferenceModificationsFromMap(dbModel, table, uuid, column, v)
+ }
+ return nil
+}
+
+func getReferenceModificationsFromMap(dbModel *model.DatabaseModel, table, uuid, column string, value ovsdb.OvsMap) database.References {
+ if len(value.GoMap) == 0 {
+ return nil
+ }
+
+ // get the referenced table
+ keyRefTable := refTable(dbModel, table, column, false)
+ valueRefTable := refTable(dbModel, table, column, true)
+ if keyRefTable == "" && valueRefTable == "" {
+ return nil
+ }
+
+ from := uuid
+ keySpec := database.ReferenceSpec{ToTable: keyRefTable, FromTable: table, FromColumn: column, FromValue: false}
+ valueSpec := database.ReferenceSpec{ToTable: valueRefTable, FromTable: table, FromColumn: column, FromValue: true}
+
+ refs := database.References{}
+ for k, v := range value.GoMap {
+ if keyRefTable != "" {
+ switch to := k.(type) {
+ case ovsdb.UUID:
+ if _, ok := refs[keySpec]; !ok {
+ refs[keySpec] = database.Reference{to.GoUUID: []string{from}}
+ } else if _, ok := refs[keySpec][to.GoUUID]; !ok {
+ refs[keySpec][to.GoUUID] = append(refs[keySpec][to.GoUUID], from)
+ }
+ }
+ }
+ if valueRefTable != "" {
+ switch to := v.(type) {
+ case ovsdb.UUID:
+ if _, ok := refs[valueSpec]; !ok {
+ refs[valueSpec] = database.Reference{to.GoUUID: []string{from}}
+ } else if _, ok := refs[valueSpec][to.GoUUID]; !ok {
+ refs[valueSpec][to.GoUUID] = append(refs[valueSpec][to.GoUUID], from)
+ }
+ }
+ }
+ }
+
+ return refs
+}
+
+func getReferenceModificationsFromSet(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.OvsSet) database.References {
+ // if the modify set is empty, it means the op is clearing an atomic value
+ // so pick the old value instead
+ value := modify
+ if len(modify.GoSet) == 0 {
+ value = old
+ }
+
+ if len(value.GoSet) == 0 {
+ return nil
+ }
+
+ // get the referenced table
+ refTable := refTable(dbModel, table, column, false)
+ if refTable == "" {
+ return nil
+ }
+
+ spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column}
+ from := uuid
+ refs := database.References{spec: database.Reference{}}
+ for _, v := range value.GoSet {
+ switch to := v.(type) {
+ case ovsdb.UUID:
+ refs[spec][to.GoUUID] = append(refs[spec][to.GoUUID], from)
+ }
+ }
+ return refs
+}
+
+func getReferenceModificationsFromAtom(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.UUID) database.References {
+ // get the referenced table
+ refTable := refTable(dbModel, table, column, false)
+ if refTable == "" {
+ return nil
+ }
+ spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column}
+ from := uuid
+ to := modify.GoUUID
+ refs := database.References{spec: {to: {from}}}
+ if old.GoUUID != "" {
+ // extract the old value as well
+ refs[spec][old.GoUUID] = []string{from}
+ }
+ return refs
+}
+
+// applyReferenceModifications updates references in 'a' from those in 'b'
+func applyReferenceModifications(a, b database.References) {
+ for spec, bv := range b {
+ for to, bfrom := range bv {
+ if av, ok := a[spec]; ok {
+ if afrom, ok := av[to]; ok {
+ r, _ := applyDifference(afrom, bfrom)
+ av[to] = r.([]string)
+ } else {
+ // this reference is not in 'a', so add it
+ av[to] = bfrom
+ }
+ } else {
+ // this reference is not in 'a', so add it
+ a[spec] = database.Reference{to: bfrom}
+ }
+ }
+ }
+}
+
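+// refInfo returns, for the given column (or its map value when mapValue is
+// true), the extended type, the minimum number of elements, the reference type
+// and the referenced table. Empty return values mean the column is not a
+// reference.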
+func refInfo(dbModel *model.DatabaseModel, table, column string, mapValue bool) (ovsdb.ExtendedType, int, ovsdb.RefType, string) {
+ tSchema := dbModel.Schema.Table(table)
+ if tSchema == nil {
+ panic(fmt.Sprintf("unexpected schema error: no schema for table %s", table))
+ }
+
+ cSchema := tSchema.Column(column)
+ if cSchema == nil {
+ panic(fmt.Sprintf("unexpected schema error: no schema for column %s", column))
+ }
+
+ cType := cSchema.TypeObj
+ if cType == nil {
+ // this is not a reference
+ return "", 0, "", ""
+ }
+
+ var bType *ovsdb.BaseType
+ switch {
+ case !mapValue && cType.Key != nil:
+ bType = cType.Key
+ case mapValue && cType.Value != nil:
+ bType = cType.Value
+ default:
+ panic(fmt.Sprintf("unexpected schema error: no schema for map value on column %s", column))
+ }
+ if bType.Type != ovsdb.TypeUUID {
+ // this is not a reference
+ return "", 0, "", ""
+ }
+
+ // treat optional values represented with sets as atomic UUIDs
+ extendedType := cSchema.Type
+ if extendedType == ovsdb.TypeSet && cType.Min() == 0 && cType.Max() == 1 {
+ extendedType = ovsdb.TypeUUID
+ }
+
+ rType, err := bType.RefType()
+ if err != nil {
+ panic(fmt.Sprintf("unexpected schema error: %v", err))
+ }
+
+ rTable, err := bType.RefTable()
+ if err != nil {
+ panic(fmt.Sprintf("unexpected schema error: %v", err))
+ }
+
+ return extendedType, cType.Min(), rType, rTable
+}
+
+func refTable(dbModel *model.DatabaseModel, table, column string, mapValue bool) ovsdb.RefType {
+ _, _, _, refTable := refInfo(dbModel, table, column, mapValue)
+ return refTable
+}
+
+func isRoot(dbModel *model.DatabaseModel, table string) bool {
+ isRoot, err := dbModel.Schema.IsRoot(table)
+ if err != nil {
+ panic(fmt.Sprintf("unexpected schema error: %v", err))
+ }
+ return isRoot
+}
+
+func isStrong(dbModel *model.DatabaseModel, spec database.ReferenceSpec) bool {
+ _, _, refType, _ := refInfo(dbModel, spec.FromTable, spec.FromColumn, spec.FromValue)
+ return refType == ovsdb.Strong
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/updates.go b/vendor/github.com/ovn-org/libovsdb/updates/updates.go
new file mode 100644
index 000000000..4ff2363a0
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/updates.go
@@ -0,0 +1,528 @@
+package updates
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+type rowUpdate2 = ovsdb.RowUpdate2
+
+// modelUpdate contains an update in model and OVSDB RowUpdate2 notation
+type modelUpdate struct {
+ rowUpdate2 *rowUpdate2
+ old model.Model
+ new model.Model
+}
+
+// isEmpty returns whether this update is empty
+func (mu modelUpdate) isEmpty() bool {
+ return mu == modelUpdate{}
+}
+
+// ModelUpdates contains updates indexed by table and uuid
+type ModelUpdates struct {
+ updates map[string]map[string]modelUpdate
+}
+
+// GetUpdatedTables returns the tables that have updates
+func (u ModelUpdates) GetUpdatedTables() []string {
+ tables := make([]string, 0, len(u.updates))
+ for table, updates := range u.updates {
+ if len(updates) > 0 {
+ tables = append(tables, table)
+ }
+ }
+ return tables
+}
+
+// ForEachModelUpdate processes each row update of a given table in model
+// notation
+func (u ModelUpdates) ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error {
+ models := u.updates[table]
+ for uuid, model := range models {
+ err := do(uuid, model.old, model.new)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
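+
+// An illustrative sketch, not part of the upstream API: assuming dbModel, uuid,
+// current and ru2 are in scope, a consumer can aggregate updates and then walk
+// them per table:
+//
+//	updates := ModelUpdates{}
+//	_ = updates.AddRowUpdate2(dbModel, "Bridge", uuid, current, ru2)
+//	for _, table := range updates.GetUpdatedTables() {
+//		_ = updates.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error {
+//			// old == nil is an insert, new == nil a delete, both set a modification
+//			return nil
+//		})
+//	}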
+
+// ForEachRowUpdate processes each row update of a given table in OVSDB
+// RowUpdate2 notation
+func (u ModelUpdates) ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error {
+ rows := u.updates[table]
+ for uuid, row := range rows {
+ err := do(uuid, *row.rowUpdate2)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetModel returns the last known state of the requested model. If the model is
+// unknown or has been deleted, returns nil.
+func (u ModelUpdates) GetModel(table, uuid string) model.Model {
+ if u.updates == nil {
+ return nil
+ }
+ if t, found := u.updates[table]; found {
+ if update, found := t[uuid]; found {
+ return update.new
+ }
+ }
+ return nil
+}
+
+// GetRow returns the last known state of the requested row. If the row is
+// unknown or has been deleted, returns nil.
+func (u ModelUpdates) GetRow(table, uuid string) *ovsdb.Row {
+ if u.updates == nil {
+ return nil
+ }
+ if t, found := u.updates[table]; found {
+ if update, found := t[uuid]; found {
+ return update.rowUpdate2.New
+ }
+ }
+ return nil
+}
+
+// Merge a set of updates with an earlier set of updates
+func (u *ModelUpdates) Merge(dbModel model.DatabaseModel, new ModelUpdates) error {
+ for table, models := range new.updates {
+ for uuid, update := range models {
+ err := u.addUpdate(dbModel, table, uuid, update)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// AddOperation adds an update for a model from an OVSDB Operation. If several
+// updates for the same model are aggregated, the user is responsible for
+// ensuring that the provided model to be updated matches the updated model of
+// the previous update.
+func (u *ModelUpdates) AddOperation(dbModel model.DatabaseModel, table, uuid string, current model.Model, op *ovsdb.Operation) error {
+ switch op.Op {
+ case ovsdb.OperationInsert:
+ return u.addInsertOperation(dbModel, table, uuid, op)
+ case ovsdb.OperationUpdate:
+ return u.addUpdateOperation(dbModel, table, uuid, current, op)
+ case ovsdb.OperationMutate:
+ return u.addMutateOperation(dbModel, table, uuid, current, op)
+ case ovsdb.OperationDelete:
+ return u.addDeleteOperation(dbModel, table, uuid, current, op)
+ default:
+ return fmt.Errorf("database update from operation %#v not supported", op.Op)
+ }
+}
+
+// AddRowUpdate adds an update for a model from an OVSDB RowUpdate. If several
+// updates for the same model are aggregated, the user is responsible for
+// ensuring that the provided model to be updated matches the updated model of
+// the previous update.
+func (u *ModelUpdates) AddRowUpdate(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru ovsdb.RowUpdate) error {
+ switch {
+ case ru.Old == nil && ru.New != nil:
+ new, err := model.CreateModel(dbModel, table, ru.New, uuid)
+ if err != nil {
+ return err
+ }
+ err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &rowUpdate2{New: ru.New}})
+ if err != nil {
+ return err
+ }
+ case ru.Old != nil && ru.New != nil:
+ old := current
+ new := model.Clone(current)
+ info, err := dbModel.NewModelInfo(new)
+ if err != nil {
+ return err
+ }
+ changed, err := updateModel(dbModel, table, info, ru.New, nil)
+ if !changed || err != nil {
+ return err
+ }
+ err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &rowUpdate2{Old: ru.Old, New: ru.New}})
+ if err != nil {
+ return err
+ }
+ case ru.New == nil:
+ old := current
+ err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &rowUpdate2{Old: ru.Old}})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddRowUpdate2 adds an update for a model from an OVSDB RowUpdate2. If several
+// updates for the same model are aggregated, the user is responsible for
+// ensuring that the provided model to be updated matches the updated model of
+// the previous update.
+func (u *ModelUpdates) AddRowUpdate2(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru2 ovsdb.RowUpdate2) error {
+ switch {
+ case ru2.Initial != nil:
+ ru2.Insert = ru2.Initial
+ fallthrough
+ case ru2.Insert != nil:
+ new, err := model.CreateModel(dbModel, table, ru2.Insert, uuid)
+ if err != nil {
+ return err
+ }
+ err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &ru2})
+ if err != nil {
+ return err
+ }
+ case ru2.Modify != nil:
+ old := current
+ new := model.Clone(current)
+ info, err := dbModel.NewModelInfo(new)
+ if err != nil {
+ return err
+ }
+ changed, err := modifyModel(dbModel, table, info, ru2.Modify)
+ if !changed || err != nil {
+ return err
+ }
+ err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &ru2})
+ if err != nil {
+ return err
+ }
+ default:
+ old := current
+ err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &ru2})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (u *ModelUpdates) addUpdate(dbModel model.DatabaseModel, table, uuid string, update modelUpdate) error {
+ if u.updates == nil {
+ u.updates = map[string]map[string]modelUpdate{}
+ }
+ if _, ok := u.updates[table]; !ok {
+ u.updates[table] = make(map[string]modelUpdate)
+ }
+
+ ts := dbModel.Schema.Table(table)
+ update, err := merge(ts, u.updates[table][uuid], update)
+ if err != nil {
+ return err
+ }
+
+ if !update.isEmpty() {
+ u.updates[table][uuid] = update
+ return nil
+ }
+
+ // If after the merge this amounts to no update, remove it from the list and
+ // clean up
+ delete(u.updates[table], uuid)
+ if len(u.updates[table]) == 0 {
+ delete(u.updates, table)
+ }
+ if len(u.updates) == 0 {
+ u.updates = nil
+ }
+
+ return nil
+}
+
+func (u *ModelUpdates) addInsertOperation(dbModel model.DatabaseModel, table, uuid string, op *ovsdb.Operation) error {
+ m := dbModel.Mapper
+
+ model, err := dbModel.NewModel(table)
+ if err != nil {
+ return err
+ }
+
+ mapperInfo, err := dbModel.NewModelInfo(model)
+ if err != nil {
+ return err
+ }
+
+ err = m.GetRowData(&op.Row, mapperInfo)
+ if err != nil {
+ return err
+ }
+
+ err = mapperInfo.SetField("_uuid", uuid)
+ if err != nil {
+ return err
+ }
+
+ resultRow, err := m.NewRow(mapperInfo)
+ if err != nil {
+ return err
+ }
+
+ err = u.addUpdate(dbModel, table, uuid,
+ modelUpdate{
+ old: nil,
+ new: model,
+ rowUpdate2: &rowUpdate2{
+ Insert: &resultRow,
+ New: &resultRow,
+ Old: nil,
+ },
+ },
+ )
+
+ return err
+}
+
+func (u *ModelUpdates) addUpdateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error {
+ m := dbModel.Mapper
+
+ oldInfo, err := dbModel.NewModelInfo(old)
+ if err != nil {
+ return err
+ }
+
+ oldRow, err := m.NewRow(oldInfo)
+ if err != nil {
+ return err
+ }
+
+ new := model.Clone(old)
+ newInfo, err := dbModel.NewModelInfo(new)
+ if err != nil {
+ return err
+ }
+
+ delta := ovsdb.NewRow()
+ changed, err := updateModel(dbModel, table, newInfo, &op.Row, &delta)
+ if err != nil {
+ return err
+ }
+ if !changed {
+ return nil
+ }
+
+ newRow, err := m.NewRow(newInfo)
+ if err != nil {
+ return err
+ }
+
+ err = u.addUpdate(dbModel, table, uuid,
+ modelUpdate{
+ old: old,
+ new: new,
+ rowUpdate2: &rowUpdate2{
+ Modify: &delta,
+ Old: &oldRow,
+ New: &newRow,
+ },
+ },
+ )
+
+ return err
+}
+
+func (u *ModelUpdates) addMutateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error {
+ m := dbModel.Mapper
+ schema := dbModel.Schema.Table(table)
+
+ oldInfo, err := dbModel.NewModelInfo(old)
+ if err != nil {
+ return err
+ }
+
+ oldRow, err := m.NewRow(oldInfo)
+ if err != nil {
+ return err
+ }
+
+ new := model.Clone(old)
+ newInfo, err := dbModel.NewModelInfo(new)
+ if err != nil {
+ return err
+ }
+
+ differences := make(map[string]interface{})
+ for _, mutation := range op.Mutations {
+ column := schema.Column(mutation.Column)
+ if column == nil {
+ continue
+ }
+
+ var nativeValue interface{}
+		// Usually a mutation value is of the same type as the value being mutated,
+		// except for the delete mutation of maps, where it can also be a list of keys
+		// of the same type (RFC 7047, section 5.1). Handle this special case here.
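+		// For example, a wire-format mutation such as
+		// ["external_ids", "delete", ["set", ["k1", "k2"]]] removes map entries by key
+		// (the column name and keys here are illustrative).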
+ if mutation.Mutator == "delete" && column.Type == ovsdb.TypeMap && reflect.TypeOf(mutation.Value) != reflect.TypeOf(ovsdb.OvsMap{}) {
+ nativeValue, err = ovsdb.OvsToNativeSlice(column.TypeObj.Key.Type, mutation.Value)
+ if err != nil {
+ return err
+ }
+ } else {
+ nativeValue, err = ovsdb.OvsToNative(column, mutation.Value)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := ovsdb.ValidateMutation(column, mutation.Mutator, nativeValue); err != nil {
+ return err
+ }
+
+ current, err := newInfo.FieldByColumn(mutation.Column)
+ if err != nil {
+ return err
+ }
+
+ newValue, diff := mutate(current, mutation.Mutator, nativeValue)
+ if err := newInfo.SetField(mutation.Column, newValue); err != nil {
+ return err
+ }
+
+ old, err := oldInfo.FieldByColumn(mutation.Column)
+ if err != nil {
+ return err
+ }
+ diff, changed := mergeDifference(old, differences[mutation.Column], diff)
+ if changed {
+ differences[mutation.Column] = diff
+ } else {
+ delete(differences, mutation.Column)
+ }
+ }
+
+ if len(differences) == 0 {
+ return nil
+ }
+
+ delta := ovsdb.NewRow()
+ for column, diff := range differences {
+ colSchema := schema.Column(column)
+ diffOvs, err := ovsdb.NativeToOvs(colSchema, diff)
+ if err != nil {
+ return err
+ }
+ delta[column] = diffOvs
+ }
+
+ newRow, err := m.NewRow(newInfo)
+ if err != nil {
+ return err
+ }
+
+ err = u.addUpdate(dbModel, table, uuid,
+ modelUpdate{
+ old: old,
+ new: new,
+ rowUpdate2: &rowUpdate2{
+ Modify: &delta,
+ Old: &oldRow,
+ New: &newRow,
+ },
+ },
+ )
+
+ return err
+}
+
+func (u *ModelUpdates) addDeleteOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error {
+ m := dbModel.Mapper
+
+ info, err := dbModel.NewModelInfo(old)
+ if err != nil {
+ return err
+ }
+
+ oldRow, err := m.NewRow(info)
+ if err != nil {
+ return err
+ }
+
+ err = u.addUpdate(dbModel, table, uuid,
+ modelUpdate{
+ old: old,
+ new: nil,
+ rowUpdate2: &rowUpdate2{
+ Delete: &ovsdb.Row{},
+ Old: &oldRow,
+ },
+ },
+ )
+
+ return err
+}
+
+func updateModel(dbModel model.DatabaseModel, table string, info *mapper.Info, update, modify *ovsdb.Row) (bool, error) {
+ return updateOrModifyModel(dbModel, table, info, update, modify, false)
+}
+
+func modifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, modify *ovsdb.Row) (bool, error) {
+ return updateOrModifyModel(dbModel, table, info, modify, nil, true)
+}
+
+// updateOrModifyModel updates info about a model with a given row containing
+// the change. The change row itself can be interpreted as an update or a
+// modify. If the change is an update and a modify row is provided, it will be
+// filled with the modify data.
+func updateOrModifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, changeRow, modifyRow *ovsdb.Row, isModify bool) (bool, error) {
+ schema := dbModel.Schema.Table(table)
+ var changed bool
+
+ for column, updateOvs := range *changeRow {
+ colSchema := schema.Column(column)
+ if colSchema == nil {
+ // ignore columns we don't know about in our schema
+ continue
+ }
+
+ currentNative, err := info.FieldByColumn(column)
+ if err != nil {
+ return false, err
+ }
+
+ updateNative, err := ovsdb.OvsToNative(colSchema, updateOvs)
+ if err != nil {
+ return false, err
+ }
+
+ if isModify {
+ differenceNative, isDifferent := applyDifference(currentNative, updateNative)
+ if isDifferent && !colSchema.Mutable() {
+ return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table))
+ }
+ changed = changed || isDifferent
+ err = info.SetField(column, differenceNative)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ differenceNative, isDifferent := difference(currentNative, updateNative)
+ if isDifferent && !colSchema.Mutable() {
+ return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table))
+ }
+ changed = changed || isDifferent
+ if isDifferent && modifyRow != nil {
+ deltaOvs, err := ovsdb.NativeToOvs(colSchema, differenceNative)
+ if err != nil {
+ return false, err
+ }
+ (*modifyRow)[column] = deltaOvs
+ }
+ err = info.SetField(column, updateNative)
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+
+ return changed, nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model/network_event.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model/network_event.go
new file mode 100644
index 000000000..00fb19fcf
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model/network_event.go
@@ -0,0 +1,61 @@
+package model
+
+import (
+ "fmt"
+
+ libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type NetworkEvent interface {
+ String() string
+}
+
+type ACLEvent struct {
+ NetworkEvent
+ Action string
+ Actor string
+ Name string
+ Namespace string
+ Direction string
+}
+
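+// String renders the event as a human-readable message, for example (names here
+// are illustrative): "Allowed by network policy allow-db in namespace prod, direction Ingress".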
+func (e *ACLEvent) String() string {
+ var action string
+ switch e.Action {
+ case nbdb.ACLActionAllow, nbdb.ACLActionAllowRelated, nbdb.ACLActionAllowStateless:
+ action = "Allowed"
+ case nbdb.ACLActionDrop:
+ action = "Dropped"
+ case nbdb.ACLActionPass:
+ action = "Delegated to network policy"
+ default:
+ action = "Action " + e.Action
+ }
+ var msg string
+ switch e.Actor {
+ case libovsdbops.AdminNetworkPolicyOwnerType:
+ msg = fmt.Sprintf("admin network policy %s, direction %s", e.Name, e.Direction)
+ case libovsdbops.BaselineAdminNetworkPolicyOwnerType:
+ msg = fmt.Sprintf("baseline admin network policy %s, direction %s", e.Name, e.Direction)
+ case libovsdbops.MulticastNamespaceOwnerType:
+ msg = fmt.Sprintf("multicast in namespace %s, direction %s", e.Namespace, e.Direction)
+ case libovsdbops.MulticastClusterOwnerType:
+ msg = fmt.Sprintf("cluster multicast policy, direction %s", e.Direction)
+ case libovsdbops.NetpolNodeOwnerType:
+ msg = fmt.Sprintf("default allow from local node policy, direction %s", e.Direction)
+ case libovsdbops.NetworkPolicyOwnerType:
+ if e.Namespace != "" {
+ msg = fmt.Sprintf("network policy %s in namespace %s, direction %s", e.Name, e.Namespace, e.Direction)
+ } else {
+ msg = fmt.Sprintf("network policy %s, direction %s", e.Name, e.Direction)
+ }
+ case libovsdbops.NetpolNamespaceOwnerType:
+ msg = fmt.Sprintf("network policies isolation in namespace %s, direction %s", e.Namespace, e.Direction)
+ case libovsdbops.EgressFirewallOwnerType:
+ msg = fmt.Sprintf("egress firewall in namespace %s", e.Namespace)
+ case libovsdbops.UDNIsolationOwnerType:
+ msg = fmt.Sprintf("UDN isolation of type %s", e.Name)
+ }
+ return fmt.Sprintf("%s by %s", action, msg)
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore
new file mode 100644
index 000000000..734ba1eff
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore
@@ -0,0 +1 @@
+*.ovsschema
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go
new file mode 100644
index 000000000..d0135c488
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go
@@ -0,0 +1,570 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package ovsdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const BridgeTable = "Bridge"
+
+type (
+ BridgeFailMode = string
+ BridgeProtocols = string
+)
+
+var (
+ BridgeFailModeStandalone BridgeFailMode = "standalone"
+ BridgeFailModeSecure BridgeFailMode = "secure"
+ BridgeProtocolsOpenflow10 BridgeProtocols = "OpenFlow10"
+ BridgeProtocolsOpenflow11 BridgeProtocols = "OpenFlow11"
+ BridgeProtocolsOpenflow12 BridgeProtocols = "OpenFlow12"
+ BridgeProtocolsOpenflow13 BridgeProtocols = "OpenFlow13"
+ BridgeProtocolsOpenflow14 BridgeProtocols = "OpenFlow14"
+ BridgeProtocolsOpenflow15 BridgeProtocols = "OpenFlow15"
+)
+
+// Bridge defines an object in Bridge table
+type Bridge struct {
+ UUID string `ovsdb:"_uuid"`
+ AutoAttach *string `ovsdb:"auto_attach"`
+ Controller []string `ovsdb:"controller"`
+ DatapathID *string `ovsdb:"datapath_id"`
+ DatapathType string `ovsdb:"datapath_type"`
+ DatapathVersion string `ovsdb:"datapath_version"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ FailMode *BridgeFailMode `ovsdb:"fail_mode"`
+ FloodVLANs []int `ovsdb:"flood_vlans"`
+ FlowTables map[int]string `ovsdb:"flow_tables"`
+ IPFIX *string `ovsdb:"ipfix"`
+ McastSnoopingEnable bool `ovsdb:"mcast_snooping_enable"`
+ Mirrors []string `ovsdb:"mirrors"`
+ Name string `ovsdb:"name"`
+ Netflow *string `ovsdb:"netflow"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+ Ports []string `ovsdb:"ports"`
+ Protocols []BridgeProtocols `ovsdb:"protocols"`
+ RSTPEnable bool `ovsdb:"rstp_enable"`
+ RSTPStatus map[string]string `ovsdb:"rstp_status"`
+ Sflow *string `ovsdb:"sflow"`
+ Status map[string]string `ovsdb:"status"`
+ STPEnable bool `ovsdb:"stp_enable"`
+}
+
+func (a *Bridge) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Bridge) GetAutoAttach() *string {
+ return a.AutoAttach
+}
+
+func copyBridgeAutoAttach(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBridgeAutoAttach(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Bridge) GetController() []string {
+ return a.Controller
+}
+
+func copyBridgeController(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalBridgeController(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetDatapathID() *string {
+ return a.DatapathID
+}
+
+func copyBridgeDatapathID(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBridgeDatapathID(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Bridge) GetDatapathType() string {
+ return a.DatapathType
+}
+
+func (a *Bridge) GetDatapathVersion() string {
+ return a.DatapathVersion
+}
+
+func (a *Bridge) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyBridgeExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBridgeExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetFailMode() *BridgeFailMode {
+ return a.FailMode
+}
+
+func copyBridgeFailMode(a *BridgeFailMode) *BridgeFailMode {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBridgeFailMode(a, b *BridgeFailMode) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Bridge) GetFloodVLANs() []int {
+ return a.FloodVLANs
+}
+
+func copyBridgeFloodVLANs(a []int) []int {
+ if a == nil {
+ return nil
+ }
+ b := make([]int, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalBridgeFloodVLANs(a, b []int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetFlowTables() map[int]string {
+ return a.FlowTables
+}
+
+func copyBridgeFlowTables(a map[int]string) map[int]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[int]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBridgeFlowTables(a, b map[int]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetIPFIX() *string {
+ return a.IPFIX
+}
+
+func copyBridgeIPFIX(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBridgeIPFIX(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Bridge) GetMcastSnoopingEnable() bool {
+ return a.McastSnoopingEnable
+}
+
+func (a *Bridge) GetMirrors() []string {
+ return a.Mirrors
+}
+
+func copyBridgeMirrors(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalBridgeMirrors(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetName() string {
+ return a.Name
+}
+
+func (a *Bridge) GetNetflow() *string {
+ return a.Netflow
+}
+
+func copyBridgeNetflow(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBridgeNetflow(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Bridge) GetOtherConfig() map[string]string {
+ return a.OtherConfig
+}
+
+func copyBridgeOtherConfig(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBridgeOtherConfig(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetPorts() []string {
+ return a.Ports
+}
+
+func copyBridgePorts(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalBridgePorts(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetProtocols() []BridgeProtocols {
+ return a.Protocols
+}
+
+func copyBridgeProtocols(a []BridgeProtocols) []BridgeProtocols {
+ if a == nil {
+ return nil
+ }
+ b := make([]BridgeProtocols, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalBridgeProtocols(a, b []BridgeProtocols) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetRSTPEnable() bool {
+ return a.RSTPEnable
+}
+
+func (a *Bridge) GetRSTPStatus() map[string]string {
+ return a.RSTPStatus
+}
+
+func copyBridgeRSTPStatus(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBridgeRSTPStatus(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetSflow() *string {
+ return a.Sflow
+}
+
+func copyBridgeSflow(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBridgeSflow(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Bridge) GetStatus() map[string]string {
+ return a.Status
+}
+
+func copyBridgeStatus(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBridgeStatus(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Bridge) GetSTPEnable() bool {
+ return a.STPEnable
+}
+
+func (a *Bridge) DeepCopyInto(b *Bridge) {
+ *b = *a
+ b.AutoAttach = copyBridgeAutoAttach(a.AutoAttach)
+ b.Controller = copyBridgeController(a.Controller)
+ b.DatapathID = copyBridgeDatapathID(a.DatapathID)
+ b.ExternalIDs = copyBridgeExternalIDs(a.ExternalIDs)
+ b.FailMode = copyBridgeFailMode(a.FailMode)
+ b.FloodVLANs = copyBridgeFloodVLANs(a.FloodVLANs)
+ b.FlowTables = copyBridgeFlowTables(a.FlowTables)
+ b.IPFIX = copyBridgeIPFIX(a.IPFIX)
+ b.Mirrors = copyBridgeMirrors(a.Mirrors)
+ b.Netflow = copyBridgeNetflow(a.Netflow)
+ b.OtherConfig = copyBridgeOtherConfig(a.OtherConfig)
+ b.Ports = copyBridgePorts(a.Ports)
+ b.Protocols = copyBridgeProtocols(a.Protocols)
+ b.RSTPStatus = copyBridgeRSTPStatus(a.RSTPStatus)
+ b.Sflow = copyBridgeSflow(a.Sflow)
+ b.Status = copyBridgeStatus(a.Status)
+}
+
+func (a *Bridge) DeepCopy() *Bridge {
+ b := new(Bridge)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Bridge) CloneModelInto(b model.Model) {
+ c := b.(*Bridge)
+ a.DeepCopyInto(c)
+}
+
+func (a *Bridge) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Bridge) Equals(b *Bridge) bool {
+ return a.UUID == b.UUID &&
+ equalBridgeAutoAttach(a.AutoAttach, b.AutoAttach) &&
+ equalBridgeController(a.Controller, b.Controller) &&
+ equalBridgeDatapathID(a.DatapathID, b.DatapathID) &&
+ a.DatapathType == b.DatapathType &&
+ a.DatapathVersion == b.DatapathVersion &&
+ equalBridgeExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalBridgeFailMode(a.FailMode, b.FailMode) &&
+ equalBridgeFloodVLANs(a.FloodVLANs, b.FloodVLANs) &&
+ equalBridgeFlowTables(a.FlowTables, b.FlowTables) &&
+ equalBridgeIPFIX(a.IPFIX, b.IPFIX) &&
+ a.McastSnoopingEnable == b.McastSnoopingEnable &&
+ equalBridgeMirrors(a.Mirrors, b.Mirrors) &&
+ a.Name == b.Name &&
+ equalBridgeNetflow(a.Netflow, b.Netflow) &&
+ equalBridgeOtherConfig(a.OtherConfig, b.OtherConfig) &&
+ equalBridgePorts(a.Ports, b.Ports) &&
+ equalBridgeProtocols(a.Protocols, b.Protocols) &&
+ a.RSTPEnable == b.RSTPEnable &&
+ equalBridgeRSTPStatus(a.RSTPStatus, b.RSTPStatus) &&
+ equalBridgeSflow(a.Sflow, b.Sflow) &&
+ equalBridgeStatus(a.Status, b.Status) &&
+ a.STPEnable == b.STPEnable
+}
+
+func (a *Bridge) EqualsModel(b model.Model) bool {
+ c := b.(*Bridge)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Bridge{}
+var _ model.ComparableModel = &Bridge{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go
new file mode 100644
index 000000000..57a26e805
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go
@@ -0,0 +1,143 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package ovsdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set"
+
+// FlowSampleCollectorSet defines an object in Flow_Sample_Collector_Set table
+type FlowSampleCollectorSet struct {
+ UUID string `ovsdb:"_uuid"`
+ Bridge string `ovsdb:"bridge"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ ID int `ovsdb:"id"`
+ IPFIX *string `ovsdb:"ipfix"`
+ LocalGroupID *int `ovsdb:"local_group_id"`
+}
+
+func (a *FlowSampleCollectorSet) GetUUID() string {
+ return a.UUID
+}
+
+func (a *FlowSampleCollectorSet) GetBridge() string {
+ return a.Bridge
+}
+
+func (a *FlowSampleCollectorSet) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyFlowSampleCollectorSetExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalFlowSampleCollectorSetExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *FlowSampleCollectorSet) GetID() int {
+ return a.ID
+}
+
+func (a *FlowSampleCollectorSet) GetIPFIX() *string {
+ return a.IPFIX
+}
+
+func copyFlowSampleCollectorSetIPFIX(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalFlowSampleCollectorSetIPFIX(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *FlowSampleCollectorSet) GetLocalGroupID() *int {
+ return a.LocalGroupID
+}
+
+func copyFlowSampleCollectorSetLocalGroupID(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalFlowSampleCollectorSetLocalGroupID(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *FlowSampleCollectorSet) DeepCopyInto(b *FlowSampleCollectorSet) {
+ *b = *a
+ b.ExternalIDs = copyFlowSampleCollectorSetExternalIDs(a.ExternalIDs)
+ b.IPFIX = copyFlowSampleCollectorSetIPFIX(a.IPFIX)
+ b.LocalGroupID = copyFlowSampleCollectorSetLocalGroupID(a.LocalGroupID)
+}
+
+func (a *FlowSampleCollectorSet) DeepCopy() *FlowSampleCollectorSet {
+ b := new(FlowSampleCollectorSet)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *FlowSampleCollectorSet) CloneModelInto(b model.Model) {
+ c := b.(*FlowSampleCollectorSet)
+ a.DeepCopyInto(c)
+}
+
+func (a *FlowSampleCollectorSet) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *FlowSampleCollectorSet) Equals(b *FlowSampleCollectorSet) bool {
+ return a.UUID == b.UUID &&
+ a.Bridge == b.Bridge &&
+ equalFlowSampleCollectorSetExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.ID == b.ID &&
+ equalFlowSampleCollectorSetIPFIX(a.IPFIX, b.IPFIX) &&
+ equalFlowSampleCollectorSetLocalGroupID(a.LocalGroupID, b.LocalGroupID)
+}
+
+func (a *FlowSampleCollectorSet) EqualsModel(b model.Model) bool {
+ c := b.(*FlowSampleCollectorSet)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &FlowSampleCollectorSet{}
+var _ model.ComparableModel = &FlowSampleCollectorSet{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go
new file mode 100644
index 000000000..c5aabca46
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go
@@ -0,0 +1,3 @@
+package ovsdb
+
+//go:generate modelgen --extended -p ovsdb -o . vswitch.ovsschema
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go
new file mode 100644
index 000000000..7ba2329e3
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go
@@ -0,0 +1,11 @@
+package ovsdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+// ObservDatabaseModel returns the DatabaseModel object to be used by the observability library.
+func ObservDatabaseModel() (model.ClientDBModel, error) {
+ return model.NewClientDBModel("Open_vSwitch", map[string]model.Model{
+ "Bridge": &Bridge{},
+ "Flow_Sample_Collector_Set": &FlowSampleCollectorSet{},
+ })
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go
new file mode 100644
index 000000000..5ff1587a6
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go
@@ -0,0 +1,118 @@
+package sampledecoder
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+ "k8s.io/klog/v2/textlogger"
+)
+
+const OVSDBTimeout = 10 * time.Second
+
+func NewNBClientWithConfig(ctx context.Context, cfg dbConfig) (client.Client, error) {
+ dbModel, err := nbdb.FullDatabaseModel()
+ if err != nil {
+ return nil, err
+ }
+
+ // define client indexes for ACLs to quickly find them by sample_new or sample_est column.
+ dbModel.SetIndexes(map[string][]model.ClientIndex{
+ nbdb.ACLTable: {
+ {Columns: []model.ColumnKey{{Column: "sample_new"}}},
+ {Columns: []model.ColumnKey{{Column: "sample_est"}}},
+ },
+ })
+
+ c, err := newClient(cfg, dbModel)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = c.Monitor(ctx,
+ c.NewMonitor(
+ client.WithTable(&nbdb.ACL{}),
+ client.WithTable(&nbdb.Sample{}),
+ ),
+ )
+
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+func NewOVSDBClientWithConfig(ctx context.Context, cfg dbConfig) (client.Client, error) {
+ dbModel, err := ovsdb.ObservDatabaseModel()
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := newClient(cfg, dbModel)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = c.Monitor(ctx,
+ c.NewMonitor(
+ client.WithTable(&ovsdb.FlowSampleCollectorSet{}),
+ client.WithTable(&ovsdb.Bridge{}),
+ ),
+ )
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+// newClient creates a new client object given the provided config and database
+// model, connects it, and returns it.
+func newClient(cfg dbConfig, dbModel model.ClientDBModel) (client.Client, error) {
+ const connectTimeout = OVSDBTimeout * 2
+ const inactivityTimeout = OVSDBTimeout * 18
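+	// With OVSDBTimeout at 10 seconds, this amounts to a 20 second connect
+	// timeout and a 3 minute inactivity timeout.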
+ // Don't log anything from the libovsdb client by default
+ config := textlogger.NewConfig(textlogger.Verbosity(0))
+ logger := textlogger.NewLogger(config)
+
+ options := []client.Option{
+		// Reading and parsing the DB after a reconnect at scale can (unsurprisingly)
+		// take longer than a normal ovsdb operation. Give it a bit more time so
+		// we don't time out and enter a reconnect loop. This option also enables
+		// the inactivity check on the ovsdb connection.
+ client.WithInactivityCheck(inactivityTimeout, connectTimeout, &backoff.ZeroBackOff{}),
+ client.WithLeaderOnly(true),
+ client.WithLogger(&logger),
+ }
+
+ for _, endpoint := range strings.Split(cfg.address, ",") {
+ options = append(options, client.WithEndpoint(endpoint))
+ }
+ if cfg.scheme != "unix" {
+ return nil, fmt.Errorf("only unix scheme is supported for now")
+ }
+
+ client, err := client.NewOVSDBClient(dbModel, options...)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), connectTimeout)
+ defer cancel()
+ err = client.Connect(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go
new file mode 100644
index 000000000..d691fd9cc
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go
@@ -0,0 +1,293 @@
+package sampledecoder
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "strings"
+
+ "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb"
+ libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability"
+)
+
+type SampleDecoder struct {
+ nbClient client.Client
+ ovsdbClient client.Client
+ cleanupCollectors []int
+}
+
+type dbConfig struct {
+ address string
+ scheme string
+}
+
+type Cookie struct {
+ ObsDomainID uint32
+ ObsPointID uint32
+}
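+
+// Layout note: as encoded with SampleEndian below, a sample cookie is
+// CookieSize (8) bytes, carrying ObsDomainID in the first 4 bytes and
+// ObsPointID in the last 4 bytes, both in network (big-endian) byte order.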
+
+const CookieSize = 8
+const bridgeName = "br-int"
+
+var SampleEndian = getEndian()
+
+func getEndian() binary.ByteOrder {
+ // Use network byte order
+ return binary.BigEndian
+}
+
+// getLocalNBClient only supports connecting to nbdb via unix socket.
+// address is the path to the unix socket, e.g. "/var/run/ovn/ovnnb_db.sock"
+func getLocalNBClient(ctx context.Context, address string) (client.Client, error) {
+ config := dbConfig{
+ address: "unix:" + address,
+ scheme: "unix",
+ }
+ libovsdbOvnNBClient, err := NewNBClientWithConfig(ctx, config)
+ if err != nil {
+ return nil, fmt.Errorf("error creating libovsdb client: %w ", err)
+ }
+ return libovsdbOvnNBClient, nil
+}
+
+func getLocalOVSDBClient(ctx context.Context) (client.Client, error) {
+ config := dbConfig{
+ address: "unix:/var/run/openvswitch/db.sock",
+ scheme: "unix",
+ }
+ return NewOVSDBClientWithConfig(ctx, config)
+}
+
+// NewSampleDecoderWithDefaultCollector creates a new SampleDecoder, initializes the OVSDB client and adds the default collector.
+// It allows setting the groupID and ownerName for the created default collector.
+// If the default collector already exists with a different owner or groupID, an error is returned.
+// Shutdown should be called to clean up the collector from the OVSDB.
+func NewSampleDecoderWithDefaultCollector(ctx context.Context, nbdbSocketPath string, ownerName string, groupID int) (*SampleDecoder, error) {
+ nbClient, err := getLocalNBClient(ctx, nbdbSocketPath)
+ if err != nil {
+ return nil, err
+ }
+ ovsdbClient, err := getLocalOVSDBClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ decoder := &SampleDecoder{
+ nbClient: nbClient,
+ ovsdbClient: ovsdbClient,
+ }
+ err = decoder.AddCollector(observability.DefaultObservabilityCollectorSetID, groupID, ownerName)
+ if err != nil {
+ return nil, err
+ }
+ decoder.cleanupCollectors = append(decoder.cleanupCollectors, observability.DefaultObservabilityCollectorSetID)
+ return decoder, nil
+}
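+
+// Illustrative usage sketch (not from this library's docs; the socket path,
+// owner name, group ID, and cookie variable below are example values only):
+//
+//	decoder, err := NewSampleDecoderWithDefaultCollector(ctx,
+//		"/var/run/ovn/ovnnb_db.sock", "my-observer", 10)
+//	if err != nil {
+//		return err
+//	}
+//	defer decoder.Shutdown()
+//	event, err := decoder.DecodeCookie8Bytes(cookie)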
+
+// NewSampleDecoder creates a new SampleDecoder and initializes the OVSDB client.
+func NewSampleDecoder(ctx context.Context, nbdbSocketPath string) (*SampleDecoder, error) {
+ nbClient, err := getLocalNBClient(ctx, nbdbSocketPath)
+ if err != nil {
+ return nil, err
+ }
+ return &SampleDecoder{
+ nbClient: nbClient,
+ }, nil
+}
+
+func (d *SampleDecoder) Shutdown() {
+ for _, collectorID := range d.cleanupCollectors {
+ err := d.DeleteCollector(collectorID)
+ if err != nil {
+ fmt.Printf("Error deleting collector with ID=%d: %v", collectorID, err)
+ }
+ }
+}
+
+func getObservAppID(obsDomainID uint32) uint8 {
+ return uint8(obsDomainID >> 24)
+}
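+
+// Note: the observability application ID is carried in the most significant
+// byte of the 32-bit observation domain ID; DecodeCookieIDs below switches on
+// it to pick the right database lookup.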
+
+// findACLBySample relies on the client index based on sample_new and sample_est column.
+func findACLBySample(nbClient client.Client, acl *nbdb.ACL) ([]*nbdb.ACL, error) {
+ found := []*nbdb.ACL{}
+ err := nbClient.Where(acl).List(context.Background(), &found)
+ return found, err
+}
+
+func (d *SampleDecoder) DecodeCookieIDs(obsDomainID, obsPointID uint32) (model.NetworkEvent, error) {
+ // Find sample using obsPointID
+ sample, err := libovsdbops.FindSample(d.nbClient, int(obsPointID))
+ if err != nil || sample == nil {
+ return nil, fmt.Errorf("find sample failed: %w", err)
+ }
+ // find db object using the observability application ID
+ // Since ACL is indexed both by sample_new and sample_est, when searching by one of them,
+ // we need to make sure the other one will not match.
+ // nil is a valid index value, therefore we have to use non-existing UUID.
+ wrongUUID := "wrongUUID"
+ var dbObj interface{}
+ switch getObservAppID(obsDomainID) {
+ case observability.ACLNewTrafficSamplingID:
+ acls, err := findACLBySample(d.nbClient, &nbdb.ACL{SampleNew: &sample.UUID, SampleEst: &wrongUUID})
+ if err != nil {
+ return nil, fmt.Errorf("find acl for sample failed: %w", err)
+ }
+ if len(acls) != 1 {
+ return nil, fmt.Errorf("expected 1 ACL, got %d", len(acls))
+ }
+ dbObj = acls[0]
+ case observability.ACLEstTrafficSamplingID:
+ acls, err := findACLBySample(d.nbClient, &nbdb.ACL{SampleNew: &wrongUUID, SampleEst: &sample.UUID})
+ if err != nil {
+ return nil, fmt.Errorf("find acl for sample failed: %w", err)
+ }
+ if len(acls) != 1 {
+ return nil, fmt.Errorf("expected 1 ACL, got %d", len(acls))
+ }
+ dbObj = acls[0]
+ default:
+ return nil, fmt.Errorf("unknown app ID: %d", getObservAppID(obsDomainID))
+ }
+ var event model.NetworkEvent
+ switch o := dbObj.(type) {
+ case *nbdb.ACL:
+ event, err = newACLEvent(o)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build ACL network event: %w", err)
+ }
+ }
+ if event == nil {
+ return nil, fmt.Errorf("failed to build network event for db object %v", dbObj)
+ }
+ return event, nil
+}
+
+func newACLEvent(o *nbdb.ACL) (*model.ACLEvent, error) {
+ actor := o.ExternalIDs[libovsdbops.OwnerTypeKey.String()]
+ event := model.ACLEvent{
+ Action: o.Action,
+ Actor: actor,
+ }
+ switch actor {
+ case libovsdbops.NetworkPolicyOwnerType:
+ objName := o.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+ nsname := strings.SplitN(objName, ":", 2)
+ if len(nsname) == 2 {
+ event.Namespace = nsname[0]
+ event.Name = nsname[1]
+ } else {
+ return nil, fmt.Errorf("expected format namespace:name for Object Name, but found: %s", objName)
+ }
+ event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]
+ case libovsdbops.AdminNetworkPolicyOwnerType, libovsdbops.BaselineAdminNetworkPolicyOwnerType:
+ event.Name = o.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+ event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]
+ case libovsdbops.MulticastNamespaceOwnerType, libovsdbops.NetpolNamespaceOwnerType:
+ event.Namespace = o.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+ event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]
+ case libovsdbops.MulticastClusterOwnerType:
+ event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]
+ case libovsdbops.EgressFirewallOwnerType:
+ event.Namespace = o.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+ event.Direction = "Egress"
+ case libovsdbops.UDNIsolationOwnerType:
+ event.Name = o.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+ case libovsdbops.NetpolNodeOwnerType:
+ event.Direction = "Ingress"
+ }
+ return &event, nil
+}
+
+func (d *SampleDecoder) DecodeCookieBytes(cookie []byte) (model.NetworkEvent, error) {
+ if uint64(len(cookie)) != CookieSize {
+ return nil, fmt.Errorf("invalid cookie size: %d", len(cookie))
+ }
+ c := Cookie{}
+ err := binary.Read(bytes.NewReader(cookie), SampleEndian, &c)
+ if err != nil {
+ return nil, err
+ }
+ return d.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID)
+}
+
+func (d *SampleDecoder) DecodeCookie8Bytes(cookie [8]byte) (model.NetworkEvent, error) {
+ c := Cookie{}
+ err := binary.Read(bytes.NewReader(cookie[:]), SampleEndian, &c)
+ if err != nil {
+ return nil, err
+ }
+ return d.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID)
+}
+
+func getGroupID(groupID *int) string {
+ if groupID == nil {
+ return "unset"
+ }
+ return fmt.Sprintf("%d", *groupID)
+}
+
+func (d *SampleDecoder) AddCollector(collectorID, groupID int, ownerName string) error {
+ if d.ovsdbClient == nil {
+ return fmt.Errorf("OVSDB client is not initialized")
+ }
+ // find existing collector with the same ID
+ collectors := []*ovsdb.FlowSampleCollectorSet{}
+ err := d.ovsdbClient.WhereCache(func(item *ovsdb.FlowSampleCollectorSet) bool {
+ return item.ID == collectorID
+ }).List(context.Background(), &collectors)
+ if err != nil {
+ return fmt.Errorf("failed finding existing collector: %w", err)
+ }
+ if len(collectors) > 0 && (collectors[0].ExternalIDs["owner"] != ownerName ||
+ collectors[0].LocalGroupID == nil || *collectors[0].LocalGroupID != groupID) {
+ return fmt.Errorf("requested collector with id=%v already exists "+
+ "with the external_ids=%+v, local_group_id=%v", collectorID, collectors[0].ExternalIDs["owner"], getGroupID(collectors[0].LocalGroupID))
+ }
+
+ // find br-int UUID to attach collector
+ bridges := []*ovsdb.Bridge{}
+ err = d.ovsdbClient.WhereCache(func(item *ovsdb.Bridge) bool {
+ return item.Name == bridgeName
+ }).List(context.Background(), &bridges)
+ if err != nil || len(bridges) != 1 {
+ return fmt.Errorf("failed finding br-int: %w", err)
+ }
+
+ ops, err := d.ovsdbClient.Create(&ovsdb.FlowSampleCollectorSet{
+ ID: collectorID,
+ Bridge: bridges[0].UUID,
+ LocalGroupID: &groupID,
+ ExternalIDs: map[string]string{"owner": ownerName},
+ })
+ if err != nil {
+ return fmt.Errorf("failed creating collector: %w", err)
+ }
+ _, err = d.ovsdbClient.Transact(context.Background(), ops...)
+ return err
+}
+
+func (d *SampleDecoder) DeleteCollector(collectorID int) error {
+ collectors := []*ovsdb.FlowSampleCollectorSet{}
+ err := d.ovsdbClient.WhereCache(func(item *ovsdb.FlowSampleCollectorSet) bool {
+ return item.ID == collectorID
+ }).List(context.Background(), &collectors)
+ if err != nil {
+ return fmt.Errorf("failed finding exisiting collector: %w", err)
+ }
+ if len(collectors) != 1 {
+ return fmt.Errorf("expected only 1 collector with given id")
+ }
+
+ ops, err := d.ovsdbClient.Where(collectors[0]).Delete()
+ if err != nil {
+ return fmt.Errorf("failed creating collector: %w", err)
+ }
+ res, err := d.ovsdbClient.Transact(context.Background(), ops...)
+ fmt.Println("res: ", res)
+ return err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go
new file mode 100644
index 000000000..88b8f6a83
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go
@@ -0,0 +1,97 @@
+package types
+
+import (
+ "net"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+// NetConf is CNI NetConf with DeviceID
+type NetConf struct {
+ types.NetConf
+ // Role is valid only on L3 / L2 topologies, not on localnet.
+ // It selects whether this network is the pod's primary or secondary
+ // user-defined network; primary user-defined networks are used to
+ // achieve native network isolation.
+ // For backwards compatibility, an empty value means the network is
+ // considered secondary.
+ Role string `json:"role,omitempty"`
+ // specifies the OVN topology for this network configuration
+ // when not specified, by default it is Layer3AttachDefTopoType
+ Topology string `json:"topology,omitempty"`
+ // captures net-attach-def name in the form of namespace/name
+ NADName string `json:"netAttachDefName,omitempty"`
+ // Network MTU
+ MTU int `json:"mtu,omitempty"`
+ // comma-separated subnet CIDRs
+ // for a secondary layer3 network, e.g. 10.128.0.0/14/23
+ // for layer2 and localnet networks, e.g. 10.1.130.0/24
+ Subnets string `json:"subnets,omitempty"`
+ // comma-separated list of IPs, expressed as subnets, to be excluded from pod allocation
+ // valid for layer2 and localnet network topologies
+ // e.g. "10.1.130.0/27, 10.1.130.122/32"
+ ExcludeSubnets string `json:"excludeSubnets,omitempty"`
+ // JoinSubnet is the join subnet CIDR required to support
+ // services and ingress for user-defined networks.
+ // For a dual-stack cluster, provide a comma-separated list.
+ // expected format:
+ // 1) V4 single stack: "v4CIDR" (eg: "100.65.0.0/16")
+ // 2) V6 single stack: "v6CIDR" (eg: "fd99::/64")
+ // 3) dualstack: "v4CIDR,v6CIDR" (eg: "100.65.0.0/16,fd99::/64")
+ // valid for UDN layer3/layer2 network topology
+ // default value: 100.65.0.0/16,fd99::/64 if not provided
+ JoinSubnet string `json:"joinSubnet,omitempty"`
+ // VLANID, valid in localnet topology network only
+ VLANID int `json:"vlanID,omitempty"`
+ // AllowPersistentIPs is valid on both localnet and layer2 topologies.
+ // It allows for having IP allocations that outlive the pod for which
+ // they are originally created - e.g. a KubeVirt VM's migration, or
+ // restart.
+ AllowPersistentIPs bool `json:"allowPersistentIPs,omitempty"`
+
+ // PhysicalNetworkName indicates the name of the physical network to which
+ // the OVN overlay will connect. Only applies to `localnet` topologies.
+ // When omitted, the physical network name of the network will be the value
+ // of the `name` attribute.
+ // This attribute allows multiple overlays to share the same physical
+ // network mapping in the hosts.
+ PhysicalNetworkName string `json:"physicalNetworkName,omitempty"`
+
+ // DeviceID is the PCI address when using SR-IOV, or the auxiliary device name when using a scalable function (SF)
+ DeviceID string `json:"deviceID,omitempty"`
+ // LogFile is the file to which the CNI shim binary logs all its messages
+ LogFile string `json:"logFile,omitempty"`
+ // Level is the logging verbosity level
+ LogLevel string `json:"logLevel,omitempty"`
+ // LogFileMaxSize is the maximum size in megabytes of the logfile
+ // before it gets rolled.
+ LogFileMaxSize int `json:"logfile-maxsize"`
+ // LogFileMaxBackups represents the maximum number of
+ // old log files to retain
+ LogFileMaxBackups int `json:"logfile-maxbackups"`
+ // LogFileMaxAge represents the maximum number
+ // of days to retain old log files
+ LogFileMaxAge int `json:"logfile-maxage"`
+ // Runtime arguments passed by the NPWG implementation (e.g. multus)
+ RuntimeConfig struct {
+ // see https://github.com/k8snetworkplumbingwg/device-info-spec
+ CNIDeviceInfoFile string `json:"CNIDeviceInfoFile,omitempty"`
+ } `json:"runtimeConfig,omitempty"`
+}
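+
+// Illustrative net-attach-def spec.config carrying this NetConf (values are
+// hypothetical examples, not defaults):
+//
+//	{
+//	  "cniVersion": "0.4.0",
+//	  "name": "tenant-blue",
+//	  "type": "ovn-k8s-cni-overlay",
+//	  "topology": "layer2",
+//	  "netAttachDefName": "default/tenant-blue",
+//	  "subnets": "10.1.130.0/24",
+//	  "mtu": 1400
+//	}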
+
+// NetworkSelectionElement represents one element of the JSON format
+// Network Attachment Selection Annotation as described in section 4.1.2
+// of the CRD specification.
+type NetworkSelectionElement struct {
+ // Name contains the name of the Network object this element selects
+ Name string `json:"name"`
+ // Namespace contains the optional namespace that the network referenced
+ // by Name exists in
+ Namespace string `json:"namespace,omitempty"`
+ // MacRequest contains an optional requested MAC address for this
+ // network attachment
+ MacRequest string `json:"mac,omitempty"`
+ // GatewayRequest contains default route IP address for the pod
+ GatewayRequest []net.IP `json:"default-route,omitempty"`
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go
new file mode 100644
index 000000000..3d935c5c6
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go
@@ -0,0 +1,173 @@
+package config
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containernetworking/cni/libcni"
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/version"
+
+ ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types"
+ ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+)
+
+var ErrorAttachDefNotOvnManaged = errors.New("net-attach-def not managed by OVN")
+var ErrorChainingNotSupported = errors.New("CNI plugin chaining is not supported")
+
+// WriteCNIConfig writes a CNI JSON config file to the directory given by the global
+// config if the file doesn't already exist or differs from the content that would
+// be written.
+func WriteCNIConfig() error {
+ netConf := &ovncnitypes.NetConf{
+ NetConf: types.NetConf{
+ CNIVersion: "0.4.0",
+ Name: "ovn-kubernetes",
+ Type: CNI.Plugin,
+ },
+ LogFile: Logging.CNIFile,
+ LogLevel: fmt.Sprintf("%d", Logging.Level),
+ LogFileMaxSize: Logging.LogFileMaxSize,
+ LogFileMaxBackups: Logging.LogFileMaxBackups,
+ LogFileMaxAge: Logging.LogFileMaxAge,
+ }
+
+ newBytes, err := json.Marshal(netConf)
+ if err != nil {
+ return fmt.Errorf("failed to marshal CNI config JSON: %v", err)
+ }
+
+ confFile := filepath.Join(CNI.ConfDir, CNIConfFileName)
+ if existingBytes, err := os.ReadFile(confFile); err == nil {
+ if bytes.Equal(newBytes, existingBytes) {
+ // No changes; do nothing
+ return nil
+ }
+ }
+
+ // Install the CNI config file after all initialization is done
+ // MkdirAll() returns no error if the path already exists
+ if err := os.MkdirAll(CNI.ConfDir, os.ModeDir); err != nil {
+ return err
+ }
+
+ var f *os.File
+ f, err = os.CreateTemp(CNI.ConfDir, "ovnkube-")
+ if err != nil {
+ return err
+ }
+
+ if _, err := f.Write(newBytes); err != nil {
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+
+ return os.Rename(f.Name(), confFile)
+}
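+
+// The file written here is the single CNI config consumed by the container
+// runtime: it pins CNI version 0.4.0, names the network "ovn-kubernetes",
+// points "type" at the configured plugin, and forwards the CNI shim logging
+// settings.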
+
+// ParseNetConf parses config in NAD spec
+func ParseNetConf(bytes []byte) (*ovncnitypes.NetConf, error) {
+ var netconf *ovncnitypes.NetConf
+
+ confList, err := libcni.ConfListFromBytes(bytes)
+ if err == nil {
+ netconf, err = parseNetConfList(confList)
+ if err == nil {
+ if _, singleErr := parseNetConfSingle(bytes); singleErr == nil {
+ return nil, fmt.Errorf("CNI config cannot have both a plugin list and a single config")
+ }
+ }
+ } else {
+ netconf, err = parseNetConfSingle(bytes)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if netconf.Topology == "" {
+ // NAD of default network
+ netconf.Name = ovntypes.DefaultNetworkName
+ }
+
+ return netconf, nil
+}
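+
+// Illustrative usage (hypothetical variable names): given the raw bytes of a
+// net-attach-def spec.config, ParseNetConf returns the parsed NetConf, or
+// ErrorAttachDefNotOvnManaged when the NAD belongs to another CNI:
+//
+//	netconf, err := ParseNetConf([]byte(nad.Spec.Config))
+//	if errors.Is(err, ErrorAttachDefNotOvnManaged) {
+//		// not an OVN-managed network; skip it
+//	}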
+
+func parseNetConfSingle(bytes []byte) (*ovncnitypes.NetConf, error) {
+ netconf := &ovncnitypes.NetConf{MTU: Default.MTU}
+ err := json.Unmarshal(bytes, &netconf)
+ if err != nil {
+ return nil, err
+ }
+
+ // skip non-OVN NAD
+ if netconf.Type != "ovn-k8s-cni-overlay" {
+ return nil, ErrorAttachDefNotOvnManaged
+ }
+
+ err = ValidateNetConfNameFields(netconf)
+ if err != nil {
+ return nil, err
+ }
+
+ return netconf, nil
+}
+
+func parseNetConfList(confList *libcni.NetworkConfigList) (*ovncnitypes.NetConf, error) {
+ if len(confList.Plugins) > 1 {
+ return nil, ErrorChainingNotSupported
+ }
+
+ netconf := &ovncnitypes.NetConf{MTU: Default.MTU}
+ if err := json.Unmarshal(confList.Plugins[0].Bytes, netconf); err != nil {
+ return nil, err
+ }
+
+ // skip non-OVN NAD
+ if netconf.Type != "ovn-k8s-cni-overlay" {
+ return nil, ErrorAttachDefNotOvnManaged
+ }
+
+ netconf.Name = confList.Name
+ netconf.CNIVersion = confList.CNIVersion
+
+ if err := ValidateNetConfNameFields(netconf); err != nil {
+ return nil, err
+ }
+
+ return netconf, nil
+}
+
+func ValidateNetConfNameFields(netconf *ovncnitypes.NetConf) error {
+ if netconf.Topology != "" {
+ if netconf.NADName == "" {
+ return fmt.Errorf("missing NADName in secondary network netconf %s", netconf.Name)
+ }
+ // "ovn-kubernetes" network name is reserved for later
+ if netconf.Name == "" || netconf.Name == ovntypes.DefaultNetworkName || netconf.Name == "ovn-kubernetes" {
+ return fmt.Errorf("invalid name in in secondary network netconf (%s)", netconf.Name)
+ }
+ }
+
+ return nil
+}
+
+// ReadCNIConfig unmarshals a CNI JSON config into a NetConf structure
+func ReadCNIConfig(bytes []byte) (*ovncnitypes.NetConf, error) {
+ conf, err := ParseNetConf(bytes)
+ if err != nil {
+ return nil, err
+ }
+ if conf.RawPrevResult != nil {
+ if err := version.ParsePrevResult(&conf.NetConf); err != nil {
+ return nil, err
+ }
+ }
+ return conf, nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go
new file mode 100644
index 000000000..3b129624b
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go
@@ -0,0 +1,2714 @@
+package config
+
+import (
+ "flag"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/urfave/cli/v2"
+ gcfg "gopkg.in/gcfg.v1"
+ lumberjack "gopkg.in/natefinch/lumberjack.v2"
+ "k8s.io/apimachinery/pkg/api/validation"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/klog/v2"
+ kexec "k8s.io/utils/exec"
+ utilnet "k8s.io/utils/net"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+)
+
+// DefaultEncapPort is the default encapsulation UDP port number, used if not supplied
+const DefaultEncapPort = 6081
+
+const DefaultAPIServer = "http://localhost:8443"
+
+// Default IANA-assigned UDP port number for VXLAN
+const DefaultVXLANPort = 4789
+
+const DefaultDBTxnTimeout = time.Second * 100
+
+// The following are global config parameters that other modules may access directly
+var (
+ // Build information. Populated at build-time.
+ // commit ID used to build ovn-kubernetes
+ Commit = ""
+ // branch used to build ovn-kubernetes
+ Branch = ""
+ // ovn-kubernetes build user
+ BuildUser = ""
+ // ovn-kubernetes build date
+ BuildDate = ""
+ // ovn-kubernetes version, to be changed with every release
+ Version = "1.0.0"
+ // version of the go runtime used to compile ovn-kubernetes
+ GoVersion = runtime.Version()
+ // os and architecture used to build ovn-kubernetes
+ OSArch = fmt.Sprintf("%s %s", runtime.GOOS, runtime.GOARCH)
+
+ // ovn-kubernetes cni config file name
+ CNIConfFileName = "10-ovn-kubernetes.conf"
+
+ // Default holds parsed config file parameters and command-line overrides
+ Default = DefaultConfig{
+ MTU: 1400,
+ ConntrackZone: 64000,
+ EncapType: "geneve",
+ EncapIP: "",
+ EncapPort: DefaultEncapPort,
+ InactivityProbe: 100000, // in Milliseconds
+ OpenFlowProbe: 180, // in Seconds
+ OfctrlWaitBeforeClear: 0, // in Milliseconds
+ MonitorAll: true,
+ OVSDBTxnTimeout: DefaultDBTxnTimeout,
+ LFlowCacheEnable: true,
+ RawClusterSubnets: "10.128.0.0/14/23",
+ Zone: types.OvnDefaultZone,
+ RawUDNAllowedDefaultServices: "default/kubernetes,kube-system/kube-dns",
+ }
+
+ // Logging holds logging-related parsed config file parameters and command-line overrides
+ Logging = LoggingConfig{
+ File: "", // do not log to a file by default
+ CNIFile: "",
+ LibovsdbFile: "",
+ Level: 4,
+ LogFileMaxSize: 100, // Size in Megabytes
+ LogFileMaxBackups: 5,
+ LogFileMaxAge: 5, //days
+ ACLLoggingRateLimit: 20,
+ }
+
+ // Monitoring holds monitoring-related parsed config file parameters and command-line overrides
+ Monitoring = MonitoringConfig{
+ RawNetFlowTargets: "",
+ RawSFlowTargets: "",
+ RawIPFIXTargets: "",
+ }
+
+ // IPFIX holds IPFIX-related performance configuration options. It requires that the
+ // IPFIXTargets value of the Monitoring section contains at least one endpoint.
+ IPFIX = IPFIXConfig{
+ Sampling: 400,
+ CacheActiveTimeout: 60,
+ CacheMaxFlows: 0,
+ }
+
+ // CNI holds CNI-related parsed config file parameters and command-line overrides
+ CNI = CNIConfig{
+ ConfDir: "/etc/cni/net.d",
+ Plugin: "ovn-k8s-cni-overlay",
+ }
+
+ // Kubernetes holds Kubernetes-related parsed config file parameters and command-line overrides
+ Kubernetes = KubernetesConfig{
+ APIServer: DefaultAPIServer,
+ RawServiceCIDRs: "172.16.1.0/24",
+ OVNConfigNamespace: "ovn-kubernetes",
+ HostNetworkNamespace: "",
+ DisableRequestedChassis: false,
+ PlatformType: "",
+ DNSServiceNamespace: "kube-system",
+ DNSServiceName: "kube-dns",
+ // By default, use a short lifetime length for certificates to ensure that the automatic rotation works well,
+ // might revisit in the future to use a more sensible value
+ CertDuration: 10 * time.Minute,
+ }
+
+ // Metrics holds Prometheus metrics-related parameters.
+ Metrics MetricsConfig
+
+ // OVNKubernetesFeatureConfig holds OVN-Kubernetes feature enhancement config file parameters and command-line overrides
+ OVNKubernetesFeature = OVNKubernetesFeatureConfig{
+ EgressIPReachabiltyTotalTimeout: 1,
+ }
+
+ // OvnNorth holds northbound OVN database client and server authentication and location details
+ OvnNorth OvnAuthConfig
+
+ // OvnSouth holds southbound OVN database client and server authentication and location details
+ OvnSouth OvnAuthConfig
+
+ // Gateway holds node gateway-related parsed config file parameters and command-line overrides
+ Gateway = GatewayConfig{
+ V4JoinSubnet: "100.64.0.0/16",
+ V6JoinSubnet: "fd98::/64",
+ V4MasqueradeSubnet: "169.254.169.0/29",
+ V6MasqueradeSubnet: "fd69::/125",
+ MasqueradeIPs: MasqueradeIPsConfig{
+ V4OVNMasqueradeIP: net.ParseIP("169.254.169.1"),
+ V6OVNMasqueradeIP: net.ParseIP("fd69::1"),
+ V4HostMasqueradeIP: net.ParseIP("169.254.169.2"),
+ V6HostMasqueradeIP: net.ParseIP("fd69::2"),
+ V4HostETPLocalMasqueradeIP: net.ParseIP("169.254.169.3"),
+ V6HostETPLocalMasqueradeIP: net.ParseIP("fd69::3"),
+ V4DummyNextHopMasqueradeIP: net.ParseIP("169.254.169.4"),
+ V6DummyNextHopMasqueradeIP: net.ParseIP("fd69::4"),
+ V4OVNServiceHairpinMasqueradeIP: net.ParseIP("169.254.169.5"),
+ V6OVNServiceHairpinMasqueradeIP: net.ParseIP("fd69::5"),
+ },
+ }
+
+ // Set Leaderelection config values based on
+ // https://github.com/openshift/enhancements/blame/84e894ead7b188a1013556e0ba6973b8463995f1/CONVENTIONS.md#L183
+
+ // MasterHA holds master HA related config options.
+ MasterHA = HAConfig{
+ ElectionRetryPeriod: 26,
+ ElectionRenewDeadline: 107,
+ ElectionLeaseDuration: 137,
+ }
+
+ // ClusterMgrHA holds cluster manager HA related config options.
+ ClusterMgrHA = HAConfig{
+ ElectionRetryPeriod: 26,
+ ElectionRenewDeadline: 107,
+ ElectionLeaseDuration: 137,
+ }
+
+ // HybridOverlay holds hybrid overlay feature config options.
+ HybridOverlay = HybridOverlayConfig{
+ VXLANPort: DefaultVXLANPort,
+ }
+
+ // UnprivilegedMode allows ovnkube-node to run without SYS_ADMIN capability, by performing interface setup in the CNI plugin
+ UnprivilegedMode bool
+
+ // EnableMulticast enables multicast support between the pods within the same namespace
+ EnableMulticast bool
+
+ // IPv4Mode captures whether we are using IPv4 for OVN logical topology. (ie, single-stack IPv4 or dual-stack)
+ IPv4Mode bool
+
+ // IPv6Mode captures whether we are using IPv6 for OVN logical topology. (ie, single-stack IPv6 or dual-stack)
+ IPv6Mode bool
+
+ // OvnKubeNode holds ovnkube-node parsed config file parameters and command-line overrides
+ OvnKubeNode = OvnKubeNodeConfig{
+ Mode: types.NodeModeFull,
+ }
+
+ ClusterManager = ClusterManagerConfig{
+ V4TransitSwitchSubnet: "100.88.0.0/16",
+ V6TransitSwitchSubnet: "fd97::/64",
+ }
+)
+
+const (
+ kubeServiceAccountPath string = "/var/run/secrets/kubernetes.io/serviceaccount/"
+ kubeServiceAccountFileToken string = "token"
+ kubeServiceAccountFileCACert string = "ca.crt"
+)
+
+// DefaultConfig holds parsed config file parameters and command-line overrides
+type DefaultConfig struct {
+ // MTU value used for the overlay networks.
+ MTU int `gcfg:"mtu"`
+ // RoutableMTU is the maximum routable MTU between nodes, used to facilitate
+ // an MTU migration procedure where different nodes might be using different
+ // MTU values
+ RoutableMTU int `gcfg:"routable-mtu"`
+ // ConntrackZone affects only the gateway nodes, This value is used to track connections
+ // that are initiated from the pods so that the reverse connections go back to the pods.
+ // This represents the conntrack zone used for the conntrack flow rules.
+ ConntrackZone int `gcfg:"conntrack-zone"`
+ // HostMasqConntrackZone is an unexposed config with the value of ConntrackZone+1
+ HostMasqConntrackZone int
+ // OVNMasqConntrackZone is an unexposed config with the value of ConntrackZone+2
+ OVNMasqConntrackZone int
+ // HostNodePortCTZone is an unexposed config with the value of ConntrackZone+3
+ HostNodePortConntrackZone int
+ // ReassemblyConntrackZone is an unexposed config with the value of ConntrackZone+4
+ ReassemblyConntrackZone int
+ // EncapType value defines the encapsulation protocol to use to transmit packets between
+ // hypervisors. By default the value is 'geneve'
+ EncapType string `gcfg:"encap-type"`
+ // The IP address of the encapsulation endpoint. If not specified, the IP address the
+ // NodeName resolves to will be used
+ EncapIP string `gcfg:"encap-ip"`
+ // The UDP port of the encapsulation endpoint. If not specified, the default port
+ // of 6081 will be used
+ EncapPort uint `gcfg:"encap-port"`
+ // Maximum number of milliseconds of idle time on connection that
+ // ovn-controller waits before it will send a connection health probe.
+ InactivityProbe int `gcfg:"inactivity-probe"`
+ // Maximum number of seconds of idle time on the OpenFlow connection
+ // that ovn-controller will wait before it sends a connection health probe
+ OpenFlowProbe int `gcfg:"openflow-probe"`
+ // Maximum number of milliseconds that ovn-controller waits before clearing existing flows
+ // during start up, to make sure the initial flow compute is complete and avoid data plane
+ // interruptions.
+ OfctrlWaitBeforeClear int `gcfg:"ofctrl-wait-before-clear"`
+ // The boolean flag indicates if ovn-controller should monitor all data in SB DB
+ // instead of conditionally monitoring the data relevant to this node only.
+ // By default monitor-all is enabled.
+ MonitorAll bool `gcfg:"monitor-all"`
+ // OVSDBTxnTimeout is the timeout for db transaction, may be useful to increase for high-scale clusters.
+ // default value is 100 seconds.
+ OVSDBTxnTimeout time.Duration `gcfg:"db-txn-timeout"`
+ // The boolean flag indicates if ovn-controller should
+ // enable/disable the logical flow in-memory cache it uses
+ // when processing Southbound database logical flow changes.
+ // By default caching is enabled.
+ LFlowCacheEnable bool `gcfg:"enable-lflow-cache"`
+ // Maximum number of logical flow cache entries ovn-controller
+ // may create when the logical flow cache is enabled. By
+ // default the size of the cache is unlimited.
+ LFlowCacheLimit uint `gcfg:"lflow-cache-limit"`
+ // Maximum size, in KB, of the logical flow cache that ovn-controller
+ // may create when the logical flow cache is enabled. By
+ // default the size of the cache is unlimited.
+ LFlowCacheLimitKb uint `gcfg:"lflow-cache-limit-kb"`
+ // RawClusterSubnets holds the unparsed cluster subnets. Should only be
+ // used inside config module.
+ RawClusterSubnets string `gcfg:"cluster-subnets"`
+ // ClusterSubnets holds parsed cluster subnet entries and may be used
+ // outside the config module.
+ ClusterSubnets []CIDRNetworkEntry
+ // EnableUDPAggregation is true if ovn-kubernetes should use UDP Generic Receive
+ // Offload forwarding to improve the performance of containers that transmit lots
+ // of small UDP packets by allowing them to be aggregated before passing through
+ // the kernel network stack. This requires a new-enough kernel (5.15 or RHEL 8.5).
+ EnableUDPAggregation bool `gcfg:"enable-udp-aggregation"`
+
+ // Zone name to which ovnkube-node/ovnkube-controller belongs to
+ Zone string `gcfg:"zone"`
+
+ // RawUDNAllowedDefaultServices holds the unparsed UDNAllowedDefaultServices. Should only be
+ // used inside config module.
+ RawUDNAllowedDefaultServices string `gcfg:"udn-allowed-default-services"`
+
+ // UDNAllowedDefaultServices holds a list of namespaced names of
+ // default cluster network services accessible from primary user-defined networks
+ UDNAllowedDefaultServices []string
+}
+
+// LoggingConfig holds logging-related parsed config file parameters and command-line overrides
+type LoggingConfig struct {
+ // File is the path of the file to log to
+ File string `gcfg:"logfile"`
+ // CNIFile is the path of the file for the CNI shim to log to
+ CNIFile string `gcfg:"cnilogfile"`
+ // LibovsdbFile is the path of the file for the libovsdb client to log to
+ LibovsdbFile string `gcfg:"libovsdblogfile"`
+ // Level is the logging verbosity level
+ Level int `gcfg:"loglevel"`
+ // LogFileMaxSize is the maximum size in megabytes of the logfile
+ // before it gets rolled.
+ LogFileMaxSize int `gcfg:"logfile-maxsize"`
+ // LogFileMaxBackups represents the maximum number of old log files to retain
+ LogFileMaxBackups int `gcfg:"logfile-maxbackups"`
+ // LogFileMaxAge represents the maximum number of days to retain old log files
+ LogFileMaxAge int `gcfg:"logfile-maxage"`
+ // ACLLoggingRateLimit is the maximum number of ACL log messages per second before further messages are dropped
+ ACLLoggingRateLimit int `gcfg:"acl-logging-rate-limit"`
+}
+
+// MonitoringConfig holds monitoring-related parsed config file parameters and command-line overrides
+type MonitoringConfig struct {
+ // RawNetFlowTargets holds the unparsed NetFlow targets. Should only be used inside the config module.
+ RawNetFlowTargets string `gcfg:"netflow-targets"`
+ // RawSFlowTargets holds the unparsed SFlow targets. Should only be used inside the config module.
+ RawSFlowTargets string `gcfg:"sflow-targets"`
+ // RawIPFIXTargets holds the unparsed IPFIX targets. Should only be used inside the config module.
+ RawIPFIXTargets string `gcfg:"ipfix-targets"`
+ // NetFlowTargets holds the parsed NetFlow targets and may be used outside the config module.
+ NetFlowTargets []HostPort
+ // SFlowTargets holds the parsed SFlow targets and may be used outside the config module.
+ SFlowTargets []HostPort
+ // IPFIXTargets holds the parsed IPFIX targets and may be used outside the config module.
+ IPFIXTargets []HostPort
+}
+
+// IPFIXConfig holds IPFIX-related performance configuration options. It requires that the ipfix-targets
+// value of the [monitoring] section contains at least one endpoint.
+type IPFIXConfig struct {
+ // Sampling is an optional integer in range 1 to 4,294,967,295. It holds the rate at which
+ // packets should be sampled and sent to each target collector. If not specified, defaults to
+ // 400, which means one out of 400 packets, on average, will be sent to each target collector.
+ Sampling uint `gcfg:"sampling"`
+ // CacheActiveTimeout is an optional integer in range 0 to 4,200. It holds the maximum period in
+ // seconds for which an IPFIX flow record is cached and aggregated before being sent. If not
+ // specified, defaults to 60. If 0, caching is disabled.
+ CacheActiveTimeout uint `gcfg:"cache-active-timeout"`
+ // CacheMaxFlows is an optional integer in range 0 to 4,294,967,295. It holds the maximum number
+ // of IPFIX flow records that can be cached at a time. If not specified in OVS, defaults to 0
+ // (however, this controller defaults it to 60). If 0, caching is disabled.
+ CacheMaxFlows uint `gcfg:"cache-max-flows"`
+}
+
+// CNIConfig holds CNI-related parsed config file parameters and command-line overrides
+type CNIConfig struct {
+ // ConfDir specifies the CNI config directory in which to write the overlay CNI config file
+ ConfDir string `gcfg:"conf-dir"`
+ // Plugin specifies the name of the CNI plugin
+ Plugin string `gcfg:"plugin"`
+}
+
+// KubernetesConfig holds Kubernetes-related parsed config file parameters and command-line overrides
+type KubernetesConfig struct {
+ BootstrapKubeconfig string `gcfg:"bootstrap-kubeconfig"`
+ CertDir string `gcfg:"cert-dir"`
+ CertDuration time.Duration `gcfg:"cert-duration"`
+ Kubeconfig string `gcfg:"kubeconfig"`
+ CACert string `gcfg:"cacert"`
+ CAData []byte
+ APIServer string `gcfg:"apiserver"`
+ Token string `gcfg:"token"`
+ TokenFile string `gcfg:"tokenFile"`
+ CompatServiceCIDR string `gcfg:"service-cidr"`
+ RawServiceCIDRs string `gcfg:"service-cidrs"`
+ ServiceCIDRs []*net.IPNet
+ OVNConfigNamespace string `gcfg:"ovn-config-namespace"`
+ OVNEmptyLbEvents bool `gcfg:"ovn-empty-lb-events"`
+ PodIP string `gcfg:"pod-ip"` // UNUSED
+ RawNoHostSubnetNodes string `gcfg:"no-hostsubnet-nodes"`
+ NoHostSubnetNodes labels.Selector
+ HostNetworkNamespace string `gcfg:"host-network-namespace"`
+ DisableRequestedChassis bool `gcfg:"disable-requestedchassis"`
+ PlatformType string `gcfg:"platform-type"`
+ HealthzBindAddress string `gcfg:"healthz-bind-address"`
+
+ // CompatMetricsBindAddress is overridden by the corresponding option in MetricsConfig
+ CompatMetricsBindAddress string `gcfg:"metrics-bind-address"`
+ // CompatOVNMetricsBindAddress is overridden by the corresponding option in MetricsConfig
+ CompatOVNMetricsBindAddress string `gcfg:"ovn-metrics-bind-address"`
+ // CompatMetricsEnablePprof is overridden by the corresponding option in MetricsConfig
+ CompatMetricsEnablePprof bool `gcfg:"metrics-enable-pprof"`
+
+ DNSServiceNamespace string `gcfg:"dns-service-namespace"`
+ DNSServiceName string `gcfg:"dns-service-name"`
+}
+
+// MetricsConfig holds Prometheus metrics-related parameters.
+type MetricsConfig struct {
+ BindAddress string `gcfg:"bind-address"`
+ OVNMetricsBindAddress string `gcfg:"ovn-metrics-bind-address"`
+ ExportOVSMetrics bool `gcfg:"export-ovs-metrics"`
+ EnablePprof bool `gcfg:"enable-pprof"`
+ NodeServerPrivKey string `gcfg:"node-server-privkey"`
+ NodeServerCert string `gcfg:"node-server-cert"`
+ // EnableConfigDuration holds the boolean flag to enable OVN-Kubernetes master to monitor OVN-Kubernetes master
+ // configuration duration and optionally, its application to all nodes
+ EnableConfigDuration bool `gcfg:"enable-config-duration"`
+ EnableScaleMetrics bool `gcfg:"enable-scale-metrics"`
+}
+
+// OVNKubernetesFeatureConfig holds OVN-Kubernetes feature enhancement config file parameters and command-line overrides
+type OVNKubernetesFeatureConfig struct {
+ // Admin Network Policy feature is enabled
+ EnableAdminNetworkPolicy bool `gcfg:"enable-admin-network-policy"`
+ // EgressIP feature is enabled
+ EnableEgressIP bool `gcfg:"enable-egress-ip"`
+ // EgressIP node reachability total timeout in seconds
+ EgressIPReachabiltyTotalTimeout int `gcfg:"egressip-reachability-total-timeout"`
+ EnableEgressFirewall bool `gcfg:"enable-egress-firewall"`
+ EnableEgressQoS bool `gcfg:"enable-egress-qos"`
+ EnableEgressService bool `gcfg:"enable-egress-service"`
+ EgressIPNodeHealthCheckPort int `gcfg:"egressip-node-healthcheck-port"`
+ EnableMultiNetwork bool `gcfg:"enable-multi-network"`
+ EnableNetworkSegmentation bool `gcfg:"enable-network-segmentation"`
+ EnableMultiNetworkPolicy bool `gcfg:"enable-multi-networkpolicy"`
+ EnableStatelessNetPol bool `gcfg:"enable-stateless-netpol"`
+ EnableInterconnect bool `gcfg:"enable-interconnect"`
+ EnableMultiExternalGateway bool `gcfg:"enable-multi-external-gateway"`
+ EnablePersistentIPs bool `gcfg:"enable-persistent-ips"`
+ EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"`
+ EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"`
+ EnableObservability bool `gcfg:"enable-observability"`
+}
+
+// GatewayMode holds the node gateway mode
+type GatewayMode string
+
+const (
+ // GatewayModeDisabled indicates the node gateway mode is disabled
+ GatewayModeDisabled GatewayMode = ""
+ // GatewayModeShared indicates OVN shares a gateway interface with the node
+ GatewayModeShared GatewayMode = "shared"
+ // GatewayModeLocal indicates OVN creates a local NAT-ed interface for the gateway
+ GatewayModeLocal GatewayMode = "local"
+)
+
+// GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides
+type GatewayConfig struct {
+ // Mode is the gateway mode; it may be either empty (disabled), "shared", or "local"
+ Mode GatewayMode `gcfg:"mode"`
+ // Interface is the network interface to use for the gateway in "shared" mode
+ Interface string `gcfg:"interface"`
+ // EgressGWInterface is the optional network interface to use for external gateway pod traffic.
+ EgressGWInterface string `gcfg:"egw-interface"`
+ // NextHop is the gateway IP address of Interface; will be autodetected if not given
+ NextHop string `gcfg:"next-hop"`
+ // VLANID is the optional VLAN tag to apply to gateway traffic for "shared" mode
+ VLANID uint `gcfg:"vlan-id"`
+ // NodeportEnable sets whether to provide Kubernetes NodePort service or not
+ NodeportEnable bool `gcfg:"nodeport"`
+ // DisableSNATMultipleGws sets whether to disable SNAT of egress traffic in namespaces annotated with routing-external-gws
+ DisableSNATMultipleGWs bool `gcfg:"disable-snat-multiple-gws"`
+ // V4JoinSubnet to be used in the cluster
+ V4JoinSubnet string `gcfg:"v4-join-subnet"`
+ // V6JoinSubnet to be used in the cluster
+ V6JoinSubnet string `gcfg:"v6-join-subnet"`
+ // V4MasqueradeSubnet to be used in the cluster
+ V4MasqueradeSubnet string `gcfg:"v4-masquerade-subnet"`
+ // V6MasqueradeSubnet to be used in the cluster
+ V6MasqueradeSubnet string `gcfg:"v6-masquerade-subnet"`
+ // MasqueradeIps to be allocated from the masquerade subnets to enable host to service traffic
+ MasqueradeIPs MasqueradeIPsConfig
+
+ // DisablePacketMTUCheck disables adding openflow flows to check packets too large to be
+ // delivered to OVN due to pod MTU being lower than NIC MTU. Disabling this check results in
+ // southbound packets exceeding the pod MTU being dropped by OVN. With this check enabled,
+ // ICMP "needs frag"/"packet too big" messages are sent back to the original client
+ DisablePacketMTUCheck bool `gcfg:"disable-pkt-mtu-check"`
+ // RouterSubnet is the subnet to be used for the GR external port. Auto-detected if not given.
+ // Must match the kube node IP address. Currently valid for DPU only.
+ RouterSubnet string `gcfg:"router-subnet"`
+ // SingleNode indicates the cluster has only one node
+ SingleNode bool `gcfg:"single-node"`
+ // DisableForwarding (enabled by default) controls if forwarding is allowed on OVNK controlled interfaces
+ DisableForwarding bool `gcfg:"disable-forwarding"`
+ // AllowNoUplink (disabled by default) controls if the external gateway bridge without an uplink port is allowed in local gateway mode.
+ AllowNoUplink bool `gcfg:"allow-no-uplink"`
+}
+
+// OvnAuthConfig holds client authentication and location details for
+// an OVN database (either northbound or southbound)
+type OvnAuthConfig struct {
+ // e.g: "ssl:192.168.1.2:6641,ssl:192.168.1.2:6642"
+ Address string `gcfg:"address"`
+ PrivKey string `gcfg:"client-privkey"`
+ Cert string `gcfg:"client-cert"`
+ CACert string `gcfg:"client-cacert"`
+ CertCommonName string `gcfg:"cert-common-name"`
+ Scheme OvnDBScheme
+ ElectionTimer uint `gcfg:"election-timer"`
+ northbound bool
+
+ exec kexec.Interface
+}
+
+// HAConfig holds high-availability (leader election) configuration.
+type HAConfig struct {
+ ElectionLeaseDuration int `gcfg:"election-lease-duration"`
+ ElectionRenewDeadline int `gcfg:"election-renew-deadline"`
+ ElectionRetryPeriod int `gcfg:"election-retry-period"`
+}
+
+// HybridOverlayConfig holds configuration for the hybrid overlay feature.
+type HybridOverlayConfig struct {
+ // Enabled indicates whether hybrid overlay features are enabled or not.
+ Enabled bool `gcfg:"enabled"`
+ // RawClusterSubnets holds the unparsed hybrid overlay cluster subnets.
+ // Should only be used inside config module.
+ RawClusterSubnets string `gcfg:"cluster-subnets"`
+ // ClusterSubnets holds parsed hybrid overlay cluster subnet entries and
+ // may be used outside the config module.
+ ClusterSubnets []CIDRNetworkEntry
+ // VXLANPort holds the VXLAN tunnel UDP port number.
+ VXLANPort uint `gcfg:"hybrid-overlay-vxlan-port"`
+}
+
+// OvnKubeNodeConfig holds ovnkube-node configurations
+type OvnKubeNodeConfig struct {
+ Mode string `gcfg:"mode"`
+ DPResourceDeviceIdsMap map[string][]string
+ MgmtPortNetdev string `gcfg:"mgmt-port-netdev"`
+ MgmtPortDPResourceName string `gcfg:"mgmt-port-dp-resource-name"`
+}
+
+// ClusterManagerConfig holds configuration for ovnkube-cluster-manager
+type ClusterManagerConfig struct {
+ // V4TransitSwitchSubnet to be used in the cluster for interconnecting multiple zones
+ V4TransitSwitchSubnet string `gcfg:"v4-transit-switch-subnet"`
+ // V6TransitSwitchSubnet to be used in the cluster for interconnecting multiple zones
+ V6TransitSwitchSubnet string `gcfg:"v6-transit-switch-subnet"`
+}
+
+// OvnDBScheme describes the OVN database connection transport method
+type OvnDBScheme string
+
+const (
+ // OvnDBSchemeSSL specifies SSL as the OVN database transport method
+ OvnDBSchemeSSL OvnDBScheme = "ssl"
+ // OvnDBSchemeTCP specifies TCP as the OVN database transport method
+ OvnDBSchemeTCP OvnDBScheme = "tcp"
+ // OvnDBSchemeUnix specifies Unix domain sockets as the OVN database transport method
+ OvnDBSchemeUnix OvnDBScheme = "unix"
+)
+
+// Config is used to read the structured config file and to cache config in testcases
+type config struct {
+ Default DefaultConfig
+ Logging LoggingConfig
+ Monitoring MonitoringConfig
+ IPFIX IPFIXConfig
+ CNI CNIConfig
+ OVNKubernetesFeature OVNKubernetesFeatureConfig
+ Kubernetes KubernetesConfig
+ Metrics MetricsConfig
+ OvnNorth OvnAuthConfig
+ OvnSouth OvnAuthConfig
+ Gateway GatewayConfig
+ MasterHA HAConfig
+ ClusterMgrHA HAConfig
+ HybridOverlay HybridOverlayConfig
+ OvnKubeNode OvnKubeNodeConfig
+ ClusterManager ClusterManagerConfig
+}
+
+var (
+ savedDefault DefaultConfig
+ savedLogging LoggingConfig
+ savedMonitoring MonitoringConfig
+ savedIPFIX IPFIXConfig
+ savedCNI CNIConfig
+ savedOVNKubernetesFeature OVNKubernetesFeatureConfig
+ savedKubernetes KubernetesConfig
+ savedMetrics MetricsConfig
+ savedOvnNorth OvnAuthConfig
+ savedOvnSouth OvnAuthConfig
+ savedGateway GatewayConfig
+ savedMasterHA HAConfig
+ savedClusterMgrHA HAConfig
+ savedHybridOverlay HybridOverlayConfig
+ savedOvnKubeNode OvnKubeNodeConfig
+ savedClusterManager ClusterManagerConfig
+
+ // legacy service-cluster-ip-range CLI option
+ serviceClusterIPRange string
+ // legacy cluster-subnet CLI option
+ clusterSubnet string
+ // legacy init-gateways CLI option
+ initGateways bool
+ // legacy gateway-local CLI option
+ gatewayLocal bool
+ // legacy disable-ovn-iface-id-ver CLI option
+ disableOVNIfaceIDVer bool
+)
+
+func init() {
+ // Cache original default config values
+ savedDefault = Default
+ savedLogging = Logging
+ savedMonitoring = Monitoring
+ savedIPFIX = IPFIX
+ savedCNI = CNI
+ savedOVNKubernetesFeature = OVNKubernetesFeature
+ savedKubernetes = Kubernetes
+ savedMetrics = Metrics
+ savedOvnNorth = OvnNorth
+ savedOvnSouth = OvnSouth
+ savedGateway = Gateway
+ savedMasterHA = MasterHA
+ savedClusterMgrHA = ClusterMgrHA
+ savedHybridOverlay = HybridOverlay
+ savedOvnKubeNode = OvnKubeNode
+ savedClusterManager = ClusterManager
+ cli.VersionPrinter = func(c *cli.Context) {
+ fmt.Printf("Version: %s\n", Version)
+ fmt.Printf("Git commit: %s\n", Commit)
+ fmt.Printf("Git branch: %s\n", Branch)
+ fmt.Printf("Go version: %s\n", GoVersion)
+ fmt.Printf("Build date: %s\n", BuildDate)
+ fmt.Printf("OS/Arch: %s\n", OSArch)
+ }
+ Flags = GetFlags([]cli.Flag{})
+}
+
+// PrepareTestConfig restores default config values. Used by testcases to
+// provide a pristine environment between tests.
+func PrepareTestConfig() error {
+ Default = savedDefault
+ Logging = savedLogging
+ Logging.Level = 5
+ Monitoring = savedMonitoring
+ IPFIX = savedIPFIX
+ CNI = savedCNI
+ OVNKubernetesFeature = savedOVNKubernetesFeature
+ Kubernetes = savedKubernetes
+ Metrics = savedMetrics
+ OvnNorth = savedOvnNorth
+ OvnSouth = savedOvnSouth
+ Gateway = savedGateway
+ MasterHA = savedMasterHA
+ HybridOverlay = savedHybridOverlay
+ OvnKubeNode = savedOvnKubeNode
+ ClusterManager = savedClusterManager
+ Kubernetes.DisableRequestedChassis = false
+ EnableMulticast = false
+ Default.OVSDBTxnTimeout = 5 * time.Second
+
+ if err := completeConfig(); err != nil {
+ return err
+ }
+
+ // Don't pick up defaults from the environment
+ os.Unsetenv("KUBECONFIG")
+ os.Unsetenv("K8S_CACERT")
+ os.Unsetenv("K8S_APISERVER")
+ os.Unsetenv("K8S_TOKEN")
+ os.Unsetenv("K8S_TOKEN_FILE")
+
+ return nil
+}
+
+// overrideFields copies the 'gcfg'-tagged fields of struct 'src' into the corresponding
+// fields of struct 'dst', skipping any field whose value in 'src' equals the corresponding
+// field in 'defaults'. This function should be called with pointers to structs.
+func overrideFields(dst, src, defaults interface{}) error {
+ dstStruct := reflect.ValueOf(dst).Elem()
+ srcStruct := reflect.ValueOf(src).Elem()
+ if dstStruct.Kind() != srcStruct.Kind() || dstStruct.Kind() != reflect.Struct {
+ return fmt.Errorf("mismatched value types")
+ }
+ if dstStruct.NumField() != srcStruct.NumField() {
+ return fmt.Errorf("mismatched struct types")
+ }
+
+ var defStruct reflect.Value
+ if defaults != nil {
+ defStruct = reflect.ValueOf(defaults).Elem()
+ }
+ // Iterate over each field in dst/src Type so we can get the tags,
+ // and use the field name to retrieve the field's actual value from
+ // the dst/src instance
+ var handled bool
+ dstType := reflect.TypeOf(dst).Elem()
+ for i := 0; i < dstType.NumField(); i++ {
+ structField := dstType.Field(i)
+ // Ignore private internal fields; we only care about overriding
+ // 'gcfg' tagged fields read from CLI or the config file
+ if _, ok := structField.Tag.Lookup("gcfg"); !ok {
+ continue
+ }
+ handled = true
+
+ dstField := dstStruct.FieldByName(structField.Name)
+ srcField := srcStruct.FieldByName(structField.Name)
+ var dv reflect.Value
+ if defStruct.IsValid() {
+ dv = defStruct.FieldByName(structField.Name)
+ }
+ if !dstField.IsValid() || !srcField.IsValid() {
+ return fmt.Errorf("invalid struct %q field %q", dstType.Name(), structField.Name)
+ }
+ if dstField.Kind() != srcField.Kind() {
+ return fmt.Errorf("mismatched struct %q fields %q", dstType.Name(), structField.Name)
+ }
+ if dv.IsValid() && reflect.DeepEqual(dv.Interface(), srcField.Interface()) {
+ continue
+ }
+ dstField.Set(srcField)
+ }
+ if !handled {
+ // No tags found in the struct so we don't know how to override
+ return fmt.Errorf("failed to find 'gcfg' tags in struct %q", dstType.Name())
+ }
+
+ return nil
+}
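+
+// Illustrative call (assumed pattern, not shown in this excerpt): merging CLI
+// flag values over the already-parsed config, skipping fields still at their
+// defaults:
+//
+//	_ = overrideFields(&Default, &cliConfig.Default, &savedDefault)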
+
+var cliConfig config
+
+// CommonFlags capture general options.
+var CommonFlags = []cli.Flag{
+ // Mode flags
+ &cli.StringFlag{
+ Name: "init-master",
+ Usage: "initialize master (both cluster-manager and ovnkube-controller), requires the hostname as argument",
+ },
+ &cli.StringFlag{
+ Name: "init-cluster-manager",
+ Usage: "initialize cluster manager (but not ovnkube-controller), requires the hostname as argument",
+ },
+ &cli.StringFlag{
+ Name: "init-ovnkube-controller",
+ Usage: "initialize ovnkube-controller (but not cluster-manager), requires the hostname as argument",
+ },
+ &cli.StringFlag{
+ Name: "init-node",
+ Usage: "initialize node, requires the name that node is registered with in kubernetes cluster",
+ },
+ &cli.StringFlag{
+ Name: "cleanup-node",
+ Usage: "cleanup node, requires the name that node is registered with in kubernetes cluster",
+ },
+ &cli.StringFlag{
+ Name: "pidfile",
+ Usage: "Name of file that will hold the ovnkube pid (optional)",
+ },
+ &cli.StringFlag{
+ Name: "config-file",
+ Usage: "configuration file path (default: /etc/openvswitch/ovn_k8s.conf)",
+ //Value: "/etc/openvswitch/ovn_k8s.conf",
+ },
+ &cli.IntFlag{
+ Name: "mtu",
+ Usage: "MTU value used for the overlay networks (default: 1400)",
+ Destination: &cliConfig.Default.MTU,
+ Value: Default.MTU,
+ },
+ &cli.IntFlag{
+ Name: "routable-mtu",
+ Usage: "Maximum routable MTU between nodes, used to facilitate an MTU migration procedure where different nodes might be using different MTU values",
+ Destination: &cliConfig.Default.RoutableMTU,
+ },
+ &cli.IntFlag{
+ Name: "conntrack-zone",
+ Usage: "For gateway nodes, the conntrack zone used for conntrack flow rules (default: 64000)",
+ Destination: &cliConfig.Default.ConntrackZone,
+ Value: Default.ConntrackZone,
+ },
+ &cli.StringFlag{
+ Name: "encap-type",
+ Usage: "The encapsulation protocol to use to transmit packets between hypervisors (default: geneve)",
+ Destination: &cliConfig.Default.EncapType,
+ Value: Default.EncapType,
+ },
+ &cli.StringFlag{
+ Name: "encap-ip",
+ Usage: "The IP address of the encapsulation endpoint (default: Node IP address resolved from Node hostname)",
+ Destination: &cliConfig.Default.EncapIP,
+ },
+ &cli.UintFlag{
+ Name: "encap-port",
+ Usage: "The UDP port used by the encapsulation endpoint (default: 6081)",
+ Destination: &cliConfig.Default.EncapPort,
+ Value: Default.EncapPort,
+ },
+ &cli.IntFlag{
+ Name: "inactivity-probe",
+ Usage: "Maximum number of milliseconds of idle time on " +
+ "connection for ovn-controller before it sends a inactivity probe",
+ Destination: &cliConfig.Default.InactivityProbe,
+ Value: Default.InactivityProbe,
+ },
+ &cli.IntFlag{
+ Name: "openflow-probe",
+ Usage: "Maximum number of seconds of idle time on the openflow " +
+ "connection for ovn-controller before it sends a inactivity probe",
+ Destination: &cliConfig.Default.OpenFlowProbe,
+ Value: Default.OpenFlowProbe,
+ },
+ &cli.IntFlag{
+ Name: "ofctrl-wait-before-clear",
+ Usage: "Maximum number of milliseconds that ovn-controller waits before " +
+ "clearing existing flows during start up, to make sure the initial flow " +
+ "compute is complete and avoid data plane interruptions.",
+ Destination: &cliConfig.Default.OfctrlWaitBeforeClear,
+ Value: Default.OfctrlWaitBeforeClear,
+ },
+ &cli.BoolFlag{
+ Name: "monitor-all",
+ Usage: "Enable monitoring all data from SB DB instead of conditionally " +
+ "monitoring the data relevant to this node only. " +
+ "By default it is enabled.",
+ Destination: &cliConfig.Default.MonitorAll,
+ Value: Default.MonitorAll,
+ },
+ &cli.DurationFlag{
+ Name: "db-txn-timeout",
+ Usage: "OVSDBTxnTimeout is the timeout for db transaction in seconds, " +
+ "may be useful to increase for high-scale clusters. default value is 60 seconds.",
+ Destination: &cliConfig.Default.OVSDBTxnTimeout,
+ Value: Default.OVSDBTxnTimeout,
+ },
+ &cli.BoolFlag{
+ Name: "enable-lflow-cache",
+ Usage: "Enable the logical flow in-memory cache it uses " +
+ "when processing Southbound database logical flow changes. " +
+ "By default caching is enabled.",
+ Destination: &cliConfig.Default.LFlowCacheEnable,
+ Value: Default.LFlowCacheEnable,
+ },
+ &cli.UintFlag{
+ Name: "lflow-cache-limit",
+ Usage: "Maximum number of logical flow cache entries ovn-controller " +
+ "may create when the logical flow cache is enabled. By " +
+ "default the size of the cache is unlimited.",
+ Destination: &cliConfig.Default.LFlowCacheLimit,
+ Value: Default.LFlowCacheLimit,
+ },
+ &cli.UintFlag{
+ Name: "lflow-cache-limit-kb",
+ Usage: "Maximum size of the logical flow cache ovn-controller " +
+ "may create when the logical flow cache is enabled. By " +
+ "default the size of the cache is unlimited.",
+ Destination: &cliConfig.Default.LFlowCacheLimitKb,
+ Value: Default.LFlowCacheLimitKb,
+ },
+ &cli.StringFlag{
+ Name: "cluster-subnet",
+ Usage: "Deprecated alias for cluster-subnets.",
+ Destination: &clusterSubnet,
+ },
+ &cli.StringFlag{
+ Name: "cluster-subnets",
+ Value: Default.RawClusterSubnets,
+ Usage: "A comma separated set of IP subnets and the associated " +
+ "hostsubnet prefix lengths to use for the cluster (eg, \"10.128.0.0/14/23,10.0.0.0/14/23\"). " +
+ "Each entry is given in the form [IP address/prefix-length/hostsubnet-prefix-length] " +
+ "and cannot overlap with other entries. The hostsubnet-prefix-length " +
+ "defines how large a subnet is given to each node and may be different " +
+ "for each entry. For IPv6 subnets, it must be 64 (and does not need to " +
+ "be explicitly specified). For IPv4 subnets an explicit " +
+ "hostsubnet-prefix should be specified, but for backward compatibility " +
+ "it defaults to 24 if unspecified.",
+ Destination: &cliConfig.Default.RawClusterSubnets,
+ },
+ &cli.BoolFlag{
+ Name: "unprivileged-mode",
+ Usage: "Run ovnkube-node container in unprivileged mode. Valid only with --init-node option.",
+ Destination: &UnprivilegedMode,
+ },
+ &cli.BoolFlag{
+ Name: "enable-multicast",
+ Usage: "Adds multicast support. Valid only with --init-master option.",
+ Destination: &EnableMulticast,
+ },
+ // Logging options
+ &cli.IntFlag{
+ Name: "loglevel",
+ Usage: "log verbosity and level: info, warn, fatal, error are always printed no matter the log level. Use 5 for debug (default: 4)",
+ Destination: &cliConfig.Logging.Level,
+ Value: Logging.Level,
+ },
+ &cli.StringFlag{
+ Name: "logfile",
+ Usage: "path of a file to direct log output to",
+ Destination: &cliConfig.Logging.File,
+ },
+ &cli.StringFlag{
+ Name: "cnilogfile",
+ Usage: "path of a file to direct log from cni shim to output to (default: /var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log)",
+ Destination: &cliConfig.Logging.CNIFile,
+ Value: "/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log",
+ },
+ &cli.StringFlag{
+ Name: "libovsdblogfile",
+ Usage: "path of a file to direct log from libovsdb client to output to (default is to use same as --logfile)",
+ Destination: &cliConfig.Logging.LibovsdbFile,
+ },
+ // Logfile rotation parameters
+ &cli.IntFlag{
+ Name: "logfile-maxsize",
+ Usage: "Maximum size in bytes of the log file before it gets rolled",
+ Destination: &cliConfig.Logging.LogFileMaxSize,
+ Value: Logging.LogFileMaxSize,
+ },
+ &cli.IntFlag{
+ Name: "logfile-maxbackups",
+ Usage: "Maximum number of old log files to retain",
+ Destination: &cliConfig.Logging.LogFileMaxBackups,
+ Value: Logging.LogFileMaxBackups,
+ },
+ &cli.IntFlag{
+ Name: "logfile-maxage",
+ Usage: "Maximum number of days to retain old log files",
+ Destination: &cliConfig.Logging.LogFileMaxAge,
+ Value: Logging.LogFileMaxAge,
+ },
+ &cli.IntFlag{
+ Name: "acl-logging-rate-limit",
+ Usage: "The largest number of messages per second that gets logged before drop (default 20)",
+ Destination: &cliConfig.Logging.ACLLoggingRateLimit,
+ Value: 20,
+ },
+ &cli.StringFlag{
+ Name: "zone",
+ Usage: "zone name to which ovnkube-node/ovnkube-controller belongs to",
+ Value: Default.Zone,
+ Destination: &cliConfig.Default.Zone,
+ },
+ &cli.StringFlag{
+ Name: "udn-allowed-default-services",
+ Usage: "a list of namespaced names of default cluster network services accessible from primary" +
+ "user-defined networks. If not specified defaults to [\"default/kubernetes\", \"kube-system/kube-dns\"]." +
+ "Only used when enable-network-segmentation is set",
+ Value: Default.RawUDNAllowedDefaultServices,
+ Destination: &cliConfig.Default.RawUDNAllowedDefaultServices,
+ },
+}
+
+// MonitoringFlags capture monitoring-related options
+var MonitoringFlags = []cli.Flag{
+ // Monitoring options
+ &cli.StringFlag{
+ Name: "netflow-targets",
+ Value: Monitoring.RawNetFlowTargets,
+ Usage: "A comma separated set of NetFlow collectors to export flow data (eg, \"10.128.0.150:2056,10.0.0.151:2056\")." +
+ "Each entry is given in the form [IP address:port] or [:port]. If only port is provided, it uses the Node IP",
+ Destination: &cliConfig.Monitoring.RawNetFlowTargets,
+ },
+ &cli.StringFlag{
+ Name: "sflow-targets",
+ Value: Monitoring.RawSFlowTargets,
+ Usage: "A comma separated set of SFlow collectors to export flow data (eg, \"10.128.0.150:6343,10.0.0.151:6343\")." +
+ "Each entry is given in the form [IP address:port] or [:port]. If only port is provided, it uses the Node IP",
+ Destination: &cliConfig.Monitoring.RawSFlowTargets,
+ },
+ &cli.StringFlag{
+ Name: "ipfix-targets",
+ Value: Monitoring.RawIPFIXTargets,
+ Usage: "A comma separated set of IPFIX collectors to export flow data (eg, \"10.128.0.150:2055,10.0.0.151:2055\")." +
+ "Each entry is given in the form [IP address:port] or [:port]. If only port is provided, it uses the Node IP",
+ Destination: &cliConfig.Monitoring.RawIPFIXTargets,
+ },
+}
+
+// IPFIXFlags capture IPFIX-related options
+var IPFIXFlags = []cli.Flag{
+ &cli.UintFlag{
+ Name: "ipfix-sampling",
+ Usage: "Rate at which packets should be sampled and sent to each target collector (default: 400)",
+ Destination: &cliConfig.IPFIX.Sampling,
+ Value: IPFIX.Sampling,
+ },
+ &cli.UintFlag{
+ Name: "ipfix-cache-max-flows",
+ Usage: "Maximum number of IPFIX flow records that can be cached at a time. If 0, caching is disabled (default: 0)",
+ Destination: &cliConfig.IPFIX.CacheMaxFlows,
+ Value: IPFIX.CacheMaxFlows,
+ }, &cli.UintFlag{
+ Name: "ipfix-cache-active-timeout",
+ Usage: "Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled (default: 60)",
+ Destination: &cliConfig.IPFIX.CacheActiveTimeout,
+ Value: IPFIX.CacheActiveTimeout,
+ },
+}
+
+// CNIFlags capture CNI-related options
+var CNIFlags = []cli.Flag{
+ // CNI options
+ &cli.StringFlag{
+ Name: "cni-conf-dir",
+ Usage: "the CNI config directory in which to write the overlay CNI config file (default: /etc/cni/net.d)",
+ Destination: &cliConfig.CNI.ConfDir,
+ Value: CNI.ConfDir,
+ },
+ &cli.StringFlag{
+ Name: "cni-plugin",
+ Usage: "the name of the CNI plugin (default: ovn-k8s-cni-overlay)",
+ Destination: &cliConfig.CNI.Plugin,
+ Value: CNI.Plugin,
+ },
+}
+
+// OVNK8sFeatureFlags capture OVN-Kubernetes feature related options
+var OVNK8sFeatureFlags = []cli.Flag{
+ &cli.BoolFlag{
+ Name: "enable-admin-network-policy",
+ Usage: "Configure to use Admin Network Policy CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableAdminNetworkPolicy,
+ Value: OVNKubernetesFeature.EnableAdminNetworkPolicy,
+ },
+ &cli.BoolFlag{
+ Name: "enable-egress-ip",
+ Usage: "Configure to use EgressIP CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableEgressIP,
+ Value: OVNKubernetesFeature.EnableEgressIP,
+ },
+ &cli.IntFlag{
+ Name: "egressip-reachability-total-timeout",
+ Usage: "EgressIP node reachability total timeout in seconds (default: 1)",
+ Destination: &cliConfig.OVNKubernetesFeature.EgressIPReachabiltyTotalTimeout,
+ Value: 1,
+ },
+ &cli.BoolFlag{
+ Name: "enable-egress-firewall",
+ Usage: "Configure to use EgressFirewall CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableEgressFirewall,
+ Value: OVNKubernetesFeature.EnableEgressFirewall,
+ },
+ &cli.BoolFlag{
+ Name: "enable-egress-qos",
+ Usage: "Configure to use EgressQoS CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableEgressQoS,
+ Value: OVNKubernetesFeature.EnableEgressQoS,
+ },
+ &cli.IntFlag{
+ Name: "egressip-node-healthcheck-port",
+ Usage: "Configure EgressIP node reachability using gRPC on this TCP port.",
+ Destination: &cliConfig.OVNKubernetesFeature.EgressIPNodeHealthCheckPort,
+ },
+ &cli.BoolFlag{
+ Name: "enable-multi-network",
+ Usage: "Configure to use multiple NetworkAttachmentDefinition CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetwork,
+ Value: OVNKubernetesFeature.EnableMultiNetwork,
+ },
+ &cli.BoolFlag{
+ Name: "enable-multi-networkpolicy",
+ Usage: "Configure to use MultiNetworkPolicy CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetworkPolicy,
+ Value: OVNKubernetesFeature.EnableMultiNetworkPolicy,
+ },
+ &cli.BoolFlag{
+ Name: "enable-network-segmentation",
+ Usage: "Configure to use network segmentation feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableNetworkSegmentation,
+ Value: OVNKubernetesFeature.EnableNetworkSegmentation,
+ },
+ &cli.BoolFlag{
+ Name: "enable-stateless-netpol",
+ Usage: "Configure to use stateless network policy feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableStatelessNetPol,
+ Value: OVNKubernetesFeature.EnableStatelessNetPol,
+ },
+ &cli.BoolFlag{
+ Name: "enable-interconnect",
+ Usage: "Configure to enable interconnecting multiple zones.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableInterconnect,
+ Value: OVNKubernetesFeature.EnableInterconnect,
+ },
+ &cli.BoolFlag{
+ Name: "enable-egress-service",
+ Usage: "Configure to use EgressService CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableEgressService,
+ Value: OVNKubernetesFeature.EnableEgressService,
+ },
+ &cli.BoolFlag{
+ Name: "enable-multi-external-gateway",
+ Usage: "Configure to use AdminPolicyBasedExternalRoute CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableMultiExternalGateway,
+ Value: OVNKubernetesFeature.EnableMultiExternalGateway,
+ },
+ &cli.BoolFlag{
+ Name: "enable-persistent-ips",
+ Usage: "Configure to use the persistent ips feature for virtualization with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnablePersistentIPs,
+ Value: OVNKubernetesFeature.EnablePersistentIPs,
+ },
+ &cli.BoolFlag{
+ Name: "enable-dns-name-resolver",
+ Usage: "Configure to use DNSNameResolver CRD feature with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableDNSNameResolver,
+ Value: OVNKubernetesFeature.EnableDNSNameResolver,
+ },
+ &cli.BoolFlag{
+ Name: "enable-svc-template-support",
+ Usage: "Configure to use svc-template with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableServiceTemplateSupport,
+ Value: OVNKubernetesFeature.EnableServiceTemplateSupport,
+ },
+ &cli.BoolFlag{
+ Name: "enable-observability",
+ Usage: "Configure to use OVN sampling with ovn-kubernetes.",
+ Destination: &cliConfig.OVNKubernetesFeature.EnableObservability,
+ Value: OVNKubernetesFeature.EnableObservability,
+ },
+}
+
+// K8sFlags capture Kubernetes-related options
+var K8sFlags = []cli.Flag{
+ &cli.StringFlag{
+ Name: "service-cluster-ip-range",
+ Usage: "Deprecated alias for k8s-service-cidrs.",
+ Destination: &serviceClusterIPRange,
+ },
+ &cli.StringFlag{
+ Name: "k8s-service-cidr",
+ Usage: "Deprecated alias for k8s-service-cidrs.",
+ Destination: &cliConfig.Kubernetes.CompatServiceCIDR,
+ },
+ &cli.StringFlag{
+ Name: "k8s-service-cidrs",
+ Usage: "A comma-separated set of CIDR notation IP ranges from which k8s assigns " +
+ "service cluster IPs. This should be the same as the value " +
+ "provided for kube-apiserver \"--service-cluster-ip-range\" " +
+ "option. (default: 172.16.1.0/24)",
+ Destination: &cliConfig.Kubernetes.RawServiceCIDRs,
+ Value: Kubernetes.RawServiceCIDRs,
+ },
+ &cli.StringFlag{
+ Name: "k8s-kubeconfig",
+ Usage: "absolute path to the Kubernetes kubeconfig file (not required if the --k8s-apiserver, --k8s-ca-cert, and --k8s-token are given)",
+ Destination: &cliConfig.Kubernetes.Kubeconfig,
+ },
+ &cli.StringFlag{
+ Name: "bootstrap-kubeconfig",
+ Usage: "absolute path to the Kubernetes kubeconfig file that is used to create the initial, per node, client certificates (should only be used together with 'cert-dir')",
+ Destination: &cliConfig.Kubernetes.BootstrapKubeconfig,
+ },
+ &cli.StringFlag{
+ Name: "k8s-apiserver",
+ Usage: "URL of the Kubernetes API server (not required if --k8s-kubeconfig is given) (default: http://localhost:8443)",
+ Destination: &cliConfig.Kubernetes.APIServer,
+ Value: Kubernetes.APIServer,
+ },
+ &cli.StringFlag{
+ Name: "cert-dir",
+ Usage: "absolute path to the directory of the client key and certificate (not required if --k8s-kubeconfig or --k8s-apiserver, --k8s-ca-cert, and --k8s-token are given)",
+ Destination: &cliConfig.Kubernetes.CertDir,
+ },
+ &cli.DurationFlag{
+ Name: "cert-duration",
+ Usage: "requested certificate duration, default: 10min",
+ Destination: &cliConfig.Kubernetes.CertDuration,
+ Value: Kubernetes.CertDuration,
+ },
+ &cli.StringFlag{
+ Name: "k8s-cacert",
+ Usage: "the absolute path to the Kubernetes API CA certificate (not required if --k8s-kubeconfig is given)",
+ Destination: &cliConfig.Kubernetes.CACert,
+ },
+ &cli.StringFlag{
+ Name: "k8s-token",
+ Usage: "the Kubernetes API authentication token (not required if --k8s-kubeconfig is given)",
+ Destination: &cliConfig.Kubernetes.Token,
+ },
+ &cli.StringFlag{
+ Name: "k8s-token-file",
+ Usage: "the path to Kubernetes API token. If set, it is periodically read and takes precedence over k8s-token",
+ Destination: &cliConfig.Kubernetes.TokenFile,
+ },
+ &cli.StringFlag{
+ Name: "ovn-config-namespace",
+ Usage: "specify a namespace which will contain services to config the OVN databases",
+ Destination: &cliConfig.Kubernetes.OVNConfigNamespace,
+ Value: Kubernetes.OVNConfigNamespace,
+ },
+ &cli.BoolFlag{
+ Name: "ovn-empty-lb-events",
+ Usage: "If set, then load balancers do not get deleted when all backends are removed. " +
+ "Instead, ovn-kubernetes monitors the OVN southbound database for empty lb backends " +
+ "controller events. If one arrives, then a NeedPods event is sent so that Kubernetes " +
+ "will spin up pods for the load balancer to send traffic to.",
+ Destination: &cliConfig.Kubernetes.OVNEmptyLbEvents,
+ },
+ &cli.StringFlag{
+ Name: "pod-ip",
+ Usage: "UNUSED",
+ },
+ &cli.StringFlag{
+ Name: "no-hostsubnet-nodes",
+ Usage: "Specify a label for nodes that will manage their own hostsubnets",
+ Destination: &cliConfig.Kubernetes.RawNoHostSubnetNodes,
+ },
+ &cli.StringFlag{
+ Name: "host-network-namespace",
+ Usage: "specify a namespace which will be used to classify host network traffic for network policy",
+ Destination: &cliConfig.Kubernetes.HostNetworkNamespace,
+ Value: Kubernetes.HostNetworkNamespace,
+ },
+ &cli.BoolFlag{
+ Name: "disable-requestedchassis",
+ Usage: "If set to true, requested-chassis option will not be set during pod creation",
+ Destination: &cliConfig.Kubernetes.DisableRequestedChassis,
+ Value: Kubernetes.DisableRequestedChassis,
+ },
+ &cli.StringFlag{
+ Name: "platform-type",
+ Usage: "The cloud provider platform type ovn-kubernetes is deployed on. " +
+ "Valid values can be found in: https://github.com/ovn-org/ovn-kubernetes/blob/master/go-controller/vendor/github.com/openshift/api/config/v1/types_infrastructure.go#L130-L172",
+ Destination: &cliConfig.Kubernetes.PlatformType,
+ Value: Kubernetes.PlatformType,
+ },
+ &cli.StringFlag{
+ Name: "healthz-bind-address",
+ Usage: "The IP address and port for the node proxy healthz server to serve on (set to '0.0.0.0:10256' or '[::]:10256' for listening in all interfaces and IP families). Disabled by default.",
+ Destination: &cliConfig.Kubernetes.HealthzBindAddress,
+ },
+ &cli.StringFlag{
+ Name: "dns-service-namespace",
+ Usage: "DNS kubernetes service namespace used to expose name resolving to live migratable vms.",
+ Destination: &cliConfig.Kubernetes.DNSServiceNamespace,
+ Value: Kubernetes.DNSServiceNamespace,
+ },
+ &cli.StringFlag{
+ Name: "dns-service-name",
+ Usage: "DNS kubernetes service name used to expose name resolving to live migratable vms.",
+ Destination: &cliConfig.Kubernetes.DNSServiceName,
+ Value: Kubernetes.DNSServiceName,
+ },
+}
+
+// MetricsFlags capture metrics-related options
+var MetricsFlags = []cli.Flag{
+ &cli.StringFlag{
+ Name: "metrics-bind-address",
+ Usage: "The IP address and port for the OVN K8s metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces)",
+ Destination: &cliConfig.Metrics.BindAddress,
+ },
+ &cli.StringFlag{
+ Name: "ovn-metrics-bind-address",
+ Usage: "The IP address and port for the OVN metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces)",
+ Destination: &cliConfig.Metrics.OVNMetricsBindAddress,
+ },
+ &cli.BoolFlag{
+ Name: "export-ovs-metrics",
+ Usage: "When true exports OVS metrics from the OVN metrics server",
+ Destination: &cliConfig.Metrics.ExportOVSMetrics,
+ },
+ &cli.BoolFlag{
+ Name: "metrics-enable-pprof",
+ Usage: "If true, then also accept pprof requests on the metrics port.",
+ Destination: &cliConfig.Metrics.EnablePprof,
+ Value: Metrics.EnablePprof,
+ },
+ &cli.StringFlag{
+ Name: "node-server-privkey",
+ Usage: "Private key that the OVN node K8s metrics server uses to serve metrics over TLS.",
+ Destination: &cliConfig.Metrics.NodeServerPrivKey,
+ },
+ &cli.StringFlag{
+ Name: "node-server-cert",
+ Usage: "Certificate that the OVN node K8s metrics server uses to serve metrics over TLS.",
+ Destination: &cliConfig.Metrics.NodeServerCert,
+ },
+ &cli.BoolFlag{
+ Name: "metrics-enable-config-duration",
+ Usage: "Enables monitoring OVN-Kubernetes master and OVN configuration duration",
+ Destination: &cliConfig.Metrics.EnableConfigDuration,
+ },
+ &cli.BoolFlag{
+ Name: "metrics-enable-scale",
+ Usage: "Enables metrics related to scaling",
+ Destination: &cliConfig.Metrics.EnableScaleMetrics,
+ },
+}
+
+// OvnNBFlags capture OVN northbound database options
+var OvnNBFlags = []cli.Flag{
+ &cli.StringFlag{
+ Name: "nb-address",
+ Usage: "IP address and port of the OVN northbound API " +
+ "(eg, ssl:1.2.3.4:6641,ssl:1.2.3.5:6642). Leave empty to " +
+ "use a local unix socket.",
+ Destination: &cliConfig.OvnNorth.Address,
+ },
+ &cli.StringFlag{
+ Name: "nb-client-privkey",
+ Usage: "Private key that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-privkey.pem). " +
+ "Default value for this setting is empty which defaults to use local unix socket.",
+ Destination: &cliConfig.OvnNorth.PrivKey,
+ },
+ &cli.StringFlag{
+ Name: "nb-client-cert",
+ Usage: "Client certificate that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-cert.pem). " +
+ "Default value for this setting is empty which defaults to use local unix socket.",
+ Destination: &cliConfig.OvnNorth.Cert,
+ },
+ &cli.StringFlag{
+ Name: "nb-client-cacert",
+ Usage: "CA certificate that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-ca.cert)." +
+ "Default value for this setting is empty which defaults to use local unix socket.",
+ Destination: &cliConfig.OvnNorth.CACert,
+ },
+ &cli.StringFlag{
+ Name: "nb-cert-common-name",
+ Usage: "Common Name of the certificate used for TLS server certificate verification. " +
+ "In cases where the certificate doesn't have any SAN Extensions, this parameter " +
+ "should match the DNS(hostname) of the server. In case the certificate has a " +
+ "SAN extension, this parameter should match one of the SAN fields.",
+ Destination: &cliConfig.OvnNorth.CertCommonName,
+ },
+ &cli.UintFlag{
+ Name: "nb-raft-election-timer",
+ Usage: "The desired northbound database election timer.",
+ Destination: &cliConfig.OvnNorth.ElectionTimer,
+ },
+}
+
+// OvnSBFlags capture OVN southbound database options
+var OvnSBFlags = []cli.Flag{
+ &cli.StringFlag{
+ Name: "sb-address",
+ Usage: "IP address and port of the OVN southbound API " +
+ "(eg, ssl:1.2.3.4:6642,ssl:1.2.3.5:6642). " +
+ "Leave empty to use a local unix socket.",
+ Destination: &cliConfig.OvnSouth.Address,
+ },
+ &cli.StringFlag{
+ Name: "sb-client-privkey",
+ Usage: "Private key that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnsb-privkey.pem)." +
+ "Default value for this setting is empty which defaults to use local unix socket.",
+ Destination: &cliConfig.OvnSouth.PrivKey,
+ },
+ &cli.StringFlag{
+ Name: "sb-client-cert",
+ Usage: "Client certificate that the client should use for talking to the OVN database(default when ssl address is used: /etc/openvswitch/ovnsb-cert.pem). " +
+ "Default value for this setting is empty which defaults to use local unix socket.",
+ Destination: &cliConfig.OvnSouth.Cert,
+ },
+ &cli.StringFlag{
+ Name: "sb-client-cacert",
+ Usage: "CA certificate that the client should use for talking to the OVN database (default when ssl address is used /etc/openvswitch/ovnsb-ca.cert). " +
+ "Default value for this setting is empty which defaults to use local unix socket.",
+ Destination: &cliConfig.OvnSouth.CACert,
+ },
+ &cli.StringFlag{
+ Name: "sb-cert-common-name",
+ Usage: "Common Name of the certificate used for TLS server certificate verification. " +
+ "In cases where the certificate doesn't have any SAN Extensions, this parameter " +
+ "should match the DNS(hostname) of the server. In case the certificate has a " +
+ "SAN extension, this parameter should match one of the SAN fields.",
+ Destination: &cliConfig.OvnSouth.CertCommonName,
+ },
+ &cli.UintFlag{
+ Name: "sb-raft-election-timer",
+ Usage: "The desired southbound database election timer.",
+ Destination: &cliConfig.OvnSouth.ElectionTimer,
+ },
+}
+
+// OVNGatewayFlags capture L3 Gateway related flags
+var OVNGatewayFlags = []cli.Flag{
+ &cli.StringFlag{
+ Name: "gateway-mode",
+ Usage: "Sets the cluster gateway mode. One of \"shared\", " +
+ "or \"local\". If not given, gateway functionality is disabled.",
+ },
+ &cli.StringFlag{
+ Name: "gateway-interface",
+ Usage: "The interface on nodes that will be the gateway interface. " +
+ "If none specified, then the node's interface on which the " +
+ "default gateway is configured will be used as the gateway " +
+ "interface. Only useful with \"init-gateways\"",
+ Destination: &cliConfig.Gateway.Interface,
+ },
+ &cli.StringFlag{
+ Name: "exgw-interface",
+ Usage: "The interface on nodes that will be used for external gw network traffic. " +
+ "If none specified, ovnk will use the default interface",
+ Destination: &cliConfig.Gateway.EgressGWInterface,
+ },
+ &cli.StringFlag{
+ Name: "gateway-nexthop",
+ Usage: "The external default gateway which is used as a next hop by " +
+ "OVN gateway. This is many times just the default gateway " +
+ "of the node in question. If not specified, the default gateway" +
+ "configured in the node is used. Only useful with " +
+ "\"init-gateways\"",
+ Destination: &cliConfig.Gateway.NextHop,
+ },
+ &cli.UintFlag{
+ Name: "gateway-vlanid",
+ Usage: "The VLAN on which the external network is available. " +
+ "Valid only for Shared Gateway interface mode.",
+ Destination: &cliConfig.Gateway.VLANID,
+ },
+ &cli.BoolFlag{
+ Name: "nodeport",
+ Usage: "Setup nodeport based ingress on gateways.",
+ Destination: &cliConfig.Gateway.NodeportEnable,
+ },
+ &cli.BoolFlag{
+ Name: "disable-snat-multiple-gws",
+ Usage: "Disable SNAT for egress traffic with multiple gateways.",
+ Destination: &cliConfig.Gateway.DisableSNATMultipleGWs,
+ },
+ &cli.BoolFlag{
+ Name: "disable-forwarding",
+ Usage: "Disable forwarding on OVNK controlled interfaces.",
+ Destination: &cliConfig.Gateway.DisableForwarding,
+ },
+ &cli.StringFlag{
+ Name: "gateway-v4-join-subnet",
+ Usage: "The v4 join subnet used for assigning join switch IPv4 addresses",
+ Destination: &cliConfig.Gateway.V4JoinSubnet,
+ Value: Gateway.V4JoinSubnet,
+ },
+ &cli.StringFlag{
+ Name: "gateway-v6-join-subnet",
+ Usage: "The v6 join subnet used for assigning join switch IPv6 addresses",
+ Destination: &cliConfig.Gateway.V6JoinSubnet,
+ Value: Gateway.V6JoinSubnet,
+ },
+ &cli.StringFlag{
+ Name: "gateway-v4-masquerade-subnet",
+ Usage: "The v4 masquerade subnet used for assigning masquerade IPv4 addresses",
+ Destination: &cliConfig.Gateway.V4MasqueradeSubnet,
+ Value: Gateway.V4MasqueradeSubnet,
+ },
+ &cli.StringFlag{
+ Name: "gateway-v6-masquerade-subnet",
+ Usage: "The v6 masquerade subnet used for assigning masquerade IPv6 addresses",
+ Destination: &cliConfig.Gateway.V6MasqueradeSubnet,
+ Value: Gateway.V6MasqueradeSubnet,
+ },
+ &cli.BoolFlag{
+ Name: "disable-pkt-mtu-check",
+ Usage: "Disable OpenFlow checks for if packet size is greater than pod MTU",
+ Destination: &cliConfig.Gateway.DisablePacketMTUCheck,
+ },
+ &cli.StringFlag{
+ Name: "gateway-router-subnet",
+ Usage: "The Subnet to be used for the gateway router external port (shared mode only). " +
+ "auto-detected if not given. Must match the the kube node IP address. " +
+ "Currently valid for DPUs only",
+ Destination: &cliConfig.Gateway.RouterSubnet,
+ Value: Gateway.RouterSubnet,
+ },
+ &cli.BoolFlag{
+ Name: "single-node",
+ Usage: "Enable single node optimizations. " +
+ "Single node indicates a one node cluster and allows to simplify ovn-kubernetes gateway logic",
+ Destination: &cliConfig.Gateway.SingleNode,
+ },
+ &cli.BoolFlag{
+ Name: "allow-no-uplink",
+ Usage: "Allow the external gateway bridge without an uplink port in local gateway mode",
+ Destination: &cliConfig.Gateway.AllowNoUplink,
+ },
+ // Deprecated CLI options
+ &cli.BoolFlag{
+ Name: "init-gateways",
+ Usage: "DEPRECATED; use --gateway-mode instead",
+ Destination: &initGateways,
+ },
+ &cli.BoolFlag{
+ Name: "gateway-local",
+ Usage: "DEPRECATED; use --gateway-mode instead",
+ Destination: &gatewayLocal,
+ },
+}
+
+// MasterHAFlags capture leader election flags for master
+var MasterHAFlags = []cli.Flag{
+ &cli.IntFlag{
+ Name: "ha-election-lease-duration",
+ Usage: "Leader election lease duration (in secs) (default: 60)",
+ Destination: &cliConfig.MasterHA.ElectionLeaseDuration,
+ Value: MasterHA.ElectionLeaseDuration,
+ },
+ &cli.IntFlag{
+ Name: "ha-election-renew-deadline",
+ Usage: "Leader election renew deadline (in secs) (default: 30)",
+ Destination: &cliConfig.MasterHA.ElectionRenewDeadline,
+ Value: MasterHA.ElectionRenewDeadline,
+ },
+ &cli.IntFlag{
+ Name: "ha-election-retry-period",
+ Usage: "Leader election retry period (in secs) (default: 20)",
+ Destination: &cliConfig.MasterHA.ElectionRetryPeriod,
+ Value: MasterHA.ElectionRetryPeriod,
+ },
+}
+
+// ClusterMgrHAFlags capture leader election flags for cluster manager
+var ClusterMgrHAFlags = []cli.Flag{
+ &cli.IntFlag{
+ Name: "cluster-manager-ha-election-lease-duration",
+ Usage: "Leader election lease duration (in secs) (default: 60)",
+ Destination: &cliConfig.ClusterMgrHA.ElectionLeaseDuration,
+ Value: ClusterMgrHA.ElectionLeaseDuration,
+ },
+ &cli.IntFlag{
+ Name: "cluster-manager-ha-election-renew-deadline",
+ Usage: "Leader election renew deadline (in secs) (default: 30)",
+ Destination: &cliConfig.ClusterMgrHA.ElectionRenewDeadline,
+ Value: ClusterMgrHA.ElectionRenewDeadline,
+ },
+ &cli.IntFlag{
+ Name: "cluster-manager-ha-election-retry-period",
+ Usage: "Leader election retry period (in secs) (default: 20)",
+ Destination: &cliConfig.ClusterMgrHA.ElectionRetryPeriod,
+ Value: ClusterMgrHA.ElectionRetryPeriod,
+ },
+}
+
+// HybridOverlayFlags capture hybrid overlay feature options
+var HybridOverlayFlags = []cli.Flag{
+ &cli.BoolFlag{
+ Name: "enable-hybrid-overlay",
+ Usage: "Enables hybrid overlay functionality",
+ Destination: &cliConfig.HybridOverlay.Enabled,
+ },
+ &cli.StringFlag{
+ Name: "hybrid-overlay-cluster-subnets",
+ Value: HybridOverlay.RawClusterSubnets,
+ Usage: "A comma separated set of IP subnets and the associated" +
+ "hostsubnetlengths (eg, \"10.128.0.0/14/23,10.0.0.0/14/23\"). " +
+ "to use with the extended hybrid network. Each entry is given " +
+ "in the form IP address/subnet mask/hostsubnetlength, " +
+ "the hostsubnetlength is optional and if unspecified defaults to 24. The " +
+ "hostsubnetlength defines how many IP addresses are dedicated to each node.",
+ Destination: &cliConfig.HybridOverlay.RawClusterSubnets,
+ },
+ &cli.UintFlag{
+ Name: "hybrid-overlay-vxlan-port",
+ Value: HybridOverlay.VXLANPort,
+ Usage: "The UDP port used by the VXLAN protocol for hybrid networks.",
+ Destination: &cliConfig.HybridOverlay.VXLANPort,
+ },
+}
+
+// OvnKubeNodeFlags captures ovnkube-node specific configurations
+var OvnKubeNodeFlags = []cli.Flag{
+ &cli.StringFlag{
+ Name: "ovnkube-node-mode",
+ Usage: "ovnkube-node operating mode full(default), dpu, dpu-host",
+ Value: OvnKubeNode.Mode,
+ Destination: &cliConfig.OvnKubeNode.Mode,
+ },
+ &cli.StringFlag{
+ Name: "ovnkube-node-mgmt-port-netdev",
+ Usage: "When provided, use this netdev as management port. It will be renamed to ovn-k8s-mp0 " +
+ "and used to allow host network services and pods to access k8s pod and service networks. ",
+ Value: OvnKubeNode.MgmtPortNetdev,
+ Destination: &cliConfig.OvnKubeNode.MgmtPortNetdev,
+ },
+ &cli.StringFlag{
+ Name: "ovnkube-node-mgmt-port-dp-resource-name",
+ Usage: "When provided, use this device plugin resource name to find the allocated resource as management port. " +
+ "The interface chosen from this resource will be renamed to ovn-k8s-mp0 " +
+ "and used to allow host network services and pods to access k8s pod and service networks. ",
+ Value: OvnKubeNode.MgmtPortDPResourceName,
+ Destination: &cliConfig.OvnKubeNode.MgmtPortDPResourceName,
+ },
+ &cli.BoolFlag{
+ Name: "disable-ovn-iface-id-ver",
+ Usage: "Deprecated; iface-id-ver is always enabled",
+ Destination: &disableOVNIfaceIDVer,
+ },
+}
+
+// ClusterManagerFlags captures ovnkube-cluster-manager specific configurations
+var ClusterManagerFlags = []cli.Flag{
+ &cli.StringFlag{
+ Name: "cluster-manager-v4-transit-switch-subnet",
+ Usage: "The v4 transit switch subnet used for assigning transit switch IPv4 addresses for interconnect",
+ Destination: &cliConfig.ClusterManager.V4TransitSwitchSubnet,
+ Value: ClusterManager.V4TransitSwitchSubnet,
+ },
+ &cli.StringFlag{
+ Name: "cluster-manager-v6-transit-switch-subnet",
+ Usage: "The v6 transit switch subnet used for assigning transit switch IPv6 addresses for interconnect",
+ Destination: &cliConfig.ClusterManager.V6TransitSwitchSubnet,
+ Value: ClusterManager.V6TransitSwitchSubnet,
+ },
+}
+
+// Flags are general command-line flags. Apps should add these flags to their
+// own urfave/cli flags and call InitConfig() early in the application.
+var Flags []cli.Flag
+
+// GetFlags returns an array of all command-line flags necessary to configure
+// ovn-kubernetes
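+//
+// A minimal usage sketch (illustrative only; assumes urfave/cli v2 and k8s.io/utils/exec):
+//
+//	app := cli.NewApp()
+//	app.Flags = config.GetFlags(nil)
+//	app.Action = func(ctx *cli.Context) error {
+//		_, err := config.InitConfig(ctx, kexec.New(), nil)
+//		return err
+//	}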
+func GetFlags(customFlags []cli.Flag) []cli.Flag {
+ flags := CommonFlags
+ flags = append(flags, CNIFlags...)
+ flags = append(flags, OVNK8sFeatureFlags...)
+ flags = append(flags, K8sFlags...)
+ flags = append(flags, MetricsFlags...)
+ flags = append(flags, OvnNBFlags...)
+ flags = append(flags, OvnSBFlags...)
+ flags = append(flags, OVNGatewayFlags...)
+ flags = append(flags, MasterHAFlags...)
+ flags = append(flags, ClusterMgrHAFlags...)
+ flags = append(flags, HybridOverlayFlags...)
+ flags = append(flags, MonitoringFlags...)
+ flags = append(flags, IPFIXFlags...)
+ flags = append(flags, OvnKubeNodeFlags...)
+ flags = append(flags, ClusterManagerFlags...)
+ flags = append(flags, customFlags...)
+ return flags
+}
+
+// Defaults are a set of flags to indicate which options should be read from
+// ovs-vsctl and used as default values if the option is not found via the config
+// file or command line
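+//
+// For example, a caller that wants the API server URL and token to be read from
+// OVS external IDs when not otherwise configured might pass
+// &Defaults{K8sAPIServer: true, K8sToken: true} to InitConfig.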
+type Defaults struct {
+ OvnNorthAddress bool
+ K8sAPIServer bool
+ K8sToken bool
+ K8sTokenFile bool
+ K8sCert bool
+}
+
+const (
+ ovsVsctlCommand = "ovs-vsctl"
+)
+
+// Can't use pkg/ovs or pkg/util here because those packages import this one
+func rawExec(exec kexec.Interface, cmd string, args ...string) (string, error) {
+ cmdPath, err := exec.LookPath(cmd)
+ if err != nil {
+ return "", err
+ }
+
+ klog.V(5).Infof("Exec: %s %s", cmdPath, strings.Join(args, " "))
+ out, err := exec.Command(cmdPath, args...).CombinedOutput()
+ if err != nil {
+ klog.V(5).Infof("Exec: %s %s => %v", cmdPath, strings.Join(args, " "), err)
+ return "", err
+ }
+ return strings.TrimSpace(string(out)), nil
+}
+
+// Can't use pkg/ovs or pkg/util here because those packages import this one
+func runOVSVsctl(exec kexec.Interface, args ...string) (string, error) {
+ newArgs := append([]string{"--timeout=15"}, args...)
+ out, err := rawExec(exec, ovsVsctlCommand, newArgs...)
+ if err != nil {
+ return "", err
+ }
+ return strings.Trim(strings.TrimSpace(out), "\""), nil
+}
+
+func getOVSExternalID(exec kexec.Interface, name string) string {
+ out, err := runOVSVsctl(exec,
+ "--if-exists",
+ "get",
+ "Open_vSwitch",
+ ".",
+ "external_ids:"+name)
+ if err != nil {
+ klog.V(5).Infof("Failed to get OVS external_id %s: %v\n\t%s", name, err, out)
+ return ""
+ }
+ return out
+}
+
+func setOVSExternalID(exec kexec.Interface, key, value string) error {
+ out, err := runOVSVsctl(exec,
+ "set",
+ "Open_vSwitch",
+ ".",
+ fmt.Sprintf("external_ids:%s=%s", key, value))
+ if err != nil {
+ return fmt.Errorf("error setting OVS external ID '%s=%s': %v\n %q", key, value, err, out)
+ }
+ return nil
+}
+
+func buildKubernetesConfig(exec kexec.Interface, cli, file *config, saPath string, defaults *Defaults) error {
+ // Token and ca.crt may come from files mounted in the container.
+ saConfig := savedKubernetes
+ if data, err := os.ReadFile(filepath.Join(saPath, kubeServiceAccountFileToken)); err == nil {
+ saConfig.Token = string(data)
+ saConfig.TokenFile = filepath.Join(saPath, kubeServiceAccountFileToken)
+ }
+ if _, err2 := os.Stat(filepath.Join(saPath, kubeServiceAccountFileCACert)); err2 == nil {
+ saConfig.CACert = filepath.Join(saPath, kubeServiceAccountFileCACert)
+ }
+
+ if err := overrideFields(&Kubernetes, &saConfig, &savedKubernetes); err != nil {
+ return err
+ }
+
+ // values for token, cacert, kubeconfig, api-server may be found in several places.
+ // Priority order (highest first): OVS config, command line options, config file,
+ // environment variables, service account files
+
+ envConfig := savedKubernetes
+ envVarsMap := map[string]string{
+ "Kubeconfig": "KUBECONFIG",
+ "BootstrapKubeconfig": "BOOTSTRAP_KUBECONFIG",
+ "CertDir": "CERT_DIR",
+ "CACert": "K8S_CACERT",
+ "APIServer": "K8S_APISERVER",
+ "Token": "K8S_TOKEN",
+ "TokenFile": "K8S_TOKEN_FILE",
+ "HostNetworkNamespace": "OVN_HOST_NETWORK_NAMESPACE",
+ }
+ for k, v := range envVarsMap {
+ if x, exists := os.LookupEnv(v); exists && len(x) > 0 {
+ reflect.ValueOf(&envConfig).Elem().FieldByName(k).SetString(x)
+ }
+ }
+
+ if err := overrideFields(&Kubernetes, &envConfig, &savedKubernetes); err != nil {
+ return err
+ }
+
+ // Copy config file values over default values
+ if err := overrideFields(&Kubernetes, &file.Kubernetes, &savedKubernetes); err != nil {
+ return err
+ }
+
+ // And CLI overrides over config file and default values
+ if err := overrideFields(&Kubernetes, &cli.Kubernetes, &savedKubernetes); err != nil {
+ return err
+ }
+
+ // Grab default values from OVS external IDs
+ if defaults.K8sAPIServer {
+ Kubernetes.APIServer = getOVSExternalID(exec, "k8s-api-server")
+ }
+ if defaults.K8sToken {
+ Kubernetes.Token = getOVSExternalID(exec, "k8s-api-token")
+ }
+ if defaults.K8sTokenFile {
+ Kubernetes.TokenFile = getOVSExternalID(exec, "k8s-api-token-file")
+ }
+
+ if defaults.K8sCert {
+ Kubernetes.CACert = getOVSExternalID(exec, "k8s-ca-certificate")
+ }
+
+ if Kubernetes.Kubeconfig != "" && !pathExists(Kubernetes.Kubeconfig) {
+ return fmt.Errorf("kubernetes kubeconfig file %q not found", Kubernetes.Kubeconfig)
+ }
+
+ if Kubernetes.CACert != "" {
+ bytes, err := os.ReadFile(Kubernetes.CACert)
+ if err != nil {
+ return err
+ }
+ Kubernetes.CAData = bytes
+ }
+
+ url, err := url.Parse(Kubernetes.APIServer)
+ if err != nil {
+ return fmt.Errorf("kubernetes API server address %q invalid: %v", Kubernetes.APIServer, err)
+ } else if url.Scheme != "https" && url.Scheme != "http" {
+ return fmt.Errorf("kubernetes API server URL scheme %q invalid", url.Scheme)
+ }
+
+ // Legacy --service-cluster-ip-range or --k8s-service-cidr options override config file or --k8s-service-cidrs.
+ if serviceClusterIPRange != "" {
+ Kubernetes.RawServiceCIDRs = serviceClusterIPRange
+ } else if Kubernetes.CompatServiceCIDR != "" {
+ Kubernetes.RawServiceCIDRs = Kubernetes.CompatServiceCIDR
+ }
+ if Kubernetes.RawServiceCIDRs == "" {
+ return fmt.Errorf("kubernetes service-cidrs is required")
+ }
+
+ return nil
+}
+
+// completeKubernetesConfig completes the Kubernetes config by parsing raw values
+// into their final form.
+func completeKubernetesConfig(allSubnets *ConfigSubnets) error {
+ Kubernetes.ServiceCIDRs = []*net.IPNet{}
+ for _, cidrString := range strings.Split(Kubernetes.RawServiceCIDRs, ",") {
+ _, serviceCIDR, err := net.ParseCIDR(cidrString)
+ if err != nil {
+ return fmt.Errorf("kubernetes service network CIDR %q invalid: %v", cidrString, err)
+ }
+ Kubernetes.ServiceCIDRs = append(Kubernetes.ServiceCIDRs, serviceCIDR)
+ allSubnets.Append(ConfigSubnetService, serviceCIDR)
+ }
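+ // At most two service CIDRs are allowed; when two are given, they must be one IPv4 and one IPv6 (dual-stack).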
+ if len(Kubernetes.ServiceCIDRs) > 2 {
+ return fmt.Errorf("kubernetes service-cidrs must contain either a single CIDR or else an IPv4/IPv6 pair")
+ } else if len(Kubernetes.ServiceCIDRs) == 2 && utilnet.IsIPv6CIDR(Kubernetes.ServiceCIDRs[0]) == utilnet.IsIPv6CIDR(Kubernetes.ServiceCIDRs[1]) {
+ return fmt.Errorf("kubernetes service-cidrs must contain either a single CIDR or else an IPv4/IPv6 pair")
+ }
+
+ if Kubernetes.RawNoHostSubnetNodes != "" {
+ nodeSelector, err := metav1.ParseToLabelSelector(Kubernetes.RawNoHostSubnetNodes)
+ if err != nil {
+ return fmt.Errorf("labelSelector \"%s\" is invalid: %v", Kubernetes.RawNoHostSubnetNodes, err)
+ }
+ selector, err := metav1.LabelSelectorAsSelector(nodeSelector)
+ if err != nil {
+ return fmt.Errorf("failed to convert %v into a labels.Selector: %v", nodeSelector, err)
+ }
+ Kubernetes.NoHostSubnetNodes = selector
+ }
+
+ return nil
+}
+
+func buildMetricsConfig(cli, file *config) error {
+ // Copy KubernetesConfig backwards-compat values over default values
+ if Kubernetes.CompatMetricsBindAddress != "" {
+ Metrics.BindAddress = Kubernetes.CompatMetricsBindAddress
+ }
+ if Kubernetes.CompatOVNMetricsBindAddress != "" {
+ Metrics.OVNMetricsBindAddress = Kubernetes.CompatOVNMetricsBindAddress
+ }
+ Metrics.EnablePprof = Kubernetes.CompatMetricsEnablePprof
+
+ // Copy config file values over Kubernetes and default values
+ if err := overrideFields(&Metrics, &file.Metrics, &savedMetrics); err != nil {
+ return err
+ }
+
+ // And CLI overrides over config file, Kubernetes, and default values
+ if err := overrideFields(&Metrics, &cli.Metrics, &savedMetrics); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func buildGatewayConfig(ctx *cli.Context, cli, file *config) error {
+ // Copy config file values over default values
+ if err := overrideFields(&Gateway, &file.Gateway, &savedGateway); err != nil {
+ return err
+ }
+
+ cli.Gateway.Mode = GatewayMode(ctx.String("gateway-mode"))
+ if cli.Gateway.Mode == GatewayModeDisabled {
+ // Handle legacy CLI options
+ if ctx.Bool("init-gateways") {
+ cli.Gateway.Mode = GatewayModeShared
+ if ctx.Bool("gateway-local") {
+ cli.Gateway.Mode = GatewayModeLocal
+ }
+ }
+ }
+ // And CLI overrides over config file and default values
+ if err := overrideFields(&Gateway, &cli.Gateway, &savedGateway); err != nil {
+ return err
+ }
+
+ if Gateway.Mode != GatewayModeDisabled {
+ validModes := []string{string(GatewayModeShared), string(GatewayModeLocal)}
+ var found bool
+ for _, mode := range validModes {
+ if string(Gateway.Mode) == mode {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("invalid gateway mode %q: expect one of %s", string(Gateway.Mode), strings.Join(validModes, ","))
+ }
+ }
+
+ // Options are only valid if Mode is not disabled
+ if Gateway.Mode == GatewayModeDisabled {
+ if Gateway.Interface != "" {
+ return fmt.Errorf("gateway interface option %q not allowed when gateway is disabled", Gateway.Interface)
+ }
+ if Gateway.NextHop != "" {
+ return fmt.Errorf("gateway next-hop option %q not allowed when gateway is disabled", Gateway.NextHop)
+ }
+ }
+
+ if Gateway.Mode != GatewayModeShared && Gateway.VLANID != 0 {
+ return fmt.Errorf("gateway VLAN ID option: %d is supported only in shared gateway mode", Gateway.VLANID)
+ }
+
+ return nil
+}
+
+func completeGatewayConfig(allSubnets *ConfigSubnets, masqueradeIPs *MasqueradeIPsConfig) error {
+ // Validate v4 and v6 join subnets
+ v4IP, v4JoinCIDR, err := net.ParseCIDR(Gateway.V4JoinSubnet)
+ if err != nil || utilnet.IsIPv6(v4IP) {
+ return fmt.Errorf("invalid gateway v4 join subnet specified, subnet: %s: error: %v", Gateway.V4JoinSubnet, err)
+ }
+
+ v6IP, v6JoinCIDR, err := net.ParseCIDR(Gateway.V6JoinSubnet)
+ if err != nil || !utilnet.IsIPv6(v6IP) {
+ return fmt.Errorf("invalid gateway v6 join subnet specified, subnet: %s: error: %v", Gateway.V6JoinSubnet, err)
+ }
+ allSubnets.Append(ConfigSubnetJoin, v4JoinCIDR)
+ allSubnets.Append(ConfigSubnetJoin, v6JoinCIDR)
+
+ // Validate v4 and v6 masquerade subnets
+ v4MasqueradeIP, v4MasqueradeCIDR, err := net.ParseCIDR(Gateway.V4MasqueradeSubnet)
+ if err != nil || utilnet.IsIPv6(v4MasqueradeCIDR.IP) {
+ return fmt.Errorf("invalid gateway v4 masquerade subnet specified, subnet: %s: error: %v", Gateway.V4MasqueradeSubnet, err)
+ }
+ if err = AllocateV4MasqueradeIPs(v4MasqueradeIP, masqueradeIPs); err != nil {
+ return fmt.Errorf("unable to allocate V4MasqueradeIPs: %s", err)
+ }
+
+ v6MasqueradeIP, v6MasqueradeCIDR, err := net.ParseCIDR(Gateway.V6MasqueradeSubnet)
+ if err != nil || !utilnet.IsIPv6(v6MasqueradeCIDR.IP) {
+ return fmt.Errorf("invalid gateway v6 masquerade subnet specified, subnet: %s: error: %v", Gateway.V6MasqueradeSubnet, err)
+ }
+ if err = AllocateV6MasqueradeIPs(v6MasqueradeIP, masqueradeIPs); err != nil {
+ return fmt.Errorf("unable to allocate V6MasqueradeIPs: %s", err)
+ }
+
+ allSubnets.Append(ConfigSubnetMasquerade, v4MasqueradeCIDR)
+ allSubnets.Append(ConfigSubnetMasquerade, v6MasqueradeCIDR)
+
+ return nil
+}
+
+func buildOVNKubernetesFeatureConfig(ctx *cli.Context, cli, file *config) error {
+ // Copy config file values over default values
+ if err := overrideFields(&OVNKubernetesFeature, &file.OVNKubernetesFeature, &savedOVNKubernetesFeature); err != nil {
+ return err
+ }
+ // And CLI overrides over config file and default values
+ if err := overrideFields(&OVNKubernetesFeature, &cli.OVNKubernetesFeature, &savedOVNKubernetesFeature); err != nil {
+ return err
+ }
+ return nil
+}
+
+func buildMasterHAConfig(ctx *cli.Context, cli, file *config) error {
+ // Copy config file values over default values
+ if err := overrideFields(&MasterHA, &file.MasterHA, &savedMasterHA); err != nil {
+ return err
+ }
+
+ // And CLI overrides over config file and default values
+ if err := overrideFields(&MasterHA, &cli.MasterHA, &savedMasterHA); err != nil {
+ return err
+ }
+
+ if MasterHA.ElectionLeaseDuration <= MasterHA.ElectionRenewDeadline {
+ return fmt.Errorf("invalid HA election lease duration '%d'. "+
+ "It should be greater than HA election renew deadline '%d'",
+ MasterHA.ElectionLeaseDuration, MasterHA.ElectionRenewDeadline)
+ }
+
+ if MasterHA.ElectionRenewDeadline <= MasterHA.ElectionRetryPeriod {
+ return fmt.Errorf("invalid HA election renew deadline duration '%d'. "+
+ "It should be greater than HA election retry period '%d'",
+ MasterHA.ElectionRenewDeadline, MasterHA.ElectionRetryPeriod)
+ }
+ return nil
+}
+
+func buildClusterMgrHAConfig(ctx *cli.Context, cli, file *config) error {
+ // Copy config file values over default values
+ if err := overrideFields(&ClusterMgrHA, &file.ClusterMgrHA, &savedClusterMgrHA); err != nil {
+ return err
+ }
+
+ // And CLI overrides over config file and default values
+ if err := overrideFields(&ClusterMgrHA, &cli.ClusterMgrHA, &savedClusterMgrHA); err != nil {
+ return err
+ }
+
+ if ClusterMgrHA.ElectionLeaseDuration <= ClusterMgrHA.ElectionRenewDeadline {
+ return fmt.Errorf("invalid HA election lease duration '%d'. "+
+ "It should be greater than HA election renew deadline '%d'",
+ ClusterMgrHA.ElectionLeaseDuration, ClusterMgrHA.ElectionRenewDeadline)
+ }
+
+ if ClusterMgrHA.ElectionRenewDeadline <= ClusterMgrHA.ElectionRetryPeriod {
+ return fmt.Errorf("invalid HA election renew deadline duration '%d'. "+
+ "It should be greater than HA election retry period '%d'",
+ ClusterMgrHA.ElectionRenewDeadline, ClusterMgrHA.ElectionRetryPeriod)
+ }
+ return nil
+}
+
+func buildMonitoringConfig(ctx *cli.Context, cli, file *config) error {
+ var err error
+ if err = overrideFields(&Monitoring, &file.Monitoring, &savedMonitoring); err != nil {
+ return err
+ }
+ if err = overrideFields(&Monitoring, &cli.Monitoring, &savedMonitoring); err != nil {
+ return err
+ }
+ return nil
+}
+
+// completeMonitoringConfig completes the Monitoring config by parsing raw values
+// into their final form.
+func completeMonitoringConfig() error {
+ var err error
+ if Monitoring.RawNetFlowTargets != "" {
+ Monitoring.NetFlowTargets, err = ParseFlowCollectors(Monitoring.RawNetFlowTargets)
+ if err != nil {
+ return fmt.Errorf("netflow targets invalid: %v", err)
+ }
+ }
+ if Monitoring.RawSFlowTargets != "" {
+ Monitoring.SFlowTargets, err = ParseFlowCollectors(Monitoring.RawSFlowTargets)
+ if err != nil {
+ return fmt.Errorf("sflow targets invalid: %v", err)
+ }
+ }
+ if Monitoring.RawIPFIXTargets != "" {
+ Monitoring.IPFIXTargets, err = ParseFlowCollectors(Monitoring.RawIPFIXTargets)
+ if err != nil {
+ return fmt.Errorf("ipfix targets invalid: %v", err)
+ }
+ }
+ return nil
+}
+
+func buildIPFIXConfig(cli, file *config) error {
+ if err := overrideFields(&IPFIX, &file.IPFIX, &savedIPFIX); err != nil {
+ return err
+ }
+ return overrideFields(&IPFIX, &cli.IPFIX, &savedIPFIX)
+}
+
+func buildHybridOverlayConfig(ctx *cli.Context, cli, file *config) error {
+ // Copy config file values over default values
+ if err := overrideFields(&HybridOverlay, &file.HybridOverlay, &savedHybridOverlay); err != nil {
+ return err
+ }
+
+ // And CLI overrides over config file and default values
+ if err := overrideFields(&HybridOverlay, &cli.HybridOverlay, &savedHybridOverlay); err != nil {
+ return err
+ }
+
+ if HybridOverlay.Enabled && HybridOverlay.VXLANPort > 65535 {
+ return fmt.Errorf("hybrid overlay vxlan port is invalid. The port cannot be larger than 65535")
+ }
+
+ return nil
+}
+
+// completeHybridOverlayConfig completes the HybridOverlay config by parsing raw values
+// into their final form.
+func completeHybridOverlayConfig(allSubnets *ConfigSubnets) error {
+ if !HybridOverlay.Enabled || len(HybridOverlay.RawClusterSubnets) == 0 {
+ return nil
+ }
+
+ var err error
+ HybridOverlay.ClusterSubnets, err = ParseClusterSubnetEntries(HybridOverlay.RawClusterSubnets)
+ if err != nil {
+ return fmt.Errorf("hybrid overlay cluster subnet invalid: %v", err)
+ }
+ for _, subnet := range HybridOverlay.ClusterSubnets {
+ allSubnets.Append(ConfigSubnetHybrid, subnet.CIDR)
+ }
+
+ return nil
+}
+
+func buildClusterManagerConfig(ctx *cli.Context, cli, file *config) error {
+ // Copy config file values over default values
+ if err := overrideFields(&ClusterManager, &file.ClusterManager, &savedClusterManager); err != nil {
+ return err
+ }
+
+ // And CLI overrides over config file and default values
+ if err := overrideFields(&ClusterManager, &cli.ClusterManager, &savedClusterManager); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// completeClusterManagerConfig completes the ClusterManager config by parsing raw values
+// into their final form.
+func completeClusterManagerConfig(allSubnets *ConfigSubnets) error {
+ // Validate v4 and v6 transit switch subnets
+ v4IP, v4TransitCIDR, err := net.ParseCIDR(ClusterManager.V4TransitSwitchSubnet)
+ if err != nil || utilnet.IsIPv6(v4IP) {
+ return fmt.Errorf("invalid transit switch v4 subnet specified, subnet: %s: error: %v", ClusterManager.V4TransitSwitchSubnet, err)
+ }
+
+ v6IP, v6TransitCIDR, err := net.ParseCIDR(ClusterManager.V6TransitSwitchSubnet)
+ if err != nil || !utilnet.IsIPv6(v6IP) {
+ return fmt.Errorf("invalid transit switch v6 subnet specified, subnet: %s: error: %v", ClusterManager.V6TransitSwitchSubnet, err)
+ }
+ allSubnets.Append(ConfigSubnetTransit, v4TransitCIDR)
+ allSubnets.Append(ConfigSubnetTransit, v6TransitCIDR)
+ return nil
+}
+
+func buildDefaultConfig(cli, file *config) error {
+ if err := overrideFields(&Default, &file.Default, &savedDefault); err != nil {
+ return err
+ }
+
+ if err := overrideFields(&Default, &cli.Default, &savedDefault); err != nil {
+ return err
+ }
+
+ // Legacy cluster-subnet CLI option overrides config file or --cluster-subnets
+ if clusterSubnet != "" {
+ Default.RawClusterSubnets = clusterSubnet
+ }
+ if Default.RawClusterSubnets == "" {
+ return fmt.Errorf("cluster subnet is required")
+ }
+
+ if Default.Zone == "" {
+ Default.Zone = types.OvnDefaultZone
+ }
+ return nil
+}
+
+// completeDefaultConfig completes the Default config by parsing raw values
+// into their final form.
+func completeDefaultConfig(allSubnets *ConfigSubnets) error {
+ var err error
+ Default.ClusterSubnets, err = ParseClusterSubnetEntries(Default.RawClusterSubnets)
+ if err != nil {
+ return fmt.Errorf("cluster subnet invalid: %v", err)
+ }
+ for _, subnet := range Default.ClusterSubnets {
+ allSubnets.Append(ConfigSubnetCluster, subnet.CIDR)
+ }
+
+ Default.UDNAllowedDefaultServices, err = parseServicesNamespacedNames(Default.RawUDNAllowedDefaultServices)
+ if err != nil {
+ return fmt.Errorf("UDN allowed services field is invalid: %v", err)
+ }
+
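+ // The remaining conntrack zones are derived as fixed offsets from the configured base zone.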
+ Default.HostMasqConntrackZone = Default.ConntrackZone + 1
+ Default.OVNMasqConntrackZone = Default.ConntrackZone + 2
+ Default.HostNodePortConntrackZone = Default.ConntrackZone + 3
+ Default.ReassemblyConntrackZone = Default.ConntrackZone + 4
+ return nil
+}
+
+// parseServicesNamespacedNames splits the input string by `,` and returns a slice
+// of keys that were verified to be valid namespaced service names. It ignores spaces between the elements.
+func parseServicesNamespacedNames(servicesRaw string) ([]string, error) {
+ var services []string
+ for _, udnEnabledSVC := range strings.Split(servicesRaw, ",") {
+ svcKey := strings.TrimSpace(udnEnabledSVC)
+ namespace, name, err := cache.SplitMetaNamespaceKey(strings.TrimSpace(svcKey))
+ if namespace == "" {
+ return nil, fmt.Errorf("UDN enabled service %q no namespace set: %v", svcKey, err)
+ }
+ if errs := validation.ValidateNamespaceName(namespace, false); len(errs) != 0 {
+ return nil, fmt.Errorf("UDN enabled service %q has an invalid namespace: %v", svcKey, errs)
+ }
+ if errs := validation.NameIsDNSSubdomain(name, false); len(errs) != 0 {
+ return nil, fmt.Errorf("UDN enabled service %q has an invalid name: %v", svcKey, errs)
+ }
+ services = append(services, svcKey)
+ }
+ return services, nil
+}
+
+// getConfigFilePath returns the config file path, plus 'true' if that path is
+// the fallback path (ie, not given by the user) or 'false' if the user gave it
+// explicitly
+func getConfigFilePath(ctx *cli.Context) (string, bool) {
+ configFile := ctx.String("config-file")
+ if configFile != "" {
+ return configFile, false
+ }
+ return "/etc/openvswitch/ovn_k8s.conf", true
+}
+
+// InitConfig reads the config file and common command-line options and
+// constructs the global config object from them. It returns the config file
+// path (if explicitly specified) or an error
+func InitConfig(ctx *cli.Context, exec kexec.Interface, defaults *Defaults) (string, error) {
+ return initConfigWithPath(ctx, exec, kubeServiceAccountPath, defaults)
+}
+
+// InitConfigSa reads the config file and common command-line options and
+// constructs the global config object from them. It passes the service account directory.
+// It returns the config file path (if explicitly specified) or an error
+func InitConfigSa(ctx *cli.Context, exec kexec.Interface, saPath string, defaults *Defaults) (string, error) {
+ return initConfigWithPath(ctx, exec, saPath, defaults)
+}
+
+// stripTokenFromK8sConfig removes k8s SA token & CAData values
+// from the KubernetesConfig struct used for logging.
+func stripTokenFromK8sConfig() KubernetesConfig {
+ k8sConf := Kubernetes
+ // Token and CAData are sensitive fields, so strip
+ // them before logging.
+ k8sConf.Token = ""
+ k8sConf.CAData = []byte{}
+ return k8sConf
+}
+
+// initConfigWithPath reads the given config file (or, if empty, the config file
+// specified by command-line arguments, or, failing that, the default config file) and
+// common command-line options, and constructs the global config object from
+// them. It returns the config file path (if explicitly specified) or an error
+func initConfigWithPath(ctx *cli.Context, exec kexec.Interface, saPath string, defaults *Defaults) (string, error) {
+ var retConfigFile string
+ var configFile string
+ var configFileIsDefault bool
+ var err error
+ // initialize cfg with default values, allow file read to override
+ cfg := config{
+ Default: savedDefault,
+ Logging: savedLogging,
+ IPFIX: savedIPFIX,
+ CNI: savedCNI,
+ OVNKubernetesFeature: savedOVNKubernetesFeature,
+ Kubernetes: savedKubernetes,
+ OvnNorth: savedOvnNorth,
+ OvnSouth: savedOvnSouth,
+ Gateway: savedGateway,
+ MasterHA: savedMasterHA,
+ ClusterMgrHA: savedClusterMgrHA,
+ HybridOverlay: savedHybridOverlay,
+ OvnKubeNode: savedOvnKubeNode,
+ ClusterManager: savedClusterManager,
+ }
+
+ configFile, configFileIsDefault = getConfigFilePath(ctx)
+
+ if !configFileIsDefault {
+ // Only return explicitly specified config file
+ retConfigFile = configFile
+ }
+
+ f, err := os.Open(configFile)
+ // Failure to find a default config file is not a hard error
+ if err != nil && !configFileIsDefault {
+ return "", fmt.Errorf("failed to open config file %s: %v", configFile, err)
+ }
+ if f != nil {
+ defer f.Close()
+
+ // Parse ovn-k8s config file.
+ if err = gcfg.ReadInto(&cfg, f); err != nil {
+ if gcfg.FatalOnly(err) != nil {
+ return "", fmt.Errorf("failed to parse config file %s: %v", f.Name(), err)
+ }
+ // error is only a warning -> log it but continue
+ klog.Warningf("Warning on parsing config file: %s", err)
+ }
+ klog.Infof("Parsed config file %s", f.Name())
+ klog.Infof("Parsed config: %+v", cfg)
+ }
+
+ if defaults == nil {
+ defaults = &Defaults{}
+ }
+
+ // Build config that needs no special processing
+ if err = overrideFields(&CNI, &cfg.CNI, &savedCNI); err != nil {
+ return "", err
+ }
+ if err = overrideFields(&CNI, &cliConfig.CNI, &savedCNI); err != nil {
+ return "", err
+ }
+
+ // Logging setup
+ if err = overrideFields(&Logging, &cfg.Logging, &savedLogging); err != nil {
+ return "", err
+ }
+ if err = overrideFields(&Logging, &cliConfig.Logging, &savedLogging); err != nil {
+ return "", err
+ }
+
+ var level klog.Level
+ if err := level.Set(strconv.Itoa(Logging.Level)); err != nil {
+ return "", fmt.Errorf("failed to set klog log level %v", err)
+ }
+ if Logging.File != "" {
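+ // Send klog output to a rotating log file (via lumberjack) while still mirroring it to stderr.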
+ klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
+ klog.InitFlags(klogFlags)
+ if err := klogFlags.Set("logtostderr", "false"); err != nil {
+ klog.Errorf("Error setting klog logtostderr: %v", err)
+ }
+ if err := klogFlags.Set("alsologtostderr", "true"); err != nil {
+ klog.Errorf("Error setting klog alsologtostderr: %v", err)
+ }
+ klog.SetOutput(&lumberjack.Logger{
+ Filename: Logging.File,
+ MaxSize: Logging.LogFileMaxSize, // megabytes
+ MaxBackups: Logging.LogFileMaxBackups,
+ MaxAge: Logging.LogFileMaxAge, // days
+ Compress: true,
+ })
+ }
+
+ if err = buildDefaultConfig(&cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildKubernetesConfig(exec, &cliConfig, &cfg, saPath, defaults); err != nil {
+ return "", err
+ }
+
+ // Metrics must be built after Kubernetes to ensure metrics options override
+ // legacy Kubernetes metrics options
+ if err = buildMetricsConfig(&cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildOVNKubernetesFeatureConfig(ctx, &cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildGatewayConfig(ctx, &cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildMasterHAConfig(ctx, &cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildClusterMgrHAConfig(ctx, &cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildMonitoringConfig(ctx, &cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildIPFIXConfig(&cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildHybridOverlayConfig(ctx, &cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildOvnKubeNodeConfig(ctx, &cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ if err = buildClusterManagerConfig(ctx, &cliConfig, &cfg); err != nil {
+ return "", err
+ }
+
+ tmpAuth, err := buildOvnAuth(exec, true, &cliConfig.OvnNorth, &cfg.OvnNorth, defaults.OvnNorthAddress)
+ if err != nil {
+ return "", err
+ }
+ OvnNorth = *tmpAuth
+
+ tmpAuth, err = buildOvnAuth(exec, false, &cliConfig.OvnSouth, &cfg.OvnSouth, false)
+ if err != nil {
+ return "", err
+ }
+ OvnSouth = *tmpAuth
+
+ if err := completeConfig(); err != nil {
+ return "", err
+ }
+
+ klog.V(5).Infof("Default config: %+v", Default)
+ klog.V(5).Infof("Logging config: %+v", Logging)
+ klog.V(5).Infof("Monitoring config: %+v", Monitoring)
+ klog.V(5).Infof("IPFIX config: %+v", IPFIX)
+ klog.V(5).Infof("CNI config: %+v", CNI)
+ klog.V(5).Infof("Kubernetes config: %+v", stripTokenFromK8sConfig())
+ klog.V(5).Infof("Gateway config: %+v", Gateway)
+ klog.V(5).Infof("OVN North config: %+v", OvnNorth)
+ klog.V(5).Infof("OVN South config: %+v", OvnSouth)
+ klog.V(5).Infof("Hybrid Overlay config: %+v", HybridOverlay)
+ klog.V(5).Infof("Ovnkube Node config: %+v", OvnKubeNode)
+ klog.V(5).Infof("Ovnkube Cluster Manager config: %+v", ClusterManager)
+
+ return retConfigFile, nil
+}
+
+func completeConfig() error {
+ allSubnets := NewConfigSubnets()
+
+ if err := completeKubernetesConfig(allSubnets); err != nil {
+ return err
+ }
+ if err := completeDefaultConfig(allSubnets); err != nil {
+ return err
+ }
+
+ if err := completeGatewayConfig(allSubnets, &Gateway.MasqueradeIPs); err != nil {
+ return err
+ }
+ if err := completeMonitoringConfig(); err != nil {
+ return err
+ }
+ if err := completeHybridOverlayConfig(allSubnets); err != nil {
+ return err
+ }
+
+ if err := completeClusterManagerConfig(allSubnets); err != nil {
+ return err
+ }
+
+ if err := allSubnets.CheckForOverlaps(); err != nil {
+ return err
+ }
+
+ var err error
+ IPv4Mode, IPv6Mode, err = allSubnets.checkIPFamilies()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func pathExists(path string) bool {
+ _, err := os.Stat(path)
+ if err != nil && os.IsNotExist(err) {
+ return false
+ }
+ return true
+}
+
+// parseAddress parses an OVN database address, which can be of form
+// "ssl:1.2.3.4:6641,ssl:1.2.3.5:6641" (OVS/OVN format) or
+// "ssl://1.2.3.4:6641,ssl://1.2.3.5:6641" (legacy ovnkube format)
+// or "ssl:[fd01::1]:6641,ssl:[fd01::2]:6641
+// and returns the validated address(es) and the scheme
+func parseAddress(urlString string) (string, OvnDBScheme, error) {
+ var parsedAddress, scheme string
+ var parsedScheme OvnDBScheme
+
+ urlString = strings.Replace(urlString, "//", "", -1)
+ for _, ovnAddress := range strings.Split(urlString, ",") {
+ splits := strings.SplitN(ovnAddress, ":", 2)
+ if len(splits) != 2 {
+ return "", "", fmt.Errorf("failed to parse OVN address %s", urlString)
+ }
+
+ if scheme == "" {
+ scheme = splits[0]
+ } else if scheme != splits[0] {
+ return "", "", fmt.Errorf("invalid protocols in OVN address %s",
+ urlString)
+ }
+
+ if scheme == "unix" {
+ if parsedAddress != "" {
+ parsedAddress += ","
+ }
+ parsedAddress += ovnAddress
+ } else {
+ host, port, err := net.SplitHostPort(splits[1])
+ if err != nil {
+ return "", "", fmt.Errorf("failed to parse OVN DB host/port %q: %v",
+ splits[1], err)
+ }
+
+ if parsedAddress != "" {
+ parsedAddress += ","
+ }
+ parsedAddress += fmt.Sprintf("%s:%s", scheme, net.JoinHostPort(host, port))
+ }
+ }
+
+ switch {
+ case scheme == "ssl":
+ parsedScheme = OvnDBSchemeSSL
+ case scheme == "tcp":
+ parsedScheme = OvnDBSchemeTCP
+ case scheme == "unix":
+ parsedScheme = OvnDBSchemeUnix
+ default:
+ return "", "", fmt.Errorf("unknown OVN DB scheme %q", scheme)
+ }
+ return parsedAddress, parsedScheme, nil
+}
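+
+// Illustrative usage (a sketch; the addresses are hypothetical, not taken from
+// this file):
+//
+//	addr, scheme, err := parseAddress("ssl://1.2.3.4:6641,ssl://1.2.3.5:6641")
+//	// addr   == "ssl:1.2.3.4:6641,ssl:1.2.3.5:6641" (legacy "//" stripped)
+//	// scheme == OvnDBSchemeSSL, err == nil
+//	// Mixing schemes, e.g. "ssl:1.2.3.4:6641,tcp:1.2.3.5:6641", returns an error.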
+
+// buildOvnAuth returns an OvnAuthConfig object describing the connection to an
+// OVN database, given a connection description string and authentication
+// details
+func buildOvnAuth(exec kexec.Interface, northbound bool, cliAuth, confAuth *OvnAuthConfig, readAddress bool) (*OvnAuthConfig, error) {
+ auth := &OvnAuthConfig{
+ northbound: northbound,
+ exec: exec,
+ }
+
+ var direction string
+ var defaultAuth *OvnAuthConfig
+ if northbound {
+ direction = "nb"
+ defaultAuth = &savedOvnNorth
+ } else {
+ direction = "sb"
+ defaultAuth = &savedOvnSouth
+ }
+
+ // Determine final address so we know how to set cert/key defaults
+ address := cliAuth.Address
+ if address == "" {
+ address = confAuth.Address
+ }
+ if address == "" && readAddress {
+ address = getOVSExternalID(exec, "ovn-"+direction)
+ }
+ if strings.HasPrefix(address, "ssl") {
+ // Set up default SSL cert/key paths
+ auth.CACert = "/etc/openvswitch/ovn" + direction + "-ca.cert"
+ auth.PrivKey = "/etc/openvswitch/ovn" + direction + "-privkey.pem"
+ auth.Cert = "/etc/openvswitch/ovn" + direction + "-cert.pem"
+ }
+
+ // Build the final auth config with overrides from CLI and config file
+ if err := overrideFields(auth, confAuth, defaultAuth); err != nil {
+ return nil, err
+ }
+ if err := overrideFields(auth, cliAuth, defaultAuth); err != nil {
+ return nil, err
+ }
+
+ if address == "" {
+ if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" {
+ return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?")
+ }
+ auth.Scheme = OvnDBSchemeUnix
+ auth.Address = fmt.Sprintf("unix:/var/run/ovn/ovn%s_db.sock", direction)
+ return auth, nil
+ }
+
+ var err error
+ auth.Address, auth.Scheme, err = parseAddress(address)
+ if err != nil {
+ return nil, err
+ }
+
+ switch {
+ case auth.Scheme == OvnDBSchemeSSL:
+ if auth.PrivKey == "" || auth.Cert == "" || auth.CACert == "" || auth.CertCommonName == "" {
+ return nil, fmt.Errorf("must specify private key, certificate, CA certificate, and common name used in the certificate for 'ssl' scheme")
+ }
+ case auth.Scheme == OvnDBSchemeTCP:
+ if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" {
+ return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?")
+ }
+ case auth.Scheme == OvnDBSchemeUnix:
+ if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" {
+ return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?")
+ }
+ }
+
+ return auth, nil
+}
+
+func (a *OvnAuthConfig) ensureCACert() error {
+ if pathExists(a.CACert) {
+ // CA file exists, nothing to do
+ return nil
+ }
+
+ // Client can bootstrap the CA from the OVN API. Use nbctl for both
+ // SB and NB since ovn-sbctl only supports --bootstrap-ca-cert from
+ // 2.9.90+.
+ // FIXME: change back to a.ctlCmd when sbctl supports --bootstrap-ca-cert
+ // https://github.com/openvswitch/ovs/pull/226
+ args := []string{
+ "--db=" + a.GetURL(),
+ "--timeout=5",
+ }
+ if a.Scheme == OvnDBSchemeSSL {
+ args = append(args, "--private-key="+a.PrivKey)
+ args = append(args, "--certificate="+a.Cert)
+ args = append(args, "--bootstrap-ca-cert="+a.CACert)
+ }
+ args = append(args, "list", "nb_global")
+ _, _ = rawExec(a.exec, "ovn-nbctl", args...)
+ if _, err := os.Stat(a.CACert); os.IsNotExist(err) {
+ klog.Warningf("Bootstrapping %s CA certificate failed", a.CACert)
+ }
+ return nil
+}
+
+// GetURL returns a URL suitable for passing to ovn-northd which describes the
+// transport mechanism for connection to the database
+func (a *OvnAuthConfig) GetURL() string {
+ return a.Address
+}
+
+// SetDBAuth sets the authentication configuration and connection method
+// for the OVN northbound or southbound database server or client
+func (a *OvnAuthConfig) SetDBAuth() error {
+ if a.Scheme == OvnDBSchemeSSL {
+ // Both server and client SSL schemes require privkey and cert
+ if !pathExists(a.PrivKey) {
+ return fmt.Errorf("private key file %s not found", a.PrivKey)
+ }
+ if !pathExists(a.Cert) {
+ return fmt.Errorf("certificate file %s not found", a.Cert)
+ }
+
+ // Client can bootstrap the CA cert from the DB
+ if err := a.ensureCACert(); err != nil {
+ return err
+ }
+
+ // Tell Southbound DB clients (like ovn-controller)
+ // which certificates to use to talk to the DB.
+ // Must happen *before* setting the "ovn-remote"
+ // external-id.
+ if !a.northbound {
+ out, err := runOVSVsctl(a.exec, "del-ssl")
+ if err != nil {
+ return fmt.Errorf("error deleting ovs-vsctl SSL "+
+ "configuration: %q (%v)", out, err)
+ }
+
+ out, err = runOVSVsctl(a.exec, "set-ssl", a.PrivKey, a.Cert, a.CACert)
+ if err != nil {
+ return fmt.Errorf("error setting client southbound DB SSL options: %v\n %q", err, out)
+ }
+ }
+ }
+
+ if !a.northbound {
+ // store the Southbound Database address in an external id - "external_ids:ovn-remote"
+ if err := setOVSExternalID(a.exec, "ovn-remote", "\""+a.GetURL()+"\""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *OvnAuthConfig) updateIP(newIPs []string, port string) {
+ newAddresses := make([]string, 0, len(newIPs))
+ for _, ipAddress := range newIPs {
+ newAddresses = append(newAddresses, fmt.Sprintf("%v:%s", a.Scheme, net.JoinHostPort(ipAddress, port)))
+ }
+ a.Address = strings.Join(newAddresses, ",")
+}
+
+// UpdateOVNNodeAuth updates the database addresses in the client auth config
+// for both OvnNorth and OvnSouth, using the new master IPs and the given DB ports.
+func UpdateOVNNodeAuth(masterIP []string, southboundDBPort, northboundDBPort string) {
+ klog.V(5).Infof("Update OVN node auth with new master ip: %s", masterIP)
+ OvnNorth.updateIP(masterIP, northboundDBPort)
+ OvnSouth.updateIP(masterIP, southboundDBPort)
+}
+
+// ovnKubeNodeModeSupported validates the provided mode is supported by ovnkube node
+func ovnKubeNodeModeSupported(mode string) error {
+ found := false
+ supportedModes := []string{types.NodeModeFull, types.NodeModeDPU, types.NodeModeDPUHost}
+ for _, m := range supportedModes {
+ if mode == m {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("unexpected ovnkube-node-mode: %s. supported modes: %v", mode, supportedModes)
+ }
+ return nil
+}
+
+// buildOvnKubeNodeConfig updates OvnKubeNode config from cli and config file
+func buildOvnKubeNodeConfig(ctx *cli.Context, cli, file *config) error {
+ // Copy config file values over default values
+ if err := overrideFields(&OvnKubeNode, &file.OvnKubeNode, &savedOvnKubeNode); err != nil {
+ return err
+ }
+
+ // And CLI overrides over config file and default values
+ if err := overrideFields(&OvnKubeNode, &cli.OvnKubeNode, &savedOvnKubeNode); err != nil {
+ return err
+ }
+
+ // validate ovnkube-node-mode
+ if err := ovnKubeNodeModeSupported(OvnKubeNode.Mode); err != nil {
+ return err
+ }
+
+ // ovnkube-node-mode dpu/dpu-host does not support hybrid overlay
+ if OvnKubeNode.Mode != types.NodeModeFull && HybridOverlay.Enabled {
+ return fmt.Errorf("hybrid overlay is not supported with ovnkube-node mode %s", OvnKubeNode.Mode)
+ }
+
+ // Warn the user if both MgmtPortNetdev and MgmtPortDPResourceName are specified since they
+ // configure the management port.
+ if OvnKubeNode.MgmtPortNetdev != "" && OvnKubeNode.MgmtPortDPResourceName != "" {
+ klog.Warningf("ovnkube-node-mgmt-port-netdev (%s) and ovnkube-node-mgmt-port-dp-resource-name (%s) "+
+			"both specified. The provided netdev in ovnkube-node-mgmt-port-netdev will be overridden by a netdev "+
+ "chosen by the resource provided by ovnkube-node-mgmt-port-dp-resource-name.",
+ OvnKubeNode.MgmtPortNetdev, OvnKubeNode.MgmtPortDPResourceName)
+ }
+
+ // when DPU is used, management port is always backed by a representor. On the
+ // host side, it needs to be provided through --ovnkube-node-mgmt-port-netdev.
+	// On the DPU, it is derived from the annotation exposed on the host side.
+ if OvnKubeNode.Mode == types.NodeModeDPU && !(OvnKubeNode.MgmtPortNetdev == "" && OvnKubeNode.MgmtPortDPResourceName == "") {
+ return fmt.Errorf("ovnkube-node-mgmt-port-netdev or ovnkube-node-mgmt-port-dp-resource-name must not be provided")
+ }
+ if OvnKubeNode.Mode == types.NodeModeDPUHost && OvnKubeNode.MgmtPortNetdev == "" && OvnKubeNode.MgmtPortDPResourceName == "" {
+ return fmt.Errorf("ovnkube-node-mgmt-port-netdev or ovnkube-node-mgmt-port-dp-resource-name must be provided")
+ }
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go
new file mode 100644
index 000000000..b27be4d7c
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go
@@ -0,0 +1,329 @@
+package config
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "strconv"
+ "strings"
+
+ iputils "github.com/containernetworking/plugins/pkg/ip"
+ utilnet "k8s.io/utils/net"
+)
+
+// HostPort is the object that holds the definition for a host and port tuple
+type HostPort struct {
+ Host *net.IP
+ Port int32
+}
+
+// String representation of a HostPort entry
+func (hp *HostPort) String() string {
+ switch {
+ case hp.Host == nil:
+ return fmt.Sprintf(":%d", hp.Port)
+ case hp.Host.To4() != nil:
+ return fmt.Sprintf("%s:%d", *hp.Host, hp.Port)
+ default:
+ return fmt.Sprintf("[%s]:%d", *hp.Host, hp.Port)
+ }
+}
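+
+// For illustration (hypothetical values), the three branches above format a
+// HostPort as follows:
+//
+//	HostPort{Host: nil, Port: 2055}    -> ":2055"
+//	HostPort{Host: &ipv4, Port: 2055}  -> "10.0.0.1:2055"
+//	HostPort{Host: &ipv6, Port: 2055}  -> "[fd01::1]:2055"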
+
+// CIDRNetworkEntry is the object that holds the definition for a single network CIDR range
+type CIDRNetworkEntry struct {
+ CIDR *net.IPNet
+ HostSubnetLength int
+}
+
+func (c CIDRNetworkEntry) String() string {
+ return fmt.Sprintf("%s/%d", c.CIDR.String(), c.HostSubnetLength)
+}
+
+// ParseClusterSubnetEntriesWithDefaults returns the parsed set of
+// CIDRNetworkEntries. These entries define a network space by specifying a set
+// of CIDRs and netmasks that the SDN can allocate addresses from, including how
+// that network space is partitioned for each of the cluster nodes. When no
+// host-specific prefix length is specified, the provided ones are assumed as
+// defaults. The host-specific prefix length is validated to be greater than the
+// overall subnet length. When 0 is specified as the default host-specific prefix
+// length, no host-specific prefix length is allowed or validated.
+func ParseClusterSubnetEntriesWithDefaults(clusterSubnetCmd string, ipv4HostLength, ipv6HostLength int) ([]CIDRNetworkEntry, error) {
+ var parsedClusterList []CIDRNetworkEntry
+ clusterEntriesList := strings.Split(clusterSubnetCmd, ",")
+
+ ipv4HostLengthAllowed := ipv4HostLength != 0
+ ipv6HostLengthAllowed := ipv6HostLength != 0
+
+ for _, clusterEntry := range clusterEntriesList {
+ clusterEntry := strings.TrimSpace(clusterEntry)
+ splitClusterEntry := strings.Split(clusterEntry, "/")
+
+ if len(splitClusterEntry) < 2 || len(splitClusterEntry) > 3 {
+ return nil, fmt.Errorf("CIDR %q not properly formatted", clusterEntry)
+ }
+
+ var err error
+ var parsedClusterEntry CIDRNetworkEntry
+ _, parsedClusterEntry.CIDR, err = net.ParseCIDR(fmt.Sprintf("%s/%s", splitClusterEntry[0], splitClusterEntry[1]))
+ if err != nil {
+ return nil, err
+ }
+
+ ipv6 := utilnet.IsIPv6(parsedClusterEntry.CIDR.IP)
+ hostLengthAllowed := (ipv6 && ipv6HostLengthAllowed) || (!ipv6 && ipv4HostLengthAllowed)
+
+ entryMaskLength, _ := parsedClusterEntry.CIDR.Mask.Size()
+ if len(splitClusterEntry) == 3 {
+ if !hostLengthAllowed {
+ return nil, fmt.Errorf("CIDR %q not properly formatted", clusterEntry)
+ }
+ tmp, err := strconv.Atoi(splitClusterEntry[2])
+ if err != nil {
+ return nil, err
+ }
+ parsedClusterEntry.HostSubnetLength = tmp
+ } else {
+ if ipv6 {
+ parsedClusterEntry.HostSubnetLength = ipv6HostLength
+ } else {
+ // default for backward compatibility
+ parsedClusterEntry.HostSubnetLength = ipv4HostLength
+ }
+ }
+
+ if hostLengthAllowed {
+ if ipv6 && ipv6HostLengthAllowed && parsedClusterEntry.HostSubnetLength != 64 {
+ return nil, fmt.Errorf("IPv6 only supports /64 host subnets")
+ }
+
+ if !ipv6 && parsedClusterEntry.HostSubnetLength > 32 {
+				return nil, fmt.Errorf("invalid host subnet, IPv4 subnet must be <= 32")
+ }
+
+ if parsedClusterEntry.HostSubnetLength <= entryMaskLength {
+ return nil, fmt.Errorf("cannot use a host subnet length mask shorter than or equal to the cluster subnet mask. "+
+ "host subnet length: %d, cluster subnet length: %d", parsedClusterEntry.HostSubnetLength, entryMaskLength)
+ }
+ }
+
+ parsedClusterList = append(parsedClusterList, parsedClusterEntry)
+ }
+
+ if len(parsedClusterList) == 0 {
+ return nil, fmt.Errorf("failed to parse any CIDRs from %q", clusterSubnetCmd)
+ }
+
+ return parsedClusterList, nil
+}
+
+// ParseClusterSubnetEntries returns the parsed set of
+// CIDRNetworkEntries. If not specified, it assumes a default host specific
+// prefix length of 24 or 64 bits for ipv4 and ipv6 respectively.
+func ParseClusterSubnetEntries(clusterSubnetCmd string) ([]CIDRNetworkEntry, error) {
+ // default to 24 bits host specific prefix length for backward compatibility
+ return ParseClusterSubnetEntriesWithDefaults(clusterSubnetCmd, 24, 64)
+}
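+
+// Illustrative usage (hypothetical subnets, not taken from this file):
+//
+//	entries, err := ParseClusterSubnetEntries("10.128.0.0/14/23,fd01::/48")
+//	// entries[0]: CIDR 10.128.0.0/14, HostSubnetLength 23 (explicit)
+//	// entries[1]: CIDR fd01::/48, HostSubnetLength 64 (IPv6 default)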
+
+// ParseFlowCollectors returns the parsed set of HostPorts passed by the user on the command line.
+// These entries define the flow collectors to which OVS sends flow metadata using NetFlow/SFlow/IPFIX.
+func ParseFlowCollectors(flowCollectors string) ([]HostPort, error) {
+ var parsedFlowsCollectors []HostPort
+ readCollectors := map[string]struct{}{}
+ collectors := strings.Split(flowCollectors, ",")
+ for _, v := range collectors {
+ host, port, err := net.SplitHostPort(v)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse hostport: %v", err)
+ }
+ var ipp *net.IP
+ // If the host IP is not provided, we keep it nil and later will assume the Node IP
+ if host != "" {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return nil, fmt.Errorf("collector IP %s is not a valid IP", host)
+ }
+ ipp = &ip
+ }
+ parsedPort, err := strconv.ParseInt(port, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("collector port %s is not a valid port: %v", port, err)
+ }
+ // checking if HostPort entry is duplicate
+ hostPort := HostPort{Host: ipp, Port: int32(parsedPort)}
+ hps := hostPort.String()
+ if _, ok := readCollectors[hps]; ok {
+ // duplicate flow collector. Ignore it
+ continue
+ }
+ readCollectors[hps] = struct{}{}
+ parsedFlowsCollectors = append(parsedFlowsCollectors, hostPort)
+ }
+
+ return parsedFlowsCollectors, nil
+}
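+
+// Illustrative usage (hypothetical values, not taken from this file):
+//
+//	collectors, err := ParseFlowCollectors("10.0.0.1:2055,:2056")
+//	// collectors[0]: Host 10.0.0.1, Port 2055
+//	// collectors[1]: Host nil (the node IP is assumed later), Port 2056
+//	// Duplicate "host:port" entries are silently skipped.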
+
+type ConfigSubnetType string
+
+const (
+ ConfigSubnetJoin ConfigSubnetType = "built-in join subnet"
+ ConfigSubnetCluster ConfigSubnetType = "cluster subnet"
+ ConfigSubnetService ConfigSubnetType = "service subnet"
+ ConfigSubnetHybrid ConfigSubnetType = "hybrid overlay subnet"
+ ConfigSubnetMasquerade ConfigSubnetType = "masquerade subnet"
+ ConfigSubnetTransit ConfigSubnetType = "transit switch subnet"
+ UserDefinedSubnets ConfigSubnetType = "user defined subnet"
+ UserDefinedJoinSubnet ConfigSubnetType = "user defined join subnet"
+)
+
+type ConfigSubnet struct {
+ SubnetType ConfigSubnetType
+ Subnet *net.IPNet
+}
+
+// ConfigSubnets represents a set of configured subnets (and their names)
+type ConfigSubnets struct {
+ Subnets []ConfigSubnet
+ V4 map[ConfigSubnetType]bool
+ V6 map[ConfigSubnetType]bool
+}
+
+// NewConfigSubnets returns a new ConfigSubnets
+func NewConfigSubnets() *ConfigSubnets {
+ return &ConfigSubnets{
+ V4: make(map[ConfigSubnetType]bool),
+ V6: make(map[ConfigSubnetType]bool),
+ }
+}
+
+// Append adds a single subnet to cs
+func (cs *ConfigSubnets) Append(subnetType ConfigSubnetType, subnet *net.IPNet) {
+ cs.Subnets = append(cs.Subnets, ConfigSubnet{SubnetType: subnetType, Subnet: subnet})
+ if subnetType == ConfigSubnetCluster || subnetType == ConfigSubnetService || subnetType == ConfigSubnetHybrid {
+ if utilnet.IsIPv6CIDR(subnet) {
+ cs.V6[subnetType] = true
+ } else {
+ cs.V4[subnetType] = true
+ }
+ }
+}
+
+// CheckForOverlaps checks if any of the subnets in cs overlap
+func (cs *ConfigSubnets) CheckForOverlaps() error {
+ for i, si := range cs.Subnets {
+ for j := 0; j < i; j++ {
+ sj := cs.Subnets[j]
+ if si.Subnet.Contains(sj.Subnet.IP) || sj.Subnet.Contains(si.Subnet.IP) {
+ return fmt.Errorf("illegal network configuration: %s %q overlaps %s %q",
+ si.SubnetType, si.Subnet.String(),
+ sj.SubnetType, sj.Subnet.String())
+ }
+ }
+ }
+ return nil
+}
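+
+// For example (illustrative values): appending a cluster subnet 10.128.0.0/14
+// and a service subnet 10.129.0.0/16 makes CheckForOverlaps fail, because the
+// first CIDR contains the second one's network address.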
+
+func (cs *ConfigSubnets) describeSubnetType(subnetType ConfigSubnetType) string {
+ ipv4 := cs.V4[subnetType]
+ ipv6 := cs.V6[subnetType]
+ var familyType string
+ switch {
+ case ipv4 && !ipv6:
+ familyType = "IPv4"
+ case !ipv4 && ipv6:
+ familyType = "IPv6"
+ case ipv4 && ipv6:
+ familyType = "dual-stack"
+ default:
+ familyType = "unknown type"
+ }
+ return familyType + " " + string(subnetType)
+}
+
+// checkIPFamilies determines if cs contains a valid single-stack IPv4 configuration, a
+// valid single-stack IPv6 configuration, a valid dual-stack configuration, or none of the
+// above.
+func (cs *ConfigSubnets) checkIPFamilies() (usingIPv4, usingIPv6 bool, err error) {
+ if len(cs.V6) == 0 {
+ // Single-stack IPv4
+ return true, false, nil
+ } else if len(cs.V4) == 0 {
+ // Single-stack IPv6
+ return false, true, nil
+ } else if reflect.DeepEqual(cs.V4, cs.V6) {
+ // Dual-stack
+ return true, true, nil
+ }
+
+ netConfig := cs.describeSubnetType(ConfigSubnetCluster)
+ netConfig += ", " + cs.describeSubnetType(ConfigSubnetService)
+ if cs.V4[ConfigSubnetHybrid] || cs.V6[ConfigSubnetHybrid] {
+ netConfig += ", " + cs.describeSubnetType(ConfigSubnetHybrid)
+ }
+
+ return false, false, fmt.Errorf("illegal network configuration: %s", netConfig)
+}
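+
+// For example (illustrative): dual-stack cluster and service subnets yield
+// (true, true, nil), while an IPv4-only cluster subnet combined with a
+// dual-stack service subnet is reported as an illegal network configuration.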
+
+// MasqueradeIPsConfig holds the masquerade IPs used by the masquerade subnets for host-to-service traffic
+type MasqueradeIPsConfig struct {
+ V4OVNMasqueradeIP net.IP
+ V6OVNMasqueradeIP net.IP
+ V4HostMasqueradeIP net.IP
+ V6HostMasqueradeIP net.IP
+ V4HostETPLocalMasqueradeIP net.IP
+ V6HostETPLocalMasqueradeIP net.IP
+ V4DummyNextHopMasqueradeIP net.IP
+ V6DummyNextHopMasqueradeIP net.IP
+ V4OVNServiceHairpinMasqueradeIP net.IP
+ V6OVNServiceHairpinMasqueradeIP net.IP
+}
+
+// AllocateV4MasqueradeIPs and AllocateV6MasqueradeIPs allocate the masquerade IPs based on the passed-in
+// masquerade subnet network address (.0). They do this by cascading from the first IP after the network
+// address up to .5 currently (more masquerade IPs may be added in the future).
+
+func AllocateV4MasqueradeIPs(masqueradeSubnetNetworkAddress net.IP, masqueradeIPs *MasqueradeIPsConfig) error {
+ masqueradeIPs.V4OVNMasqueradeIP = iputils.NextIP(masqueradeSubnetNetworkAddress)
+ if masqueradeIPs.V4OVNMasqueradeIP == nil {
+ return fmt.Errorf("error setting V4OVNMasqueradeIP: %s", masqueradeSubnetNetworkAddress)
+ }
+ masqueradeIPs.V4HostMasqueradeIP = iputils.NextIP(masqueradeIPs.V4OVNMasqueradeIP) //using the last set ip we can cascade from the .0 down
+ if masqueradeIPs.V4HostMasqueradeIP == nil {
+ return fmt.Errorf("error setting V4HostMasqueradeIP: %s", masqueradeIPs.V4OVNMasqueradeIP)
+ }
+ masqueradeIPs.V4HostETPLocalMasqueradeIP = iputils.NextIP(masqueradeIPs.V4HostMasqueradeIP)
+ if masqueradeIPs.V4HostETPLocalMasqueradeIP == nil {
+ return fmt.Errorf("error setting V4HostETPLocalMasqueradeIP: %s", masqueradeIPs.V4HostMasqueradeIP)
+ }
+ masqueradeIPs.V4DummyNextHopMasqueradeIP = iputils.NextIP(masqueradeIPs.V4HostETPLocalMasqueradeIP)
+ if masqueradeIPs.V4DummyNextHopMasqueradeIP == nil {
+ return fmt.Errorf("error setting V4DummyNextHopMasqueradeIP: %s", masqueradeIPs.V4HostETPLocalMasqueradeIP)
+ }
+ masqueradeIPs.V4OVNServiceHairpinMasqueradeIP = iputils.NextIP(masqueradeIPs.V4DummyNextHopMasqueradeIP)
+ if masqueradeIPs.V4OVNServiceHairpinMasqueradeIP == nil {
+ return fmt.Errorf("error setting V4OVNServiceHairpinMasqueradeIP: %s", masqueradeIPs.V4DummyNextHopMasqueradeIP)
+ }
+ return nil
+}
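+
+// For example (illustrative; the subnet is an assumption, not taken from this
+// file), with a masquerade subnet of 169.254.169.0/29 the network address
+// 169.254.169.0 cascades to:
+//
+//	V4OVNMasqueradeIP:               169.254.169.1
+//	V4HostMasqueradeIP:              169.254.169.2
+//	V4HostETPLocalMasqueradeIP:      169.254.169.3
+//	V4DummyNextHopMasqueradeIP:      169.254.169.4
+//	V4OVNServiceHairpinMasqueradeIP: 169.254.169.5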
+
+func AllocateV6MasqueradeIPs(masqueradeSubnetNetworkAddress net.IP, masqueradeIPs *MasqueradeIPsConfig) error {
+ masqueradeIPs.V6OVNMasqueradeIP = iputils.NextIP(masqueradeSubnetNetworkAddress)
+ if masqueradeIPs.V6OVNMasqueradeIP == nil {
+ return fmt.Errorf("error setting V6OVNMasqueradeIP: %s", masqueradeSubnetNetworkAddress)
+ }
+ masqueradeIPs.V6HostMasqueradeIP = iputils.NextIP(masqueradeIPs.V6OVNMasqueradeIP) //using the last set ip we can cascade from the .0 down
+ if masqueradeIPs.V6HostMasqueradeIP == nil {
+ return fmt.Errorf("error setting V6HostMasqueradeIP: %s", masqueradeIPs.V6OVNMasqueradeIP)
+ }
+ masqueradeIPs.V6HostETPLocalMasqueradeIP = iputils.NextIP(masqueradeIPs.V6HostMasqueradeIP)
+ if masqueradeIPs.V6HostETPLocalMasqueradeIP == nil {
+ return fmt.Errorf("error setting V6HostETPLocalMasqueradeIP: %s", masqueradeIPs.V6HostMasqueradeIP)
+ }
+ masqueradeIPs.V6DummyNextHopMasqueradeIP = iputils.NextIP(masqueradeIPs.V6HostETPLocalMasqueradeIP)
+ if masqueradeIPs.V6DummyNextHopMasqueradeIP == nil {
+ return fmt.Errorf("error setting V6DummyNextHopMasqueradeIP: %s", masqueradeIPs.V6HostETPLocalMasqueradeIP)
+ }
+ masqueradeIPs.V6OVNServiceHairpinMasqueradeIP = iputils.NextIP(masqueradeIPs.V6DummyNextHopMasqueradeIP)
+ if masqueradeIPs.V6OVNServiceHairpinMasqueradeIP == nil {
+ return fmt.Errorf("error setting V6OVNServiceHairpinMasqueradeIP: %s", masqueradeIPs.V6DummyNextHopMasqueradeIP)
+ }
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go
new file mode 100644
index 000000000..9924c9b1d
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go
@@ -0,0 +1,47 @@
+package cryptorand
+
+import (
+	"crypto/rand"
+	"encoding/binary"
+	"math/big"
+
+	"k8s.io/klog/v2"
+)
+
+func Intn(n int64) uint64 {
+ val := new(big.Int).SetInt64(n)
+ randNum, err := rand.Int(rand.Reader, val)
+ if err != nil {
+ klog.Errorf("Error generating random number using crypto/rand : %v", err)
+ return 0
+ }
+ return randNum.Uint64()
+}
+
+func Uint32() uint32 {
+ b := make([]byte, 8)
+ _, err := rand.Read(b)
+ if err != nil {
+ klog.Errorf("Error reading bytes for random number generation using crypto/rand: %v", err)
+ return 0
+ }
+ return binary.LittleEndian.Uint32(b)
+}
+
+func Uint64() uint64 {
+ b := make([]byte, 8)
+ _, err := rand.Read(b)
+ if err != nil {
+ klog.Errorf("Error reading bytes for random number generation using crypto/rand: %v", err)
+ return 0
+ }
+ return binary.LittleEndian.Uint64(b)
+}
+
+func Read(randBytes []byte) []byte {
+ _, err := rand.Read(randBytes)
+ if err != nil {
+ klog.Errorf("Error reading bytes using crypto/rand: %v", err)
+ return nil
+ }
+ return randBytes
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go
new file mode 100644
index 000000000..3d6e81514
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go
@@ -0,0 +1,181 @@
+package ops
+
+import (
+ "context"
+ "fmt"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// GetACLName returns the ACL name if it has one; otherwise it returns
+// an empty string.
+func GetACLName(acl *nbdb.ACL) string {
+ if acl.Name != nil {
+ return *acl.Name
+ }
+ return ""
+}
+
+func getACLMutableFields(acl *nbdb.ACL) []interface{} {
+ return []interface{}{&acl.Action, &acl.Direction, &acl.ExternalIDs, &acl.Log, &acl.Match, &acl.Meter,
+ &acl.Name, &acl.Options, &acl.Priority, &acl.Severity, &acl.Tier, &acl.SampleNew, &acl.SampleEst}
+}
+
+type aclPredicate func(*nbdb.ACL) bool
+
+// FindACLsWithPredicate looks up ACLs from the cache based on a given predicate
+func FindACLsWithPredicate(nbClient libovsdbclient.Client, p aclPredicate) ([]*nbdb.ACL, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ acls := []*nbdb.ACL{}
+ err := nbClient.WhereCache(p).List(ctx, &acls)
+ return acls, err
+}
+
+func FindACLs(nbClient libovsdbclient.Client, acls []*nbdb.ACL) ([]*nbdb.ACL, error) {
+ opModels := make([]operationModel, 0, len(acls))
+ foundACLs := make([]*nbdb.ACL, 0, len(acls))
+ for i := range acls {
+ // can't use i in the predicate, for loop replaces it in-memory
+ acl := acls[i]
+ found := []*nbdb.ACL{}
+ opModel := operationModel{
+ Model: acl,
+ ExistingResult: &found,
+ ErrNotFound: false,
+ BulkOp: false,
+ DoAfter: func() {
+ if len(found) > 0 {
+ foundACLs = append(foundACLs, found[0])
+ }
+ },
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ err := modelClient.Lookup(opModels...)
+ return foundACLs, err
+}
+
+// BuildACL builds an ACL with empty optional properties unset
+func BuildACL(name string, direction nbdb.ACLDirection, priority int, match string, action nbdb.ACLAction, meter string,
+ severity nbdb.ACLSeverity, log bool, externalIds map[string]string, options map[string]string, tier int) *nbdb.ACL {
+ name = fmt.Sprintf("%.63s", name)
+
+ var realName *string
+ var realMeter *string
+ var realSeverity *string
+ if len(name) != 0 {
+ realName = &name
+ }
+ if len(meter) != 0 {
+ realMeter = &meter
+ }
+ if len(severity) != 0 {
+ realSeverity = &severity
+ }
+ acl := &nbdb.ACL{
+ Name: realName,
+ Direction: direction,
+ Match: match,
+ Action: action,
+ Priority: priority,
+ Severity: realSeverity,
+ Log: log,
+ Meter: realMeter,
+ ExternalIDs: externalIds,
+ Options: options,
+ Tier: tier,
+ }
+
+ return acl
+}
+
+func SetACLLogging(acl *nbdb.ACL, severity nbdb.ACLSeverity, log bool) {
+ var realSeverity *string
+ if len(severity) != 0 {
+ realSeverity = &severity
+ }
+ acl.Severity = realSeverity
+ acl.Log = log
+}
+
+// CreateOrUpdateACLsOps creates or updates the provided ACLs returning the
+// corresponding ops
+func CreateOrUpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, samplingConfig *SamplingConfig, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(acls))
+ for i := range acls {
+ // can't use i in the predicate, for loop replaces it in-memory
+ acl := acls[i]
+ // ensure names are truncated (let's cover our bases from snippets that don't call BuildACL and call this directly)
+ if acl.Name != nil {
+ // node ACLs won't have names set
+ *acl.Name = fmt.Sprintf("%.63s", *acl.Name)
+ }
+ opModels = addSample(samplingConfig, opModels, acl)
+ opModel := operationModel{
+ Model: acl,
+ OnModelUpdates: getACLMutableFields(acl),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
+
+func UpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(acls))
+ for i := range acls {
+ // can't use i in the predicate, for loop replaces it in-memory
+ acl := acls[i]
+ opModel := operationModel{
+ Model: acl,
+ OnModelUpdates: getACLMutableFields(acl),
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
+
+// CreateOrUpdateACLs creates or updates the provided ACLs
+func CreateOrUpdateACLs(nbClient libovsdbclient.Client, samplingConfig *SamplingConfig, acls ...*nbdb.ACL) error {
+ ops, err := CreateOrUpdateACLsOps(nbClient, nil, samplingConfig, acls...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheckAndSetUUIDs(nbClient, acls, ops)
+ return err
+}
+
+// UpdateACLsLoggingOps updates the log and severity on the provided ACLs and
+// returns the corresponding ops
+func UpdateACLsLoggingOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(acls))
+ for i := range acls {
+ // can't use i in the predicate, for loop replaces it in-memory
+ acl := acls[i]
+ opModel := operationModel{
+ Model: acl,
+ OnModelUpdates: []interface{}{&acl.Severity, &acl.Log},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go
new file mode 100644
index 000000000..920bde95f
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go
@@ -0,0 +1,231 @@
+package ops
+
+import (
+ "context"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type addressSetPredicate func(*nbdb.AddressSet) bool
+
+// getNonZeroAddressSetMutableFields builds a list of address set
+// mutable fields with non-zero values to be used as the list of fields to
+// Update.
+// The purpose is to prevent libovsdb from interpreting non-nil empty maps/slices
+// as defaults and filtering them out of the update. The intention is to
+// use non-nil empty maps/slices to clear those fields in the update.
+// See: https://github.com/ovn-org/libovsdb/issues/226
+func getNonZeroAddressSetMutableFields(as *nbdb.AddressSet) []interface{} {
+ fields := []interface{}{}
+ if as.Addresses != nil {
+ fields = append(fields, &as.Addresses)
+ }
+ if as.ExternalIDs != nil {
+ fields = append(fields, &as.ExternalIDs)
+ }
+ return fields
+}
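+
+// For example (illustrative): setting as.Addresses = []string{} keeps the field
+// in the update and clears the column, while as.Addresses = nil omits it from
+// the update entirely.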
+
+// FindAddressSetsWithPredicate looks up address sets from the cache based on a
+// given predicate
+func FindAddressSetsWithPredicate(nbClient libovsdbclient.Client, p addressSetPredicate) ([]*nbdb.AddressSet, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ found := []*nbdb.AddressSet{}
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
+
+// GetAddressSet looks up an address set from the cache
+func GetAddressSet(nbClient libovsdbclient.Client, as *nbdb.AddressSet) (*nbdb.AddressSet, error) {
+ found := []*nbdb.AddressSet{}
+ opModel := operationModel{
+ Model: as,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+// CreateAddressSetsOps creates the create-ops for the provided address sets
+func CreateAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(addrSets))
+ for i := range addrSets {
+ as := addrSets[i]
+ opModel := operationModel{
+ Model: as,
+ OnModelUpdates: onModelUpdatesNone(),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// CreateAddressSets creates the provided address sets
+func CreateAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error {
+ ops, err := CreateAddressSetsOps(nbClient, nil, addrSets...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+func CreateOrUpdateAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(addrSets))
+ for i := range addrSets {
+ as := addrSets[i]
+ opModel := operationModel{
+ Model: as,
+ OnModelUpdates: getNonZeroAddressSetMutableFields(as),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// CreateOrUpdateAddressSets creates or updates the provided address sets
+func CreateOrUpdateAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error {
+ ops, err := CreateOrUpdateAddressSetsOps(nbClient, nil, addrSets...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// UpdateAddressSetsAddresses updates the Addresses on the provided address sets
+func UpdateAddressSetsAddresses(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error {
+ opModels := make([]operationModel, 0, len(addrSets))
+ for i := range addrSets {
+ as := addrSets[i]
+ opModel := operationModel{
+ Model: as,
+ OnModelUpdates: []interface{}{&as.Addresses},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ _, err := m.CreateOrUpdate(opModels...)
+ return err
+}
+
+// AddAddressesToAddressSetOps adds the provided addresses to the provided address set and
+// returns the corresponding ops
+func AddAddressesToAddressSetOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, as *nbdb.AddressSet, addresses ...string) ([]libovsdb.Operation, error) {
+ originalAddresses := as.Addresses
+ as.Addresses = addresses
+ opModel := operationModel{
+ Model: as,
+ OnModelMutations: []interface{}{&as.Addresses},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ ops, err := m.CreateOrUpdateOps(ops, opModel)
+ as.Addresses = originalAddresses
+ return ops, err
+}
+
+// DeleteAddressesFromAddressSetOps removes the provided addresses from the provided address
+// set and returns the corresponding ops
+func DeleteAddressesFromAddressSetOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, as *nbdb.AddressSet, addresses ...string) ([]libovsdb.Operation, error) {
+ originalAddresses := as.Addresses
+ as.Addresses = addresses
+ opModel := operationModel{
+ Model: as,
+ OnModelMutations: []interface{}{&as.Addresses},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ ops, err := m.DeleteOps(ops, opModel)
+ as.Addresses = originalAddresses
+ return ops, err
+}
+
+func DeleteAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(addrSets))
+ for i := range addrSets {
+ as := addrSets[i]
+ opModel := operationModel{
+ Model: as,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModels...)
+}
+
+// DeleteAddressSets deletes the provided address sets
+func DeleteAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error {
+ opModels := make([]operationModel, 0, len(addrSets))
+ for i := range addrSets {
+ as := addrSets[i]
+ opModel := operationModel{
+ Model: as,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.Delete(opModels...)
+}
+
+// DeleteAddressSetsWithPredicateOps returns the ops to delete address sets based on a given predicate
+func DeleteAddressSetsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p addressSetPredicate) ([]libovsdb.Operation, error) {
+ deleted := []*nbdb.AddressSet{}
+ opModel := operationModel{
+ ModelPredicate: p,
+ ExistingResult: &deleted,
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+// DeleteAddressSetsWithPredicate looks up address sets from the cache based on
+// a given predicate and deletes them
+func DeleteAddressSetsWithPredicate(nbClient libovsdbclient.Client, p addressSetPredicate) error {
+ ops, err := DeleteAddressSetsWithPredicateOps(nbClient, nil, p)
+ if err != nil {
+		return err
+ }
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go
new file mode 100644
index 000000000..3cc17b64f
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go
@@ -0,0 +1,166 @@
+package ops
+
+import (
+ "context"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
+)
+
+// ListChassis looks up all chassis from the cache
+func ListChassis(sbClient libovsdbclient.Client) ([]*sbdb.Chassis, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ searchedChassis := []*sbdb.Chassis{}
+ err := sbClient.List(ctx, &searchedChassis)
+ return searchedChassis, err
+}
+
+// ListChassisPrivate looks up all chassis private models from the cache
+func ListChassisPrivate(sbClient libovsdbclient.Client) ([]*sbdb.ChassisPrivate, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ found := []*sbdb.ChassisPrivate{}
+ err := sbClient.List(ctx, &found)
+ return found, err
+}
+
+// GetChassis looks up a chassis from the cache using the 'Name' column which is an indexed
+// column.
+func GetChassis(sbClient libovsdbclient.Client, chassis *sbdb.Chassis) (*sbdb.Chassis, error) {
+ found := []*sbdb.Chassis{}
+ opModel := operationModel{
+ Model: chassis,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(sbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+// DeleteChassis deletes the provided chassis and associated private chassis
+func DeleteChassis(sbClient libovsdbclient.Client, chassis ...*sbdb.Chassis) error {
+ opModels := make([]operationModel, 0, len(chassis))
+ for i := range chassis {
+ foundChassis := []*sbdb.Chassis{}
+ chassisPrivate := sbdb.ChassisPrivate{
+ Name: chassis[i].Name,
+ }
+ chassisUUID := ""
+ opModel := []operationModel{
+ {
+ Model: chassis[i],
+ ExistingResult: &foundChassis,
+ ErrNotFound: false,
+ BulkOp: false,
+ DoAfter: func() {
+ if len(foundChassis) > 0 {
+ chassisPrivate.Name = foundChassis[0].Name
+ chassisUUID = foundChassis[0].UUID
+ }
+ },
+ },
+ {
+ Model: &chassisPrivate,
+ ErrNotFound: false,
+ BulkOp: false,
+ },
+			// IGMPGroup has a weak link to chassis; deleting multiple chassis may result in IGMP_Groups
+			// with identical values in the "address", "datapath", and "chassis" columns once "chassis" goes empty
+ {
+ Model: &sbdb.IGMPGroup{},
+ ModelPredicate: func(group *sbdb.IGMPGroup) bool {
+ return group.Chassis != nil && chassisUUID != "" && *group.Chassis == chassisUUID
+ },
+ ErrNotFound: false,
+ BulkOp: true,
+ },
+ }
+ opModels = append(opModels, opModel...)
+ }
+
+ m := newModelClient(sbClient)
+ err := m.Delete(opModels...)
+ return err
+}
+
+type chassisPredicate func(*sbdb.Chassis) bool
+
+// DeleteChassisWithPredicate looks up chassis from the cache based on a given
+// predicate and deletes them as well as the associated private chassis
+func DeleteChassisWithPredicate(sbClient libovsdbclient.Client, p chassisPredicate) error {
+ foundChassis := []*sbdb.Chassis{}
+ foundChassisNames := sets.NewString()
+ foundChassisUUIDS := sets.NewString()
+ opModels := []operationModel{
+ {
+ Model: &sbdb.Chassis{},
+ ModelPredicate: p,
+ ExistingResult: &foundChassis,
+ ErrNotFound: false,
+ BulkOp: true,
+ DoAfter: func() {
+ for _, chassis := range foundChassis {
+ foundChassisNames.Insert(chassis.Name)
+ foundChassisUUIDS.Insert(chassis.UUID)
+ }
+ },
+ },
+ {
+ Model: &sbdb.ChassisPrivate{},
+ ModelPredicate: func(item *sbdb.ChassisPrivate) bool { return foundChassisNames.Has(item.Name) },
+ ErrNotFound: false,
+ BulkOp: true,
+ },
+		// IGMPGroup has a weak link to chassis; deleting multiple chassis may result in IGMP_Groups
+		// with identical values in the "address", "datapath", and "chassis" columns once "chassis" goes empty
+ {
+ Model: &sbdb.IGMPGroup{},
+ ModelPredicate: func(group *sbdb.IGMPGroup) bool { return group.Chassis != nil && foundChassisUUIDS.Has(*group.Chassis) },
+ ErrNotFound: false,
+ BulkOp: true,
+ },
+ }
+ m := newModelClient(sbClient)
+ err := m.Delete(opModels...)
+ return err
+}
+
+// CreateOrUpdateChassis creates or updates the chassis record along with the encap record
+func CreateOrUpdateChassis(sbClient libovsdbclient.Client, chassis *sbdb.Chassis, encap *sbdb.Encap) error {
+ m := newModelClient(sbClient)
+ opModels := []operationModel{
+ {
+ Model: encap,
+ DoAfter: func() {
+ chassis.Encaps = []string{encap.UUID}
+ },
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: false,
+ BulkOp: false,
+ },
+ {
+ Model: chassis,
+ OnModelMutations: []interface{}{&chassis.OtherConfig},
+ OnModelUpdates: []interface{}{&chassis.Encaps},
+ ErrNotFound: false,
+ BulkOp: false,
+ },
+ }
+
+ if _, err := m.CreateOrUpdate(opModels...); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go
new file mode 100644
index 000000000..dac95c5c0
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go
@@ -0,0 +1,47 @@
+package ops
+
+import (
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/ovsdb"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type coppPredicate func(*nbdb.Copp) bool
+
+// CreateOrUpdateCOPPsOps creates or updates the provided COPPs, returning the
+// corresponding ops
+func CreateOrUpdateCOPPsOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, copps ...*nbdb.Copp) ([]ovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(copps))
+ for i := range copps {
+ // can't use i in the predicate, for loop replaces it in-memory
+ copp := copps[i]
+ opModel := operationModel{
+ Model: copp,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
+
+// DeleteCOPPsWithPredicateOps deletes the COPPs found using the given predicate, returning the
+// corresponding ops
+func DeleteCOPPsWithPredicateOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, p coppPredicate) ([]ovsdb.Operation, error) {
+ copp := nbdb.Copp{}
+ opModels := []operationModel{
+ {
+ Model: &copp,
+ ModelPredicate: p,
+ ErrNotFound: false,
+ BulkOp: true,
+ },
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.DeleteOps(ops, opModels...)
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go
new file mode 100644
index 000000000..b242d7d3e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go
@@ -0,0 +1,332 @@
+package ops
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+)
+
+type dbObjType int
+type ownerType = string
+type ExternalIDKey string
+
+func (key ExternalIDKey) String() string {
+ return string(key)
+}
+
+// ObjectIDsType defines which ExternalIDs are used to identify db objects.
+// ExternalIDs are defined based on dbObjType and ownerType, e.g. default network controller creates address
+// sets for namespaces and network policies, and needs to use different sets of ids for them. So it will
+// create ObjectIDsType with the same dbObjType=addressSet, but different ownerTypes NamespaceOwnerType and
+// NetworkPolicyOwnerType. Then it can define a set of ExternalIDs that will be used for each type.
+// From the db perspective, dbObjType is identified based on the db table, and ownerType is used directly
+// in the ExternalIDs with OwnerTypeKey key.
+type ObjectIDsType struct {
+ dbTable dbObjType
+ ownerObjectType ownerType
+ // externalIDKeys is a slice, because primary id for given ObjectIDsType will be built from the
+ // ExternalIDKey values in given order
+ externalIDKeys []ExternalIDKey
+ externalIDsMap map[ExternalIDKey]bool
+}
+
+func (it ObjectIDsType) GetExternalIDKeys() []ExternalIDKey {
+ return it.externalIDKeys
+}
+
+func (it ObjectIDsType) HasKey(key ExternalIDKey) bool {
+ return it.externalIDsMap[key]
+}
+
+func (it ObjectIDsType) IsSameType(it2 *ObjectIDsType) bool {
+ return it.ownerObjectType == it2.ownerObjectType && it.dbTable == it2.dbTable
+}
+
+const (
+ // ExternalIDs keys that will be a part of a client index.
+ // OwnerControllerKey and OwnerTypeKey define managing entity (a.k.a. owner) for given db object.
+ // All the other ids are object-related.
+	// PrimaryIDKey will be used as a primary client index.
+	// A combination of OwnerControllerKey, OwnerTypeKey, and ObjectNameKey will be used as a secondary client index.
+ // While owner-related keys together with PrimaryIDKey will always be present in the ExternalIDs,
+ // ObjectNameKey may or may not be used, based on ObjectIDsType.
+ OwnerControllerKey ExternalIDKey = types.OvnK8sPrefix + "/owner-controller"
+ OwnerTypeKey ExternalIDKey = types.OvnK8sPrefix + "/owner-type"
+ // ObjectNameKey is a part of a secondary index, together with OwnerControllerKey and OwnerTypeKey
+ // May be used by controllers to store e.g. namespace+name of the object.
+ ObjectNameKey ExternalIDKey = types.OvnK8sPrefix + "/name"
+ // PrimaryIDKey will be used as a primary index, that is unique for every db object,
+ // and can be built based on the combination of all the other ids.
+ PrimaryIDKey ExternalIDKey = types.PrimaryIDKey
+)
+
+// ObjectNameKey may be used as a secondary ID in the future. To ensure easy filtering for namespaced
+// objects, you can combine namespace and name in that key. To unify this process (and potential parsing of the key)
+// the following 2 functions exist:
+// - BuildNamespaceNameKey to combine namespace and name into one key
+// - ParseNamespaceNameKey to split the key back into namespace and name
+
+func BuildNamespaceNameKey(namespace, name string) string {
+ return namespace + ":" + name
+}
+
+func ParseNamespaceNameKey(key string) (namespace, name string, err error) {
+ s := strings.Split(key, ":")
+ if len(s) != 2 {
+		err = fmt.Errorf("failed to parse namespaced name key %v, expected format <namespace>:<name>", key)
+ return
+ }
+ return s[0], s[1], nil
+}
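+
+// Illustrative round trip (hypothetical values, not taken from this file):
+//
+//	key := BuildNamespaceNameKey("default", "my-netpol") // "default:my-netpol"
+//	ns, name, err := ParseNamespaceNameKey(key)          // "default", "my-netpol", nil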
+
+// dbIDsMap is used to make sure the same ownerType is not defined twice for the same dbObjType to avoid conflicts.
+// It is filled in newObjectIDsType when registering new ObjectIDsType
+var dbIDsMap = map[dbObjType]map[ownerType]bool{}
+
+func newObjectIDsType(dbTable dbObjType, ownerObjectType ownerType, keys []ExternalIDKey) *ObjectIDsType {
+ if dbIDsMap[dbTable][ownerObjectType] {
+ panic(fmt.Sprintf("ObjectIDsType for params %v %v is already registered", dbTable, ownerObjectType))
+ }
+ if dbIDsMap[dbTable] == nil {
+ dbIDsMap[dbTable] = map[ownerType]bool{}
+ }
+ dbIDsMap[dbTable][ownerObjectType] = true
+ keysMap := map[ExternalIDKey]bool{}
+ for _, key := range keys {
+ keysMap[key] = true
+ }
+ return &ObjectIDsType{dbTable, ownerObjectType, keys, keysMap}
+}
+
+// DbObjectIDs is a structure representing a set of db object ExternalIDs, used to identify
+// an object in the db (as a primary/secondary index) or for a predicate search.
+// DbObjectIDs consists of 3 parts:
+// - idsType defines which IDs are used for a given object, as an ObjectIDsType,
+// idsType.ownerObjectType will be written to ExternalIDs[OwnerTypeKey]
+// - ownerControllerName defines who manages given object. It is required in case there are more than 1 controllers
+// using the same idsType to make sure every controller only updates objects it owns.
+// - objectIDs provide values for keys that are used by given idsType. To create a new object, all fields should be set.
+// For predicate search, only some values that need to be matched may be set.
+//
+// dbIndex := NewDbObjectIDs(AddressSetEgressFirewallDNS, "DefaultController",
+// map[ExternalIDKey]string{
+// ObjectNameKey: "dns.name",
+// IPFamilyKey: "ipv4"
+// })
+//
+// uses AddressSetEgressFirewallDNS = newObjectIDsType(addressSet, EgressFirewallDNSOwnerType, []ExternalIDKey{
+// // dnsName
+// ObjectNameKey,
+// IPFamilyKey,
+// })
+//
+// its dbIndex will be mapped to the following ExternalIDs
+//
+// {
+// "k8s.ovn.org/owner-controller": "DefaultController"
+// "k8s.ovn.org/owner-type": "EgressFirewallDNS" (value of EgressFirewallDNSOwnerType)
+// "k8s.ovn.org/name": "dns.name"
+// "k8s.ovn.org/ipFamily": "ipv4"
+// "k8s.ovn.org/id": "DefaultController:EgressFirewallDNS:dns.name:ipv4"
+// }
+type DbObjectIDs struct {
+ idsType *ObjectIDsType
+ // ownerControllerName specifies which controller owns the object.
+ // Controller should only change objects it owns, make sure to always set this field.
+ ownerControllerName string
+ // objectIDs store values for keys required by given ObjectIDsType, and may be different for different ObjectIDsType.
+ // These ids should uniquely identify db object with the same ownerControllerName and OwnerTypeKey.
+ objectIDs map[ExternalIDKey]string
+}
+
+// NewDbObjectIDs is used to construct DbObjectIDs; idsType and controller are always required,
+// while objectIds may be empty or only half-filled for predicate search.
+// objectIds keys that are not used by the given idsType will cause a panic.
+func NewDbObjectIDs(idsType *ObjectIDsType, controller string, objectIds map[ExternalIDKey]string) *DbObjectIDs {
+ if controller == "" {
+ panic("NewDbObjectIDs failed: controller should not be empty")
+ }
+ externalIDKeys := idsType.GetExternalIDKeys()
+ if externalIDKeys == nil {
+ // can only happen if ObjectIDsType{} is passed
+ panic(fmt.Sprintf("NewDbObjectIDs failed: ObjectIDsType %v should not be empty", idsType))
+ }
+ // only use values for keys from idsType
+ for key := range objectIds {
+ if !idsType.HasKey(key) {
+ panic(fmt.Sprintf("NewDbObjectIDs failed: key %v is unknown", key))
+ }
+ }
+ if objectIds == nil {
+ objectIds = map[ExternalIDKey]string{}
+ }
+ objectIDs := &DbObjectIDs{
+ idsType: idsType,
+ ownerControllerName: controller,
+ objectIDs: objectIds,
+ }
+ return objectIDs
+}
+
+// AddIDs creates a new DbObjectIDs with the additional extraObjectIds.
+// If at least one of the extraObjectIds keys is not used by objectIDs.idsType, it will panic.
+func (objectIDs *DbObjectIDs) AddIDs(extraObjectIds map[ExternalIDKey]string) *DbObjectIDs {
+ ids := deepcopyMap(objectIDs.objectIDs)
+ for key, value := range extraObjectIds {
+ ids[key] = value
+ }
+ return &DbObjectIDs{objectIDs.idsType, objectIDs.ownerControllerName, ids}
+}
+
+func (objectIDs *DbObjectIDs) RemoveIDs(idsToDelete ...ExternalIDKey) *DbObjectIDs {
+ ids := deepcopyMap(objectIDs.objectIDs)
+ for _, keyToDel := range idsToDelete {
+ delete(ids, keyToDel)
+ }
+ return &DbObjectIDs{objectIDs.idsType, objectIDs.ownerControllerName, ids}
+}
+
+func (objectIDs *DbObjectIDs) HasSameOwner(ownerController string, objectIDsType *ObjectIDsType) bool {
+ return objectIDs.ownerControllerName == ownerController && objectIDs.idsType.IsSameType(objectIDsType)
+}
+
+func (objectIDs *DbObjectIDs) GetUnsetKeys() []ExternalIDKey {
+ unsetKeys := []ExternalIDKey{}
+ for _, key := range objectIDs.idsType.GetExternalIDKeys() {
+ if _, ok := objectIDs.objectIDs[key]; !ok {
+ unsetKeys = append(unsetKeys, key)
+ }
+ }
+ return unsetKeys
+}
+
+// GetObjectID returns the value from the objectIDs.objectIDs map, or an empty string for keys that are not present.
+// Usually objectIDs.objectIDs doesn't include PrimaryIDKey, OwnerTypeKey, and OwnerControllerKey.
+func (objectIDs *DbObjectIDs) GetObjectID(key ExternalIDKey) string {
+ return objectIDs.objectIDs[key]
+}
+
+// GetExternalIDs should only be used to build ids before creating the new db object.
+// If at least one of the keys required by DbObjectIDs.idsType is not present in DbObjectIDs.objectIDs, it will panic.
+// GetExternalIDs returns a map of ids, that always includes keys
+// - OwnerControllerKey
+// - OwnerTypeKey
+// - PrimaryIDKey
+// and also all keys that are present in objectIDs.objectIDs.
+// PrimaryIDKey value consists of the following values joined with ":"
+// - objectIDs.ownerControllerName
+// - objectIDs.idsType.ownerObjectType
+// - values from DbObjectIDs.objectIDs are added in order set in ObjectIDsType.externalIDKeys
+func (objectIDs *DbObjectIDs) GetExternalIDs() map[string]string {
+ return objectIDs.getExternalIDs(false)
+}
+
+func (objectIDs *DbObjectIDs) getExternalIDs(allowEmptyKeys bool) map[string]string {
+ externalIDs := map[string]string{
+ OwnerControllerKey.String(): objectIDs.ownerControllerName,
+ OwnerTypeKey.String(): objectIDs.idsType.ownerObjectType,
+ }
+ for key, value := range objectIDs.objectIDs {
+ externalIDs[key.String()] = value
+ }
+ primaryID, err := objectIDs.getUniqueID()
+ if err == nil {
+ // err == nil => primary id was properly built
+ externalIDs[PrimaryIDKey.String()] = primaryID
+ } else if !allowEmptyKeys {
+ panic(fmt.Sprintf("Failed to build Primary ID for %+v: %v", objectIDs, err))
+ }
+ return externalIDs
+}
+
+// String returns a string that is similar to the PrimaryIDKey value, but if some required keys are not present
+// in DbObjectIDs.objectIDs, they are replaced with empty strings.
+// It is a representation of all the information set in DbObjectIDs.
+func (objectIDs *DbObjectIDs) String() string {
+ id := objectIDs.ownerControllerName + ":" + objectIDs.idsType.ownerObjectType
+ for _, key := range objectIDs.idsType.GetExternalIDKeys() {
+ id += ":" + objectIDs.objectIDs[key]
+ }
+ return id
+}
+
+func (objectIDs *DbObjectIDs) GetIDsType() *ObjectIDsType {
+ return objectIDs.idsType
+}
+
+// getUniqueID returns the primary id that is built from the objectIDs values.
+// If at least one required key is missing, an error is returned.
+func (objectIDs *DbObjectIDs) getUniqueID() (string, error) {
+ id := objectIDs.ownerControllerName + ":" + objectIDs.idsType.ownerObjectType
+ for _, key := range objectIDs.idsType.GetExternalIDKeys() {
+ value, ok := objectIDs.objectIDs[key]
+ if !ok {
+ return "", fmt.Errorf("key %v is required but not present", key)
+ }
+ id += ":" + value
+ }
+ return id, nil
+}
+
+// NewDbObjectIDsFromExternalIDs is used to parse object ExternalIDs. It sets DbObjectIDs.ownerControllerName based
+// on the OwnerControllerKey key, and verifies that the OwnerTypeKey value matches the given objectIDsType.
+// All the other ids from objectIDsType will be set in DbObjectIDs.objectIDs.
+func NewDbObjectIDsFromExternalIDs(objectIDsType *ObjectIDsType, externalIDs map[string]string) (*DbObjectIDs, error) {
+ if externalIDs[OwnerTypeKey.String()] != objectIDsType.ownerObjectType {
+ return nil, fmt.Errorf("expected ExternalID %s to equal %s, got %s",
+ OwnerTypeKey, objectIDsType.ownerObjectType, externalIDs[OwnerTypeKey.String()])
+ }
+ if externalIDs[OwnerControllerKey.String()] == "" {
+ return nil, fmt.Errorf("required ExternalID %s is empty", OwnerControllerKey)
+ }
+ objIDs := map[ExternalIDKey]string{}
+ for key, value := range externalIDs {
+ if objectIDsType.HasKey(ExternalIDKey(key)) {
+ objIDs[ExternalIDKey(key)] = value
+ }
+ }
+ return NewDbObjectIDs(objectIDsType, externalIDs[OwnerControllerKey.String()], objIDs), nil
+}
+
+// hasExternalIDs interface should only include types that use new ExternalIDs from DbObjectIDs.
+type hasExternalIDs interface {
+ GetExternalIDs() map[string]string
+}
+
+// GetNoOwnerPredicate should only be used on initial sync when switching to new ExternalIDs.
+// Otherwise, use GetPredicate with the specific OwnerControllerKey id.
+func GetNoOwnerPredicate[T hasExternalIDs]() func(item T) bool {
+ return func(item T) bool {
+ return item.GetExternalIDs()[OwnerControllerKey.String()] == ""
+ }
+}
+
+// GetPredicate returns a predicate to search for db objects of type nbdbT.
+// Only non-empty ids are matched (that always includes DbObjectIDs.OwnerTypeKey and DbObjectIDs.ownerControllerName);
+// the other IDs may be empty, in which case they are ignored in the filtering. An additional filter function f may be
+// passed, or set to nil.
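+//
+// Illustrative sketch (assumes *nbdb.AddressSet satisfies hasExternalIDs; the
+// controller and namespace names are example values, not upstream defaults):
+//
+//    ids := NewDbObjectIDs(AddressSetNamespace, "controller",
+//        map[ExternalIDKey]string{ObjectNameKey: "ns1"})
+//    p := GetPredicate[*nbdb.AddressSet](ids, nil)
+//    // p matches address sets owned by namespace "ns1" for any IP family,
+//    // because the unset IPFamilyKey is ignored in the filtering.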
+func GetPredicate[nbdbT hasExternalIDs](objectIDs *DbObjectIDs, f func(item nbdbT) bool) func(item nbdbT) bool {
+ predicateIDs := objectIDs.getExternalIDs(true)
+ if primaryID, ok := predicateIDs[PrimaryIDKey.String()]; ok {
+ // when primary id is set, other ids are not required
+ predicateIDs = map[string]string{PrimaryIDKey.String(): primaryID}
+ }
+ return func(item nbdbT) bool {
+ dbExternalIDs := item.GetExternalIDs()
+ for predKey, predValue := range predicateIDs {
+ if dbExternalIDs[predKey] != predValue {
+ return false
+ }
+ }
+ return f == nil || f(item)
+ }
+}
+
+func deepcopyMap(m map[ExternalIDKey]string) map[ExternalIDKey]string {
+ result := map[ExternalIDKey]string{}
+ for key, value := range m {
+ result[key] = value
+ }
+ return result
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go
new file mode 100644
index 000000000..9f0e8dfe8
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go
@@ -0,0 +1,328 @@
+package ops
+
+import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+
+const (
+ addressSet dbObjType = iota
+ acl
+ dhcpOptions
+ portGroup
+ logicalRouterPolicy
+ qos
+)
+
+const (
+ // owner types
+ EgressFirewallDNSOwnerType ownerType = "EgressFirewallDNS"
+ EgressFirewallOwnerType ownerType = "EgressFirewall"
+ EgressQoSOwnerType ownerType = "EgressQoS"
+ AdminNetworkPolicyOwnerType ownerType = "AdminNetworkPolicy"
+ BaselineAdminNetworkPolicyOwnerType ownerType = "BaselineAdminNetworkPolicy"
+ // NetworkPolicyOwnerType is deprecated for address sets and should only be used for sync.
+ // The new owner of network policy address sets is PodSelectorOwnerType.
+ NetworkPolicyOwnerType ownerType = "NetworkPolicy"
+ NetpolDefaultOwnerType ownerType = "NetpolDefault"
+ PodSelectorOwnerType ownerType = "PodSelector"
+ NamespaceOwnerType ownerType = "Namespace"
+ // HybridNodeRouteOwnerType is transferred from egressgw to apbRoute controller with the same dbIDs
+ HybridNodeRouteOwnerType ownerType = "HybridNodeRoute"
+ EgressIPOwnerType ownerType = "EgressIP"
+ EgressServiceOwnerType ownerType = "EgressService"
+ MulticastNamespaceOwnerType ownerType = "MulticastNS"
+ MulticastClusterOwnerType ownerType = "MulticastCluster"
+ NetpolNodeOwnerType ownerType = "NetpolNode"
+ NetpolNamespaceOwnerType ownerType = "NetpolNamespace"
+ VirtualMachineOwnerType ownerType = "VirtualMachine"
+ // NetworkPolicyPortIndexOwnerType is the old version of NetworkPolicyOwnerType, kept for sync only
+ NetworkPolicyPortIndexOwnerType ownerType = "NetworkPolicyPortIndexOwnerType"
+ // ClusterOwnerType means the object is cluster-scoped and doesn't belong to any k8s objects
+ ClusterOwnerType ownerType = "Cluster"
+ // UDNIsolationOwnerType means the object is needed to implement UserDefinedNetwork isolation
+ UDNIsolationOwnerType ownerType = "UDNIsolation"
+
+ // owner extra IDs, make sure to define only 1 ExternalIDKey for every string value
+ PriorityKey ExternalIDKey = "priority"
+ PolicyDirectionKey ExternalIDKey = "direction"
+ GressIdxKey ExternalIDKey = "gress-index"
+ IPFamilyKey ExternalIDKey = "ip-family"
+ TypeKey ExternalIDKey = "type"
+ IpKey ExternalIDKey = "ip"
+ PortPolicyIndexKey ExternalIDKey = "port-policy-index"
+ IpBlockIndexKey ExternalIDKey = "ip-block-index"
+ RuleIndex ExternalIDKey = "rule-index"
+ CIDRKey ExternalIDKey = types.OvnK8sPrefix + "/cidr"
+ PortPolicyProtocolKey ExternalIDKey = "port-policy-protocol"
+)
+
+// ObjectIDsTypes should only be created here
+
+var AddressSetAdminNetworkPolicy = newObjectIDsType(addressSet, AdminNetworkPolicyOwnerType, []ExternalIDKey{
+ // anp name
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+ // gress rule's index
+ GressIdxKey,
+ IPFamilyKey,
+})
+
+var AddressSetBaselineAdminNetworkPolicy = newObjectIDsType(addressSet, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{
+ // banp name
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+ // gress rule's index
+ GressIdxKey,
+ IPFamilyKey,
+})
+
+var AddressSetEgressFirewallDNS = newObjectIDsType(addressSet, EgressFirewallDNSOwnerType, []ExternalIDKey{
+ // dnsName
+ ObjectNameKey,
+ IPFamilyKey,
+})
+
+var AddressSetHybridNodeRoute = newObjectIDsType(addressSet, HybridNodeRouteOwnerType, []ExternalIDKey{
+ // nodeName
+ ObjectNameKey,
+ IPFamilyKey,
+})
+
+var AddressSetEgressQoS = newObjectIDsType(addressSet, EgressQoSOwnerType, []ExternalIDKey{
+ // namespace
+ ObjectNameKey,
+ // egress qos priority
+ PriorityKey,
+ IPFamilyKey,
+})
+
+var AddressSetPodSelector = newObjectIDsType(addressSet, PodSelectorOwnerType, []ExternalIDKey{
+ // pod selector string representation
+ ObjectNameKey,
+ IPFamilyKey,
+})
+
+// deprecated, should only be used for sync
+var AddressSetNetworkPolicy = newObjectIDsType(addressSet, NetworkPolicyOwnerType, []ExternalIDKey{
+ // namespace_name
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+ // gress rule index
+ GressIdxKey,
+ IPFamilyKey,
+})
+
+var AddressSetNamespace = newObjectIDsType(addressSet, NamespaceOwnerType, []ExternalIDKey{
+ // namespace
+ ObjectNameKey,
+ IPFamilyKey,
+})
+
+var AddressSetEgressIP = newObjectIDsType(addressSet, EgressIPOwnerType, []ExternalIDKey{
+ // cluster-wide address set name
+ ObjectNameKey,
+ IPFamilyKey,
+})
+
+var AddressSetEgressService = newObjectIDsType(addressSet, EgressServiceOwnerType, []ExternalIDKey{
+ // cluster-wide address set name
+ ObjectNameKey,
+ IPFamilyKey,
+})
+
+var ACLAdminNetworkPolicy = newObjectIDsType(acl, AdminNetworkPolicyOwnerType, []ExternalIDKey{
+ // anp name
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+ // gress rule's index
+ GressIdxKey,
+ // gress rule's peer port's protocol index
+ PortPolicyProtocolKey,
+})
+
+var ACLBaselineAdminNetworkPolicy = newObjectIDsType(acl, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{
+ // banp name
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+ // gress rule's index
+ GressIdxKey,
+ // gress rule's peer port's protocol index
+ PortPolicyProtocolKey,
+})
+
+var ACLNetpolDefault = newObjectIDsType(acl, NetpolDefaultOwnerType, []ExternalIDKey{
+ // for now there is only 1 acl of this type, but we use a name in case more types are needed in the future
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+})
+
+var ACLMulticastNamespace = newObjectIDsType(acl, MulticastNamespaceOwnerType, []ExternalIDKey{
+ // namespace
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+})
+
+var ACLMulticastCluster = newObjectIDsType(acl, MulticastClusterOwnerType, []ExternalIDKey{
+ // cluster-scoped multicast acls
+ // there are 2 possible TypeKey values for cluster default multicast acl: DefaultDeny and AllowInterNode
+ TypeKey,
+ // egress or ingress
+ PolicyDirectionKey,
+})
+
+var ACLNetpolNode = newObjectIDsType(acl, NetpolNodeOwnerType, []ExternalIDKey{
+ // node name
+ ObjectNameKey,
+ // exact ip for management port, every node may have more than 1 management ip
+ IpKey,
+})
+
+// ACLNetworkPolicyPortIndex define a unique index for every network policy ACL.
+// ingress/egress + NetworkPolicy[In/E]gressRule idx - defines given gressPolicy.
+// ACLs are created for every gp.portPolicies:
+// - for empty policy (no selectors and no ip blocks) - empty ACL (see allIPsMatch)
+// OR
+// - all selector-based peers ACL
+// - for every IPBlock +1 ACL
+// Therefore unique id for a given gressPolicy is portPolicy idx + IPBlock idx
+// (empty policy and all selector-based peers ACLs will have idx=-1)
+// Note: keep for backward compatibility only
+// Deprecated, should only be used for sync
+var ACLNetworkPolicyPortIndex = newObjectIDsType(acl, NetworkPolicyPortIndexOwnerType, []ExternalIDKey{
+ // policy namespace+name
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+ // gress rule index
+ GressIdxKey,
+ PortPolicyIndexKey,
+ IpBlockIndexKey,
+})
+
+// ACLNetworkPolicy define a unique index for every network policy ACL.
+// ingress/egress + NetworkPolicy[In/E]gressRule idx - defines given gressPolicy.
+// ACLs are created for gp.portPolicies which are grouped by protocol:
+// - for empty policy (no selectors and no ip blocks) - empty ACL (see allIPsMatch)
+// OR
+// - all selector-based peers ACL
+// - for every IPBlock +1 ACL
+// Therefore unique id for a given gressPolicy is protocol name + IPBlock idx
+// (protocol will be "None" if no port policy is defined, and empty policy and all
+// selector-based peers ACLs will have idx=-1)
+var ACLNetworkPolicy = newObjectIDsType(acl, NetworkPolicyOwnerType, []ExternalIDKey{
+ // policy namespace+name
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+ // gress rule index
+ GressIdxKey,
+ PortPolicyProtocolKey,
+ IpBlockIndexKey,
+})
+
+var ACLNetpolNamespace = newObjectIDsType(acl, NetpolNamespaceOwnerType, []ExternalIDKey{
+ // namespace
+ ObjectNameKey,
+ // in the same namespace there can be 2 default deny port groups, egress and ingress
+ PolicyDirectionKey,
+ // every port group has default deny and arp allow acl.
+ TypeKey,
+})
+
+var ACLEgressFirewall = newObjectIDsType(acl, EgressFirewallOwnerType, []ExternalIDKey{
+ // namespace
+ ObjectNameKey,
+ // there can only be 1 egress firewall object in every namespace, named "default"
+ // The only additional id we need is the index of the EgressFirewall.Spec.Egress rule.
+ RuleIndex,
+})
+
+var ACLUDN = newObjectIDsType(acl, UDNIsolationOwnerType, []ExternalIDKey{
+ // name of a UDN-related ACL
+ ObjectNameKey,
+ // egress or ingress
+ PolicyDirectionKey,
+})
+
+var VirtualMachineDHCPOptions = newObjectIDsType(dhcpOptions, VirtualMachineOwnerType, []ExternalIDKey{
+ // We can have multiple VMs with the same CIDR; they may have different
+ // hostnames.
+ // vm "namespace/name"
+ ObjectNameKey,
+ // CIDR field from DHCPOptions with ":" replaced by "."
+ CIDRKey,
+})
+
+var PortGroupNamespace = newObjectIDsType(portGroup, NamespaceOwnerType, []ExternalIDKey{
+ // namespace name
+ ObjectNameKey,
+})
+
+// every namespace that has at least 1 network policy, has resources that are shared by all network policies
+// in that namespace.
+var PortGroupNetpolNamespace = newObjectIDsType(portGroup, NetpolNamespaceOwnerType, []ExternalIDKey{
+ // namespace
+ ObjectNameKey,
+ // in the same namespace there can be 2 default deny port groups, egress and ingress
+ PolicyDirectionKey,
+})
+
+var PortGroupNetworkPolicy = newObjectIDsType(portGroup, NetworkPolicyOwnerType, []ExternalIDKey{
+ // policy namespace+name
+ ObjectNameKey,
+})
+
+var PortGroupAdminNetworkPolicy = newObjectIDsType(portGroup, AdminNetworkPolicyOwnerType, []ExternalIDKey{
+ // ANP name
+ ObjectNameKey,
+})
+
+var PortGroupBaselineAdminNetworkPolicy = newObjectIDsType(portGroup, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{
+ // BANP name
+ ObjectNameKey,
+})
+
+var PortGroupCluster = newObjectIDsType(portGroup, ClusterOwnerType, []ExternalIDKey{
+ // name of a global port group
+ // currently ClusterPortGroup and ClusterRtrPortGroup are present
+ ObjectNameKey,
+})
+
+var PortGroupUDN = newObjectIDsType(portGroup, UDNIsolationOwnerType, []ExternalIDKey{
+ // name of a UDN port group
+ // currently uses:
+ // secondaryPods - on default network switch to distinguish non-primary pods
+ ObjectNameKey,
+})
+
+var LogicalRouterPolicyEgressIP = newObjectIDsType(logicalRouterPolicy, EgressIPOwnerType, []ExternalIDKey{
+ // the priority of the LRP
+ PriorityKey,
+ // for the reroute policies it should be the "EIPName_Namespace/podName"
+ // for the no-reroute global policies it should be the unique global name
+ ObjectNameKey,
+ // the IP Family for this policy, ip4 or ip6 or ip(dualstack)
+ IPFamilyKey,
+})
+
+var QoSEgressQoS = newObjectIDsType(qos, EgressQoSOwnerType, []ExternalIDKey{
+ // the priority of the QoSRule (OVN priority is the same as the rule index priority for this feature)
+ // this value will be unique in a given namespace
+ PriorityKey,
+ // namespace
+ ObjectNameKey,
+})
+
+var QoSRuleEgressIP = newObjectIDsType(qos, EgressIPOwnerType, []ExternalIDKey{
+ // the priority of the QoSRule
+ PriorityKey,
+ // should be the unique global name
+ ObjectNameKey,
+ // the IP Family for this policy, ip4 or ip6 or ip(dualstack)
+ IPFamilyKey,
+})
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go
new file mode 100644
index 000000000..8eb17c6df
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go
@@ -0,0 +1,84 @@
+package ops
+
+import (
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type DHCPOptionsPredicate func(*nbdb.DHCPOptions) bool
+
+// CreateOrUpdateDhcpOptionsOps configures the logical switch port DHCPv4Options and DHCPv6Options fields with
+// the options passed in the dhcpIPv4Options and dhcpIPv6Options arguments, and creates/updates the corresponding
+// DHCPOptions objects. Options that are not provided default to nil in the LSP attributes.
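+//
+// Illustrative sketch (the CIDR, option values, and port name are assumptions):
+//
+//    v4opts := &nbdb.DHCPOptions{
+//        Cidr:    "10.128.0.0/24",
+//        Options: map[string]string{"lease_time": "3600"},
+//    }
+//    lsp := &nbdb.LogicalSwitchPort{Name: "pod-port"} // must already exist in the NB DB
+//    ops, err := CreateOrUpdateDhcpOptionsOps(nbClient, nil, lsp, v4opts, nil)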
+func CreateOrUpdateDhcpOptionsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lsp *nbdb.LogicalSwitchPort, dhcpIPv4Options, dhcpIPv6Options *nbdb.DHCPOptions) ([]libovsdb.Operation, error) {
+ opModels := []operationModel{}
+ if dhcpIPv4Options != nil {
+ opModel := operationModel{
+ Model: dhcpIPv4Options,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ DoAfter: func() { lsp.Dhcpv4Options = &dhcpIPv4Options.UUID },
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+ if dhcpIPv6Options != nil {
+ opModel := operationModel{
+ Model: dhcpIPv6Options,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ DoAfter: func() { lsp.Dhcpv6Options = &dhcpIPv6Options.UUID },
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+ opModels = append(opModels, operationModel{
+ Model: lsp,
+ OnModelUpdates: []interface{}{
+ &lsp.Dhcpv4Options,
+ &lsp.Dhcpv6Options,
+ },
+ ErrNotFound: true,
+ BulkOp: false,
+ })
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+func CreateOrUpdateDhcpOptions(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort, dhcpIPv4Options, dhcpIPv6Options *nbdb.DHCPOptions) error {
+ ops, err := CreateOrUpdateDhcpOptionsOps(nbClient, nil, lsp, dhcpIPv4Options, dhcpIPv6Options)
+ if err != nil {
+ return err
+ }
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+func DeleteDHCPOptions(nbClient libovsdbclient.Client, dhcpOptions *nbdb.DHCPOptions) error {
+ opModels := []operationModel{}
+ opModel := operationModel{
+ Model: dhcpOptions,
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+ opModels = append(opModels, opModel)
+ m := newModelClient(nbClient)
+ return m.Delete(opModels...)
+
+}
+
+func DeleteDHCPOptionsWithPredicate(nbClient libovsdbclient.Client, p DHCPOptionsPredicate) error {
+ opModels := []operationModel{}
+ opModel := operationModel{
+ Model: &nbdb.DHCPOptions{},
+ ModelPredicate: p,
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+ opModels = append(opModels, opModel)
+ m := newModelClient(nbClient)
+ return m.Delete(opModels...)
+
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go
new file mode 100644
index 000000000..854c8f2b2
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go
@@ -0,0 +1,88 @@
+package ops
+
+import (
+ "context"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// CreateOrUpdateLoadBalancerGroupOps returns the ops to create or update the
+// provided load balancer group
+func CreateOrUpdateLoadBalancerGroupOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, group *nbdb.LoadBalancerGroup) ([]ovsdb.Operation, error) {
+ // lb group has no fields other than name, safe to update just with non-default values
+ opModel := operationModel{
+ Model: group,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ ops, err := m.CreateOrUpdateOps(ops, opModel)
+ if err != nil {
+ return nil, err
+ }
+ return ops, nil
+}
+
+// AddLoadBalancersToGroupOps adds the provided load balancers to the provided
+// group and returns the corresponding ops
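+//
+// Illustrative sketch (the group name and the lb variable are assumptions): attach an
+// existing load balancer to an existing group and commit the ops in one transaction:
+//
+//    group := &nbdb.LoadBalancerGroup{Name: "clusterLBGroup"} // must already exist (ErrNotFound is set)
+//    ops, err := AddLoadBalancersToGroupOps(nbClient, nil, group, lb)
+//    if err == nil {
+//        _, err = TransactAndCheck(nbClient, ops)
+//    }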
+func AddLoadBalancersToGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, group *nbdb.LoadBalancerGroup, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+ originalLBs := group.LoadBalancer
+ group.LoadBalancer = make([]string, 0, len(lbs))
+ for _, lb := range lbs {
+ group.LoadBalancer = append(group.LoadBalancer, lb.UUID)
+ }
+ opModel := operationModel{
+ Model: group,
+ ModelPredicate: func(item *nbdb.LoadBalancerGroup) bool { return item.Name == group.Name },
+ OnModelMutations: []interface{}{&group.LoadBalancer},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ ops, err := m.CreateOrUpdateOps(ops, opModel)
+ group.LoadBalancer = originalLBs
+ return ops, err
+}
+
+// RemoveLoadBalancersFromGroupOps removes the provided load balancers from the
+// provided group and returns the corresponding ops
+func RemoveLoadBalancersFromGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, group *nbdb.LoadBalancerGroup, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+ originalLBs := group.LoadBalancer
+ group.LoadBalancer = make([]string, 0, len(lbs))
+ for _, lb := range lbs {
+ group.LoadBalancer = append(group.LoadBalancer, lb.UUID)
+ }
+ opModel := operationModel{
+ Model: group,
+ ModelPredicate: func(item *nbdb.LoadBalancerGroup) bool { return item.Name == group.Name },
+ OnModelMutations: []interface{}{&group.LoadBalancer},
+ // deleting load balancers from a group that doesn't exist is a no-op
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ ops, err := m.DeleteOps(ops, opModel)
+ group.LoadBalancer = originalLBs
+ return ops, err
+}
+
+type loadBalancerGroupPredicate func(*nbdb.LoadBalancerGroup) bool
+
+// FindLoadBalancerGroupsWithPredicate looks up load balancer groups from the
+// cache based on a given predicate
+func FindLoadBalancerGroupsWithPredicate(nbClient libovsdbclient.Client, p loadBalancerGroupPredicate) ([]*nbdb.LoadBalancerGroup, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ groups := []*nbdb.LoadBalancerGroup{}
+ err := nbClient.WhereCache(p).List(ctx, &groups)
+ return groups, err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go
new file mode 100644
index 000000000..097d37b4c
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go
@@ -0,0 +1,151 @@
+package ops
+
+import (
+ "context"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// getNonZeroLoadBalancerMutableFields builds a list of load balancer
+// mutable fields with non-zero values to be used as the list of fields to
+// Update.
+// The purpose is to prevent libovsdb interpreting non-nil empty maps/slices
+// as default and thus being filtered out of the update. The intention is to
+// use non-nil empty maps/slices to clear them out in the update.
+// See: https://github.com/ovn-org/libovsdb/issues/226
+func getNonZeroLoadBalancerMutableFields(lb *nbdb.LoadBalancer) []interface{} {
+ fields := []interface{}{}
+ if lb.Name != "" {
+ fields = append(fields, &lb.Name)
+ }
+ if lb.ExternalIDs != nil {
+ fields = append(fields, &lb.ExternalIDs)
+ }
+ if lb.HealthCheck != nil {
+ fields = append(fields, &lb.HealthCheck)
+ }
+ if lb.IPPortMappings != nil {
+ fields = append(fields, &lb.IPPortMappings)
+ }
+ if lb.Options != nil {
+ fields = append(fields, &lb.Options)
+ }
+ if lb.Protocol != nil {
+ fields = append(fields, &lb.Protocol)
+ }
+ if lb.SelectionFields != nil {
+ fields = append(fields, &lb.SelectionFields)
+ }
+ if lb.Vips != nil {
+ fields = append(fields, &lb.Vips)
+ }
+ return fields
+}
+
+// BuildLoadBalancer builds a load balancer
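+//
+// Illustrative sketch (the names, VIP mapping, and options below are assumptions,
+// not values mandated by this package):
+//
+//    lb := BuildLoadBalancer("Service_default/example_TCP_cluster",
+//        nbdb.LoadBalancerProtocolTCP, nil,
+//        map[string]string{"192.0.2.1:443": "10.0.0.5:6443"},
+//        map[string]string{"reject": "true"},
+//        map[string]string{"k8s.ovn.org/owner": "Service"})
+//    ops, err := CreateOrUpdateLoadBalancersOps(nbClient, nil, lb)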
+func BuildLoadBalancer(name string, protocol nbdb.LoadBalancerProtocol, selectionFields []nbdb.LoadBalancerSelectionFields, vips, options, externalIds map[string]string) *nbdb.LoadBalancer {
+ return &nbdb.LoadBalancer{
+ Name: name,
+ Protocol: &protocol,
+ Vips: vips,
+ SelectionFields: selectionFields,
+ Options: options,
+ ExternalIDs: externalIds,
+ }
+}
+
+// CreateOrUpdateLoadBalancersOps creates or updates the provided load balancers
+// returning the corresponding ops
+func CreateOrUpdateLoadBalancersOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(lbs))
+ for i := range lbs {
+ // can't use i in the predicate, for loop replaces it in-memory
+ lb := lbs[i]
+ opModel := operationModel{
+ Model: lb,
+ OnModelUpdates: getNonZeroLoadBalancerMutableFields(lb),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
+
+// RemoveLoadBalancerVipsOps removes the provided VIPs from the provided load
+// balancer set and returns the corresponding ops
+func RemoveLoadBalancerVipsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lb *nbdb.LoadBalancer, vips ...string) ([]libovsdb.Operation, error) {
+ originalVips := lb.Vips
+ lb.Vips = make(map[string]string, len(vips))
+ for _, vip := range vips {
+ lb.Vips[vip] = ""
+ }
+ opModel := operationModel{
+ Model: lb,
+ OnModelMutations: []interface{}{&lb.Vips},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ modelClient := newModelClient(nbClient)
+ ops, err := modelClient.DeleteOps(ops, opModel)
+ lb.Vips = originalVips
+ return ops, err
+}
+
+// DeleteLoadBalancersOps deletes the provided load balancers and returns the
+// corresponding ops
+func DeleteLoadBalancersOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(lbs))
+ for i := range lbs {
+ // can't use i in the predicate, for loop replaces it in-memory
+ lb := lbs[i]
+ opModel := operationModel{
+ Model: lb,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.DeleteOps(ops, opModels...)
+}
+
+// DeleteLoadBalancers deletes the provided load balancers
+func DeleteLoadBalancers(nbClient libovsdbclient.Client, lbs []*nbdb.LoadBalancer) error {
+ ops, err := DeleteLoadBalancersOps(nbClient, nil, lbs...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// ListLoadBalancers looks up all load balancers from the cache
+func ListLoadBalancers(nbClient libovsdbclient.Client) ([]*nbdb.LoadBalancer, error) {
+ lbs := []*nbdb.LoadBalancer{}
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ err := nbClient.List(ctx, &lbs)
+ return lbs, err
+}
+
+type loadBalancerPredicate func(*nbdb.LoadBalancer) bool
+
+// FindLoadBalancersWithPredicate looks up loadbalancers from the cache
+// based on a given predicate
+func FindLoadBalancersWithPredicate(nbClient libovsdbclient.Client, p loadBalancerPredicate) ([]*nbdb.LoadBalancer, error) {
+ found := []*nbdb.LoadBalancer{}
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go
new file mode 100644
index 000000000..1f7a76ba8
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go
@@ -0,0 +1,61 @@
+package ops
+
+import (
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// CreateOrUpdateStaticMacBinding creates or updates the provided static mac bindings
+func CreateOrUpdateStaticMacBinding(nbClient libovsdbclient.Client, smbs ...*nbdb.StaticMACBinding) error {
+ opModels := make([]operationModel, len(smbs))
+ for i := range smbs {
+ opModel := operationModel{
+ Model: smbs[i],
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels[i] = opModel
+ }
+
+ m := newModelClient(nbClient)
+ _, err := m.CreateOrUpdate(opModels...)
+ return err
+}
+
+// DeleteStaticMacBindings deletes the provided static mac bindings
+func DeleteStaticMacBindings(nbClient libovsdbclient.Client, smbs ...*nbdb.StaticMACBinding) error {
+ opModels := make([]operationModel, len(smbs))
+ for i := range smbs {
+ opModel := operationModel{
+ Model: smbs[i],
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels[i] = opModel
+ }
+
+ m := newModelClient(nbClient)
+ return m.Delete(opModels...)
+}
+
+type staticMACBindingPredicate func(*nbdb.StaticMACBinding) bool
+
+// DeleteStaticMACBindingWithPredicate deletes the Static MAC Binding entries matching the provided predicate
+func DeleteStaticMACBindingWithPredicate(nbClient libovsdbclient.Client, p staticMACBindingPredicate) error {
+ found := []*nbdb.StaticMACBinding{}
+ opModel := operationModel{
+ ModelPredicate: p,
+ ExistingResult: &found,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Delete(opModel)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go
new file mode 100644
index 000000000..8f1814968
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go
@@ -0,0 +1,66 @@
+package ops
+
+import (
+ "reflect"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+func equalsMeterBand(a, b *nbdb.MeterBand) bool {
+ return a.Action == b.Action &&
+ a.BurstSize == b.BurstSize &&
+ a.Rate == b.Rate &&
+ reflect.DeepEqual(a.ExternalIDs, b.ExternalIDs)
+}
+
+// CreateMeterBandOps creates the provided meter band if it does not exist
+func CreateMeterBandOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, meterBand *nbdb.MeterBand) ([]ovsdb.Operation, error) {
+ bands := []*nbdb.MeterBand{}
+ opModel := operationModel{
+ Model: meterBand,
+ ModelPredicate: func(item *nbdb.MeterBand) bool { return equalsMeterBand(item, meterBand) },
+ OnModelUpdates: onModelUpdatesNone(),
+ ExistingResult: &bands,
+ DoAfter: func() {
+ // in case we have multiple equal bands, pick the first one for
+ // convergence; OVSDB will remove unreferenced ones
+ if len(bands) > 0 {
+ uuids := sets.NewString()
+ for _, band := range bands {
+ uuids.Insert(band.UUID)
+ }
+ meterBand.UUID = uuids.List()[0]
+ }
+ },
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModel)
+}
+
+// CreateOrUpdateMeterOps creates or updates the provided meter associated to
+// the provided meter bands and returns the corresponding ops
+func CreateOrUpdateMeterOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, meter *nbdb.Meter, meterBands []*nbdb.MeterBand, fields ...interface{}) ([]ovsdb.Operation, error) {
+ if len(fields) == 0 {
+ fields = onModelUpdatesAllNonDefault()
+ }
+ meter.Bands = make([]string, 0, len(meterBands))
+ for _, band := range meterBands {
+ meter.Bands = append(meter.Bands, band.UUID)
+ }
+ opModel := operationModel{
+ Model: meter,
+ OnModelUpdates: fields,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModel)
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go
new file mode 100644
index 000000000..b40dd4104
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go
@@ -0,0 +1,516 @@
+package ops
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+)
+
+func getUUID(model model.Model) string {
+ switch t := model.(type) {
+ case *nbdb.ACL:
+ return t.UUID
+ case *nbdb.AddressSet:
+ return t.UUID
+ case *nbdb.BFD:
+ return t.UUID
+ case *nbdb.Copp:
+ return t.UUID
+ case *nbdb.GatewayChassis:
+ return t.UUID
+ case *nbdb.LoadBalancer:
+ return t.UUID
+ case *nbdb.LoadBalancerGroup:
+ return t.UUID
+ case *nbdb.LogicalRouter:
+ return t.UUID
+ case *nbdb.LogicalRouterPolicy:
+ return t.UUID
+ case *nbdb.LogicalRouterPort:
+ return t.UUID
+ case *nbdb.LogicalRouterStaticRoute:
+ return t.UUID
+ case *nbdb.LogicalSwitch:
+ return t.UUID
+ case *nbdb.LogicalSwitchPort:
+ return t.UUID
+ case *nbdb.NAT:
+ return t.UUID
+ case *nbdb.PortGroup:
+ return t.UUID
+ case *nbdb.NBGlobal:
+ return t.UUID
+ case *nbdb.MeterBand:
+ return t.UUID
+ case *nbdb.Meter:
+ return t.UUID
+ case *nbdb.Sample:
+ return t.UUID
+ case *nbdb.SampleCollector:
+ return t.UUID
+ case *nbdb.SamplingApp:
+ return t.UUID
+ case *nbdb.StaticMACBinding:
+ return t.UUID
+ case *sbdb.Chassis:
+ return t.UUID
+ case *sbdb.ChassisPrivate:
+ return t.UUID
+ case *sbdb.IGMPGroup:
+ return t.UUID
+ case *sbdb.Encap:
+ return t.UUID
+ case *sbdb.PortBinding:
+ return t.UUID
+ case *sbdb.SBGlobal:
+ return t.UUID
+ case *nbdb.QoS:
+ return t.UUID
+ case *nbdb.ChassisTemplateVar:
+ return t.UUID
+ case *nbdb.DHCPOptions:
+ return t.UUID
+ default:
+ panic(fmt.Sprintf("getUUID: unknown model %T", t))
+ }
+}
+
+func setUUID(model model.Model, uuid string) {
+ switch t := model.(type) {
+ case *nbdb.ACL:
+ t.UUID = uuid
+ case *nbdb.AddressSet:
+ t.UUID = uuid
+ case *nbdb.BFD:
+ t.UUID = uuid
+ case *nbdb.Copp:
+ t.UUID = uuid
+ case *nbdb.GatewayChassis:
+ t.UUID = uuid
+ case *nbdb.LoadBalancer:
+ t.UUID = uuid
+ case *nbdb.LoadBalancerGroup:
+ t.UUID = uuid
+ case *nbdb.LogicalRouter:
+ t.UUID = uuid
+ case *nbdb.LogicalRouterPolicy:
+ t.UUID = uuid
+ case *nbdb.LogicalRouterPort:
+ t.UUID = uuid
+ case *nbdb.LogicalRouterStaticRoute:
+ t.UUID = uuid
+ case *nbdb.LogicalSwitch:
+ t.UUID = uuid
+ case *nbdb.LogicalSwitchPort:
+ t.UUID = uuid
+ case *nbdb.NAT:
+ t.UUID = uuid
+ case *nbdb.PortGroup:
+ t.UUID = uuid
+ case *nbdb.NBGlobal:
+ t.UUID = uuid
+ case *nbdb.MeterBand:
+ t.UUID = uuid
+ case *nbdb.Meter:
+ t.UUID = uuid
+ case *nbdb.Sample:
+ t.UUID = uuid
+ case *nbdb.SampleCollector:
+ t.UUID = uuid
+ case *nbdb.SamplingApp:
+ t.UUID = uuid
+ case *nbdb.StaticMACBinding:
+ t.UUID = uuid
+ case *sbdb.Chassis:
+ t.UUID = uuid
+ case *sbdb.ChassisPrivate:
+ t.UUID = uuid
+ case *sbdb.IGMPGroup:
+ t.UUID = uuid
+ case *sbdb.Encap:
+ t.UUID = uuid
+ case *sbdb.PortBinding:
+ t.UUID = uuid
+ case *sbdb.SBGlobal:
+ t.UUID = uuid
+ case *nbdb.QoS:
+ t.UUID = uuid
+ case *nbdb.ChassisTemplateVar:
+ t.UUID = uuid
+ case *nbdb.DHCPOptions:
+ t.UUID = uuid
+ default:
+ panic(fmt.Sprintf("setUUID: unknown model %T", t))
+ }
+}
+
+func copyIndexes(model model.Model) model.Model {
+ switch t := model.(type) {
+ case *nbdb.ACL:
+ return &nbdb.ACL{
+ UUID: t.UUID,
+ ExternalIDs: map[string]string{
+ types.PrimaryIDKey: t.ExternalIDs[types.PrimaryIDKey],
+ },
+ }
+ case *nbdb.AddressSet:
+ return &nbdb.AddressSet{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.BFD:
+ return &nbdb.BFD{
+ UUID: t.UUID,
+ LogicalPort: t.LogicalPort,
+ DstIP: t.DstIP,
+ }
+ case *nbdb.Copp:
+ return &nbdb.Copp{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.GatewayChassis:
+ return &nbdb.GatewayChassis{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.LoadBalancer:
+ return &nbdb.LoadBalancer{
+ UUID: t.UUID,
+ // client index
+ Name: t.Name,
+ }
+ case *nbdb.LoadBalancerGroup:
+ return &nbdb.LoadBalancerGroup{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.LogicalRouter:
+ return &nbdb.LogicalRouter{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.LogicalRouterPolicy:
+ return &nbdb.LogicalRouterPolicy{
+ UUID: t.UUID,
+ }
+ case *nbdb.LogicalRouterPort:
+ return &nbdb.LogicalRouterPort{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.LogicalRouterStaticRoute:
+ return &nbdb.LogicalRouterStaticRoute{
+ UUID: t.UUID,
+ }
+ case *nbdb.LogicalSwitch:
+ return &nbdb.LogicalSwitch{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.LogicalSwitchPort:
+ return &nbdb.LogicalSwitchPort{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.NAT:
+ return &nbdb.NAT{
+ UUID: t.UUID,
+ }
+ case *nbdb.PortGroup:
+ return &nbdb.PortGroup{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.NBGlobal:
+ return &nbdb.NBGlobal{
+ UUID: t.UUID,
+ }
+ case *nbdb.MeterBand:
+ return &nbdb.MeterBand{
+ UUID: t.UUID,
+ }
+ case *nbdb.Meter:
+ return &nbdb.Meter{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *nbdb.Sample:
+ return &nbdb.Sample{
+ UUID: t.UUID,
+ Metadata: t.Metadata,
+ }
+ case *nbdb.SampleCollector:
+ return &nbdb.SampleCollector{
+ UUID: t.UUID,
+ ID: t.ID,
+ }
+ case *nbdb.SamplingApp:
+ return &nbdb.SamplingApp{
+ UUID: t.UUID,
+ Type: t.Type,
+ }
+ case *nbdb.StaticMACBinding:
+ return &nbdb.StaticMACBinding{
+ UUID: t.UUID,
+ LogicalPort: t.LogicalPort,
+ IP: t.IP,
+ }
+ case *sbdb.Chassis:
+ return &sbdb.Chassis{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *sbdb.ChassisPrivate:
+ return &sbdb.ChassisPrivate{
+ UUID: t.UUID,
+ Name: t.Name,
+ }
+ case *sbdb.IGMPGroup:
+ return &sbdb.IGMPGroup{
+ UUID: t.UUID,
+ }
+ case *sbdb.Encap:
+ return &sbdb.Encap{
+ UUID: t.UUID,
+ Type: t.Type,
+ IP: t.IP,
+ ChassisName: t.ChassisName,
+ }
+ case *sbdb.PortBinding:
+ return &sbdb.PortBinding{
+ UUID: t.UUID,
+ LogicalPort: t.LogicalPort,
+ Datapath: t.Datapath,
+ TunnelKey: t.TunnelKey,
+ }
+ case *sbdb.SBGlobal:
+ return &sbdb.SBGlobal{
+ UUID: t.UUID,
+ }
+ case *nbdb.QoS:
+ return &nbdb.QoS{
+ UUID: t.UUID,
+ ExternalIDs: map[string]string{
+ types.PrimaryIDKey: t.ExternalIDs[types.PrimaryIDKey],
+ },
+ }
+ case *nbdb.ChassisTemplateVar:
+ return &nbdb.ChassisTemplateVar{
+ UUID: t.UUID,
+ Chassis: t.Chassis,
+ }
+ case *nbdb.DHCPOptions:
+ return &nbdb.DHCPOptions{
+ UUID: t.UUID,
+ ExternalIDs: copyExternalIDs(t.ExternalIDs, types.PrimaryIDKey),
+ }
+ default:
+ panic(fmt.Sprintf("copyIndexes: unknown model %T", t))
+ }
+}
+
+func getListFromModel(model model.Model) interface{} {
+ switch t := model.(type) {
+ case *nbdb.ACL:
+ return &[]*nbdb.ACL{}
+ case *nbdb.AddressSet:
+ return &[]*nbdb.AddressSet{}
+ case *nbdb.BFD:
+ return &[]*nbdb.BFD{}
+ case *nbdb.Copp:
+ return &[]*nbdb.Copp{}
+ case *nbdb.GatewayChassis:
+ return &[]*nbdb.GatewayChassis{}
+ case *nbdb.LoadBalancer:
+ return &[]*nbdb.LoadBalancer{}
+ case *nbdb.LoadBalancerGroup:
+ return &[]*nbdb.LoadBalancerGroup{}
+ case *nbdb.LogicalRouter:
+ return &[]*nbdb.LogicalRouter{}
+ case *nbdb.LogicalRouterPolicy:
+ return &[]*nbdb.LogicalRouterPolicy{}
+ case *nbdb.LogicalRouterPort:
+ return &[]*nbdb.LogicalRouterPort{}
+ case *nbdb.LogicalRouterStaticRoute:
+ return &[]*nbdb.LogicalRouterStaticRoute{}
+ case *nbdb.LogicalSwitch:
+ return &[]*nbdb.LogicalSwitch{}
+ case *nbdb.LogicalSwitchPort:
+ return &[]*nbdb.LogicalSwitchPort{}
+ case *nbdb.NAT:
+ return &[]*nbdb.NAT{}
+ case *nbdb.PortGroup:
+ return &[]*nbdb.PortGroup{}
+ case *nbdb.NBGlobal:
+ return &[]*nbdb.NBGlobal{}
+ case *nbdb.MeterBand:
+ return &[]*nbdb.MeterBand{}
+ case *nbdb.Meter:
+ return &[]*nbdb.Meter{}
+ case *nbdb.Sample:
+ return &[]*nbdb.Sample{}
+ case *nbdb.SampleCollector:
+ return &[]*nbdb.SampleCollector{}
+ case *nbdb.SamplingApp:
+ return &[]*nbdb.SamplingApp{}
+ case *nbdb.StaticMACBinding:
+ return &[]*nbdb.StaticMACBinding{}
+ case *sbdb.Chassis:
+ return &[]*sbdb.Chassis{}
+ case *sbdb.ChassisPrivate:
+ return &[]*sbdb.ChassisPrivate{}
+ case *sbdb.IGMPGroup:
+ return &[]*sbdb.IGMPGroup{}
+ case *sbdb.Encap:
+ return &[]*sbdb.Encap{}
+ case *sbdb.PortBinding:
+ return &[]*sbdb.PortBinding{}
+ case *nbdb.QoS:
+ return &[]nbdb.QoS{}
+ case *nbdb.ChassisTemplateVar:
+ return &[]*nbdb.ChassisTemplateVar{}
+ case *nbdb.DHCPOptions:
+ return &[]nbdb.DHCPOptions{}
+ default:
+ panic(fmt.Sprintf("getListFromModel: unknown model %T", t))
+ }
+}
+
+// onModels applies the provided function to a collection of
+// models presented in different ways:
+// - a single model (pointer to a struct)
+// - a slice of models or pointer to slice of models
+// - a slice of structs or pointer to a slice of structs
+// If the provided function returns an error, iteration stops and
+// that error is returned.
+func onModels(models interface{}, do func(interface{}) error) error {
+ v := reflect.ValueOf(models)
+ if !v.IsValid() {
+ return nil
+ }
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return nil
+ }
+ v = v.Elem()
+ }
+ switch v.Kind() {
+ case reflect.Slice:
+ switch v.Type().Elem().Kind() {
+ case reflect.Struct:
+ for i := 0; i < v.Len(); i++ {
+ model := v.Index(i).Addr().Interface()
+ err := do(model)
+ if err != nil {
+ return err
+ }
+ }
+ case reflect.Interface:
+ fallthrough
+ case reflect.Ptr:
+ for i := 0; i < v.Len(); i++ {
+ model := v.Index(i).Interface()
+ err := do(model)
+ if err != nil {
+ return err
+ }
+ }
+ default:
+ panic(fmt.Sprintf("Expected slice of pointers or structs but got %s", v.Type().Elem().Kind()))
+ }
+ case reflect.Struct:
+ err := do(models)
+ if err != nil {
+ return err
+ }
+ default:
+ panic(fmt.Sprintf("Expected slice or struct but got %s", v.Kind()))
+ }
+ return nil
+}
+
+// buildFailOnDuplicateOps builds a wait operation on a condition that will fail
+// if a duplicate to the provided model is considered to be found. We use this
+// to avoid duplicates on certain unknown scenarios that are still to be tracked
+// down. See: https://bugzilla.redhat.com/show_bug.cgi?id=2042001.
+// When no specific operation is required for the provided model, returns an empty
+// array for convenience.
+func buildFailOnDuplicateOps(c client.Client, m model.Model) ([]ovsdb.Operation, error) {
+ // Right now we mostly consider models with a "Name" field that is not an
+ // index for which we don't expect duplicate names.
+ // A duplicate Name field that is an index will fail without the
+ // need of this wait operation.
+ // Some models that require a complex condition to detect duplicates are not
+ // considered for the time being due to the performance hit (e.g. ACLs).
+ timeout := types.OVSDBWaitTimeout
+ var field interface{}
+ var value string
+ switch t := m.(type) {
+ case *nbdb.LogicalRouter:
+ field = &t.Name
+ value = t.Name
+ case *nbdb.LogicalSwitch:
+ field = &t.Name
+ value = t.Name
+ case *nbdb.LogicalRouterPolicy:
+ condPriority := model.Condition{
+ Field: &t.Priority,
+ Function: ovsdb.ConditionEqual,
+ Value: t.Priority,
+ }
+ condMatch := model.Condition{
+ Field: &t.Match,
+ Function: ovsdb.ConditionEqual,
+ Value: t.Match,
+ }
+ return c.WhereAll(t, condPriority, condMatch).Wait(
+ ovsdb.WaitConditionNotEqual,
+ &timeout,
+ t,
+ &t.Priority,
+ &t.Match,
+ )
+ default:
+ return []ovsdb.Operation{}, nil
+ }
+
+ cond := model.Condition{
+ Field: field,
+ Function: ovsdb.ConditionEqual,
+ Value: value,
+ }
+ return c.WhereAny(m, cond).Wait(ovsdb.WaitConditionNotEqual, &timeout, m, field)
+}
+
+// getAllUpdatableFields returns a list of all of the columns/fields that can be updated for a model
+func getAllUpdatableFields(model model.Model) []interface{} {
+ switch t := model.(type) {
+ case *nbdb.LogicalSwitchPort:
+ return []interface{}{&t.Addresses, &t.Type, &t.TagRequest, &t.Options, &t.PortSecurity}
+ case *nbdb.PortGroup:
+ return []interface{}{&t.ACLs, &t.Ports, &t.ExternalIDs}
+ default:
+ panic(fmt.Sprintf("getAllUpdatableFields: unknown model %T", t))
+ }
+}
+
+func copyExternalIDs(externalIDs map[string]string, keys ...string) map[string]string {
+ var externalIDsCopy map[string]string
+ for _, key := range keys {
+ value, ok := externalIDs[key]
+ if ok {
+ if externalIDsCopy == nil {
+ externalIDsCopy = map[string]string{}
+ }
+ externalIDsCopy[key] = value
+ }
+ }
+ return externalIDsCopy
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go
new file mode 100644
index 000000000..04f54f0e8
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go
@@ -0,0 +1,511 @@
+package ops
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+
+ "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "k8s.io/klog/v2"
+)
+
+var errMultipleResults = errors.New("unexpectedly found multiple results for provided predicate")
+var errNoIndexes = errors.New("no indexes found for given model")
+
+type modelClient struct {
+ client client.Client
+}
+
+func newModelClient(client client.Client) modelClient {
+ return modelClient{
+ client: client,
+ }
+}
+
+/*
+extractUUIDsFromModels is a helper function that extracts the UUIDs of the
+provided models, for example to use them as the value of a mutation. It
+returns nil if no UUIDs are found.
+*/
+func extractUUIDsFromModels(models interface{}) []string {
+ ids := []string{}
+ _ = onModels(models, func(model interface{}) error {
+ uuid := getUUID(model)
+ if uuid != "" {
+ ids = append(ids, uuid)
+ }
+ return nil
+ })
+ if len(ids) == 0 {
+ return nil
+ }
+ return ids
+}
+
+// buildMutationsFromFields builds mutations that use the fields as values.
+func buildMutationsFromFields(fields []interface{}, mutator ovsdb.Mutator) ([]model.Mutation, error) {
+ mutations := []model.Mutation{}
+ for _, field := range fields {
+ switch v := field.(type) {
+ case *map[string]string:
+ if v == nil || len(*v) == 0 {
+ continue
+ }
+ if mutator == ovsdb.MutateOperationDelete {
+ // turn empty map values into a mutation to remove the key for
+ // delete mutations
+ removeKeys := make([]string, 0, len(*v))
+ updateKeys := make(map[string]string, len(*v))
+ for key, value := range *v {
+ if value == "" {
+ removeKeys = append(removeKeys, key)
+ } else {
+ updateKeys[key] = value
+ }
+ }
+ if len(removeKeys) > 0 {
+ mutation := model.Mutation{
+ Field: field,
+ Mutator: mutator,
+ Value: removeKeys,
+ }
+ mutations = append(mutations, mutation)
+ }
+ if len(updateKeys) > 0 {
+ mutation := model.Mutation{
+ Field: field,
+ Mutator: mutator,
+ Value: updateKeys,
+ }
+ mutations = append(mutations, mutation)
+ }
+ continue
+ }
+ // RFC 7047, section 5.1: a MutateOperationDelete is generated
+ // automatically for every updated key.
+ removeKeys := make([]string, 0, len(*v))
+ for key := range *v {
+ removeKeys = append(removeKeys, key)
+ }
+ if len(removeKeys) > 0 {
+ mutation := model.Mutation{
+ Field: field,
+ Mutator: ovsdb.MutateOperationDelete,
+ Value: removeKeys,
+ }
+ mutations = append(mutations, mutation)
+ }
+ mutation := model.Mutation{
+ Field: field,
+ Mutator: mutator,
+ Value: *v,
+ }
+ mutations = append(mutations, mutation)
+ case *[]string:
+ if v == nil || len(*v) == 0 {
+ continue
+ }
+ if mutator == ovsdb.MutateOperationInsert {
+ // Most string sets are UUIDs. The real server does not allow
+ // them to be empty, but the test server currently does. Other
+ // types of sets most likely have no need for empty items
+ // either, so catch this early.
+ for _, value := range *v {
+ if value == "" {
+ return nil, fmt.Errorf("unsupported mutation of set with empty values: %v", *v)
+ }
+ }
+ }
+ mutation := model.Mutation{
+ Field: field,
+ Mutator: mutator,
+ Value: *v,
+ }
+ mutations = append(mutations, mutation)
+ default:
+ return nil, fmt.Errorf("mutation for type %T not implemented", v)
+ }
+ }
+
+ return mutations, nil
+}
+
+/*
+operationModel is a struct which uses reflection to determine and perform
+idempotent operations against OVS DB (NB DB by default).
+*/
+type operationModel struct {
+ // Model specifies the model to be created, or to look up in the cache.
+ // The values in the fields of the Model are used for mutations and updates
+ // as well. If this Model is looked up or created, it will have its UUID set
+ // after the operation.
+ Model interface{}
+ // ModelPredicate specifies a predicate to look up models in the cache.
+ // If Model is provided with non-zero index column values, ModelPredicate
+ // will be ignored.
+ ModelPredicate interface{}
+ // ExistingResult is where the results of the look up are added to.
+ // Required when Model is not specified.
+ ExistingResult interface{}
+ // OnModelMutations specifies the fields from Model that will be used as
+ // the mutation value.
+ OnModelMutations []interface{}
+ // OnModelUpdates specifies the fields from Model that will be used as
+ // the update value.
+ // Note: while it is okay to have update and mutate operations on the same row, it
+ // is undefined behavior if the same column is used in both update and mutate.
+ OnModelUpdates []interface{}
+ // ErrNotFound flags this operation to fail with ErrNotFound if a model is
+ // not found.
+ ErrNotFound bool
+ // BulkOp flags this operation as a bulk operation capable of updating or
+ // mutating more than 1 model.
+ BulkOp bool
+ // DoAfter is invoked at the end of the operation and allows to setup a
+ // subsequent operation with values obtained from this one.
+ // If model lookup was successful, or a new db entry was created,
+ // Model will have UUID set, and it can be used in DoAfter. This only works
+ // if BulkOp is false and Model != nil.
+ DoAfter func()
+}
+
+func onModelUpdatesNone() []interface{} {
+ return nil
+}
+
+func onModelUpdatesAllNonDefault() []interface{} {
+ return []interface{}{}
+}
+
+/*
+CreateOrUpdate performs idempotent operations against libovsdb according to the
+following logic:
+
+a) performs a lookup of the models in the cache by ModelPredicate if provided,
+or by Model otherwise. If the models do not exist and ErrNotFound is set,
+it returns ErrNotFound
+
+b) if OnModelUpdates is specified; it performs a direct update of the model if
+it exists.
+
+c) if OnModelMutations is specified; it performs a direct mutation (insert) of
+the Model if it exists.
+
+d) if b) and c) are not true, but Model is provided, it creates the Model
+if it does not exist.
+
+e) if none of the above are true, ErrNotFound is returned.
+
+If BulkOp is set, update or mutate can happen across multiple models found.
+*/
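+// Illustrative sketch of a typical call (the port group name and external-id
+// values are assumptions, not defaults of this package):
+//
+//    pg := &nbdb.PortGroup{Name: "cluster-pg", ExternalIDs: map[string]string{"owner": "x"}}
+//    m := newModelClient(nbClient)
+//    // looked up by its Name index; created if missing, ExternalIDs updated otherwise
+//    _, err := m.CreateOrUpdate(operationModel{
+//        Model:          pg,
+//        OnModelUpdates: []interface{}{&pg.ExternalIDs},
+//    })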
+func (m *modelClient) CreateOrUpdate(opModels ...operationModel) ([]ovsdb.OperationResult, error) {
+ created, ops, err := m.createOrUpdateOps(nil, opModels...)
+ if err != nil {
+ return nil, err
+ }
+ return TransactAndCheckAndSetUUIDs(m.client, created, ops)
+}
+
+func (m *modelClient) CreateOrUpdateOps(ops []ovsdb.Operation, opModels ...operationModel) ([]ovsdb.Operation, error) {
+ _, ops, err := m.createOrUpdateOps(ops, opModels...)
+ return ops, err
+}
+
+func (m *modelClient) createOrUpdateOps(ops []ovsdb.Operation, opModels ...operationModel) (interface{}, []ovsdb.Operation, error) {
+ hasGuardOp := len(ops) > 0 && isGuardOp(&ops[0])
+ guardOp := []ovsdb.Operation{}
+ doWhenFound := func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) {
+ // nil represents onModelUpdatesNone
+ if opModel.OnModelUpdates != nil {
+ o, err = m.update(model, opModel)
+ }
+ // Note: while it is okay to have update and mutate operations on the same row, it is
+ // undefined behavior if the same exact column is used in both update and mutate.
+ if err == nil && opModel.OnModelMutations != nil {
+ var o2 []ovsdb.Operation
+ o2, err = m.mutate(model, opModel, ovsdb.MutateOperationInsert)
+ o = append(o, o2...)
+ }
+ return
+ }
+ doWhenNotFound := func(model interface{}, opModel *operationModel) ([]ovsdb.Operation, error) {
+ if !hasGuardOp {
+ // for the first insert of certain models, build a wait operation
+ // that checks for duplicates as a guard op to prevent against
+ // duplicate transactions
+ var err error
+ guardOp, err = buildFailOnDuplicateOps(m.client, opModel.Model)
+ if err != nil {
+ return nil, err
+ }
+ hasGuardOp = len(guardOp) > 0
+ }
+ return m.create(opModel)
+ }
+ created, ops, err := m.buildOps(ops, doWhenFound, doWhenNotFound, opModels...)
+ if len(guardOp) > 0 {
+ // set the guard op as the first of the list
+ ops = append(guardOp, ops...)
+ }
+ return created, ops, err
+}
+
+/*
+Delete performs idempotent delete operations against libovsdb according to the
+following logic:
+
+a) performs a lookup of the models in the cache by ModelPredicate if provided,
+or by Model otherwise. If the models do not exist and ErrNotFound is set
+it returns ErrNotFound.
+
+b) if OnModelMutations is specified; it performs a direct mutation (delete) of the
+Model if it exists.
+
+c) if b) is not true; it performs a direct delete of the Model if it exists.
+
+If BulkOp is set, delete or mutate can happen across multiple models found.
+*/
+func (m *modelClient) Delete(opModels ...operationModel) error {
+ ops, err := m.DeleteOps(nil, opModels...)
+ if err != nil {
+ return err
+ }
+ _, err = TransactAndCheck(m.client, ops)
+ return err
+}
+
+func (m *modelClient) DeleteOps(ops []ovsdb.Operation, opModels ...operationModel) ([]ovsdb.Operation, error) {
+ doWhenFound := func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) {
+ if opModel.OnModelMutations != nil {
+ return m.mutate(model, opModel, ovsdb.MutateOperationDelete)
+ } else {
+ return m.delete(model, opModel)
+ }
+ }
+ _, ops, err := m.buildOps(ops, doWhenFound, nil, opModels...)
+ return ops, err
+}
+
+type opModelToOpMapper func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error)
+
+func (m *modelClient) buildOps(ops []ovsdb.Operation, doWhenFound opModelToOpMapper, doWhenNotFound opModelToOpMapper, opModels ...operationModel) (interface{}, []ovsdb.Operation, error) {
+ if ops == nil {
+ ops = []ovsdb.Operation{}
+ }
+ notfound := []interface{}{}
+ for _, opModel := range opModels {
+ // do lookup
+ err := m.lookup(&opModel)
+ if err != nil && !errors.Is(err, client.ErrNotFound) {
+ return nil, nil, fmt.Errorf("unable to lookup model %+v: %w", opModel, err)
+ }
+
+ // do updates
+ var hadExistingResults bool
+ err = onModels(opModel.ExistingResult, func(model interface{}) error {
+ if hadExistingResults && !opModel.BulkOp {
+ return errMultipleResults
+ }
+ hadExistingResults = true
+
+ if doWhenFound != nil {
+ o, err := doWhenFound(model, &opModel)
+ if err != nil {
+ return err
+ }
+ ops = append(ops, o...)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // otherwise act when not found
+ if !hadExistingResults {
+ // return ErrNotFound,
+ // - if the caller explicitly requested it, or
+ // - failed to provide a Model for us to apply the operation on
+ if opModel.ErrNotFound || (doWhenNotFound != nil && opModel.Model == nil) {
+ return nil, nil, client.ErrNotFound
+ }
+ if doWhenNotFound != nil && opModel.Model != nil {
+ o, err := doWhenNotFound(nil, &opModel)
+ if err != nil {
+ return nil, nil, err
+ }
+ ops = append(ops, o...)
+ notfound = append(notfound, opModel.Model)
+ }
+ }
+
+ if opModel.DoAfter != nil {
+ opModel.DoAfter()
+ }
+ }
+
+ return notfound, ops, nil
+}
+
+/*
+create does a bit more than just "create": it needs to set the generated
+UUID (because if this function is called we know the item does not exist yet)
+and then create the item. An until clause and a wait operation are generated to avoid
+https://bugzilla.redhat.com/show_bug.cgi?id=2042001
+*/
+func (m *modelClient) create(opModel *operationModel) ([]ovsdb.Operation, error) {
+ uuid := getUUID(opModel.Model)
+ if uuid == "" {
+ setUUID(opModel.Model, buildNamedUUID())
+ }
+
+ ops, err := m.client.Create(opModel.Model)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create model, err: %w", err)
+ }
+
+ klog.V(5).Infof("Create operations generated as: %+v", ops)
+ return ops, nil
+}
+
+func (m *modelClient) update(lookUpModel interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) {
+ o, err = m.client.Where(lookUpModel).Update(opModel.Model, opModel.OnModelUpdates...)
+ if err != nil {
+ return nil, fmt.Errorf("unable to update model, err: %w", err)
+ }
+ klog.V(5).Infof("Update operations generated as: %+v", o)
+ return o, nil
+}
+
+func (m *modelClient) mutate(lookUpModel interface{}, opModel *operationModel, mutator ovsdb.Mutator) (o []ovsdb.Operation, err error) {
+ if opModel.OnModelMutations == nil {
+ return nil, nil
+ }
+ modelMutations, err := buildMutationsFromFields(opModel.OnModelMutations, mutator)
+ if len(modelMutations) == 0 || err != nil {
+ return nil, err
+ }
+ o, err = m.client.Where(lookUpModel).Mutate(opModel.Model, modelMutations...)
+ if err != nil {
+ return nil, fmt.Errorf("unable to mutate model, err: %w", err)
+ }
+ klog.V(5).Infof("Mutate operations generated as: %+v", o)
+ return o, nil
+}
+
+func (m *modelClient) delete(lookUpModel interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) {
+ o, err = m.client.Where(lookUpModel).Delete()
+ if err != nil {
+ return nil, fmt.Errorf("unable to delete model, err: %w", err)
+ }
+ klog.V(5).Infof("Delete operations generated as: %+v", o)
+ return o, nil
+}
+
+func (m *modelClient) Lookup(opModels ...operationModel) error {
+ _, _, err := m.buildOps(nil, nil, nil, opModels...)
+ return err
+}
+
+// CreateOrUpdate, Delete and Lookup can be called to
+// 1. create or update a single model
+// Model should be set, bulkOp = false, errNotfound = false
+// 2. update/delete/lookup 0..n models (create can't be done for multiple models at the same time)
+// Model index or predicate should be set
+//
+// The allowed combination of operationModel fields is different for these cases.
+// Both the Model db index and the ModelPredicate can be empty only in the first case.
+func lookupRequired(opModel *operationModel) bool {
+ // we know create is not supposed to be performed, if these fields are set
+ if opModel.BulkOp || opModel.ErrNotFound {
+ return true
+ }
+ return false
+}
+
+// lookup looks up the model in the cache, prioritizing provided indexes over a
+// predicate.
+// If the lookup was successful, opModel.Model will have its UUID set,
+// so that further user operations with the same model are indexed by UUID.
+func (m *modelClient) lookup(opModel *operationModel) error {
+ if opModel.ExistingResult == nil && opModel.Model != nil {
+ opModel.ExistingResult = getListFromModel(opModel.Model)
+ }
+
+ var err error
+ if opModel.Model != nil {
+ err = m.where(opModel)
+ if err != errNoIndexes {
+ // if index wasn't provided by the Model, try predicate search
+ // otherwise return where result
+ return err
+ }
+ }
+ // if index wasn't provided by the Model (errNoIndexes) or Model == nil, try predicate search
+ if opModel.ModelPredicate != nil {
+ return m.whereCache(opModel)
+ }
+ // the only operation that can be performed without a lookup (it can have no db indexes and no ModelPredicate set)
+ // is Create.
+ if lookupRequired(opModel) {
+ return fmt.Errorf("missing model indexes or predicate when a lookup was required")
+ }
+ return nil
+}
+
+func (m *modelClient) where(opModel *operationModel) error {
+ copyModel := copyIndexes(opModel.Model)
+ if reflect.ValueOf(copyModel).Elem().IsZero() {
+ // no indexes available
+ return errNoIndexes
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ var err error
+ if err = m.client.Where(copyModel).List(ctx, opModel.ExistingResult); err != nil {
+ return err
+ }
+ if opModel.Model == nil || opModel.BulkOp {
+ return nil
+ }
+ // for non-bulk op cases, copy (the one) uuid found to model provided.
+ // so that further user operations with the same model are indexed by UUID
+ err = onModels(opModel.ExistingResult, func(model interface{}) error {
+ uuid := getUUID(model)
+ setUUID(opModel.Model, uuid)
+ return nil
+ })
+ return err
+}
+
+func (m *modelClient) whereCache(opModel *operationModel) error {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ var err error
+ if err = m.client.WhereCache(opModel.ModelPredicate).List(ctx, opModel.ExistingResult); err != nil {
+ return err
+ }
+
+ if opModel.Model == nil || opModel.BulkOp {
+ return nil
+ }
+
+ // for non-bulk op cases, copy (the one) uuid found to model provided.
+ // so that further user operations with the same model are indexed by UUID
+ err = onModels(opModel.ExistingResult, func(model interface{}) error {
+ uuid := getUUID(model)
+ setUUID(opModel.Model, uuid)
+ return nil
+ })
+ return err
+}
+
+func isGuardOp(op *ovsdb.Operation) bool {
+ return op != nil && op.Op == ovsdb.OperationWait && op.Timeout != nil && *op.Timeout == types.OVSDBWaitTimeout
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go
new file mode 100644
index 000000000..1860668a7
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go
@@ -0,0 +1,28 @@
+package ops
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand"
+)
+
+const (
+ namedUUIDPrefix = 'u'
+)
+
+var (
+ namedUUIDCounter = cryptorand.Uint32()
+)
+
+// isNamedUUID checks if the passed id is a named-uuid built with
+// buildNamedUUID
+func isNamedUUID(id string) bool {
+ return id != "" && id[0] == namedUUIDPrefix
+}
+
+// buildNamedUUID builds an id that can be used as a named-uuid
+// as per OVSDB RFC 7047 section 5.1
+func buildNamedUUID() string {
+ return fmt.Sprintf("%c%010d", namedUUIDPrefix, atomic.AddUint32(&namedUUIDCounter, 1))
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go
new file mode 100644
index 000000000..2bb46d3a8
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go
@@ -0,0 +1,64 @@
+package ops
+
+import (
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// GetNBGlobal looks up the NB Global entry from the cache
+func GetNBGlobal(nbClient libovsdbclient.Client, nbGlobal *nbdb.NBGlobal) (*nbdb.NBGlobal, error) {
+ found := []*nbdb.NBGlobal{}
+ opModel := operationModel{
+ Model: nbGlobal,
+ ModelPredicate: func(item *nbdb.NBGlobal) bool { return true },
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+// UpdateNBGlobalSetOptions sets options on the NB Global entry, adding any
+// that are missing, removing the ones set to an empty value, and updating existing ones
+func UpdateNBGlobalSetOptions(nbClient libovsdbclient.Client, nbGlobal *nbdb.NBGlobal) error {
+	// find the nbGlobal table's UUID; we don't have any other way to reliably look up this table entry since it can
+	// only be indexed by UUID
+ updatedNbGlobal, err := GetNBGlobal(nbClient, nbGlobal)
+ if err != nil {
+ return err
+ }
+
+ if updatedNbGlobal.Options == nil {
+ updatedNbGlobal.Options = map[string]string{}
+ }
+
+ for k, v := range nbGlobal.Options {
+ if v == "" {
+ delete(updatedNbGlobal.Options, k)
+ } else {
+ updatedNbGlobal.Options[k] = v
+ }
+ }
+
+ // Update the options column in the nbGlobal entry since we already performed a lookup
+ opModel := operationModel{
+ Model: updatedNbGlobal,
+ OnModelUpdates: []interface{}{
+ &updatedNbGlobal.Options,
+ },
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err = m.CreateOrUpdate(opModel)
+ return err
+}
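+
+// Illustrative sketch (not part of the upstream file): setting a value to ""
+// removes that option while other keys are left untouched; the option keys
+// and values below are examples only.
+//
+//	err := UpdateNBGlobalSetOptions(nbClient, &nbdb.NBGlobal{
+//		Options: map[string]string{
+//			"example-option-a": "5000", // added or updated
+//			"example-option-b": "",     // removed if present
+//		},
+//	})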
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go
new file mode 100644
index 000000000..861a63cb9
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go
@@ -0,0 +1,53 @@
+package ops
+
+import (
+ "fmt"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
+)
+
+// UpdatePortBindingSetChassis sets the chassis column of the 'portBinding' row so that OVN thinks that
+// the port binding 'portBinding' is bound on the chassis. Ideally it is ovn-controller that claims/binds
+// a port binding, but for a remote chassis we have to bind it ourselves, since we created the remote chassis
+// record for the remote zone nodes.
+// TODO (numans) remove this function once OVN supports binding a port binding for a remote
+// chassis.
+func UpdatePortBindingSetChassis(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding, chassis *sbdb.Chassis) error {
+ ch, err := GetChassis(sbClient, chassis)
+ if err != nil {
+ return fmt.Errorf("failed to get chassis id %s(%s), error: %v", chassis.Name, chassis.Hostname, err)
+ }
+ portBinding.Chassis = &ch.UUID
+
+ opModel := operationModel{
+ Model: portBinding,
+ OnModelUpdates: []interface{}{&portBinding.Chassis},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(sbClient)
+ _, err = m.CreateOrUpdate(opModel)
+ return err
+}
+
+// GetPortBinding looks up a portBinding in SBDB
+func GetPortBinding(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding) (*sbdb.PortBinding, error) {
+ found := []*sbdb.PortBinding{}
+ opModel := operationModel{
+ Model: portBinding,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(sbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go
new file mode 100644
index 000000000..c8045c09e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go
@@ -0,0 +1,329 @@
+package ops
+
+import (
+ "context"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type portGroupPredicate func(group *nbdb.PortGroup) bool
+
+// FindPortGroupsWithPredicate looks up port groups from the cache based on a
+// given predicate
+func FindPortGroupsWithPredicate(nbClient libovsdbclient.Client, p portGroupPredicate) ([]*nbdb.PortGroup, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ found := []*nbdb.PortGroup{}
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
+
+// CreateOrUpdatePortGroupsOps creates or updates the provided port groups
+// returning the corresponding ops
+func CreateOrUpdatePortGroupsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, pgs ...*nbdb.PortGroup) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(pgs))
+ for i := range pgs {
+ pg := pgs[i]
+ opModel := operationModel{
+ Model: pg,
+ OnModelUpdates: getAllUpdatableFields(pg),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// CreateOrUpdatePortGroups creates or updates the provided port groups
+func CreateOrUpdatePortGroups(nbClient libovsdbclient.Client, pgs ...*nbdb.PortGroup) error {
+ ops, err := CreateOrUpdatePortGroupsOps(nbClient, nil, pgs...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// CreatePortGroup creates the provided port group if it doesn't exist
+func CreatePortGroup(nbClient libovsdbclient.Client, portGroup *nbdb.PortGroup) error {
+ opModel := operationModel{
+ Model: portGroup,
+ OnModelUpdates: onModelUpdatesNone(),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err := m.CreateOrUpdate(opModel)
+ return err
+}
+
+// GetPortGroup looks up a port group from the cache
+func GetPortGroup(nbClient libovsdbclient.Client, pg *nbdb.PortGroup) (*nbdb.PortGroup, error) {
+ found := []*nbdb.PortGroup{}
+ opModel := operationModel{
+ Model: pg,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+func AddPortsToPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, ports ...string) ([]libovsdb.Operation, error) {
+ if len(ports) == 0 {
+ return ops, nil
+ }
+
+ pg := nbdb.PortGroup{
+ Name: name,
+ Ports: ports,
+ }
+
+ opModel := operationModel{
+ Model: &pg,
+ OnModelMutations: []interface{}{&pg.Ports},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModel)
+}
+
+// AddPortsToPortGroup adds the provided ports to the provided port group
+func AddPortsToPortGroup(nbClient libovsdbclient.Client, name string, ports ...string) error {
+ ops, err := AddPortsToPortGroupOps(nbClient, nil, name, ports...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
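+
+// Illustrative sketch (not part of the upstream file): port UUIDs are mutated
+// into the existing group, so repeated calls are additive; the group name is an
+// example and lspUUID1/lspUUID2 stand for existing Logical_Switch_Port UUIDs.
+//
+//	err := AddPortsToPortGroup(nbClient, "example-port-group", lspUUID1, lspUUID2)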
+
+// DeletePortsFromPortGroupOps removes the provided ports from the provided port
+// group and returns the corresponding ops
+func DeletePortsFromPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, ports ...string) ([]libovsdb.Operation, error) {
+ if len(ports) == 0 {
+ return ops, nil
+ }
+
+ pg := nbdb.PortGroup{
+ Name: name,
+ Ports: ports,
+ }
+
+ opModel := operationModel{
+ Model: &pg,
+ OnModelMutations: []interface{}{&pg.Ports},
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+// DeletePortsFromPortGroup removes the provided ports from the provided port
+// group
+func DeletePortsFromPortGroup(nbClient libovsdbclient.Client, name string, ports ...string) error {
+ ops, err := DeletePortsFromPortGroupOps(nbClient, nil, name, ports...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// AddACLsToPortGroupOps adds the provided ACLs to the provided port group and
+// returns the corresponding ops
+func AddACLsToPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) {
+ if len(acls) == 0 {
+ return ops, nil
+ }
+
+ pg := nbdb.PortGroup{
+ Name: name,
+ ACLs: make([]string, 0, len(acls)),
+ }
+
+ for _, acl := range acls {
+ pg.ACLs = append(pg.ACLs, acl.UUID)
+ }
+
+ opModel := operationModel{
+ Model: &pg,
+ OnModelMutations: []interface{}{&pg.ACLs},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModel)
+}
+
+// UpdatePortGroupSetACLsOps updates the provided ACLs on the provided port group and
+// returns the corresponding ops. It entirely replaces the existing ACLs on the PG with
+// the newly provided list
+func UpdatePortGroupSetACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls []*nbdb.ACL) ([]libovsdb.Operation, error) {
+ pg := nbdb.PortGroup{
+ Name: name,
+ ACLs: make([]string, 0, len(acls)),
+ }
+ for _, acl := range acls {
+ pg.ACLs = append(pg.ACLs, acl.UUID)
+ }
+ opModel := operationModel{
+ Model: &pg,
+ OnModelUpdates: []interface{}{&pg.ACLs},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModel)
+}
+
+// DeleteACLsFromPortGroupOps removes the provided ACLs from the provided port
+// group and returns the corresponding ops
+func DeleteACLsFromPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) {
+ if len(acls) == 0 {
+ return ops, nil
+ }
+
+ pg := nbdb.PortGroup{
+ Name: name,
+ ACLs: make([]string, 0, len(acls)),
+ }
+
+ for _, acl := range acls {
+ pg.ACLs = append(pg.ACLs, acl.UUID)
+ }
+
+ opModel := operationModel{
+ Model: &pg,
+ OnModelMutations: []interface{}{&pg.ACLs},
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+func DeleteACLsFromPortGroups(nbClient libovsdbclient.Client, names []string, acls ...*nbdb.ACL) error {
+ var err error
+ var ops []libovsdb.Operation
+ for _, pgName := range names {
+ ops, err = DeleteACLsFromPortGroupOps(nbClient, ops, pgName, acls...)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+func DeleteACLsFromAllPortGroups(nbClient libovsdbclient.Client, acls ...*nbdb.ACL) error {
+ if len(acls) == 0 {
+ return nil
+ }
+
+ pg := nbdb.PortGroup{
+ ACLs: make([]string, 0, len(acls)),
+ }
+
+ for _, acl := range acls {
+ pg.ACLs = append(pg.ACLs, acl.UUID)
+ }
+
+ opModel := operationModel{
+ Model: &pg,
+ ModelPredicate: func(item *nbdb.PortGroup) bool { return true },
+ OnModelMutations: []interface{}{&pg.ACLs},
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+
+ m := newModelClient(nbClient)
+ ops, err := m.DeleteOps(nil, opModel)
+ if err != nil {
+ return err
+ }
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// DeletePortGroupsOps deletes the provided port groups and returns the
+// corresponding ops
+func DeletePortGroupsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, names ...string) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(names))
+ for _, name := range names {
+ pg := nbdb.PortGroup{
+ Name: name,
+ }
+ opModel := operationModel{
+ Model: &pg,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModels...)
+}
+
+// DeletePortGroups deletes the provided port groups
+func DeletePortGroups(nbClient libovsdbclient.Client, names ...string) error {
+ ops, err := DeletePortGroupsOps(nbClient, nil, names...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// DeletePortGroupsWithPredicateOps returns the corresponding ops to delete port groups based on
+// a given predicate
+func DeletePortGroupsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p portGroupPredicate) ([]libovsdb.Operation, error) {
+ deleted := []*nbdb.PortGroup{}
+ opModel := operationModel{
+ ModelPredicate: p,
+ ExistingResult: &deleted,
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+// DeletePortGroupsWithPredicate deletes the port groups based on the provided predicate
+func DeletePortGroupsWithPredicate(nbClient libovsdbclient.Client, p portGroupPredicate) error {
+ ops, err := DeletePortGroupsWithPredicateOps(nbClient, nil, p)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
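+
+// Illustrative sketch (not part of the upstream file): predicate-based
+// deletion selects rows from the client cache; the port group name below is an
+// example only.
+//
+//	err := DeletePortGroupsWithPredicate(nbClient, func(pg *nbdb.PortGroup) bool {
+//		return pg.Name == "example-stale-pg"
+//	})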
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go
new file mode 100644
index 000000000..a83a176df
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go
@@ -0,0 +1,119 @@
+package ops
+
+import (
+ "context"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type QoSPredicate func(*nbdb.QoS) bool
+
+// FindQoSesWithPredicate looks up QoSes from the cache based on a
+// given predicate
+func FindQoSesWithPredicate(nbClient libovsdbclient.Client, p QoSPredicate) ([]*nbdb.QoS, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ found := []*nbdb.QoS{}
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
+
+// CreateOrUpdateQoSesOps returns the ops to create or update the provided QoSes.
+func CreateOrUpdateQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(qoses))
+ for i := range qoses {
+		// can't use i in the predicate; the for loop replaces it in memory
+ qos := qoses[i]
+ opModel := operationModel{
+ Model: qos,
+ OnModelUpdates: []interface{}{}, // update all fields
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
+
+func UpdateQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(qoses))
+ for i := range qoses {
+		// can't use i in the predicate; the for loop replaces it in memory
+ qos := qoses[i]
+ opModel := operationModel{
+ Model: qos,
+ OnModelUpdates: []interface{}{}, // update all fields
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
+
+// AddQoSesToLogicalSwitchOps returns the ops to add the provided QoSes to the switch
+func AddQoSesToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) {
+ sw := &nbdb.LogicalSwitch{
+ Name: name,
+ QOSRules: make([]string, 0, len(qoses)),
+ }
+ for _, qos := range qoses {
+ sw.QOSRules = append(sw.QOSRules, qos.UUID)
+ }
+
+ opModels := operationModel{
+ Model: sw,
+ OnModelMutations: []interface{}{&sw.QOSRules},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels)
+}
+
+// DeleteQoSesOps returns the ops to delete the provided QoSes.
+func DeleteQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(qoses))
+ for i := range qoses {
+		// can't use i in the predicate; the for loop replaces it in memory
+ qos := qoses[i]
+ opModel := operationModel{
+ Model: qos,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.DeleteOps(ops, opModels...)
+}
+
+// RemoveQoSesFromLogicalSwitchOps returns the ops to remove the provided QoSes from the provided switch.
+func RemoveQoSesFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) {
+ sw := &nbdb.LogicalSwitch{
+ Name: name,
+ QOSRules: make([]string, 0, len(qoses)),
+ }
+ for _, qos := range qoses {
+ sw.QOSRules = append(sw.QOSRules, qos.UUID)
+ }
+
+ opModels := operationModel{
+ Model: sw,
+ OnModelMutations: []interface{}{&sw.QOSRules},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.DeleteOps(ops, opModels)
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go
new file mode 100644
index 000000000..9aeb42123
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go
@@ -0,0 +1,1222 @@
+package ops
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// ROUTER OPs
+
+type logicalRouterPredicate func(*nbdb.LogicalRouter) bool
+
+// GetLogicalRouter looks up a logical router from the cache
+func GetLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) (*nbdb.LogicalRouter, error) {
+ found := []*nbdb.LogicalRouter{}
+ opModel := operationModel{
+ Model: router,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+// FindLogicalRoutersWithPredicate looks up logical routers from the cache based on a
+// given predicate
+func FindLogicalRoutersWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPredicate) ([]*nbdb.LogicalRouter, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ found := []*nbdb.LogicalRouter{}
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
+
+// CreateOrUpdateLogicalRouter creates or updates the provided logical router
+func CreateOrUpdateLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, fields ...interface{}) error {
+ if len(fields) == 0 {
+ fields = onModelUpdatesAllNonDefault()
+ }
+ opModel := operationModel{
+ Model: router,
+ OnModelUpdates: fields,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err := m.CreateOrUpdate(opModel)
+ return err
+}
+
+// UpdateLogicalRouterSetExternalIDs sets external IDs on the provided logical
+// router, adding any that are missing, removing the ones set to an empty value, and
+// updating existing ones
+func UpdateLogicalRouterSetExternalIDs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) error {
+ externalIds := router.ExternalIDs
+ router, err := GetLogicalRouter(nbClient, router)
+ if err != nil {
+ return err
+ }
+
+ if router.ExternalIDs == nil {
+ router.ExternalIDs = map[string]string{}
+ }
+
+ for k, v := range externalIds {
+ if v == "" {
+ delete(router.ExternalIDs, k)
+ } else {
+ router.ExternalIDs[k] = v
+ }
+ }
+
+ opModel := operationModel{
+ Model: router,
+ OnModelUpdates: []interface{}{&router.ExternalIDs},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err = m.CreateOrUpdate(opModel)
+ return err
+}
+
+// DeleteLogicalRoutersWithPredicateOps returns the operations to delete the logical routers matching the provided predicate
+func DeleteLogicalRoutersWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ p logicalRouterPredicate) ([]libovsdb.Operation, error) {
+ opModel := operationModel{
+ Model: &nbdb.LogicalRouter{},
+ ModelPredicate: p,
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+// DeleteLogicalRouterOps returns the operations to delete the provided logical router
+func DeleteLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ router *nbdb.LogicalRouter) ([]libovsdb.Operation, error) {
+ opModel := operationModel{
+ Model: router,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+// DeleteLogicalRouter deletes the provided logical router
+func DeleteLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) error {
+ ops, err := DeleteLogicalRouterOps(nbClient, nil, router)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// LOGICAL ROUTER PORT OPs
+
+type logicalRouterPortPredicate func(*nbdb.LogicalRouterPort) bool
+
+// FindLogicalRouterPortWithPredicate looks up logical router port from
+// the cache based on a given predicate
+func FindLogicalRouterPortWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPortPredicate) ([]*nbdb.LogicalRouterPort, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ found := []*nbdb.LogicalRouterPort{}
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
+
+// GetLogicalRouterPort looks up a logical router port from the cache
+func GetLogicalRouterPort(nbClient libovsdbclient.Client, lrp *nbdb.LogicalRouterPort) (*nbdb.LogicalRouterPort, error) {
+ found := []*nbdb.LogicalRouterPort{}
+ opModel := operationModel{
+ Model: lrp,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+// CreateOrUpdateLogicalRouterPort creates or updates the provided logical
+// router port together with the gateway chassis (if not nil), and adds it to the provided logical router
+func CreateOrUpdateLogicalRouterPort(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter,
+ lrp *nbdb.LogicalRouterPort, chassis *nbdb.GatewayChassis, fields ...interface{}) error {
+ opModels := []operationModel{}
+ if chassis != nil {
+ opModels = append(opModels, operationModel{
+ Model: chassis,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ DoAfter: func() { lrp.GatewayChassis = []string{chassis.UUID} },
+ ErrNotFound: false,
+ BulkOp: false,
+ })
+ }
+ if len(fields) == 0 {
+ fields = onModelUpdatesAllNonDefault()
+ } else if chassis != nil {
+ fields = append(fields, &lrp.GatewayChassis)
+ }
+ originalPorts := router.Ports
+ router.Ports = []string{}
+ opModels = append(opModels, operationModel{
+ Model: lrp,
+ OnModelUpdates: fields,
+ DoAfter: func() { router.Ports = append(router.Ports, lrp.UUID) },
+ ErrNotFound: false,
+ BulkOp: false,
+ })
+ opModels = append(opModels, operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.Ports},
+ ErrNotFound: true,
+ BulkOp: false,
+ })
+ m := newModelClient(nbClient)
+ _, err := m.CreateOrUpdate(opModels...)
+ router.Ports = originalPorts
+ return err
+}
+
+// DeleteLogicalRouterPorts deletes the provided logical router ports and
+// removes them from the provided logical router
+func DeleteLogicalRouterPorts(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, lrps ...*nbdb.LogicalRouterPort) error {
+ originalPorts := router.Ports
+ router.Ports = make([]string, 0, len(lrps))
+ opModels := make([]operationModel, 0, len(lrps)+1)
+ for i := range lrps {
+ lrp := lrps[i]
+ opModel := operationModel{
+ Model: lrp,
+ DoAfter: func() {
+ if lrp.UUID != "" {
+ router.Ports = append(router.Ports, lrp.UUID)
+ }
+ },
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.Ports},
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+
+ m := newModelClient(nbClient)
+ err := m.Delete(opModels...)
+ router.Ports = originalPorts
+ return err
+}
+
+// LOGICAL ROUTER POLICY OPs
+
+type logicalRouterPolicyPredicate func(*nbdb.LogicalRouterPolicy) bool
+
+// FindLogicalRouterPoliciesWithPredicate looks up logical router policies from
+// the cache based on a given predicate
+func FindLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPolicyPredicate) ([]*nbdb.LogicalRouterPolicy, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ found := []*nbdb.LogicalRouterPolicy{}
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
+
+// GetLogicalRouterPolicy looks up a logical router policy from the cache
+func GetLogicalRouterPolicy(nbClient libovsdbclient.Client, policy *nbdb.LogicalRouterPolicy) (*nbdb.LogicalRouterPolicy, error) {
+ found := []*nbdb.LogicalRouterPolicy{}
+ opModel := operationModel{
+ Model: policy,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+// CreateOrUpdateLogicalRouterPolicyWithPredicate looks up a logical router
+// policy from the cache based on a given predicate. If it does not exist, it
+// creates the provided logical router policy. If it does, it updates it. The
+// logical router policy is added to the provided logical router.
+// fields determines which columns to update. Passing no fields assumes
+// all fields need to be updated. Passing a single nil field indicates no fields should be updated.
+// Otherwise a caller may pass as many individual fields as desired to specify which columns need updating.
+func CreateOrUpdateLogicalRouterPolicyWithPredicate(nbClient libovsdbclient.Client, routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate, fields ...interface{}) error {
+ ops, err := CreateOrUpdateLogicalRouterPolicyWithPredicateOps(nbClient, nil, routerName, lrp, p, fields...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
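+
+// Illustrative sketch (not part of the upstream file): the fields variadic
+// selects which columns are written. The router name, priority, match, and
+// next hop below are examples only, and Priority/Match are assumed to be the
+// usual generated nbdb.LogicalRouterPolicy columns.
+//
+//	lrp := &nbdb.LogicalRouterPolicy{Priority: 1004, Match: "ip4.src == 10.244.0.0/16", Nexthops: []string{"100.64.0.2"}}
+//	p := func(item *nbdb.LogicalRouterPolicy) bool { return item.Priority == 1004 }
+//
+//	// write all non-default columns
+//	err := CreateOrUpdateLogicalRouterPolicyWithPredicate(nbClient, "example-router", lrp, p)
+//	// write only the Nexthops column
+//	err = CreateOrUpdateLogicalRouterPolicyWithPredicate(nbClient, "example-router", lrp, p, &lrp.Nexthops)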
+
+// CreateOrUpdateLogicalRouterPolicyWithPredicateOps looks up a logical
+// router policy from the cache based on a given predicate. If it does not
+// exist, it creates the provided logical router policy. If it does, it
+// updates it. The logical router policy is added to the provided logical
+// router. Returns the corresponding ops
+func CreateOrUpdateLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate, fields ...interface{}) ([]libovsdb.Operation, error) {
+ if len(fields) == 0 {
+ fields = onModelUpdatesAllNonDefault()
+ }
+ router := &nbdb.LogicalRouter{
+ Name: routerName,
+ }
+
+ opModels := []operationModel{
+ {
+ Model: lrp,
+ ModelPredicate: p,
+ OnModelUpdates: fields,
+ DoAfter: func() { router.Policies = []string{lrp.UUID} },
+ ErrNotFound: false,
+ BulkOp: false,
+ },
+ {
+ Model: router,
+ OnModelMutations: []interface{}{&router.Policies},
+ ErrNotFound: true,
+ BulkOp: false,
+ },
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+func UpdateLogicalRouterPoliciesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ lrps ...*nbdb.LogicalRouterPolicy) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(lrps))
+ for i := range lrps {
+ lrp := lrps[i]
+ opModel := []operationModel{
+ {
+ Model: lrp,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: true,
+ BulkOp: false,
+ },
+ }
+ opModels = append(opModels, opModel...)
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// DeleteLogicalRouterPolicyWithPredicateOps looks up a logical
+// router policy from the cache based on a given predicate and returns the
+// corresponding ops to delete it and remove it from the provided router.
+func DeleteLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterPolicyPredicate) ([]libovsdb.Operation, error) {
+ router := &nbdb.LogicalRouter{
+ Name: routerName,
+ }
+
+ deleted := []*nbdb.LogicalRouterPolicy{}
+ opModels := []operationModel{
+ {
+ ModelPredicate: p,
+ ExistingResult: &deleted,
+ DoAfter: func() { router.Policies = extractUUIDsFromModels(&deleted) },
+ ErrNotFound: false,
+ BulkOp: true,
+ },
+ {
+ Model: router,
+ OnModelMutations: []interface{}{&router.Policies},
+ ErrNotFound: false,
+ BulkOp: false,
+ },
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModels...)
+}
+
+// DeleteLogicalRouterPoliciesWithPredicate looks up logical router policies
+// from the cache based on a given predicate, deletes them and removes them from
+// the provided logical router
+func DeleteLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterPolicyPredicate) error {
+ ops, err := DeleteLogicalRouterPolicyWithPredicateOps(nbClient, nil, routerName, p)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// CreateOrAddNextHopsToLogicalRouterPolicyWithPredicateOps looks up a logical
+// router policy from the cache based on a given predicate. If it doesn't find
+// any, it creates the provided logical router policy. If it does, adds any
+// missing Nexthops to the existing logical router policy. The logical router
+// policy is added to the provided logical router. Returns the corresponding ops
+func CreateOrAddNextHopsToLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate) ([]libovsdb.Operation, error) {
+ router := &nbdb.LogicalRouter{
+ Name: routerName,
+ }
+
+ opModels := []operationModel{
+ {
+ Model: lrp,
+ ModelPredicate: p,
+ OnModelMutations: []interface{}{&lrp.Nexthops},
+ DoAfter: func() { router.Policies = []string{lrp.UUID} },
+ ErrNotFound: false,
+ BulkOp: false,
+ },
+ {
+ Model: router,
+ OnModelMutations: []interface{}{&router.Policies},
+ ErrNotFound: true,
+ BulkOp: false,
+ },
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// DeleteNextHopsFromLogicalRouterPolicyOps removes the Nexthops from the
+// provided logical router policies.
+func DeleteNextHopsFromLogicalRouterPolicyOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrps []*nbdb.LogicalRouterPolicy, nextHops ...string) ([]libovsdb.Operation, error) {
+ nextHopSet := sets.NewString(nextHops...)
+ opModels := []operationModel{}
+ router := &nbdb.LogicalRouter{
+ Name: routerName,
+ Policies: []string{},
+ }
+
+ for i := range lrps {
+ lrp := lrps[i]
+ if nextHopSet.HasAll(lrp.Nexthops...) {
+			// if no next-hops remain in the policy, remove it altogether
+ router.Policies = append(router.Policies, lrp.UUID)
+ opModel := operationModel{
+ Model: lrp,
+ BulkOp: false,
+ ErrNotFound: false,
+ }
+ opModels = append(opModels, opModel)
+ } else {
+ // otherwise just remove the next-hops
+ lrp.Nexthops = nextHops
+ opModel := operationModel{
+ Model: lrp,
+ OnModelMutations: []interface{}{&lrp.Nexthops},
+ BulkOp: false,
+ ErrNotFound: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+ }
+
+ if len(router.Policies) > 0 {
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.Policies},
+ BulkOp: false,
+ ErrNotFound: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModels...)
+}
+
+// DeleteNextHopsFromLogicalRouterPolicies removes the Nexthops from the
+// provided logical router policies. If a logical router policy ends up with no
+// Nexthops, it is deleted and removed from the provided logical router.
+func DeleteNextHopsFromLogicalRouterPolicies(nbClient libovsdbclient.Client, routerName string, lrps ...*nbdb.LogicalRouterPolicy) error {
+ ops := []libovsdb.Operation{}
+ for _, lrp := range lrps {
+ nextHops := lrp.Nexthops
+ lrp, err := GetLogicalRouterPolicy(nbClient, lrp)
+ if errors.Is(err, libovsdbclient.ErrNotFound) {
+ continue
+ }
+ if err != nil {
+ return err
+ }
+
+ ops, err = DeleteNextHopsFromLogicalRouterPolicyOps(nbClient, ops, routerName, []*nbdb.LogicalRouterPolicy{lrp}, nextHops...)
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err := TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps looks up a logical
+// router policy from the cache based on a given predicate and removes the
+// provided Nexthop from it. If the logical router policy ends up with no
+// Nexthops, it is deleted and removed from the provided logical router. Returns
+// the corresponding ops
+func DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterPolicyPredicate, nextHop string) ([]libovsdb.Operation, error) {
+ lrps, err := FindLogicalRouterPoliciesWithPredicate(nbClient, p)
+ if err != nil {
+ return nil, err
+ }
+
+ return DeleteNextHopsFromLogicalRouterPolicyOps(nbClient, ops, routerName, lrps, nextHop)
+}
+
+// DeleteNextHopFromLogicalRouterPoliciesWithPredicate looks up a logical router
+// policy from the cache based on a given predicate and removes the provided
+// Nexthop from it. If the logical router policy ends up with no Nexthops, it is
+// deleted and removed from the provided logical router.
+func DeleteNextHopFromLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterPolicyPredicate, nextHop string) error {
+ ops, err := DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(nbClient, nil, routerName, p, nextHop)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// DeleteLogicalRouterPolicies deletes the logical router policies and removes
+// them from the provided logical router
+func DeleteLogicalRouterPolicies(nbClient libovsdbclient.Client, routerName string, lrps ...*nbdb.LogicalRouterPolicy) error {
+ opModels := getDeleteOpModelsForLogicalRouterPolicies(routerName, lrps...)
+
+ m := newModelClient(nbClient)
+ return m.Delete(opModels...)
+}
+
+// DeleteLogicalRouterPoliciesOps builds and returns corresponding delete operations for Logical Router
+// Policies from the provided logical router.
+func DeleteLogicalRouterPoliciesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrps ...*nbdb.LogicalRouterPolicy) ([]libovsdb.Operation, error) {
+ opModels := getDeleteOpModelsForLogicalRouterPolicies(routerName, lrps...)
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModels...)
+}
+
+func getDeleteOpModelsForLogicalRouterPolicies(routerName string, lrps ...*nbdb.LogicalRouterPolicy) []operationModel {
+ router := &nbdb.LogicalRouter{
+ Name: routerName,
+ Policies: make([]string, 0, len(lrps)),
+ }
+
+ opModels := make([]operationModel, 0, len(lrps)+1)
+ for _, lrp := range lrps {
+ router.Policies = append(router.Policies, lrp.UUID)
+ opModel := operationModel{
+ Model: lrp,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.Policies},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ return append(opModels, opModel)
+}
+
+// LOGICAL ROUTER STATIC ROUTES
+
+type logicalRouterStaticRoutePredicate func(*nbdb.LogicalRouterStaticRoute) bool
+
+// FindLogicalRouterStaticRoutesWithPredicate looks up logical router static
+// routes from the cache based on a given predicate
+func FindLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client, p logicalRouterStaticRoutePredicate) ([]*nbdb.LogicalRouterStaticRoute, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ found := []*nbdb.LogicalRouterStaticRoute{}
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
+
+// CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps looks up a logical
+// router static route from the cache based on a given predicate. If it does not
+// exist, it creates the provided logical router static route. If it does, it
+// updates it. The logical router static route is added to the provided logical
+// router. Returns the corresponding ops
+func CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ routerName string, lrsr *nbdb.LogicalRouterStaticRoute, p logicalRouterStaticRoutePredicate, fields ...interface{}) ([]libovsdb.Operation, error) {
+ if len(fields) == 0 {
+ fields = onModelUpdatesAllNonDefault()
+ }
+ router := &nbdb.LogicalRouter{
+ Name: routerName,
+ }
+
+ opModels := []operationModel{
+ {
+ Model: lrsr,
+ OnModelUpdates: fields,
+ DoAfter: func() { router.StaticRoutes = []string{lrsr.UUID} },
+ ErrNotFound: false,
+ BulkOp: false,
+ },
+ {
+ Model: router,
+ OnModelMutations: []interface{}{&router.StaticRoutes},
+ ErrNotFound: true,
+ BulkOp: false,
+ },
+ }
+
+ if p != nil {
+ opModels[0].ModelPredicate = p
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// PolicyEqualPredicate determines if two static routes have the same routing policy (dst-ip or src-ip)
+// If policy is nil, OVN considers that as dst-ip
+func PolicyEqualPredicate(p1, p2 *nbdb.LogicalRouterStaticRoutePolicy) bool {
+ if p1 == nil {
+ return p2 == nil || (p2 != nil && *p2 == nbdb.LogicalRouterStaticRoutePolicyDstIP)
+ }
+
+ if p2 == nil {
+ return *p1 == nbdb.LogicalRouterStaticRoutePolicyDstIP
+ }
+
+ return *p1 == *p2
+}
+
+// CreateOrReplaceLogicalRouterStaticRouteWithPredicate looks up a logical
+// router static route from the cache based on a given predicate. If it does not
+// exist, it creates the provided logical router static route. If it does, it
+// updates it. The logical router static route is added to the provided logical
+// router.
+// If more than one route matches the predicate on the router, the additional routes are removed.
+func CreateOrReplaceLogicalRouterStaticRouteWithPredicate(nbClient libovsdbclient.Client, routerName string,
+ lrsr *nbdb.LogicalRouterStaticRoute, p logicalRouterStaticRoutePredicate, fields ...interface{}) error {
+
+ lr := &nbdb.LogicalRouter{Name: routerName}
+ router, err := GetLogicalRouter(nbClient, lr)
+ if err != nil {
+ return fmt.Errorf("unable to get logical router %s: %w", routerName, err)
+ }
+ newPredicate := func(item *nbdb.LogicalRouterStaticRoute) bool {
+ for _, routeUUID := range router.StaticRoutes {
+ if routeUUID == item.UUID && p(item) {
+ return true
+ }
+ }
+ return false
+ }
+ routes, err := FindLogicalRouterStaticRoutesWithPredicate(nbClient, newPredicate)
+ if err != nil {
+ return fmt.Errorf("unable to get logical router static routes with predicate on router %s: %w", routerName, err)
+ }
+
+ var ops []libovsdb.Operation
+ m := newModelClient(nbClient)
+
+ if len(routes) > 0 {
+ lrsr.UUID = routes[0].UUID
+ }
+
+ if len(routes) > 1 {
+		// there should only be a single route; remove all except the first
+ routes = routes[1:]
+ opModels := make([]operationModel, 0, len(routes)+1)
+ router.StaticRoutes = []string{}
+ for _, route := range routes {
+ route := route
+ router.StaticRoutes = append(router.StaticRoutes, route.UUID)
+ opModel := operationModel{
+ Model: route,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.StaticRoutes},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+
+ ops, err = m.DeleteOps(nil, opModels...)
+ if err != nil {
+ return err
+ }
+ }
+
+ ops, err = CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps(nbClient, ops, routerName, lrsr, nil, fields...)
+ if err != nil {
+		return fmt.Errorf("unable to create or update logical router static routes on router %s: %w", routerName, err)
+ }
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// DeleteLogicalRouterStaticRoutesWithPredicate looks up logical router static
+// routes from the cache based on a given predicate, deletes them and removes
+// them from the provided logical router
+func DeleteLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterStaticRoutePredicate) error {
+ var ops []libovsdb.Operation
+ var err error
+ ops, err = DeleteLogicalRouterStaticRoutesWithPredicateOps(nbClient, ops, routerName, p)
+ if err != nil {
+ return err
+ }
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// DeleteLogicalRouterStaticRoutesWithPredicateOps looks up logical router static
+// routes from the cache based on a given predicate, and returns the ops to delete
+// them and remove them from the provided logical router
+func DeleteLogicalRouterStaticRoutesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterStaticRoutePredicate) ([]libovsdb.Operation, error) {
+ router := &nbdb.LogicalRouter{
+ Name: routerName,
+ }
+
+ deleted := []*nbdb.LogicalRouterStaticRoute{}
+ opModels := []operationModel{
+ {
+ ModelPredicate: p,
+ ExistingResult: &deleted,
+ DoAfter: func() { router.StaticRoutes = extractUUIDsFromModels(deleted) },
+ ErrNotFound: false,
+ BulkOp: true,
+ },
+ {
+ Model: router,
+ OnModelMutations: []interface{}{&router.StaticRoutes},
+ ErrNotFound: false,
+ BulkOp: false,
+ },
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModels...)
+}
+
+// DeleteLogicalRouterStaticRoutes deletes the logical router static routes and
+// removes them from the provided logical router
+func DeleteLogicalRouterStaticRoutes(nbClient libovsdbclient.Client, routerName string, lrsrs ...*nbdb.LogicalRouterStaticRoute) error {
+ router := &nbdb.LogicalRouter{
+ Name: routerName,
+ StaticRoutes: make([]string, 0, len(lrsrs)),
+ }
+
+ opModels := make([]operationModel, 0, len(lrsrs)+1)
+ for _, lrsr := range lrsrs {
+ lrsr := lrsr
+ router.StaticRoutes = append(router.StaticRoutes, lrsr.UUID)
+ opModel := operationModel{
+ Model: lrsr,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.StaticRoutes},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+
+ m := newModelClient(nbClient)
+ return m.Delete(opModels...)
+}
+
+// BFD ops
+
+// CreateOrUpdateBFDOps creates or updates the provided BFDs and returns
+// the corresponding ops
+func CreateOrUpdateBFDOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, bfds ...*nbdb.BFD) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(bfds))
+ for i := range bfds {
+ bfd := bfds[i]
+ opModel := operationModel{
+ Model: bfd,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// DeleteBFDs deletes the provided BFDs
+func DeleteBFDs(nbClient libovsdbclient.Client, bfds ...*nbdb.BFD) error {
+ opModels := make([]operationModel, 0, len(bfds))
+ for i := range bfds {
+ bfd := bfds[i]
+ opModel := operationModel{
+ Model: bfd,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ m := newModelClient(nbClient)
+ return m.Delete(opModels...)
+}
+
+func LookupBFD(nbClient libovsdbclient.Client, bfd *nbdb.BFD) (*nbdb.BFD, error) {
+ found := []*nbdb.BFD{}
+ opModel := operationModel{
+ Model: bfd,
+ ModelPredicate: func(item *nbdb.BFD) bool { return item.DstIP == bfd.DstIP && item.LogicalPort == bfd.LogicalPort },
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+ return found[0], nil
+}
+
+// LB OPs
+
+// AddLoadBalancersToLogicalRouterOps adds the provided load balancers to the
+// provided logical router and returns the corresponding ops
+func AddLoadBalancersToLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+ originalLBs := router.LoadBalancer
+ router.LoadBalancer = make([]string, 0, len(lbs))
+ for _, lb := range lbs {
+ router.LoadBalancer = append(router.LoadBalancer, lb.UUID)
+ }
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.LoadBalancer},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ modelClient := newModelClient(nbClient)
+ ops, err := modelClient.CreateOrUpdateOps(ops, opModel)
+ router.LoadBalancer = originalLBs
+ return ops, err
+}
+
+// RemoveLoadBalancersFromLogicalRouterOps removes the provided load balancers from the
+// provided logical router and returns the corresponding ops
+func RemoveLoadBalancersFromLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+ originalLBs := router.LoadBalancer
+ router.LoadBalancer = make([]string, 0, len(lbs))
+ for _, lb := range lbs {
+ router.LoadBalancer = append(router.LoadBalancer, lb.UUID)
+ }
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.LoadBalancer},
+ // if we want to delete loadbalancer from the router that doesn't exist, that is noop
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ modelClient := newModelClient(nbClient)
+ ops, err := modelClient.DeleteOps(ops, opModel)
+ router.LoadBalancer = originalLBs
+ return ops, err
+}
+
+func buildNAT(
+ natType nbdb.NATType,
+ externalIP string,
+ logicalIP string,
+ logicalPort string,
+ externalMac string,
+ externalIDs map[string]string,
+ match string,
+) *nbdb.NAT {
+ nat := &nbdb.NAT{
+ Type: natType,
+ ExternalIP: externalIP,
+ LogicalIP: logicalIP,
+ Options: map[string]string{"stateless": "false"},
+ ExternalIDs: externalIDs,
+ Match: match,
+ }
+
+ if logicalPort != "" {
+ nat.LogicalPort = &logicalPort
+ }
+
+ if externalMac != "" {
+ nat.ExternalMAC = &externalMac
+ }
+
+ return nat
+}
+
+// BuildSNAT builds a logical router SNAT
+func BuildSNAT(
+ externalIP *net.IP,
+ logicalIP *net.IPNet,
+ logicalPort string,
+ externalIDs map[string]string,
+) *nbdb.NAT {
+ return BuildSNATWithMatch(externalIP, logicalIP, logicalPort, externalIDs, "")
+}
+
+func BuildSNATWithMatch(
+ externalIP *net.IP,
+ logicalIP *net.IPNet,
+ logicalPort string,
+ externalIDs map[string]string,
+ match string,
+) *nbdb.NAT {
+ externalIPStr := ""
+ if externalIP != nil {
+ externalIPStr = externalIP.String()
+ }
+ // Strip out mask of logicalIP only if it is a host mask
+ logicalIPMask, _ := logicalIP.Mask.Size()
+ logicalIPStr := logicalIP.IP.String()
+ if logicalIPMask != 32 && logicalIPMask != 128 {
+ logicalIPStr = logicalIP.String()
+ }
+ return buildNAT(nbdb.NATTypeSNAT, externalIPStr, logicalIPStr, logicalPort, "", externalIDs, match)
+}
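+
+// Illustrative sketch (not part of the upstream file): a host-mask logical IP
+// (/32 or /128) is written without its mask, while a wider prefix keeps it;
+// the addresses below are examples only.
+//
+//	ip := net.ParseIP("169.254.169.1")
+//	_, hostNet, _ := net.ParseCIDR("10.244.0.3/32") // LogicalIP becomes "10.244.0.3"
+//	_, subnet, _ := net.ParseCIDR("10.244.0.0/24")  // LogicalIP stays "10.244.0.0/24"
+//	_ = BuildSNAT(&ip, hostNet, "", nil)
+//	_ = BuildSNAT(&ip, subnet, "", nil)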
+
+// BuildDNATAndSNAT builds a logical router DNAT/SNAT
+func BuildDNATAndSNAT(
+ externalIP *net.IP,
+ logicalIP *net.IPNet,
+ logicalPort string,
+ externalMac string,
+ externalIDs map[string]string,
+) *nbdb.NAT {
+ return BuildDNATAndSNATWithMatch(externalIP, logicalIP, logicalPort, externalMac, externalIDs, "")
+}
+
+func BuildDNATAndSNATWithMatch(
+ externalIP *net.IP,
+ logicalIP *net.IPNet,
+ logicalPort string,
+ externalMac string,
+ externalIDs map[string]string,
+ match string,
+) *nbdb.NAT {
+ externalIPStr := ""
+ if externalIP != nil {
+ externalIPStr = externalIP.String()
+ }
+ logicalIPStr := ""
+ if logicalIP != nil {
+ logicalIPStr = logicalIP.IP.String()
+ }
+ return buildNAT(
+ nbdb.NATTypeDNATAndSNAT,
+ externalIPStr,
+ logicalIPStr,
+ logicalPort,
+ externalMac,
+ externalIDs,
+ match)
+}
+
+// isEquivalentNAT checks if the `searched` NAT is equivalent to `existing`.
+// Returns true if the UUID is set in `searched` and matches the UUID of `existing`.
+// Otherwise, perform the following checks:
+// - Compare the Type and Match fields.
+// - Compare ExternalIP if it is set in `searched`.
+// - Compare LogicalIP if the Type in `searched` is SNAT.
+// - Compare LogicalPort if it is set in `searched`.
+// - Ensure that all ExternalIDs of `searched` exist and have the same value in `existing`.
+func isEquivalentNAT(existing *nbdb.NAT, searched *nbdb.NAT) bool {
+ // Simple case: uuid was provided.
+ if searched.UUID != "" && existing.UUID == searched.UUID {
+ return true
+ }
+
+ if searched.Type != existing.Type {
+ return false
+ }
+
+ if searched.Match != existing.Match {
+ return false
+ }
+
+	// Compare externalIP if it's not empty.
+ if searched.ExternalIP != "" && searched.ExternalIP != existing.ExternalIP {
+ return false
+ }
+
+ // Compare logicalIP only for SNAT, since DNAT types must have unique ExternalIP.
+ if searched.Type == nbdb.NATTypeSNAT && searched.LogicalIP != existing.LogicalIP {
+ return false
+ }
+
+ // When searching based on logicalPort, no need to go any further.
+ if searched.LogicalPort != nil &&
+ (existing.LogicalPort == nil || *searched.LogicalPort != *existing.LogicalPort) {
+ return false
+ }
+
+ // When searched external ids is populated, check if provided key,value exist in existing row.
+ // A usage case is when doing NAT operations where external id "name" is provided.
+ for externalIdKey, externalIdValue := range searched.ExternalIDs {
+ if foundValue, found := existing.ExternalIDs[externalIdKey]; !found || foundValue != externalIdValue {
+ return false
+ }
+ }
+
+ return true
+}
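+
+// Illustrative sketch (not part of the upstream file): a partially populated
+// `searched` NAT acts as a template. The following matches any existing SNAT
+// row with the same logical IP regardless of its other columns; the subnet is
+// an example only and `existing` stands for a *nbdb.NAT row read from the cache.
+//
+//	searched := &nbdb.NAT{Type: nbdb.NATTypeSNAT, LogicalIP: "10.244.1.0/24"}
+//	equivalent := isEquivalentNAT(existing, searched)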
+
+type natPredicate func(*nbdb.NAT) bool
+
+// GetNAT looks up a NAT from the cache
+func GetNAT(nbClient libovsdbclient.Client, nat *nbdb.NAT) (*nbdb.NAT, error) {
+ found := []*nbdb.NAT{}
+ opModel := operationModel{
+ Model: nat,
+ ModelPredicate: func(item *nbdb.NAT) bool { return isEquivalentNAT(item, nat) },
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+// FindNATsWithPredicate looks up NATs from the cache based on a given predicate
+func FindNATsWithPredicate(nbClient libovsdbclient.Client, predicate natPredicate) ([]*nbdb.NAT, error) {
+ nats := []*nbdb.NAT{}
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ err := nbClient.WhereCache(predicate).List(ctx, &nats)
+ return nats, err
+}
+
+// GetRouterNATs looks up NATs associated with the provided logical router from
+// the cache
+func GetRouterNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) ([]*nbdb.NAT, error) {
+ r, err := GetLogicalRouter(nbClient, router)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get router: %s, error: %w", router.Name, err)
+ }
+
+ nats := []*nbdb.NAT{}
+ for _, uuid := range r.Nat {
+ nat, err := GetNAT(nbClient, &nbdb.NAT{UUID: uuid})
+ if errors.Is(err, libovsdbclient.ErrNotFound) {
+ continue
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to lookup NAT entry with uuid: %s, error: %w", uuid, err)
+ }
+ nats = append(nats, nat)
+ }
+
+ return nats, nil
+}
+
+// CreateOrUpdateNATsOps creates or updates the provided NATs, adds them to
+// the provided logical router and returns the corresponding ops
+func CreateOrUpdateNATsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) ([]libovsdb.Operation, error) {
+ routerNats, err := GetRouterNATs(nbClient, router)
+ if err != nil {
+ return ops, fmt.Errorf("unable to get NAT entries for router %+v: %w", router, err)
+ }
+
+ originalNats := router.Nat
+ router.Nat = make([]string, 0, len(nats))
+ opModels := make([]operationModel, 0, len(nats)+1)
+ for i := range nats {
+ inputNat := nats[i]
+ for _, routerNat := range routerNats {
+ if isEquivalentNAT(routerNat, inputNat) {
+ inputNat.UUID = routerNat.UUID
+ break
+ }
+ }
+ opModel := operationModel{
+ Model: inputNat,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: false,
+ BulkOp: false,
+ DoAfter: func() { router.Nat = append(router.Nat, inputNat.UUID) },
+ }
+ opModels = append(opModels, opModel)
+ }
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.Nat},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+
+ m := newModelClient(nbClient)
+ ops, err = m.CreateOrUpdateOps(ops, opModels...)
+ router.Nat = originalNats
+ return ops, err
+}
+
+// CreateOrUpdateNATs creates or updates the provided NATs and adds them to
+// the provided logical router
+func CreateOrUpdateNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) error {
+ ops, err := CreateOrUpdateNATsOps(nbClient, nil, router, nats...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
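+
+// Illustrative usage of CreateOrUpdateNATs (a sketch, not part of the vendored
+// API surface; the router name and addresses below are hypothetical and assume
+// an initialized libovsdb NB client):
+//
+//	nat := &nbdb.NAT{
+//		Type:       nbdb.NATTypeSNAT,
+//		ExternalIP: "169.254.33.2",
+//		LogicalIP:  "10.244.0.0/24",
+//	}
+//	router := &nbdb.LogicalRouter{Name: "GR_node1"}
+//	if err := CreateOrUpdateNATs(nbClient, router, nat); err != nil {
+//		return fmt.Errorf("failed to sync NATs on %s: %w", router.Name, err)
+//	}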
+
+// DeleteNATsOps deletes the provided NATs, removes them from the provided
+// logical router and returns the corresponding ops
+func DeleteNATsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) ([]libovsdb.Operation, error) {
+ routerNats, err := GetRouterNATs(nbClient, router)
+ if errors.Is(err, libovsdbclient.ErrNotFound) {
+ return ops, nil
+ }
+ if err != nil {
+ return ops, fmt.Errorf("unable to get NAT entries for router %+v: %w", router, err)
+ }
+
+ originalNats := router.Nat
+ router.Nat = make([]string, 0, len(nats))
+ opModels := make([]operationModel, 0, len(routerNats)+1)
+ for _, routerNat := range routerNats {
+ for _, inputNat := range nats {
+ if isEquivalentNAT(routerNat, inputNat) {
+ router.Nat = append(router.Nat, routerNat.UUID)
+ opModel := operationModel{
+ Model: routerNat,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ break
+ }
+ }
+ }
+ if len(router.Nat) == 0 {
+ return ops, nil
+ }
+ opModel := operationModel{
+ Model: router,
+ OnModelMutations: []interface{}{&router.Nat},
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+
+ m := newModelClient(nbClient)
+ ops, err = m.DeleteOps(ops, opModels...)
+ router.Nat = originalNats
+ return ops, err
+}
+
+// DeleteNATs deletes the provided NATs and removes them from the provided
+// logical router
+func DeleteNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) error {
+ ops, err := DeleteNATsOps(nbClient, nil, router, nats...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// DeleteNATsWithPredicateOps looks up NATs from the cache based on a given
+// predicate, deletes them, removes them from associated logical routers and
+// returns the corresponding ops
+func DeleteNATsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p natPredicate) ([]libovsdb.Operation, error) {
+ deleted := []*nbdb.NAT{}
+ router := &nbdb.LogicalRouter{}
+ natUUIDs := sets.Set[string]{}
+ opModels := []operationModel{
+ {
+ ModelPredicate: p,
+ ExistingResult: &deleted,
+ DoAfter: func() {
+ router.Nat = extractUUIDsFromModels(&deleted)
+ natUUIDs.Insert(router.Nat...)
+ },
+ BulkOp: true,
+ ErrNotFound: false,
+ },
+ {
+ Model: router,
+ ModelPredicate: func(lr *nbdb.LogicalRouter) bool { return natUUIDs.HasAny(lr.Nat...) },
+ OnModelMutations: []interface{}{&router.Nat},
+ ErrNotFound: false,
+ BulkOp: true,
+ },
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModels...)
+}
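+
+// Illustrative predicate usage (a sketch; the external ID key and value are
+// hypothetical):
+//
+//	p := func(nat *nbdb.NAT) bool {
+//		return nat.ExternalIDs["name"] == "my-egress-ip"
+//	}
+//	ops, err := DeleteNATsWithPredicateOps(nbClient, nil, p)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := TransactAndCheck(nbClient, ops); err != nil {
+//		return err
+//	}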
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go
new file mode 100644
index 000000000..7f4f527d1
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go
@@ -0,0 +1,221 @@
+package ops
+
+import (
+ "golang.org/x/net/context"
+ "hash/fnv"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/model"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+func CreateOrUpdateSampleCollector(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error {
+ opModel := operationModel{
+ Model: collector,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err := m.CreateOrUpdate(opModel)
+ return err
+}
+
+func UpdateSampleCollectorExternalIDs(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error {
+ opModel := operationModel{
+ Model: collector,
+ OnModelUpdates: []interface{}{&collector.ExternalIDs},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err := m.CreateOrUpdate(opModel)
+ return err
+}
+
+func DeleteSampleCollector(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error {
+ opModel := operationModel{
+ Model: collector,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ m := newModelClient(nbClient)
+ return m.Delete(opModel)
+}
+
+func DeleteSampleCollectorWithPredicate(nbClient libovsdbclient.Client, p func(collector *nbdb.SampleCollector) bool) error {
+ opModel := operationModel{
+ Model: &nbdb.SampleCollector{},
+ ModelPredicate: p,
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+ m := newModelClient(nbClient)
+ return m.Delete(opModel)
+}
+
+func FindSampleCollectorWithPredicate(nbClient libovsdbclient.Client, p func(*nbdb.SampleCollector) bool) ([]*nbdb.SampleCollector, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ collectors := []*nbdb.SampleCollector{}
+ err := nbClient.WhereCache(p).List(ctx, &collectors)
+ return collectors, err
+}
+
+func ListSampleCollectors(nbClient libovsdbclient.Client) ([]*nbdb.SampleCollector, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ collectors := []*nbdb.SampleCollector{}
+ err := nbClient.List(ctx, &collectors)
+ return collectors, err
+}
+
+func CreateOrUpdateSamplingAppsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, samplingApps ...*nbdb.SamplingApp) ([]libovsdb.Operation, error) {
+ opModels := make([]operationModel, 0, len(samplingApps))
+ for i := range samplingApps {
+		// copy the element instead of using the loop index; the loop variable is reused in-memory across iterations
+ samplingApp := samplingApps[i]
+ opModel := operationModel{
+ Model: samplingApp,
+ OnModelUpdates: onModelUpdatesAllNonDefault(),
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
+
+func DeleteSamplingAppsWithPredicate(nbClient libovsdbclient.Client, p func(collector *nbdb.SamplingApp) bool) error {
+ opModel := operationModel{
+ Model: &nbdb.SamplingApp{},
+ ModelPredicate: p,
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+ m := newModelClient(nbClient)
+ return m.Delete(opModel)
+}
+
+func FindSample(nbClient libovsdbclient.Client, sampleMetadata int) (*nbdb.Sample, error) {
+ sample := &nbdb.Sample{
+ Metadata: sampleMetadata,
+ }
+ return GetSample(nbClient, sample)
+}
+
+func GetSample(nbClient libovsdbclient.Client, sample *nbdb.Sample) (*nbdb.Sample, error) {
+ found := []*nbdb.Sample{}
+ opModel := operationModel{
+ Model: sample,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+ modelClient := newModelClient(nbClient)
+ err := modelClient.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+ return found[0], err
+}
+
+type SampleFeature = string
+
+const (
+ EgressFirewallSample SampleFeature = "EgressFirewall"
+ NetworkPolicySample SampleFeature = "NetworkPolicy"
+ AdminNetworkPolicySample SampleFeature = "AdminNetworkPolicy"
+ MulticastSample SampleFeature = "Multicast"
+ UDNIsolationSample SampleFeature = "UDNIsolation"
+)
+
+// SamplingConfig is used to configure sampling for different db objects.
+type SamplingConfig struct {
+ featureCollectors map[SampleFeature][]string
+}
+
+func NewSamplingConfig(featureCollectors map[SampleFeature][]string) *SamplingConfig {
+ return &SamplingConfig{
+ featureCollectors: featureCollectors,
+ }
+}
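+
+// Illustrative construction (a sketch; the collector UUID is hypothetical and
+// would normally come from a SampleCollector created via
+// CreateOrUpdateSampleCollector):
+//
+//	config := NewSamplingConfig(map[SampleFeature][]string{
+//		NetworkPolicySample:      {collectorUUID},
+//		AdminNetworkPolicySample: {collectorUUID},
+//	})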
+
+func addSample(c *SamplingConfig, opModels []operationModel, model model.Model) []operationModel {
+ switch t := model.(type) {
+ case *nbdb.ACL:
+ return createOrUpdateSampleForACL(opModels, c, t)
+ }
+ return opModels
+}
+
+// createOrUpdateSampleForACL should be called before the ACL's operationModel is appended to opModels.
+func createOrUpdateSampleForACL(opModels []operationModel, c *SamplingConfig, acl *nbdb.ACL) []operationModel {
+ if c == nil {
+ acl.SampleEst = nil
+ acl.SampleNew = nil
+ return opModels
+ }
+ collectors := c.featureCollectors[getACLSampleFeature(acl)]
+ if len(collectors) == 0 {
+ acl.SampleEst = nil
+ acl.SampleNew = nil
+ return opModels
+ }
+ aclID := GetACLSampleID(acl)
+ sample := &nbdb.Sample{
+ Collectors: collectors,
+ // 32 bits
+ Metadata: int(aclID),
+ }
+ opModel := operationModel{
+ Model: sample,
+ DoAfter: func() {
+ acl.SampleEst = &sample.UUID
+ acl.SampleNew = &sample.UUID
+ },
+ OnModelUpdates: []interface{}{&sample.Collectors},
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ return opModels
+}
+
+func GetACLSampleID(acl *nbdb.ACL) uint32 {
+	// The primaryID is unique for each ACL, but established connections keep the sampleID that was set on
+	// connection creation. Here is the situation we want to avoid:
+	// 1. ACL1 is created with sampleID=1 (e.g. based on ANP namespace+name+...+rule index, with action Allow).
+	// 2. Connection A is established with sampleID=1; the sample is decoded to say "Allowed by ANP namespace+name".
+	// 3. ACL1 is updated while keeping sampleID=1 (e.g. the same rule in the ANP now says Deny, but PrimaryIDKey is unchanged).
+	// 4. Connection A still generates samples with sampleID=1, which are now decoded as "Denied by ANP namespace+name".
+	// In reality, connection A is still allowed, as existing connections are not affected by ANP updates.
+	// To avoid this, we encode the Match and Action into the sampleID, so that a new sampleID is assigned whenever the Match or Action changes.
+	// In that case, stale sampleIDs just report messages like "sampling for this connection was updated or deleted".
+ primaryID := acl.ExternalIDs[PrimaryIDKey.String()] + acl.Match + acl.Action
+ h := fnv.New32a()
+ h.Write([]byte(primaryID))
+ return h.Sum32()
+}
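+
+// For illustration (a sketch; the external ID value and match are hypothetical):
+// two ACLs sharing the same PrimaryIDKey but differing in Action hash to
+// different sample IDs, so established connections keep reporting against the
+// ID they were created with:
+//
+//	allow := &nbdb.ACL{
+//		ExternalIDs: map[string]string{PrimaryIDKey.String(): "anp:ns/name:0"},
+//		Match:       "ip4.src == 10.0.0.0/24",
+//		Action:      nbdb.ACLActionAllowRelated,
+//	}
+//	deny := allow.DeepCopy()
+//	deny.Action = nbdb.ACLActionDrop
+//	_ = GetACLSampleID(allow) != GetACLSampleID(deny) // true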
+
+func getACLSampleFeature(acl *nbdb.ACL) SampleFeature {
+ switch acl.ExternalIDs[OwnerTypeKey.String()] {
+ case AdminNetworkPolicyOwnerType, BaselineAdminNetworkPolicyOwnerType:
+ return AdminNetworkPolicySample
+ case MulticastNamespaceOwnerType, MulticastClusterOwnerType:
+ return MulticastSample
+ case NetpolNodeOwnerType, NetworkPolicyOwnerType, NetpolNamespaceOwnerType:
+ return NetworkPolicySample
+ case EgressFirewallOwnerType:
+ return EgressFirewallSample
+ case UDNIsolationOwnerType:
+ return UDNIsolationSample
+ }
+ return ""
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go
new file mode 100644
index 000000000..1c5dde339
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go
@@ -0,0 +1,27 @@
+package ops
+
+import (
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
+)
+
+// GetSBGlobal looks up the SB_Global entry from the cache
+func GetSBGlobal(sbClient libovsdbclient.Client, sbGlobal *sbdb.SBGlobal) (*sbdb.SBGlobal, error) {
+ found := []*sbdb.SBGlobal{}
+ opModel := operationModel{
+ Model: sbGlobal,
+ ModelPredicate: func(item *sbdb.SBGlobal) bool { return true },
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(sbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go
new file mode 100644
index 000000000..964e711bb
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go
@@ -0,0 +1,484 @@
+package ops
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// LOGICAL_SWITCH OPs
+
+type switchPredicate func(*nbdb.LogicalSwitch) bool
+
+// FindLogicalSwitchesWithPredicate looks up logical switches from the cache
+// based on a given predicate
+func FindLogicalSwitchesWithPredicate(nbClient libovsdbclient.Client, p switchPredicate) ([]*nbdb.LogicalSwitch, error) {
+ found := []*nbdb.LogicalSwitch{}
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+ err := nbClient.WhereCache(p).List(ctx, &found)
+ return found, err
+}
+
+// GetLogicalSwitch looks up a logical switch from the cache
+func GetLogicalSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch) (*nbdb.LogicalSwitch, error) {
+ found := []*nbdb.LogicalSwitch{}
+ opModel := operationModel{
+ Model: sw,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+// CreateOrUpdateLogicalSwitch creates or updates the provided logical switch
+func CreateOrUpdateLogicalSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, fields ...interface{}) error {
+ if len(fields) == 0 {
+ fields = onModelUpdatesAllNonDefault()
+ }
+ opModel := operationModel{
+ Model: sw,
+ OnModelUpdates: fields,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err := m.CreateOrUpdate(opModel)
+ return err
+}
+
+// UpdateLogicalSwitchSetExternalIDs updates the external IDs on the provided logical
+// switch. Empty values mean the corresponding keys are deleted.
+func UpdateLogicalSwitchSetExternalIDs(nbClient libovsdbclient.Client, logicalSwitch *nbdb.LogicalSwitch) error {
+ externalIds := logicalSwitch.ExternalIDs
+ logicalSwitch, err := GetLogicalSwitch(nbClient, logicalSwitch)
+ if err != nil {
+ return err
+ }
+
+ if logicalSwitch.ExternalIDs == nil {
+ logicalSwitch.ExternalIDs = map[string]string{}
+ }
+
+ for k, v := range externalIds {
+ if v == "" {
+ delete(logicalSwitch.ExternalIDs, k)
+ } else {
+ logicalSwitch.ExternalIDs[k] = v
+ }
+ }
+
+ opModel := operationModel{
+ Model: logicalSwitch,
+ OnModelUpdates: []interface{}{&logicalSwitch.ExternalIDs},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err = m.CreateOrUpdate(opModel)
+ return err
+}
+
+type logicalSwitchPredicate func(*nbdb.LogicalSwitch) bool
+
+// DeleteLogicalSwitchesWithPredicateOps returns the operations to delete the logical switches matching the provided predicate
+func DeleteLogicalSwitchesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ p logicalSwitchPredicate) ([]libovsdb.Operation, error) {
+ opModel := operationModel{
+ Model: &nbdb.LogicalSwitch{},
+ ModelPredicate: p,
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+// DeleteLogicalSwitchOps returns the operations to delete the provided logical switch
+func DeleteLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ swName string) ([]libovsdb.Operation, error) {
+ opModel := operationModel{
+ Model: &nbdb.LogicalSwitch{Name: swName},
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+// DeleteLogicalSwitch deletes the provided logical switch
+func DeleteLogicalSwitch(nbClient libovsdbclient.Client, swName string) error {
+ ops, err := DeleteLogicalSwitchOps(nbClient, nil, swName)
+ if err != nil {
+ return err
+ }
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+// LB ops
+
+// AddLoadBalancersToLogicalSwitchOps adds the provided load balancers to the
+// provided logical switch and returns the corresponding ops
+func AddLoadBalancersToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+ sw.LoadBalancer = make([]string, 0, len(lbs))
+ for _, lb := range lbs {
+ sw.LoadBalancer = append(sw.LoadBalancer, lb.UUID)
+ }
+ opModel := operationModel{
+ Model: sw,
+ OnModelMutations: []interface{}{&sw.LoadBalancer},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, opModel)
+}
+
+// RemoveLoadBalancersFromLogicalSwitchOps removes the provided load balancers from the
+// provided logical switch and returns the corresponding ops
+func RemoveLoadBalancersFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+ sw.LoadBalancer = make([]string, 0, len(lbs))
+ for _, lb := range lbs {
+ sw.LoadBalancer = append(sw.LoadBalancer, lb.UUID)
+ }
+ opModel := operationModel{
+ Model: sw,
+ OnModelMutations: []interface{}{&sw.LoadBalancer},
+		// deleting a load balancer from a switch that does not exist is a no-op
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.DeleteOps(ops, opModel)
+}
+
+// ACL ops
+
+// AddACLsToLogicalSwitchOps adds the provided ACLs to the provided logical
+// switch and returns the corresponding ops
+func AddACLsToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) {
+ sw := &nbdb.LogicalSwitch{
+ Name: name,
+ ACLs: make([]string, 0, len(acls)),
+ }
+ for _, acl := range acls {
+ sw.ACLs = append(sw.ACLs, acl.UUID)
+ }
+
+ opModels := operationModel{
+ Model: sw,
+ OnModelMutations: []interface{}{&sw.ACLs},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ return m.CreateOrUpdateOps(ops, opModels)
+}
+
+// RemoveACLsFromLogicalSwitchesWithPredicateOps looks up logical switches from the cache
+// based on a given predicate, removes from them the provided ACLs, and returns the
+// corresponding ops
+func RemoveACLsFromLogicalSwitchesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+ p switchPredicate, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) {
+ sw := nbdb.LogicalSwitch{
+ ACLs: make([]string, 0, len(acls)),
+ }
+ for _, acl := range acls {
+ sw.ACLs = append(sw.ACLs, acl.UUID)
+ }
+ opModel := operationModel{
+ Model: &sw,
+ ModelPredicate: p,
+ OnModelMutations: []interface{}{&sw.ACLs},
+ ErrNotFound: false,
+ BulkOp: true,
+ }
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModel)
+}
+
+// RemoveACLsFromLogicalSwitchesWithPredicate looks up logical switches from the cache
+// based on a given predicate and removes from them the provided ACLs
+func RemoveACLsFromLogicalSwitchesWithPredicate(nbClient libovsdbclient.Client, p switchPredicate, acls ...*nbdb.ACL) error {
+ ops, err := RemoveACLsFromLogicalSwitchesWithPredicateOps(nbClient, nil, p, acls...)
+ if err != nil {
+ return err
+ }
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
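+
+// Illustrative predicate (a sketch; the external ID key and value are
+// hypothetical, and acls is assumed to hold previously looked-up ACLs):
+//
+//	p := func(sw *nbdb.LogicalSwitch) bool {
+//		return sw.ExternalIDs["network"] == "default"
+//	}
+//	if err := RemoveACLsFromLogicalSwitchesWithPredicate(nbClient, p, acls...); err != nil {
+//		return err
+//	}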
+
+// UpdateLogicalSwitchSetOtherConfig sets other_config on the provided logical
+// switch: missing keys are added, keys set to an empty value are removed, and
+// existing keys are updated.
+func UpdateLogicalSwitchSetOtherConfig(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch) error {
+ otherConfig := sw.OtherConfig
+ sw, err := GetLogicalSwitch(nbClient, sw)
+ if err != nil {
+ return err
+ }
+
+ if sw.OtherConfig == nil {
+ sw.OtherConfig = map[string]string{}
+ }
+
+ for k, v := range otherConfig {
+ if v == "" {
+ delete(sw.OtherConfig, k)
+ } else {
+ sw.OtherConfig[k] = v
+ }
+ }
+
+ opModel := operationModel{
+ Model: sw,
+ OnModelUpdates: []interface{}{&sw.OtherConfig},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err = m.CreateOrUpdate(opModel)
+ return err
+}
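+
+// Illustrative usage (a sketch; the switch name and keys are hypothetical):
+// a non-empty value adds or updates the key, an empty value removes it.
+//
+//	sw := &nbdb.LogicalSwitch{
+//		Name: "node1",
+//		OtherConfig: map[string]string{
+//			"mcast_snoop":   "true", // added or updated
+//			"mcast_querier": "",     // removed if present
+//		},
+//	}
+//	if err := UpdateLogicalSwitchSetOtherConfig(nbClient, sw); err != nil {
+//		return err
+//	}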
+
+// LOGICAL SWITCH PORT OPs
+
+// GetLogicalSwitchPort looks up a logical switch port from the cache
+func GetLogicalSwitchPort(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort) (*nbdb.LogicalSwitchPort, error) {
+ found := []*nbdb.LogicalSwitchPort{}
+ opModel := operationModel{
+ Model: lsp,
+ ExistingResult: &found,
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ err := m.Lookup(opModel)
+ if err != nil {
+ return nil, err
+ }
+
+ return found[0], nil
+}
+
+func createOrUpdateLogicalSwitchPortsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, createSwitch bool, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) {
+ originalPorts := sw.Ports
+ sw.Ports = make([]string, 0, len(lsps))
+ opModels := make([]operationModel, 0, len(lsps)+1)
+ for i := range lsps {
+ lsp := lsps[i]
+ opModel := operationModel{
+ Model: lsp,
+ OnModelUpdates: getAllUpdatableFields(lsp),
+ DoAfter: func() { sw.Ports = append(sw.Ports, lsp.UUID) },
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+ opModel := operationModel{
+ Model: sw,
+ OnModelMutations: []interface{}{&sw.Ports},
+ ErrNotFound: !createSwitch,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+
+ m := newModelClient(nbClient)
+ ops, err := m.CreateOrUpdateOps(ops, opModels...)
+ sw.Ports = originalPorts
+ if err != nil && errors.Is(err, libovsdbclient.ErrNotFound) && !createSwitch {
+ err = fmt.Errorf("could not find switch: %q, %w", sw.Name, err)
+ }
+ return ops, err
+}
+
+func createOrUpdateLogicalSwitchPorts(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, createSwitch bool, lsps ...*nbdb.LogicalSwitchPort) error {
+ ops, err := createOrUpdateLogicalSwitchPortsOps(nbClient, nil, sw, createSwitch, lsps...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheckAndSetUUIDs(nbClient, lsps, ops)
+ return err
+}
+
+// CreateOrUpdateLogicalSwitchPortsOnSwitchOps creates or updates the provided
+// logical switch ports, adds them to the provided logical switch and returns
+// the corresponding ops
+func CreateOrUpdateLogicalSwitchPortsOnSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) {
+ return createOrUpdateLogicalSwitchPortsOps(nbClient, ops, sw, false, lsps...)
+}
+
+// CreateOrUpdateLogicalSwitchPortsOnSwitch creates or updates the provided
+// logical switch ports and adds them to the provided logical switch
+func CreateOrUpdateLogicalSwitchPortsOnSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error {
+ return createOrUpdateLogicalSwitchPorts(nbClient, sw, false, lsps...)
+}
+
+// CreateOrUpdateLogicalSwitchPortsAndSwitch creates or updates the provided
+// logical switch ports and adds them to the provided logical switch creating it
+// if it does not exist
+func CreateOrUpdateLogicalSwitchPortsAndSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error {
+ return createOrUpdateLogicalSwitchPorts(nbClient, sw, true, lsps...)
+}
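+
+// Illustrative usage (a sketch; the switch name, port name and addresses are
+// hypothetical):
+//
+//	sw := &nbdb.LogicalSwitch{Name: "node1"}
+//	lsp := &nbdb.LogicalSwitchPort{
+//		Name:      "ns_pod",
+//		Addresses: []string{"0a:58:0a:f4:00:05 10.244.0.5"},
+//	}
+//	// Creates the switch if missing, then creates or updates the port on it.
+//	if err := CreateOrUpdateLogicalSwitchPortsAndSwitch(nbClient, sw, lsp); err != nil {
+//		return err
+//	}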
+
+// DeleteLogicalSwitchPortsOps deletes the provided logical switch ports, removes
+// them from the provided logical switch and returns the corresponding ops
+func DeleteLogicalSwitchPortsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) {
+ originalPorts := sw.Ports
+ sw.Ports = make([]string, 0, len(lsps))
+ opModels := make([]operationModel, 0, len(lsps)+1)
+ for i := range lsps {
+ lsp := lsps[i]
+ opModel := operationModel{
+ Model: lsp,
+ DoAfter: func() {
+ if lsp.UUID != "" {
+ sw.Ports = append(sw.Ports, lsp.UUID)
+ }
+ },
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+ opModel := operationModel{
+ Model: sw,
+ OnModelMutations: []interface{}{&sw.Ports},
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+
+ m := newModelClient(nbClient)
+ ops, err := m.DeleteOps(ops, opModels...)
+ sw.Ports = originalPorts
+ return ops, err
+}
+
+// DeleteLogicalSwitchPorts deletes the provided logical switch ports and
+// removes them from the provided logical switch
+func DeleteLogicalSwitchPorts(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error {
+ ops, err := DeleteLogicalSwitchPortsOps(nbClient, nil, sw, lsps...)
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
+
+type logicalSwitchPortPredicate func(*nbdb.LogicalSwitchPort) bool
+
+// DeleteLogicalSwitchPortsWithPredicateOps looks up the logical switch ports of
+// the provided logical switch that match a given predicate, deletes them,
+// removes them from the switch, and returns the corresponding ops
+func DeleteLogicalSwitchPortsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, p logicalSwitchPortPredicate) ([]libovsdb.Operation, error) {
+ swName := sw.Name
+ sw, err := GetLogicalSwitch(nbClient, sw)
+ if err != nil {
+ if errors.Is(err, libovsdbclient.ErrNotFound) {
+ return ops, nil
+ }
+ return nil, fmt.Errorf("error retrieving logical switch %s from libovsdb cache: %w", swName, err)
+ }
+
+ var lsps []*nbdb.LogicalSwitchPort
+ for _, port := range sw.Ports {
+ lsp := &nbdb.LogicalSwitchPort{UUID: port}
+ lsp, err = GetLogicalSwitchPort(nbClient, lsp)
+ if err != nil {
+ if errors.Is(err, libovsdbclient.ErrNotFound) {
+ continue
+ }
+ return nil, fmt.Errorf("error retrieving logical switch port with UUID %s associated with logical"+
+ " switch %s from libovsdb cache: %w", port, swName, err)
+ }
+ if p(lsp) {
+ lsps = append(lsps, lsp)
+ }
+ }
+
+ opModels := make([]operationModel, 0, len(lsps)+1)
+ sw.Ports = make([]string, 0, len(lsps))
+ for _, lsp := range lsps {
+ sw.Ports = append(sw.Ports, lsp.UUID)
+ opModel := operationModel{
+ Model: lsp,
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+ }
+ opModel := operationModel{
+ Model: sw,
+ OnModelMutations: []interface{}{&sw.Ports},
+ ErrNotFound: false,
+ BulkOp: false,
+ }
+ opModels = append(opModels, opModel)
+
+ m := newModelClient(nbClient)
+ return m.DeleteOps(ops, opModels...)
+}
+
+// UpdateLogicalSwitchPortSetOptions sets options on the provided logical switch
+// port: missing keys are added, keys set to an empty value are removed, and
+// existing keys are updated.
+func UpdateLogicalSwitchPortSetOptions(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort) error {
+ options := lsp.Options
+ lsp, err := GetLogicalSwitchPort(nbClient, lsp)
+ if err != nil {
+ return err
+ }
+
+ if lsp.Options == nil {
+ lsp.Options = map[string]string{}
+ }
+
+ for k, v := range options {
+ if v == "" {
+ delete(lsp.Options, k)
+ } else {
+ lsp.Options[k] = v
+ }
+ }
+
+ opModel := operationModel{
+		// For LSPs, Name is a valid index, so no predicate is needed
+ Model: lsp,
+ OnModelUpdates: []interface{}{&lsp.Options},
+ ErrNotFound: true,
+ BulkOp: false,
+ }
+
+ m := newModelClient(nbClient)
+ _, err = m.CreateOrUpdate(opModel)
+ return err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go
new file mode 100644
index 000000000..4672a5c0e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go
@@ -0,0 +1,112 @@
+package ops
+
+import (
+ "context"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type chassisTemplateVarPredicate func(*nbdb.ChassisTemplateVar) bool
+
+// ListTemplateVar looks up all Chassis_Template_Var records.
+func ListTemplateVar(nbClient libovsdbclient.Client) ([]*nbdb.ChassisTemplateVar, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+
+ templatesList := []*nbdb.ChassisTemplateVar{}
+ err := nbClient.List(ctx, &templatesList)
+ return templatesList, err
+}
+
+// CreateOrUpdateChassisTemplateVarOps creates or updates the provided
+// 'template' variable and returns the corresponding ops.
+func CreateOrUpdateChassisTemplateVarOps(nbClient libovsdbclient.Client,
+ ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar) ([]libovsdb.Operation, error) {
+
+ modelClient := newModelClient(nbClient)
+ return modelClient.CreateOrUpdateOps(ops, operationModel{
+ Model: template,
+ OnModelMutations: []interface{}{&template.Variables},
+ ErrNotFound: false,
+ BulkOp: false,
+ })
+}
+
+// deleteChassisTemplateVarVariablesOps removes the variables listed as
+// keys of 'template.Variables' and returns the corresponding ops.
+// It applies the mutation to all records that are selected by 'predicate'.
+func deleteChassisTemplateVarVariablesOps(nbClient libovsdbclient.Client,
+ ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar,
+ predicate chassisTemplateVarPredicate) ([]libovsdb.Operation, error) {
+
+ deleteTemplate := &nbdb.ChassisTemplateVar{
+ Chassis: template.Chassis,
+ Variables: map[string]string{},
+ }
+ for name := range template.Variables {
+ deleteTemplate.Variables[name] = ""
+ }
+ modelClient := newModelClient(nbClient)
+ return modelClient.DeleteOps(ops, operationModel{
+ Model: deleteTemplate,
+ ModelPredicate: predicate,
+ OnModelMutations: []interface{}{&deleteTemplate.Variables},
+ ErrNotFound: false,
+ BulkOp: true,
+ })
+}
+
+// DeleteChassisTemplateVarVariablesOps removes all variables listed as
+// keys of 'template.Variables' from the record matching the same chassis
+// as 'template'. It returns the corresponding ops.
+func DeleteChassisTemplateVarVariablesOps(nbClient libovsdbclient.Client,
+ ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar) ([]libovsdb.Operation, error) {
+
+ return deleteChassisTemplateVarVariablesOps(nbClient, ops, template, nil)
+}
+
+// DeleteAllChassisTemplateVarVariables removes the variables listed in
+// 'varNames' and commits the transaction to the database. It applies
+// the mutation to all records that contain these variable names.
+func DeleteAllChassisTemplateVarVariables(nbClient libovsdbclient.Client, varNames []string) error {
+ deleteTemplateVar := &nbdb.ChassisTemplateVar{
+ Variables: make(map[string]string, len(varNames)),
+ }
+ for _, name := range varNames {
+ deleteTemplateVar.Variables[name] = ""
+ }
+ ops, err := deleteChassisTemplateVarVariablesOps(nbClient, nil, deleteTemplateVar,
+ func(item *nbdb.ChassisTemplateVar) bool {
+ for _, name := range varNames {
+ if _, found := item.Variables[name]; found {
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil {
+ return err
+ }
+
+ _, err = TransactAndCheck(nbClient, ops)
+ return err
+}
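+
+// Illustrative usage (a sketch; the variable name is hypothetical): removes the
+// listed template variables from every Chassis_Template_Var record that
+// contains them.
+//
+//	if err := DeleteAllChassisTemplateVarVariables(nbClient, []string{"NODEPORT_node1"}); err != nil {
+//		return err
+//	}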
+
+// DeleteChassisTemplateVar deletes all complete Chassis_Template_Var
+// records matching 'templates'.
+func DeleteChassisTemplateVar(nbClient libovsdbclient.Client, templates ...*nbdb.ChassisTemplateVar) error {
+ opModels := make([]operationModel, 0, len(templates))
+ for i := range templates {
+ template := templates[i]
+ opModels = append(opModels, operationModel{
+ Model: template,
+ ErrNotFound: false,
+ BulkOp: false,
+ })
+ }
+ m := newModelClient(nbClient)
+ return m.Delete(opModels...)
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go
new file mode 100644
index 000000000..51fd09cce
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go
@@ -0,0 +1,98 @@
+package ops
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/klog/v2"
+
+ "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+)
+
+// TransactWithRetry will attempt a transaction several times if it receives an error indicating that the client
+// was not connected when the transaction occurred.
+func TransactWithRetry(ctx context.Context, c client.Client, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) {
+ var results []ovsdb.OperationResult
+ resultErr := wait.PollUntilContextCancel(ctx, 200*time.Millisecond, true, func(ctx context.Context) (bool, error) {
+ var err error
+ results, err = c.Transact(ctx, ops...)
+ if err == nil {
+ return true, nil
+ }
+ if err != nil && errors.Is(err, client.ErrNotConnected) {
+ klog.V(5).Infof("Unable to execute transaction: %+v. Client is disconnected, will retry...", ops)
+ return false, nil
+ }
+ return false, err
+ })
+ return results, resultErr
+}
+
+func TransactAndCheck(c client.Client, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) {
+ if len(ops) <= 0 {
+ return []ovsdb.OperationResult{{}}, nil
+ }
+
+ klog.V(5).Infof("Configuring OVN: %+v", ops)
+
+ ctx, cancel := context.WithTimeout(context.TODO(), config.Default.OVSDBTxnTimeout)
+ defer cancel()
+
+ results, err := TransactWithRetry(ctx, c, ops)
+ if err != nil {
+ return nil, fmt.Errorf("error in transact with ops %+v: %v", ops, err)
+ }
+
+ opErrors, err := ovsdb.CheckOperationResults(results, ops)
+ if err != nil {
+ return nil, fmt.Errorf("error in transact with ops %+v results %+v and errors %+v: %v", ops, results, opErrors, err)
+ }
+
+ return results, nil
+}
+
+// TransactAndCheckAndSetUUIDs transacts the given ops against client and returns
+// results if no error occurred or an error otherwise. It sets the real uuids for
+// the passed models if they were inserted and have a named-uuid (as built by
+// BuildNamedUUID)
+func TransactAndCheckAndSetUUIDs(client client.Client, models interface{}, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) {
+ results, err := TransactAndCheck(client, ops)
+ if err != nil {
+ return nil, err
+ }
+
+ namedModelMap := map[string]model.Model{}
+ _ = onModels(models, func(model interface{}) error {
+ uuid := getUUID(model)
+ if isNamedUUID(uuid) {
+ namedModelMap[uuid] = model
+ }
+ return nil
+ })
+
+ if len(namedModelMap) == 0 {
+ return results, nil
+ }
+
+ for i, op := range ops {
+ if op.Op != ovsdb.OperationInsert {
+ continue
+ }
+
+ if !isNamedUUID(op.UUIDName) {
+ continue
+ }
+
+ if model, ok := namedModelMap[op.UUIDName]; ok {
+ setUUID(model, results[i].UUID.GoUUID)
+ }
+ }
+
+ return results, nil
+}
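+
+// Illustrative pattern (a sketch; sw and lsp are assumed to be previously built
+// models): the *Ops helpers in this package accumulate operations that are then
+// committed in a single transaction.
+//
+//	var ops []ovsdb.Operation
+//	ops, err := CreateOrUpdateLogicalSwitchPortsOnSwitchOps(nbClient, ops, sw, lsp)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := TransactAndCheckAndSetUUIDs(nbClient, []*nbdb.LogicalSwitchPort{lsp}, ops); err != nil {
+//		return err
+//	}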
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore
new file mode 100644
index 000000000..734ba1eff
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore
@@ -0,0 +1 @@
+*.ovsschema
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go
new file mode 100644
index 000000000..0c2840c17
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go
@@ -0,0 +1,303 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ACLTable = "ACL"
+
+type (
+ ACLAction = string
+ ACLDirection = string
+ ACLSeverity = string
+)
+
+var (
+ ACLActionAllow ACLAction = "allow"
+ ACLActionAllowRelated ACLAction = "allow-related"
+ ACLActionAllowStateless ACLAction = "allow-stateless"
+ ACLActionDrop ACLAction = "drop"
+ ACLActionReject ACLAction = "reject"
+ ACLActionPass ACLAction = "pass"
+ ACLDirectionFromLport ACLDirection = "from-lport"
+ ACLDirectionToLport ACLDirection = "to-lport"
+ ACLSeverityAlert ACLSeverity = "alert"
+ ACLSeverityWarning ACLSeverity = "warning"
+ ACLSeverityNotice ACLSeverity = "notice"
+ ACLSeverityInfo ACLSeverity = "info"
+ ACLSeverityDebug ACLSeverity = "debug"
+)
+
+// ACL defines an object in ACL table
+type ACL struct {
+ UUID string `ovsdb:"_uuid"`
+ Action ACLAction `ovsdb:"action"`
+ Direction ACLDirection `ovsdb:"direction"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Label int `ovsdb:"label"`
+ Log bool `ovsdb:"log"`
+ Match string `ovsdb:"match"`
+ Meter *string `ovsdb:"meter"`
+ Name *string `ovsdb:"name"`
+ Options map[string]string `ovsdb:"options"`
+ Priority int `ovsdb:"priority"`
+ SampleEst *string `ovsdb:"sample_est"`
+ SampleNew *string `ovsdb:"sample_new"`
+ Severity *ACLSeverity `ovsdb:"severity"`
+ Tier int `ovsdb:"tier"`
+}
+
+func (a *ACL) GetUUID() string {
+ return a.UUID
+}
+
+func (a *ACL) GetAction() ACLAction {
+ return a.Action
+}
+
+func (a *ACL) GetDirection() ACLDirection {
+ return a.Direction
+}
+
+func (a *ACL) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyACLExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalACLExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ACL) GetLabel() int {
+ return a.Label
+}
+
+func (a *ACL) GetLog() bool {
+ return a.Log
+}
+
+func (a *ACL) GetMatch() string {
+ return a.Match
+}
+
+func (a *ACL) GetMeter() *string {
+ return a.Meter
+}
+
+func copyACLMeter(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalACLMeter(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ACL) GetName() *string {
+ return a.Name
+}
+
+func copyACLName(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalACLName(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ACL) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyACLOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalACLOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ACL) GetPriority() int {
+ return a.Priority
+}
+
+func (a *ACL) GetSampleEst() *string {
+ return a.SampleEst
+}
+
+func copyACLSampleEst(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalACLSampleEst(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ACL) GetSampleNew() *string {
+ return a.SampleNew
+}
+
+func copyACLSampleNew(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalACLSampleNew(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ACL) GetSeverity() *ACLSeverity {
+ return a.Severity
+}
+
+func copyACLSeverity(a *ACLSeverity) *ACLSeverity {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalACLSeverity(a, b *ACLSeverity) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ACL) GetTier() int {
+ return a.Tier
+}
+
+func (a *ACL) DeepCopyInto(b *ACL) {
+ *b = *a
+ b.ExternalIDs = copyACLExternalIDs(a.ExternalIDs)
+ b.Meter = copyACLMeter(a.Meter)
+ b.Name = copyACLName(a.Name)
+ b.Options = copyACLOptions(a.Options)
+ b.SampleEst = copyACLSampleEst(a.SampleEst)
+ b.SampleNew = copyACLSampleNew(a.SampleNew)
+ b.Severity = copyACLSeverity(a.Severity)
+}
+
+func (a *ACL) DeepCopy() *ACL {
+ b := new(ACL)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *ACL) CloneModelInto(b model.Model) {
+ c := b.(*ACL)
+ a.DeepCopyInto(c)
+}
+
+func (a *ACL) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *ACL) Equals(b *ACL) bool {
+ return a.UUID == b.UUID &&
+ a.Action == b.Action &&
+ a.Direction == b.Direction &&
+ equalACLExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Label == b.Label &&
+ a.Log == b.Log &&
+ a.Match == b.Match &&
+ equalACLMeter(a.Meter, b.Meter) &&
+ equalACLName(a.Name, b.Name) &&
+ equalACLOptions(a.Options, b.Options) &&
+ a.Priority == b.Priority &&
+ equalACLSampleEst(a.SampleEst, b.SampleEst) &&
+ equalACLSampleNew(a.SampleNew, b.SampleNew) &&
+ equalACLSeverity(a.Severity, b.Severity) &&
+ a.Tier == b.Tier
+}
+
+func (a *ACL) EqualsModel(b model.Model) bool {
+ c := b.(*ACL)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &ACL{}
+var _ model.ComparableModel = &ACL{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go
new file mode 100644
index 000000000..e8a836e2d
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go
@@ -0,0 +1,118 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const AddressSetTable = "Address_Set"
+
+// AddressSet defines an object in Address_Set table
+type AddressSet struct {
+ UUID string `ovsdb:"_uuid"`
+ Addresses []string `ovsdb:"addresses"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Name string `ovsdb:"name"`
+}
+
+func (a *AddressSet) GetUUID() string {
+ return a.UUID
+}
+
+func (a *AddressSet) GetAddresses() []string {
+ return a.Addresses
+}
+
+func copyAddressSetAddresses(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalAddressSetAddresses(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AddressSet) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyAddressSetExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalAddressSetExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AddressSet) GetName() string {
+ return a.Name
+}
+
+func (a *AddressSet) DeepCopyInto(b *AddressSet) {
+ *b = *a
+ b.Addresses = copyAddressSetAddresses(a.Addresses)
+ b.ExternalIDs = copyAddressSetExternalIDs(a.ExternalIDs)
+}
+
+func (a *AddressSet) DeepCopy() *AddressSet {
+ b := new(AddressSet)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *AddressSet) CloneModelInto(b model.Model) {
+ c := b.(*AddressSet)
+ a.DeepCopyInto(c)
+}
+
+func (a *AddressSet) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *AddressSet) Equals(b *AddressSet) bool {
+ return a.UUID == b.UUID &&
+ equalAddressSetAddresses(a.Addresses, b.Addresses) &&
+ equalAddressSetExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Name == b.Name
+}
+
+func (a *AddressSet) EqualsModel(b model.Model) bool {
+ c := b.(*AddressSet)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &AddressSet{}
+var _ model.ComparableModel = &AddressSet{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go
new file mode 100644
index 000000000..46646e81a
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go
@@ -0,0 +1,237 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const BFDTable = "BFD"
+
+type (
+ BFDStatus = string
+)
+
+var (
+ BFDStatusDown BFDStatus = "down"
+ BFDStatusInit BFDStatus = "init"
+ BFDStatusUp BFDStatus = "up"
+ BFDStatusAdminDown BFDStatus = "admin_down"
+)
+
+// BFD defines an object in BFD table
+type BFD struct {
+ UUID string `ovsdb:"_uuid"`
+ DetectMult *int `ovsdb:"detect_mult"`
+ DstIP string `ovsdb:"dst_ip"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ LogicalPort string `ovsdb:"logical_port"`
+ MinRx *int `ovsdb:"min_rx"`
+ MinTx *int `ovsdb:"min_tx"`
+ Options map[string]string `ovsdb:"options"`
+ Status *BFDStatus `ovsdb:"status"`
+}
+
+func (a *BFD) GetUUID() string {
+ return a.UUID
+}
+
+func (a *BFD) GetDetectMult() *int {
+ return a.DetectMult
+}
+
+func copyBFDDetectMult(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBFDDetectMult(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *BFD) GetDstIP() string {
+ return a.DstIP
+}
+
+func (a *BFD) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyBFDExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBFDExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *BFD) GetLogicalPort() string {
+ return a.LogicalPort
+}
+
+func (a *BFD) GetMinRx() *int {
+ return a.MinRx
+}
+
+func copyBFDMinRx(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBFDMinRx(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *BFD) GetMinTx() *int {
+ return a.MinTx
+}
+
+func copyBFDMinTx(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBFDMinTx(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *BFD) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyBFDOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBFDOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *BFD) GetStatus() *BFDStatus {
+ return a.Status
+}
+
+func copyBFDStatus(a *BFDStatus) *BFDStatus {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalBFDStatus(a, b *BFDStatus) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *BFD) DeepCopyInto(b *BFD) {
+ *b = *a
+ b.DetectMult = copyBFDDetectMult(a.DetectMult)
+ b.ExternalIDs = copyBFDExternalIDs(a.ExternalIDs)
+ b.MinRx = copyBFDMinRx(a.MinRx)
+ b.MinTx = copyBFDMinTx(a.MinTx)
+ b.Options = copyBFDOptions(a.Options)
+ b.Status = copyBFDStatus(a.Status)
+}
+
+func (a *BFD) DeepCopy() *BFD {
+ b := new(BFD)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *BFD) CloneModelInto(b model.Model) {
+ c := b.(*BFD)
+ a.DeepCopyInto(c)
+}
+
+func (a *BFD) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *BFD) Equals(b *BFD) bool {
+ return a.UUID == b.UUID &&
+ equalBFDDetectMult(a.DetectMult, b.DetectMult) &&
+ a.DstIP == b.DstIP &&
+ equalBFDExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.LogicalPort == b.LogicalPort &&
+ equalBFDMinRx(a.MinRx, b.MinRx) &&
+ equalBFDMinTx(a.MinTx, b.MinTx) &&
+ equalBFDOptions(a.Options, b.Options) &&
+ equalBFDStatus(a.Status, b.Status)
+}
+
+func (a *BFD) EqualsModel(b model.Model) bool {
+ c := b.(*BFD)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &BFD{}
+var _ model.ComparableModel = &BFD{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go
new file mode 100644
index 000000000..602c3f522
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go
@@ -0,0 +1,120 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ChassisTemplateVarTable = "Chassis_Template_Var"
+
+// ChassisTemplateVar defines an object in Chassis_Template_Var table
+type ChassisTemplateVar struct {
+ UUID string `ovsdb:"_uuid"`
+ Chassis string `ovsdb:"chassis"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Variables map[string]string `ovsdb:"variables"`
+}
+
+func (a *ChassisTemplateVar) GetUUID() string {
+ return a.UUID
+}
+
+func (a *ChassisTemplateVar) GetChassis() string {
+ return a.Chassis
+}
+
+func (a *ChassisTemplateVar) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyChassisTemplateVarExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalChassisTemplateVarExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ChassisTemplateVar) GetVariables() map[string]string {
+ return a.Variables
+}
+
+func copyChassisTemplateVarVariables(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalChassisTemplateVarVariables(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ChassisTemplateVar) DeepCopyInto(b *ChassisTemplateVar) {
+ *b = *a
+ b.ExternalIDs = copyChassisTemplateVarExternalIDs(a.ExternalIDs)
+ b.Variables = copyChassisTemplateVarVariables(a.Variables)
+}
+
+func (a *ChassisTemplateVar) DeepCopy() *ChassisTemplateVar {
+ b := new(ChassisTemplateVar)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *ChassisTemplateVar) CloneModelInto(b model.Model) {
+ c := b.(*ChassisTemplateVar)
+ a.DeepCopyInto(c)
+}
+
+func (a *ChassisTemplateVar) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *ChassisTemplateVar) Equals(b *ChassisTemplateVar) bool {
+ return a.UUID == b.UUID &&
+ a.Chassis == b.Chassis &&
+ equalChassisTemplateVarExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalChassisTemplateVarVariables(a.Variables, b.Variables)
+}
+
+func (a *ChassisTemplateVar) EqualsModel(b model.Model) bool {
+ c := b.(*ChassisTemplateVar)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &ChassisTemplateVar{}
+var _ model.ComparableModel = &ChassisTemplateVar{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go
new file mode 100644
index 000000000..baf6da344
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go
@@ -0,0 +1,209 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ConnectionTable = "Connection"
+
+// Connection defines an object in Connection table
+type Connection struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ InactivityProbe *int `ovsdb:"inactivity_probe"`
+ IsConnected bool `ovsdb:"is_connected"`
+ MaxBackoff *int `ovsdb:"max_backoff"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+ Status map[string]string `ovsdb:"status"`
+ Target string `ovsdb:"target"`
+}
+
+func (a *Connection) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Connection) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyConnectionExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalConnectionExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Connection) GetInactivityProbe() *int {
+ return a.InactivityProbe
+}
+
+func copyConnectionInactivityProbe(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalConnectionInactivityProbe(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Connection) GetIsConnected() bool {
+ return a.IsConnected
+}
+
+func (a *Connection) GetMaxBackoff() *int {
+ return a.MaxBackoff
+}
+
+func copyConnectionMaxBackoff(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalConnectionMaxBackoff(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Connection) GetOtherConfig() map[string]string {
+ return a.OtherConfig
+}
+
+func copyConnectionOtherConfig(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalConnectionOtherConfig(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Connection) GetStatus() map[string]string {
+ return a.Status
+}
+
+func copyConnectionStatus(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalConnectionStatus(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Connection) GetTarget() string {
+ return a.Target
+}
+
+func (a *Connection) DeepCopyInto(b *Connection) {
+ *b = *a
+ b.ExternalIDs = copyConnectionExternalIDs(a.ExternalIDs)
+ b.InactivityProbe = copyConnectionInactivityProbe(a.InactivityProbe)
+ b.MaxBackoff = copyConnectionMaxBackoff(a.MaxBackoff)
+ b.OtherConfig = copyConnectionOtherConfig(a.OtherConfig)
+ b.Status = copyConnectionStatus(a.Status)
+}
+
+func (a *Connection) DeepCopy() *Connection {
+ b := new(Connection)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Connection) CloneModelInto(b model.Model) {
+ c := b.(*Connection)
+ a.DeepCopyInto(c)
+}
+
+func (a *Connection) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Connection) Equals(b *Connection) bool {
+ return a.UUID == b.UUID &&
+ equalConnectionExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalConnectionInactivityProbe(a.InactivityProbe, b.InactivityProbe) &&
+ a.IsConnected == b.IsConnected &&
+ equalConnectionMaxBackoff(a.MaxBackoff, b.MaxBackoff) &&
+ equalConnectionOtherConfig(a.OtherConfig, b.OtherConfig) &&
+ equalConnectionStatus(a.Status, b.Status) &&
+ a.Target == b.Target
+}
+
+func (a *Connection) EqualsModel(b model.Model) bool {
+ c := b.(*Connection)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Connection{}
+var _ model.ComparableModel = &Connection{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go
new file mode 100644
index 000000000..1e146b657
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go
@@ -0,0 +1,120 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const CoppTable = "Copp"
+
+// Copp defines an object in Copp table
+type Copp struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Meters map[string]string `ovsdb:"meters"`
+ Name string `ovsdb:"name"`
+}
+
+func (a *Copp) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Copp) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyCoppExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalCoppExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Copp) GetMeters() map[string]string {
+ return a.Meters
+}
+
+func copyCoppMeters(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalCoppMeters(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Copp) GetName() string {
+ return a.Name
+}
+
+func (a *Copp) DeepCopyInto(b *Copp) {
+ *b = *a
+ b.ExternalIDs = copyCoppExternalIDs(a.ExternalIDs)
+ b.Meters = copyCoppMeters(a.Meters)
+}
+
+func (a *Copp) DeepCopy() *Copp {
+ b := new(Copp)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Copp) CloneModelInto(b model.Model) {
+ c := b.(*Copp)
+ a.DeepCopyInto(c)
+}
+
+func (a *Copp) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Copp) Equals(b *Copp) bool {
+ return a.UUID == b.UUID &&
+ equalCoppExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalCoppMeters(a.Meters, b.Meters) &&
+ a.Name == b.Name
+}
+
+func (a *Copp) EqualsModel(b model.Model) bool {
+ c := b.(*Copp)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Copp{}
+var _ model.ComparableModel = &Copp{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go
new file mode 100644
index 000000000..fd68ebee2
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go
@@ -0,0 +1,120 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DHCPOptionsTable = "DHCP_Options"
+
+// DHCPOptions defines an object in DHCP_Options table
+type DHCPOptions struct {
+ UUID string `ovsdb:"_uuid"`
+ Cidr string `ovsdb:"cidr"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Options map[string]string `ovsdb:"options"`
+}
+
+func (a *DHCPOptions) GetUUID() string {
+ return a.UUID
+}
+
+func (a *DHCPOptions) GetCidr() string {
+ return a.Cidr
+}
+
+func (a *DHCPOptions) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyDHCPOptionsExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDHCPOptionsExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DHCPOptions) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyDHCPOptionsOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDHCPOptionsOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DHCPOptions) DeepCopyInto(b *DHCPOptions) {
+ *b = *a
+ b.ExternalIDs = copyDHCPOptionsExternalIDs(a.ExternalIDs)
+ b.Options = copyDHCPOptionsOptions(a.Options)
+}
+
+func (a *DHCPOptions) DeepCopy() *DHCPOptions {
+ b := new(DHCPOptions)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *DHCPOptions) CloneModelInto(b model.Model) {
+ c := b.(*DHCPOptions)
+ a.DeepCopyInto(c)
+}
+
+func (a *DHCPOptions) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *DHCPOptions) Equals(b *DHCPOptions) bool {
+ return a.UUID == b.UUID &&
+ a.Cidr == b.Cidr &&
+ equalDHCPOptionsExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalDHCPOptionsOptions(a.Options, b.Options)
+}
+
+func (a *DHCPOptions) EqualsModel(b model.Model) bool {
+ c := b.(*DHCPOptions)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &DHCPOptions{}
+var _ model.ComparableModel = &DHCPOptions{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go
new file mode 100644
index 000000000..f0e973ab7
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go
@@ -0,0 +1,145 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DHCPRelayTable = "DHCP_Relay"
+
+// DHCPRelay defines an object in DHCP_Relay table
+type DHCPRelay struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Name string `ovsdb:"name"`
+ Options map[string]string `ovsdb:"options"`
+ Servers *string `ovsdb:"servers"`
+}
+
+func (a *DHCPRelay) GetUUID() string {
+ return a.UUID
+}
+
+func (a *DHCPRelay) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyDHCPRelayExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDHCPRelayExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DHCPRelay) GetName() string {
+ return a.Name
+}
+
+func (a *DHCPRelay) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyDHCPRelayOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDHCPRelayOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DHCPRelay) GetServers() *string {
+ return a.Servers
+}
+
+func copyDHCPRelayServers(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDHCPRelayServers(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *DHCPRelay) DeepCopyInto(b *DHCPRelay) {
+ *b = *a
+ b.ExternalIDs = copyDHCPRelayExternalIDs(a.ExternalIDs)
+ b.Options = copyDHCPRelayOptions(a.Options)
+ b.Servers = copyDHCPRelayServers(a.Servers)
+}
+
+func (a *DHCPRelay) DeepCopy() *DHCPRelay {
+ b := new(DHCPRelay)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *DHCPRelay) CloneModelInto(b model.Model) {
+ c := b.(*DHCPRelay)
+ a.DeepCopyInto(c)
+}
+
+func (a *DHCPRelay) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *DHCPRelay) Equals(b *DHCPRelay) bool {
+ return a.UUID == b.UUID &&
+ equalDHCPRelayExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Name == b.Name &&
+ equalDHCPRelayOptions(a.Options, b.Options) &&
+ equalDHCPRelayServers(a.Servers, b.Servers)
+}
+
+func (a *DHCPRelay) EqualsModel(b model.Model) bool {
+ c := b.(*DHCPRelay)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &DHCPRelay{}
+var _ model.ComparableModel = &DHCPRelay{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go
new file mode 100644
index 000000000..285d5df28
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go
@@ -0,0 +1,147 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DNSTable = "DNS"
+
+// DNS defines an object in DNS table
+type DNS struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Options map[string]string `ovsdb:"options"`
+ Records map[string]string `ovsdb:"records"`
+}
+
+func (a *DNS) GetUUID() string {
+ return a.UUID
+}
+
+func (a *DNS) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyDNSExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDNSExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DNS) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyDNSOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDNSOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DNS) GetRecords() map[string]string {
+ return a.Records
+}
+
+func copyDNSRecords(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDNSRecords(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DNS) DeepCopyInto(b *DNS) {
+ *b = *a
+ b.ExternalIDs = copyDNSExternalIDs(a.ExternalIDs)
+ b.Options = copyDNSOptions(a.Options)
+ b.Records = copyDNSRecords(a.Records)
+}
+
+func (a *DNS) DeepCopy() *DNS {
+ b := new(DNS)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *DNS) CloneModelInto(b model.Model) {
+ c := b.(*DNS)
+ a.DeepCopyInto(c)
+}
+
+func (a *DNS) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *DNS) Equals(b *DNS) bool {
+ return a.UUID == b.UUID &&
+ equalDNSExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalDNSOptions(a.Options, b.Options) &&
+ equalDNSRecords(a.Records, b.Records)
+}
+
+func (a *DNS) EqualsModel(b model.Model) bool {
+ c := b.(*DNS)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &DNS{}
+var _ model.ComparableModel = &DNS{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go
new file mode 100644
index 000000000..1a0657559
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go
@@ -0,0 +1,136 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ForwardingGroupTable = "Forwarding_Group"
+
+// ForwardingGroup defines an object in Forwarding_Group table
+type ForwardingGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ ChildPort []string `ovsdb:"child_port"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Liveness bool `ovsdb:"liveness"`
+ Name string `ovsdb:"name"`
+ Vip string `ovsdb:"vip"`
+ Vmac string `ovsdb:"vmac"`
+}
+
+func (a *ForwardingGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *ForwardingGroup) GetChildPort() []string {
+ return a.ChildPort
+}
+
+func copyForwardingGroupChildPort(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalForwardingGroupChildPort(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ForwardingGroup) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyForwardingGroupExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalForwardingGroupExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ForwardingGroup) GetLiveness() bool {
+ return a.Liveness
+}
+
+func (a *ForwardingGroup) GetName() string {
+ return a.Name
+}
+
+func (a *ForwardingGroup) GetVip() string {
+ return a.Vip
+}
+
+func (a *ForwardingGroup) GetVmac() string {
+ return a.Vmac
+}
+
+func (a *ForwardingGroup) DeepCopyInto(b *ForwardingGroup) {
+ *b = *a
+ b.ChildPort = copyForwardingGroupChildPort(a.ChildPort)
+ b.ExternalIDs = copyForwardingGroupExternalIDs(a.ExternalIDs)
+}
+
+func (a *ForwardingGroup) DeepCopy() *ForwardingGroup {
+ b := new(ForwardingGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *ForwardingGroup) CloneModelInto(b model.Model) {
+ c := b.(*ForwardingGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *ForwardingGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *ForwardingGroup) Equals(b *ForwardingGroup) bool {
+ return a.UUID == b.UUID &&
+ equalForwardingGroupChildPort(a.ChildPort, b.ChildPort) &&
+ equalForwardingGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Liveness == b.Liveness &&
+ a.Name == b.Name &&
+ a.Vip == b.Vip &&
+ a.Vmac == b.Vmac
+}
+
+func (a *ForwardingGroup) EqualsModel(b model.Model) bool {
+ c := b.(*ForwardingGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &ForwardingGroup{}
+var _ model.ComparableModel = &ForwardingGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go
new file mode 100644
index 000000000..15935847b
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go
@@ -0,0 +1,132 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const GatewayChassisTable = "Gateway_Chassis"
+
+// GatewayChassis defines an object in Gateway_Chassis table
+type GatewayChassis struct {
+ UUID string `ovsdb:"_uuid"`
+ ChassisName string `ovsdb:"chassis_name"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Name string `ovsdb:"name"`
+ Options map[string]string `ovsdb:"options"`
+ Priority int `ovsdb:"priority"`
+}
+
+func (a *GatewayChassis) GetUUID() string {
+ return a.UUID
+}
+
+func (a *GatewayChassis) GetChassisName() string {
+ return a.ChassisName
+}
+
+func (a *GatewayChassis) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyGatewayChassisExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalGatewayChassisExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *GatewayChassis) GetName() string {
+ return a.Name
+}
+
+func (a *GatewayChassis) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyGatewayChassisOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalGatewayChassisOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *GatewayChassis) GetPriority() int {
+ return a.Priority
+}
+
+func (a *GatewayChassis) DeepCopyInto(b *GatewayChassis) {
+ *b = *a
+ b.ExternalIDs = copyGatewayChassisExternalIDs(a.ExternalIDs)
+ b.Options = copyGatewayChassisOptions(a.Options)
+}
+
+func (a *GatewayChassis) DeepCopy() *GatewayChassis {
+ b := new(GatewayChassis)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *GatewayChassis) CloneModelInto(b model.Model) {
+ c := b.(*GatewayChassis)
+ a.DeepCopyInto(c)
+}
+
+func (a *GatewayChassis) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *GatewayChassis) Equals(b *GatewayChassis) bool {
+ return a.UUID == b.UUID &&
+ a.ChassisName == b.ChassisName &&
+ equalGatewayChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Name == b.Name &&
+ equalGatewayChassisOptions(a.Options, b.Options) &&
+ a.Priority == b.Priority
+}
+
+func (a *GatewayChassis) EqualsModel(b model.Model) bool {
+ c := b.(*GatewayChassis)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &GatewayChassis{}
+var _ model.ComparableModel = &GatewayChassis{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go
new file mode 100644
index 000000000..67f8a84f1
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go
@@ -0,0 +1,3 @@
+package nbdb
+
+//go:generate modelgen --extended -p nbdb -o . ovn-nb.ovsschema
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go
new file mode 100644
index 000000000..dc09d1ec9
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go
@@ -0,0 +1,93 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const HAChassisTable = "HA_Chassis"
+
+// HAChassis defines an object in HA_Chassis table
+type HAChassis struct {
+ UUID string `ovsdb:"_uuid"`
+ ChassisName string `ovsdb:"chassis_name"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Priority int `ovsdb:"priority"`
+}
+
+func (a *HAChassis) GetUUID() string {
+ return a.UUID
+}
+
+func (a *HAChassis) GetChassisName() string {
+ return a.ChassisName
+}
+
+func (a *HAChassis) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyHAChassisExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalHAChassisExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *HAChassis) GetPriority() int {
+ return a.Priority
+}
+
+func (a *HAChassis) DeepCopyInto(b *HAChassis) {
+ *b = *a
+ b.ExternalIDs = copyHAChassisExternalIDs(a.ExternalIDs)
+}
+
+func (a *HAChassis) DeepCopy() *HAChassis {
+ b := new(HAChassis)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *HAChassis) CloneModelInto(b model.Model) {
+ c := b.(*HAChassis)
+ a.DeepCopyInto(c)
+}
+
+func (a *HAChassis) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *HAChassis) Equals(b *HAChassis) bool {
+ return a.UUID == b.UUID &&
+ a.ChassisName == b.ChassisName &&
+ equalHAChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Priority == b.Priority
+}
+
+func (a *HAChassis) EqualsModel(b model.Model) bool {
+ c := b.(*HAChassis)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &HAChassis{}
+var _ model.ComparableModel = &HAChassis{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go
new file mode 100644
index 000000000..bdda95aaf
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go
@@ -0,0 +1,118 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const HAChassisGroupTable = "HA_Chassis_Group"
+
+// HAChassisGroup defines an object in HA_Chassis_Group table
+type HAChassisGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ HaChassis []string `ovsdb:"ha_chassis"`
+ Name string `ovsdb:"name"`
+}
+
+func (a *HAChassisGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *HAChassisGroup) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyHAChassisGroupExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalHAChassisGroupExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *HAChassisGroup) GetHaChassis() []string {
+ return a.HaChassis
+}
+
+func copyHAChassisGroupHaChassis(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalHAChassisGroupHaChassis(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *HAChassisGroup) GetName() string {
+ return a.Name
+}
+
+func (a *HAChassisGroup) DeepCopyInto(b *HAChassisGroup) {
+ *b = *a
+ b.ExternalIDs = copyHAChassisGroupExternalIDs(a.ExternalIDs)
+ b.HaChassis = copyHAChassisGroupHaChassis(a.HaChassis)
+}
+
+func (a *HAChassisGroup) DeepCopy() *HAChassisGroup {
+ b := new(HAChassisGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *HAChassisGroup) CloneModelInto(b model.Model) {
+ c := b.(*HAChassisGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *HAChassisGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *HAChassisGroup) Equals(b *HAChassisGroup) bool {
+ return a.UUID == b.UUID &&
+ equalHAChassisGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalHAChassisGroupHaChassis(a.HaChassis, b.HaChassis) &&
+ a.Name == b.Name
+}
+
+func (a *HAChassisGroup) EqualsModel(b model.Model) bool {
+ c := b.(*HAChassisGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &HAChassisGroup{}
+var _ model.ComparableModel = &HAChassisGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go
new file mode 100644
index 000000000..03bcd7601
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go
@@ -0,0 +1,290 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LoadBalancerTable = "Load_Balancer"
+
+type (
+ LoadBalancerProtocol = string
+ LoadBalancerSelectionFields = string
+)
+
+var (
+ LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp"
+ LoadBalancerProtocolUDP LoadBalancerProtocol = "udp"
+ LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp"
+ LoadBalancerSelectionFieldsEthSrc LoadBalancerSelectionFields = "eth_src"
+ LoadBalancerSelectionFieldsEthDst LoadBalancerSelectionFields = "eth_dst"
+ LoadBalancerSelectionFieldsIPSrc LoadBalancerSelectionFields = "ip_src"
+ LoadBalancerSelectionFieldsIPDst LoadBalancerSelectionFields = "ip_dst"
+ LoadBalancerSelectionFieldsTpSrc LoadBalancerSelectionFields = "tp_src"
+ LoadBalancerSelectionFieldsTpDst LoadBalancerSelectionFields = "tp_dst"
+)
+
+// LoadBalancer defines an object in Load_Balancer table
+type LoadBalancer struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ HealthCheck []string `ovsdb:"health_check"`
+ IPPortMappings map[string]string `ovsdb:"ip_port_mappings"`
+ Name string `ovsdb:"name"`
+ Options map[string]string `ovsdb:"options"`
+ Protocol *LoadBalancerProtocol `ovsdb:"protocol"`
+ SelectionFields []LoadBalancerSelectionFields `ovsdb:"selection_fields"`
+ Vips map[string]string `ovsdb:"vips"`
+}
+
+func (a *LoadBalancer) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LoadBalancer) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLoadBalancerExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) GetHealthCheck() []string {
+ return a.HealthCheck
+}
+
+func copyLoadBalancerHealthCheck(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLoadBalancerHealthCheck(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) GetIPPortMappings() map[string]string {
+ return a.IPPortMappings
+}
+
+func copyLoadBalancerIPPortMappings(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerIPPortMappings(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) GetName() string {
+ return a.Name
+}
+
+func (a *LoadBalancer) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyLoadBalancerOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) GetProtocol() *LoadBalancerProtocol {
+ return a.Protocol
+}
+
+func copyLoadBalancerProtocol(a *LoadBalancerProtocol) *LoadBalancerProtocol {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLoadBalancerProtocol(a, b *LoadBalancerProtocol) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LoadBalancer) GetSelectionFields() []LoadBalancerSelectionFields {
+ return a.SelectionFields
+}
+
+func copyLoadBalancerSelectionFields(a []LoadBalancerSelectionFields) []LoadBalancerSelectionFields {
+ if a == nil {
+ return nil
+ }
+ b := make([]LoadBalancerSelectionFields, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLoadBalancerSelectionFields(a, b []LoadBalancerSelectionFields) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) GetVips() map[string]string {
+ return a.Vips
+}
+
+func copyLoadBalancerVips(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerVips(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) DeepCopyInto(b *LoadBalancer) {
+ *b = *a
+ b.ExternalIDs = copyLoadBalancerExternalIDs(a.ExternalIDs)
+ b.HealthCheck = copyLoadBalancerHealthCheck(a.HealthCheck)
+ b.IPPortMappings = copyLoadBalancerIPPortMappings(a.IPPortMappings)
+ b.Options = copyLoadBalancerOptions(a.Options)
+ b.Protocol = copyLoadBalancerProtocol(a.Protocol)
+ b.SelectionFields = copyLoadBalancerSelectionFields(a.SelectionFields)
+ b.Vips = copyLoadBalancerVips(a.Vips)
+}
+
+func (a *LoadBalancer) DeepCopy() *LoadBalancer {
+ b := new(LoadBalancer)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LoadBalancer) CloneModelInto(b model.Model) {
+ c := b.(*LoadBalancer)
+ a.DeepCopyInto(c)
+}
+
+func (a *LoadBalancer) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LoadBalancer) Equals(b *LoadBalancer) bool {
+ return a.UUID == b.UUID &&
+ equalLoadBalancerExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalLoadBalancerHealthCheck(a.HealthCheck, b.HealthCheck) &&
+ equalLoadBalancerIPPortMappings(a.IPPortMappings, b.IPPortMappings) &&
+ a.Name == b.Name &&
+ equalLoadBalancerOptions(a.Options, b.Options) &&
+ equalLoadBalancerProtocol(a.Protocol, b.Protocol) &&
+ equalLoadBalancerSelectionFields(a.SelectionFields, b.SelectionFields) &&
+ equalLoadBalancerVips(a.Vips, b.Vips)
+}
+
+func (a *LoadBalancer) EqualsModel(b model.Model) bool {
+ c := b.(*LoadBalancer)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LoadBalancer{}
+var _ model.ComparableModel = &LoadBalancer{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go
new file mode 100644
index 000000000..775924967
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go
@@ -0,0 +1,85 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LoadBalancerGroupTable = "Load_Balancer_Group"
+
+// LoadBalancerGroup defines an object in Load_Balancer_Group table
+type LoadBalancerGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ LoadBalancer []string `ovsdb:"load_balancer"`
+ Name string `ovsdb:"name"`
+}
+
+func (a *LoadBalancerGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LoadBalancerGroup) GetLoadBalancer() []string {
+ return a.LoadBalancer
+}
+
+func copyLoadBalancerGroupLoadBalancer(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLoadBalancerGroupLoadBalancer(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancerGroup) GetName() string {
+ return a.Name
+}
+
+func (a *LoadBalancerGroup) DeepCopyInto(b *LoadBalancerGroup) {
+ *b = *a
+ b.LoadBalancer = copyLoadBalancerGroupLoadBalancer(a.LoadBalancer)
+}
+
+func (a *LoadBalancerGroup) DeepCopy() *LoadBalancerGroup {
+ b := new(LoadBalancerGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LoadBalancerGroup) CloneModelInto(b model.Model) {
+ c := b.(*LoadBalancerGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *LoadBalancerGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LoadBalancerGroup) Equals(b *LoadBalancerGroup) bool {
+ return a.UUID == b.UUID &&
+ equalLoadBalancerGroupLoadBalancer(a.LoadBalancer, b.LoadBalancer) &&
+ a.Name == b.Name
+}
+
+func (a *LoadBalancerGroup) EqualsModel(b model.Model) bool {
+ c := b.(*LoadBalancerGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LoadBalancerGroup{}
+var _ model.ComparableModel = &LoadBalancerGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go
new file mode 100644
index 000000000..c8163fa00
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go
@@ -0,0 +1,120 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LoadBalancerHealthCheckTable = "Load_Balancer_Health_Check"
+
+// LoadBalancerHealthCheck defines an object in Load_Balancer_Health_Check table
+type LoadBalancerHealthCheck struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Options map[string]string `ovsdb:"options"`
+ Vip string `ovsdb:"vip"`
+}
+
+func (a *LoadBalancerHealthCheck) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LoadBalancerHealthCheck) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLoadBalancerHealthCheckExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerHealthCheckExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancerHealthCheck) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyLoadBalancerHealthCheckOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerHealthCheckOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancerHealthCheck) GetVip() string {
+ return a.Vip
+}
+
+func (a *LoadBalancerHealthCheck) DeepCopyInto(b *LoadBalancerHealthCheck) {
+ *b = *a
+ b.ExternalIDs = copyLoadBalancerHealthCheckExternalIDs(a.ExternalIDs)
+ b.Options = copyLoadBalancerHealthCheckOptions(a.Options)
+}
+
+func (a *LoadBalancerHealthCheck) DeepCopy() *LoadBalancerHealthCheck {
+ b := new(LoadBalancerHealthCheck)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LoadBalancerHealthCheck) CloneModelInto(b model.Model) {
+ c := b.(*LoadBalancerHealthCheck)
+ a.DeepCopyInto(c)
+}
+
+func (a *LoadBalancerHealthCheck) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LoadBalancerHealthCheck) Equals(b *LoadBalancerHealthCheck) bool {
+ return a.UUID == b.UUID &&
+ equalLoadBalancerHealthCheckExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalLoadBalancerHealthCheckOptions(a.Options, b.Options) &&
+ a.Vip == b.Vip
+}
+
+func (a *LoadBalancerHealthCheck) EqualsModel(b model.Model) bool {
+ c := b.(*LoadBalancerHealthCheck)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LoadBalancerHealthCheck{}
+var _ model.ComparableModel = &LoadBalancerHealthCheck{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go
new file mode 100644
index 000000000..81c5efaf9
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go
@@ -0,0 +1,356 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LogicalRouterTable = "Logical_Router"
+
+// LogicalRouter defines an object in Logical_Router table
+type LogicalRouter struct {
+ UUID string `ovsdb:"_uuid"`
+ Copp *string `ovsdb:"copp"`
+ Enabled *bool `ovsdb:"enabled"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ LoadBalancer []string `ovsdb:"load_balancer"`
+ LoadBalancerGroup []string `ovsdb:"load_balancer_group"`
+ Name string `ovsdb:"name"`
+ Nat []string `ovsdb:"nat"`
+ Options map[string]string `ovsdb:"options"`
+ Policies []string `ovsdb:"policies"`
+ Ports []string `ovsdb:"ports"`
+ StaticRoutes []string `ovsdb:"static_routes"`
+}
+
+func (a *LogicalRouter) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LogicalRouter) GetCopp() *string {
+ return a.Copp
+}
+
+func copyLogicalRouterCopp(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterCopp(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouter) GetEnabled() *bool {
+ return a.Enabled
+}
+
+func copyLogicalRouterEnabled(a *bool) *bool {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterEnabled(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouter) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLogicalRouterExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouter) GetLoadBalancer() []string {
+ return a.LoadBalancer
+}
+
+func copyLogicalRouterLoadBalancer(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterLoadBalancer(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouter) GetLoadBalancerGroup() []string {
+ return a.LoadBalancerGroup
+}
+
+func copyLogicalRouterLoadBalancerGroup(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterLoadBalancerGroup(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouter) GetName() string {
+ return a.Name
+}
+
+func (a *LogicalRouter) GetNat() []string {
+ return a.Nat
+}
+
+func copyLogicalRouterNat(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterNat(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouter) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyLogicalRouterOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouter) GetPolicies() []string {
+ return a.Policies
+}
+
+func copyLogicalRouterPolicies(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterPolicies(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouter) GetPorts() []string {
+ return a.Ports
+}
+
+func copyLogicalRouterPorts(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterPorts(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouter) GetStaticRoutes() []string {
+ return a.StaticRoutes
+}
+
+func copyLogicalRouterStaticRoutes(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterStaticRoutes(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouter) DeepCopyInto(b *LogicalRouter) {
+ *b = *a
+ b.Copp = copyLogicalRouterCopp(a.Copp)
+ b.Enabled = copyLogicalRouterEnabled(a.Enabled)
+ b.ExternalIDs = copyLogicalRouterExternalIDs(a.ExternalIDs)
+ b.LoadBalancer = copyLogicalRouterLoadBalancer(a.LoadBalancer)
+ b.LoadBalancerGroup = copyLogicalRouterLoadBalancerGroup(a.LoadBalancerGroup)
+ b.Nat = copyLogicalRouterNat(a.Nat)
+ b.Options = copyLogicalRouterOptions(a.Options)
+ b.Policies = copyLogicalRouterPolicies(a.Policies)
+ b.Ports = copyLogicalRouterPorts(a.Ports)
+ b.StaticRoutes = copyLogicalRouterStaticRoutes(a.StaticRoutes)
+}
+
+func (a *LogicalRouter) DeepCopy() *LogicalRouter {
+ b := new(LogicalRouter)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LogicalRouter) CloneModelInto(b model.Model) {
+ c := b.(*LogicalRouter)
+ a.DeepCopyInto(c)
+}
+
+func (a *LogicalRouter) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LogicalRouter) Equals(b *LogicalRouter) bool {
+ return a.UUID == b.UUID &&
+ equalLogicalRouterCopp(a.Copp, b.Copp) &&
+ equalLogicalRouterEnabled(a.Enabled, b.Enabled) &&
+ equalLogicalRouterExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalLogicalRouterLoadBalancer(a.LoadBalancer, b.LoadBalancer) &&
+ equalLogicalRouterLoadBalancerGroup(a.LoadBalancerGroup, b.LoadBalancerGroup) &&
+ a.Name == b.Name &&
+ equalLogicalRouterNat(a.Nat, b.Nat) &&
+ equalLogicalRouterOptions(a.Options, b.Options) &&
+ equalLogicalRouterPolicies(a.Policies, b.Policies) &&
+ equalLogicalRouterPorts(a.Ports, b.Ports) &&
+ equalLogicalRouterStaticRoutes(a.StaticRoutes, b.StaticRoutes)
+}
+
+func (a *LogicalRouter) EqualsModel(b model.Model) bool {
+ c := b.(*LogicalRouter)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LogicalRouter{}
+var _ model.ComparableModel = &LogicalRouter{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go
new file mode 100644
index 000000000..7272dbb8a
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go
@@ -0,0 +1,229 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LogicalRouterPolicyTable = "Logical_Router_Policy"
+
+type (
+ LogicalRouterPolicyAction = string
+)
+
+var (
+ LogicalRouterPolicyActionAllow LogicalRouterPolicyAction = "allow"
+ LogicalRouterPolicyActionDrop LogicalRouterPolicyAction = "drop"
+ LogicalRouterPolicyActionReroute LogicalRouterPolicyAction = "reroute"
+)
+
+// LogicalRouterPolicy defines an object in Logical_Router_Policy table
+type LogicalRouterPolicy struct {
+ UUID string `ovsdb:"_uuid"`
+ Action LogicalRouterPolicyAction `ovsdb:"action"`
+ BFDSessions []string `ovsdb:"bfd_sessions"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Match string `ovsdb:"match"`
+ Nexthop *string `ovsdb:"nexthop"`
+ Nexthops []string `ovsdb:"nexthops"`
+ Options map[string]string `ovsdb:"options"`
+ Priority int `ovsdb:"priority"`
+}
+
+func (a *LogicalRouterPolicy) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LogicalRouterPolicy) GetAction() LogicalRouterPolicyAction {
+ return a.Action
+}
+
+func (a *LogicalRouterPolicy) GetBFDSessions() []string {
+ return a.BFDSessions
+}
+
+func copyLogicalRouterPolicyBFDSessions(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterPolicyBFDSessions(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPolicy) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLogicalRouterPolicyExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterPolicyExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPolicy) GetMatch() string {
+ return a.Match
+}
+
+func (a *LogicalRouterPolicy) GetNexthop() *string {
+ return a.Nexthop
+}
+
+func copyLogicalRouterPolicyNexthop(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterPolicyNexthop(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouterPolicy) GetNexthops() []string {
+ return a.Nexthops
+}
+
+func copyLogicalRouterPolicyNexthops(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterPolicyNexthops(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPolicy) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyLogicalRouterPolicyOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterPolicyOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPolicy) GetPriority() int {
+ return a.Priority
+}
+
+func (a *LogicalRouterPolicy) DeepCopyInto(b *LogicalRouterPolicy) {
+ *b = *a
+ b.BFDSessions = copyLogicalRouterPolicyBFDSessions(a.BFDSessions)
+ b.ExternalIDs = copyLogicalRouterPolicyExternalIDs(a.ExternalIDs)
+ b.Nexthop = copyLogicalRouterPolicyNexthop(a.Nexthop)
+ b.Nexthops = copyLogicalRouterPolicyNexthops(a.Nexthops)
+ b.Options = copyLogicalRouterPolicyOptions(a.Options)
+}
+
+func (a *LogicalRouterPolicy) DeepCopy() *LogicalRouterPolicy {
+ b := new(LogicalRouterPolicy)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LogicalRouterPolicy) CloneModelInto(b model.Model) {
+ c := b.(*LogicalRouterPolicy)
+ a.DeepCopyInto(c)
+}
+
+func (a *LogicalRouterPolicy) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LogicalRouterPolicy) Equals(b *LogicalRouterPolicy) bool {
+ return a.UUID == b.UUID &&
+ a.Action == b.Action &&
+ equalLogicalRouterPolicyBFDSessions(a.BFDSessions, b.BFDSessions) &&
+ equalLogicalRouterPolicyExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Match == b.Match &&
+ equalLogicalRouterPolicyNexthop(a.Nexthop, b.Nexthop) &&
+ equalLogicalRouterPolicyNexthops(a.Nexthops, b.Nexthops) &&
+ equalLogicalRouterPolicyOptions(a.Options, b.Options) &&
+ a.Priority == b.Priority
+}
+
+func (a *LogicalRouterPolicy) EqualsModel(b model.Model) bool {
+ c := b.(*LogicalRouterPolicy)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LogicalRouterPolicy{}
+var _ model.ComparableModel = &LogicalRouterPolicy{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go
new file mode 100644
index 000000000..d39fe0db4
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go
@@ -0,0 +1,385 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LogicalRouterPortTable = "Logical_Router_Port"
+
+// LogicalRouterPort defines an object in Logical_Router_Port table
+type LogicalRouterPort struct {
+ UUID string `ovsdb:"_uuid"`
+ DhcpRelay *string `ovsdb:"dhcp_relay"`
+ Enabled *bool `ovsdb:"enabled"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ GatewayChassis []string `ovsdb:"gateway_chassis"`
+ HaChassisGroup *string `ovsdb:"ha_chassis_group"`
+ Ipv6Prefix []string `ovsdb:"ipv6_prefix"`
+ Ipv6RaConfigs map[string]string `ovsdb:"ipv6_ra_configs"`
+ MAC string `ovsdb:"mac"`
+ Name string `ovsdb:"name"`
+ Networks []string `ovsdb:"networks"`
+ Options map[string]string `ovsdb:"options"`
+ Peer *string `ovsdb:"peer"`
+ Status map[string]string `ovsdb:"status"`
+}
+
+func (a *LogicalRouterPort) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LogicalRouterPort) GetDhcpRelay() *string {
+ return a.DhcpRelay
+}
+
+func copyLogicalRouterPortDhcpRelay(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterPortDhcpRelay(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouterPort) GetEnabled() *bool {
+ return a.Enabled
+}
+
+func copyLogicalRouterPortEnabled(a *bool) *bool {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterPortEnabled(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouterPort) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLogicalRouterPortExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterPortExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPort) GetGatewayChassis() []string {
+ return a.GatewayChassis
+}
+
+func copyLogicalRouterPortGatewayChassis(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterPortGatewayChassis(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPort) GetHaChassisGroup() *string {
+ return a.HaChassisGroup
+}
+
+func copyLogicalRouterPortHaChassisGroup(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterPortHaChassisGroup(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouterPort) GetIpv6Prefix() []string {
+ return a.Ipv6Prefix
+}
+
+func copyLogicalRouterPortIpv6Prefix(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterPortIpv6Prefix(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPort) GetIpv6RaConfigs() map[string]string {
+ return a.Ipv6RaConfigs
+}
+
+func copyLogicalRouterPortIpv6RaConfigs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterPortIpv6RaConfigs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPort) GetMAC() string {
+ return a.MAC
+}
+
+func (a *LogicalRouterPort) GetName() string {
+ return a.Name
+}
+
+func (a *LogicalRouterPort) GetNetworks() []string {
+ return a.Networks
+}
+
+func copyLogicalRouterPortNetworks(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalRouterPortNetworks(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPort) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyLogicalRouterPortOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterPortOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPort) GetPeer() *string {
+ return a.Peer
+}
+
+func copyLogicalRouterPortPeer(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterPortPeer(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouterPort) GetStatus() map[string]string {
+ return a.Status
+}
+
+func copyLogicalRouterPortStatus(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterPortStatus(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterPort) DeepCopyInto(b *LogicalRouterPort) {
+ *b = *a
+ b.DhcpRelay = copyLogicalRouterPortDhcpRelay(a.DhcpRelay)
+ b.Enabled = copyLogicalRouterPortEnabled(a.Enabled)
+ b.ExternalIDs = copyLogicalRouterPortExternalIDs(a.ExternalIDs)
+ b.GatewayChassis = copyLogicalRouterPortGatewayChassis(a.GatewayChassis)
+ b.HaChassisGroup = copyLogicalRouterPortHaChassisGroup(a.HaChassisGroup)
+ b.Ipv6Prefix = copyLogicalRouterPortIpv6Prefix(a.Ipv6Prefix)
+ b.Ipv6RaConfigs = copyLogicalRouterPortIpv6RaConfigs(a.Ipv6RaConfigs)
+ b.Networks = copyLogicalRouterPortNetworks(a.Networks)
+ b.Options = copyLogicalRouterPortOptions(a.Options)
+ b.Peer = copyLogicalRouterPortPeer(a.Peer)
+ b.Status = copyLogicalRouterPortStatus(a.Status)
+}
+
+func (a *LogicalRouterPort) DeepCopy() *LogicalRouterPort {
+ b := new(LogicalRouterPort)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LogicalRouterPort) CloneModelInto(b model.Model) {
+ c := b.(*LogicalRouterPort)
+ a.DeepCopyInto(c)
+}
+
+func (a *LogicalRouterPort) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LogicalRouterPort) Equals(b *LogicalRouterPort) bool {
+ return a.UUID == b.UUID &&
+ equalLogicalRouterPortDhcpRelay(a.DhcpRelay, b.DhcpRelay) &&
+ equalLogicalRouterPortEnabled(a.Enabled, b.Enabled) &&
+ equalLogicalRouterPortExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalLogicalRouterPortGatewayChassis(a.GatewayChassis, b.GatewayChassis) &&
+ equalLogicalRouterPortHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) &&
+ equalLogicalRouterPortIpv6Prefix(a.Ipv6Prefix, b.Ipv6Prefix) &&
+ equalLogicalRouterPortIpv6RaConfigs(a.Ipv6RaConfigs, b.Ipv6RaConfigs) &&
+ a.MAC == b.MAC &&
+ a.Name == b.Name &&
+ equalLogicalRouterPortNetworks(a.Networks, b.Networks) &&
+ equalLogicalRouterPortOptions(a.Options, b.Options) &&
+ equalLogicalRouterPortPeer(a.Peer, b.Peer) &&
+ equalLogicalRouterPortStatus(a.Status, b.Status)
+}
+
+func (a *LogicalRouterPort) EqualsModel(b model.Model) bool {
+ c := b.(*LogicalRouterPort)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LogicalRouterPort{}
+var _ model.ComparableModel = &LogicalRouterPort{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go
new file mode 100644
index 000000000..ce966e570
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go
@@ -0,0 +1,216 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LogicalRouterStaticRouteTable = "Logical_Router_Static_Route"
+
+type (
+ LogicalRouterStaticRoutePolicy = string
+)
+
+var (
+ LogicalRouterStaticRoutePolicySrcIP LogicalRouterStaticRoutePolicy = "src-ip"
+ LogicalRouterStaticRoutePolicyDstIP LogicalRouterStaticRoutePolicy = "dst-ip"
+)
+
+// LogicalRouterStaticRoute defines an object in Logical_Router_Static_Route table
+type LogicalRouterStaticRoute struct {
+ UUID string `ovsdb:"_uuid"`
+ BFD *string `ovsdb:"bfd"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ IPPrefix string `ovsdb:"ip_prefix"`
+ Nexthop string `ovsdb:"nexthop"`
+ Options map[string]string `ovsdb:"options"`
+ OutputPort *string `ovsdb:"output_port"`
+ Policy *LogicalRouterStaticRoutePolicy `ovsdb:"policy"`
+ RouteTable string `ovsdb:"route_table"`
+}
+
+func (a *LogicalRouterStaticRoute) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LogicalRouterStaticRoute) GetBFD() *string {
+ return a.BFD
+}
+
+func copyLogicalRouterStaticRouteBFD(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterStaticRouteBFD(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouterStaticRoute) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLogicalRouterStaticRouteExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterStaticRouteExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterStaticRoute) GetIPPrefix() string {
+ return a.IPPrefix
+}
+
+func (a *LogicalRouterStaticRoute) GetNexthop() string {
+ return a.Nexthop
+}
+
+func (a *LogicalRouterStaticRoute) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyLogicalRouterStaticRouteOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalRouterStaticRouteOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalRouterStaticRoute) GetOutputPort() *string {
+ return a.OutputPort
+}
+
+func copyLogicalRouterStaticRouteOutputPort(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterStaticRouteOutputPort(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouterStaticRoute) GetPolicy() *LogicalRouterStaticRoutePolicy {
+ return a.Policy
+}
+
+func copyLogicalRouterStaticRoutePolicy(a *LogicalRouterStaticRoutePolicy) *LogicalRouterStaticRoutePolicy {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalRouterStaticRoutePolicy(a, b *LogicalRouterStaticRoutePolicy) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalRouterStaticRoute) GetRouteTable() string {
+ return a.RouteTable
+}
+
+func (a *LogicalRouterStaticRoute) DeepCopyInto(b *LogicalRouterStaticRoute) {
+ *b = *a
+ b.BFD = copyLogicalRouterStaticRouteBFD(a.BFD)
+ b.ExternalIDs = copyLogicalRouterStaticRouteExternalIDs(a.ExternalIDs)
+ b.Options = copyLogicalRouterStaticRouteOptions(a.Options)
+ b.OutputPort = copyLogicalRouterStaticRouteOutputPort(a.OutputPort)
+ b.Policy = copyLogicalRouterStaticRoutePolicy(a.Policy)
+}
+
+func (a *LogicalRouterStaticRoute) DeepCopy() *LogicalRouterStaticRoute {
+ b := new(LogicalRouterStaticRoute)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LogicalRouterStaticRoute) CloneModelInto(b model.Model) {
+ c := b.(*LogicalRouterStaticRoute)
+ a.DeepCopyInto(c)
+}
+
+func (a *LogicalRouterStaticRoute) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LogicalRouterStaticRoute) Equals(b *LogicalRouterStaticRoute) bool {
+ return a.UUID == b.UUID &&
+ equalLogicalRouterStaticRouteBFD(a.BFD, b.BFD) &&
+ equalLogicalRouterStaticRouteExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.IPPrefix == b.IPPrefix &&
+ a.Nexthop == b.Nexthop &&
+ equalLogicalRouterStaticRouteOptions(a.Options, b.Options) &&
+ equalLogicalRouterStaticRouteOutputPort(a.OutputPort, b.OutputPort) &&
+ equalLogicalRouterStaticRoutePolicy(a.Policy, b.Policy) &&
+ a.RouteTable == b.RouteTable
+}
+
+func (a *LogicalRouterStaticRoute) EqualsModel(b model.Model) bool {
+ c := b.(*LogicalRouterStaticRoute)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LogicalRouterStaticRoute{}
+var _ model.ComparableModel = &LogicalRouterStaticRoute{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go
new file mode 100644
index 000000000..50b8214ad
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go
@@ -0,0 +1,362 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LogicalSwitchTable = "Logical_Switch"
+
+// LogicalSwitch defines an object in Logical_Switch table
+type LogicalSwitch struct {
+ UUID string `ovsdb:"_uuid"`
+ ACLs []string `ovsdb:"acls"`
+ Copp *string `ovsdb:"copp"`
+ DNSRecords []string `ovsdb:"dns_records"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ ForwardingGroups []string `ovsdb:"forwarding_groups"`
+ LoadBalancer []string `ovsdb:"load_balancer"`
+ LoadBalancerGroup []string `ovsdb:"load_balancer_group"`
+ Name string `ovsdb:"name"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+ Ports []string `ovsdb:"ports"`
+ QOSRules []string `ovsdb:"qos_rules"`
+}
+
+func (a *LogicalSwitch) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LogicalSwitch) GetACLs() []string {
+ return a.ACLs
+}
+
+func copyLogicalSwitchACLs(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchACLs(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) GetCopp() *string {
+ return a.Copp
+}
+
+func copyLogicalSwitchCopp(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchCopp(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitch) GetDNSRecords() []string {
+ return a.DNSRecords
+}
+
+func copyLogicalSwitchDNSRecords(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchDNSRecords(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLogicalSwitchExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalSwitchExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) GetForwardingGroups() []string {
+ return a.ForwardingGroups
+}
+
+func copyLogicalSwitchForwardingGroups(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchForwardingGroups(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) GetLoadBalancer() []string {
+ return a.LoadBalancer
+}
+
+func copyLogicalSwitchLoadBalancer(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchLoadBalancer(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) GetLoadBalancerGroup() []string {
+ return a.LoadBalancerGroup
+}
+
+func copyLogicalSwitchLoadBalancerGroup(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchLoadBalancerGroup(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) GetName() string {
+ return a.Name
+}
+
+func (a *LogicalSwitch) GetOtherConfig() map[string]string {
+ return a.OtherConfig
+}
+
+func copyLogicalSwitchOtherConfig(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalSwitchOtherConfig(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) GetPorts() []string {
+ return a.Ports
+}
+
+func copyLogicalSwitchPorts(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchPorts(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) GetQOSRules() []string {
+ return a.QOSRules
+}
+
+func copyLogicalSwitchQOSRules(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchQOSRules(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitch) DeepCopyInto(b *LogicalSwitch) {
+ *b = *a
+ b.ACLs = copyLogicalSwitchACLs(a.ACLs)
+ b.Copp = copyLogicalSwitchCopp(a.Copp)
+ b.DNSRecords = copyLogicalSwitchDNSRecords(a.DNSRecords)
+ b.ExternalIDs = copyLogicalSwitchExternalIDs(a.ExternalIDs)
+ b.ForwardingGroups = copyLogicalSwitchForwardingGroups(a.ForwardingGroups)
+ b.LoadBalancer = copyLogicalSwitchLoadBalancer(a.LoadBalancer)
+ b.LoadBalancerGroup = copyLogicalSwitchLoadBalancerGroup(a.LoadBalancerGroup)
+ b.OtherConfig = copyLogicalSwitchOtherConfig(a.OtherConfig)
+ b.Ports = copyLogicalSwitchPorts(a.Ports)
+ b.QOSRules = copyLogicalSwitchQOSRules(a.QOSRules)
+}
+
+func (a *LogicalSwitch) DeepCopy() *LogicalSwitch {
+ b := new(LogicalSwitch)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LogicalSwitch) CloneModelInto(b model.Model) {
+ c := b.(*LogicalSwitch)
+ a.DeepCopyInto(c)
+}
+
+func (a *LogicalSwitch) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LogicalSwitch) Equals(b *LogicalSwitch) bool {
+ return a.UUID == b.UUID &&
+ equalLogicalSwitchACLs(a.ACLs, b.ACLs) &&
+ equalLogicalSwitchCopp(a.Copp, b.Copp) &&
+ equalLogicalSwitchDNSRecords(a.DNSRecords, b.DNSRecords) &&
+ equalLogicalSwitchExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalLogicalSwitchForwardingGroups(a.ForwardingGroups, b.ForwardingGroups) &&
+ equalLogicalSwitchLoadBalancer(a.LoadBalancer, b.LoadBalancer) &&
+ equalLogicalSwitchLoadBalancerGroup(a.LoadBalancerGroup, b.LoadBalancerGroup) &&
+ a.Name == b.Name &&
+ equalLogicalSwitchOtherConfig(a.OtherConfig, b.OtherConfig) &&
+ equalLogicalSwitchPorts(a.Ports, b.Ports) &&
+ equalLogicalSwitchQOSRules(a.QOSRules, b.QOSRules)
+}
+
+func (a *LogicalSwitch) EqualsModel(b model.Model) bool {
+ c := b.(*LogicalSwitch)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LogicalSwitch{}
+var _ model.ComparableModel = &LogicalSwitch{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go
new file mode 100644
index 000000000..c048f7654
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go
@@ -0,0 +1,444 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LogicalSwitchPortTable = "Logical_Switch_Port"
+
+// LogicalSwitchPort defines an object in Logical_Switch_Port table
+type LogicalSwitchPort struct {
+ UUID string `ovsdb:"_uuid"`
+ Addresses []string `ovsdb:"addresses"`
+ Dhcpv4Options *string `ovsdb:"dhcpv4_options"`
+ Dhcpv6Options *string `ovsdb:"dhcpv6_options"`
+ DynamicAddresses *string `ovsdb:"dynamic_addresses"`
+ Enabled *bool `ovsdb:"enabled"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ HaChassisGroup *string `ovsdb:"ha_chassis_group"`
+ MirrorRules []string `ovsdb:"mirror_rules"`
+ Name string `ovsdb:"name"`
+ Options map[string]string `ovsdb:"options"`
+ ParentName *string `ovsdb:"parent_name"`
+ PortSecurity []string `ovsdb:"port_security"`
+ Tag *int `ovsdb:"tag"`
+ TagRequest *int `ovsdb:"tag_request"`
+ Type string `ovsdb:"type"`
+ Up *bool `ovsdb:"up"`
+}
+
+func (a *LogicalSwitchPort) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LogicalSwitchPort) GetAddresses() []string {
+ return a.Addresses
+}
+
+func copyLogicalSwitchPortAddresses(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchPortAddresses(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitchPort) GetDhcpv4Options() *string {
+ return a.Dhcpv4Options
+}
+
+func copyLogicalSwitchPortDhcpv4Options(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortDhcpv4Options(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) GetDhcpv6Options() *string {
+ return a.Dhcpv6Options
+}
+
+func copyLogicalSwitchPortDhcpv6Options(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortDhcpv6Options(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) GetDynamicAddresses() *string {
+ return a.DynamicAddresses
+}
+
+func copyLogicalSwitchPortDynamicAddresses(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortDynamicAddresses(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) GetEnabled() *bool {
+ return a.Enabled
+}
+
+func copyLogicalSwitchPortEnabled(a *bool) *bool {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortEnabled(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLogicalSwitchPortExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalSwitchPortExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitchPort) GetHaChassisGroup() *string {
+ return a.HaChassisGroup
+}
+
+func copyLogicalSwitchPortHaChassisGroup(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortHaChassisGroup(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) GetMirrorRules() []string {
+ return a.MirrorRules
+}
+
+func copyLogicalSwitchPortMirrorRules(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchPortMirrorRules(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitchPort) GetName() string {
+ return a.Name
+}
+
+func (a *LogicalSwitchPort) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyLogicalSwitchPortOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalSwitchPortOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitchPort) GetParentName() *string {
+ return a.ParentName
+}
+
+func copyLogicalSwitchPortParentName(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortParentName(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) GetPortSecurity() []string {
+ return a.PortSecurity
+}
+
+func copyLogicalSwitchPortPortSecurity(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalSwitchPortPortSecurity(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalSwitchPort) GetTag() *int {
+ return a.Tag
+}
+
+func copyLogicalSwitchPortTag(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortTag(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) GetTagRequest() *int {
+ return a.TagRequest
+}
+
+func copyLogicalSwitchPortTagRequest(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortTagRequest(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) GetType() string {
+ return a.Type
+}
+
+func (a *LogicalSwitchPort) GetUp() *bool {
+ return a.Up
+}
+
+func copyLogicalSwitchPortUp(a *bool) *bool {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalSwitchPortUp(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalSwitchPort) DeepCopyInto(b *LogicalSwitchPort) {
+ *b = *a
+ b.Addresses = copyLogicalSwitchPortAddresses(a.Addresses)
+ b.Dhcpv4Options = copyLogicalSwitchPortDhcpv4Options(a.Dhcpv4Options)
+ b.Dhcpv6Options = copyLogicalSwitchPortDhcpv6Options(a.Dhcpv6Options)
+ b.DynamicAddresses = copyLogicalSwitchPortDynamicAddresses(a.DynamicAddresses)
+ b.Enabled = copyLogicalSwitchPortEnabled(a.Enabled)
+ b.ExternalIDs = copyLogicalSwitchPortExternalIDs(a.ExternalIDs)
+ b.HaChassisGroup = copyLogicalSwitchPortHaChassisGroup(a.HaChassisGroup)
+ b.MirrorRules = copyLogicalSwitchPortMirrorRules(a.MirrorRules)
+ b.Options = copyLogicalSwitchPortOptions(a.Options)
+ b.ParentName = copyLogicalSwitchPortParentName(a.ParentName)
+ b.PortSecurity = copyLogicalSwitchPortPortSecurity(a.PortSecurity)
+ b.Tag = copyLogicalSwitchPortTag(a.Tag)
+ b.TagRequest = copyLogicalSwitchPortTagRequest(a.TagRequest)
+ b.Up = copyLogicalSwitchPortUp(a.Up)
+}
+
+func (a *LogicalSwitchPort) DeepCopy() *LogicalSwitchPort {
+ b := new(LogicalSwitchPort)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LogicalSwitchPort) CloneModelInto(b model.Model) {
+ c := b.(*LogicalSwitchPort)
+ a.DeepCopyInto(c)
+}
+
+func (a *LogicalSwitchPort) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LogicalSwitchPort) Equals(b *LogicalSwitchPort) bool {
+ return a.UUID == b.UUID &&
+ equalLogicalSwitchPortAddresses(a.Addresses, b.Addresses) &&
+ equalLogicalSwitchPortDhcpv4Options(a.Dhcpv4Options, b.Dhcpv4Options) &&
+ equalLogicalSwitchPortDhcpv6Options(a.Dhcpv6Options, b.Dhcpv6Options) &&
+ equalLogicalSwitchPortDynamicAddresses(a.DynamicAddresses, b.DynamicAddresses) &&
+ equalLogicalSwitchPortEnabled(a.Enabled, b.Enabled) &&
+ equalLogicalSwitchPortExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalLogicalSwitchPortHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) &&
+ equalLogicalSwitchPortMirrorRules(a.MirrorRules, b.MirrorRules) &&
+ a.Name == b.Name &&
+ equalLogicalSwitchPortOptions(a.Options, b.Options) &&
+ equalLogicalSwitchPortParentName(a.ParentName, b.ParentName) &&
+ equalLogicalSwitchPortPortSecurity(a.PortSecurity, b.PortSecurity) &&
+ equalLogicalSwitchPortTag(a.Tag, b.Tag) &&
+ equalLogicalSwitchPortTagRequest(a.TagRequest, b.TagRequest) &&
+ a.Type == b.Type &&
+ equalLogicalSwitchPortUp(a.Up, b.Up)
+}
+
+func (a *LogicalSwitchPort) EqualsModel(b model.Model) bool {
+ c := b.(*LogicalSwitchPort)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LogicalSwitchPort{}
+var _ model.ComparableModel = &LogicalSwitchPort{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go
new file mode 100644
index 000000000..09b7e9e6a
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go
@@ -0,0 +1,158 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const MeterTable = "Meter"
+
+type (
+ MeterUnit = string
+)
+
+var (
+ MeterUnitKbps MeterUnit = "kbps"
+ MeterUnitPktps MeterUnit = "pktps"
+)
+
+// Meter defines an object in Meter table
+type Meter struct {
+ UUID string `ovsdb:"_uuid"`
+ Bands []string `ovsdb:"bands"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Fair *bool `ovsdb:"fair"`
+ Name string `ovsdb:"name"`
+ Unit MeterUnit `ovsdb:"unit"`
+}
+
+func (a *Meter) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Meter) GetBands() []string {
+ return a.Bands
+}
+
+func copyMeterBands(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalMeterBands(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Meter) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyMeterExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalMeterExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Meter) GetFair() *bool {
+ return a.Fair
+}
+
+func copyMeterFair(a *bool) *bool {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalMeterFair(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Meter) GetName() string {
+ return a.Name
+}
+
+func (a *Meter) GetUnit() MeterUnit {
+ return a.Unit
+}
+
+func (a *Meter) DeepCopyInto(b *Meter) {
+ *b = *a
+ b.Bands = copyMeterBands(a.Bands)
+ b.ExternalIDs = copyMeterExternalIDs(a.ExternalIDs)
+ b.Fair = copyMeterFair(a.Fair)
+}
+
+func (a *Meter) DeepCopy() *Meter {
+ b := new(Meter)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Meter) CloneModelInto(b model.Model) {
+ c := b.(*Meter)
+ a.DeepCopyInto(c)
+}
+
+func (a *Meter) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Meter) Equals(b *Meter) bool {
+ return a.UUID == b.UUID &&
+ equalMeterBands(a.Bands, b.Bands) &&
+ equalMeterExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalMeterFair(a.Fair, b.Fair) &&
+ a.Name == b.Name &&
+ a.Unit == b.Unit
+}
+
+func (a *Meter) EqualsModel(b model.Model) bool {
+ c := b.(*Meter)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Meter{}
+var _ model.ComparableModel = &Meter{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go
new file mode 100644
index 000000000..4ef0d901a
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go
@@ -0,0 +1,107 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const MeterBandTable = "Meter_Band"
+
+type (
+ MeterBandAction = string
+)
+
+var (
+ MeterBandActionDrop MeterBandAction = "drop"
+)
+
+// MeterBand defines an object in Meter_Band table
+type MeterBand struct {
+ UUID string `ovsdb:"_uuid"`
+ Action MeterBandAction `ovsdb:"action"`
+ BurstSize int `ovsdb:"burst_size"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Rate int `ovsdb:"rate"`
+}
+
+func (a *MeterBand) GetUUID() string {
+ return a.UUID
+}
+
+func (a *MeterBand) GetAction() MeterBandAction {
+ return a.Action
+}
+
+func (a *MeterBand) GetBurstSize() int {
+ return a.BurstSize
+}
+
+func (a *MeterBand) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyMeterBandExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalMeterBandExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *MeterBand) GetRate() int {
+ return a.Rate
+}
+
+func (a *MeterBand) DeepCopyInto(b *MeterBand) {
+ *b = *a
+ b.ExternalIDs = copyMeterBandExternalIDs(a.ExternalIDs)
+}
+
+func (a *MeterBand) DeepCopy() *MeterBand {
+ b := new(MeterBand)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *MeterBand) CloneModelInto(b model.Model) {
+ c := b.(*MeterBand)
+ a.DeepCopyInto(c)
+}
+
+func (a *MeterBand) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *MeterBand) Equals(b *MeterBand) bool {
+ return a.UUID == b.UUID &&
+ a.Action == b.Action &&
+ a.BurstSize == b.BurstSize &&
+ equalMeterBandExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Rate == b.Rate
+}
+
+func (a *MeterBand) EqualsModel(b model.Model) bool {
+ c := b.(*MeterBand)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &MeterBand{}
+var _ model.ComparableModel = &MeterBand{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go
new file mode 100644
index 000000000..57e3b01f6
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go
@@ -0,0 +1,125 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const MirrorTable = "Mirror"
+
+type (
+ MirrorFilter = string
+ MirrorType = string
+)
+
+var (
+ MirrorFilterFromLport MirrorFilter = "from-lport"
+ MirrorFilterToLport MirrorFilter = "to-lport"
+ MirrorFilterBoth MirrorFilter = "both"
+ MirrorTypeGre MirrorType = "gre"
+ MirrorTypeErspan MirrorType = "erspan"
+ MirrorTypeLocal MirrorType = "local"
+)
+
+// Mirror defines an object in Mirror table
+type Mirror struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Filter MirrorFilter `ovsdb:"filter"`
+ Index int `ovsdb:"index"`
+ Name string `ovsdb:"name"`
+ Sink string `ovsdb:"sink"`
+ Type MirrorType `ovsdb:"type"`
+}
+
+func (a *Mirror) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Mirror) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyMirrorExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalMirrorExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Mirror) GetFilter() MirrorFilter {
+ return a.Filter
+}
+
+func (a *Mirror) GetIndex() int {
+ return a.Index
+}
+
+func (a *Mirror) GetName() string {
+ return a.Name
+}
+
+func (a *Mirror) GetSink() string {
+ return a.Sink
+}
+
+func (a *Mirror) GetType() MirrorType {
+ return a.Type
+}
+
+func (a *Mirror) DeepCopyInto(b *Mirror) {
+ *b = *a
+ b.ExternalIDs = copyMirrorExternalIDs(a.ExternalIDs)
+}
+
+func (a *Mirror) DeepCopy() *Mirror {
+ b := new(Mirror)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Mirror) CloneModelInto(b model.Model) {
+ c := b.(*Mirror)
+ a.DeepCopyInto(c)
+}
+
+func (a *Mirror) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Mirror) Equals(b *Mirror) bool {
+ return a.UUID == b.UUID &&
+ equalMirrorExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Filter == b.Filter &&
+ a.Index == b.Index &&
+ a.Name == b.Name &&
+ a.Sink == b.Sink &&
+ a.Type == b.Type
+}
+
+func (a *Mirror) EqualsModel(b model.Model) bool {
+ c := b.(*Mirror)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Mirror{}
+var _ model.ComparableModel = &Mirror{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go
new file mode 100644
index 000000000..daabac453
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go
@@ -0,0 +1,2262 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import (
+ "encoding/json"
+
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb
+func FullDatabaseModel() (model.ClientDBModel, error) {
+ return model.NewClientDBModel("OVN_Northbound", map[string]model.Model{
+ "ACL": &ACL{},
+ "Address_Set": &AddressSet{},
+ "BFD": &BFD{},
+ "Chassis_Template_Var": &ChassisTemplateVar{},
+ "Connection": &Connection{},
+ "Copp": &Copp{},
+ "DHCP_Options": &DHCPOptions{},
+ "DHCP_Relay": &DHCPRelay{},
+ "DNS": &DNS{},
+ "Forwarding_Group": &ForwardingGroup{},
+ "Gateway_Chassis": &GatewayChassis{},
+ "HA_Chassis": &HAChassis{},
+ "HA_Chassis_Group": &HAChassisGroup{},
+ "Load_Balancer": &LoadBalancer{},
+ "Load_Balancer_Group": &LoadBalancerGroup{},
+ "Load_Balancer_Health_Check": &LoadBalancerHealthCheck{},
+ "Logical_Router": &LogicalRouter{},
+ "Logical_Router_Policy": &LogicalRouterPolicy{},
+ "Logical_Router_Port": &LogicalRouterPort{},
+ "Logical_Router_Static_Route": &LogicalRouterStaticRoute{},
+ "Logical_Switch": &LogicalSwitch{},
+ "Logical_Switch_Port": &LogicalSwitchPort{},
+ "Meter": &Meter{},
+ "Meter_Band": &MeterBand{},
+ "Mirror": &Mirror{},
+ "NAT": &NAT{},
+ "NB_Global": &NBGlobal{},
+ "Port_Group": &PortGroup{},
+ "QoS": &QoS{},
+ "SSL": &SSL{},
+ "Sample": &Sample{},
+ "Sample_Collector": &SampleCollector{},
+ "Sampling_App": &SamplingApp{},
+ "Static_MAC_Binding": &StaticMACBinding{},
+ })
+}
+
+var schema = `{
+ "name": "OVN_Northbound",
+ "version": "7.6.0",
+ "tables": {
+ "ACL": {
+ "columns": {
+ "action": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "allow",
+ "allow-related",
+ "allow-stateless",
+ "drop",
+ "reject",
+ "pass"
+ ]
+ ]
+ }
+ }
+ },
+ "direction": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "from-lport",
+ "to-lport"
+ ]
+ ]
+ }
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "label": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 4294967295
+ }
+ }
+ },
+ "log": {
+ "type": "boolean"
+ },
+ "match": {
+ "type": "string"
+ },
+ "meter": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "name": {
+ "type": {
+ "key": {
+ "type": "string",
+ "minLength": 63,
+ "maxLength": 63
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32767
+ }
+ }
+ },
+ "sample_est": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Sample",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "sample_new": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Sample",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "severity": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "alert",
+ "warning",
+ "notice",
+ "info",
+ "debug"
+ ]
+ ]
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "tier": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 3
+ }
+ }
+ }
+ }
+ },
+ "Address_Set": {
+ "columns": {
+ "addresses": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "BFD": {
+ "columns": {
+ "detect_mult": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "dst_ip": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "logical_port": {
+ "type": "string"
+ },
+ "min_rx": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "min_tx": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "status": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "down",
+ "init",
+ "up",
+ "admin_down"
+ ]
+ ]
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "indexes": [
+ [
+ "logical_port",
+ "dst_ip"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Chassis_Template_Var": {
+ "columns": {
+ "chassis": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "variables": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "chassis"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Connection": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "inactivity_probe": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "is_connected": {
+ "type": "boolean",
+ "ephemeral": true
+ },
+ "max_backoff": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1000
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "other_config": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "status": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ },
+ "ephemeral": true
+ },
+ "target": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "target"
+ ]
+ ]
+ },
+ "Copp": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "meters": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "DHCP_Options": {
+ "columns": {
+ "cidr": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "DHCP_Relay": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "servers": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "DNS": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "records": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "Forwarding_Group": {
+ "columns": {
+ "child_port": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 1,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "liveness": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "vip": {
+ "type": "string"
+ },
+ "vmac": {
+ "type": "string"
+ }
+ }
+ },
+ "Gateway_Chassis": {
+ "columns": {
+ "chassis_name": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32767
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ]
+ },
+ "HA_Chassis": {
+ "columns": {
+ "chassis_name": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32767
+ }
+ }
+ }
+ }
+ },
+ "HA_Chassis_Group": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ha_chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "HA_Chassis",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Load_Balancer": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "health_check": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Load_Balancer_Health_Check",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ip_port_mappings": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "protocol": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "tcp",
+ "udp",
+ "sctp"
+ ]
+ ]
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "selection_fields": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "eth_src",
+ "eth_dst",
+ "ip_src",
+ "ip_dst",
+ "tp_src",
+ "tp_dst"
+ ]
+ ]
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "vips": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "Load_Balancer_Group": {
+ "columns": {
+ "load_balancer": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Load_Balancer",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Load_Balancer_Health_Check": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "vip": {
+ "type": "string"
+ }
+ }
+ },
+ "Logical_Router": {
+ "columns": {
+ "copp": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Copp",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "enabled": {
+ "type": {
+ "key": {
+ "type": "boolean"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "load_balancer": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Load_Balancer",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "load_balancer_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Load_Balancer_Group"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "nat": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "NAT",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "policies": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_Router_Policy",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ports": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_Router_Port",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "static_routes": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_Router_Static_Route",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "Logical_Router_Policy": {
+ "columns": {
+ "action": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "allow",
+ "drop",
+ "reroute"
+ ]
+ ]
+ }
+ }
+ },
+ "bfd_sessions": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "BFD",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "match": {
+ "type": "string"
+ },
+ "nexthop": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "nexthops": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32767
+ }
+ }
+ }
+ }
+ },
+ "Logical_Router_Port": {
+ "columns": {
+ "dhcp_relay": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "DHCP_Relay",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "enabled": {
+ "type": {
+ "key": {
+ "type": "boolean"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "gateway_chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Gateway_Chassis",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ha_chassis_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "HA_Chassis_Group",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "ipv6_prefix": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ipv6_ra_configs": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "mac": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "networks": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 1,
+ "max": "unlimited"
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "peer": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "status": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ]
+ },
+ "Logical_Router_Static_Route": {
+ "columns": {
+ "bfd": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "BFD",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ip_prefix": {
+ "type": "string"
+ },
+ "nexthop": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "output_port": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "policy": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "src-ip",
+ "dst-ip"
+ ]
+ ]
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "route_table": {
+ "type": "string"
+ }
+ }
+ },
+ "Logical_Switch": {
+ "columns": {
+ "acls": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "ACL",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "copp": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Copp",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "dns_records": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "DNS",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "forwarding_groups": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Forwarding_Group",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "load_balancer": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Load_Balancer",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "load_balancer_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Load_Balancer_Group"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "other_config": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ports": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_Switch_Port",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "qos_rules": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "QoS",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "Logical_Switch_Port": {
+ "columns": {
+ "addresses": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "dhcpv4_options": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "DHCP_Options",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "dhcpv6_options": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "DHCP_Options",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "dynamic_addresses": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "enabled": {
+ "type": {
+ "key": {
+ "type": "boolean"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ha_chassis_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "HA_Chassis_Group",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "mirror_rules": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Mirror",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "parent_name": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "port_security": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "tag": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4095
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "tag_request": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 4095
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "type": {
+ "type": "string"
+ },
+ "up": {
+ "type": {
+ "key": {
+ "type": "boolean"
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ]
+ },
+ "Meter": {
+ "columns": {
+ "bands": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Meter_Band",
+ "refType": "strong"
+ },
+ "min": 1,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "fair": {
+ "type": {
+ "key": {
+ "type": "boolean"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "unit": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "kbps",
+ "pktps"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Meter_Band": {
+ "columns": {
+ "action": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": "drop"
+ }
+ }
+ },
+ "burst_size": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 4294967295
+ }
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "rate": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4294967295
+ }
+ }
+ }
+ }
+ },
+ "Mirror": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "filter": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "from-lport",
+ "to-lport",
+ "both"
+ ]
+ ]
+ }
+ }
+ },
+ "index": {
+ "type": "integer"
+ },
+ "name": {
+ "type": "string"
+ },
+ "sink": {
+ "type": "string"
+ },
+ "type": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "gre",
+ "erspan",
+ "local"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "NAT": {
+ "columns": {
+ "allowed_ext_ips": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Address_Set",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "exempted_ext_ips": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Address_Set",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "external_ip": {
+ "type": "string"
+ },
+ "external_mac": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_port_range": {
+ "type": "string"
+ },
+ "gateway_port": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_Router_Port",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "logical_ip": {
+ "type": "string"
+ },
+ "logical_port": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "match": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32767
+ }
+ }
+ },
+ "type": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "dnat",
+ "snat",
+ "dnat_and_snat"
+ ]
+ ]
+ }
+ }
+ }
+ }
+ },
+ "NB_Global": {
+ "columns": {
+ "connections": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Connection"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "hv_cfg": {
+ "type": "integer"
+ },
+ "hv_cfg_timestamp": {
+ "type": "integer"
+ },
+ "ipsec": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "nb_cfg": {
+ "type": "integer"
+ },
+ "nb_cfg_timestamp": {
+ "type": "integer"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "sb_cfg": {
+ "type": "integer"
+ },
+ "sb_cfg_timestamp": {
+ "type": "integer"
+ },
+ "ssl": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "SSL"
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "Port_Group": {
+ "columns": {
+ "acls": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "ACL",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "ports": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_Switch_Port",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "QoS": {
+ "columns": {
+ "action": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "dscp",
+ "mark"
+ ]
+ ]
+ },
+ "value": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 4294967295
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "bandwidth": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "rate",
+ "burst"
+ ]
+ ]
+ },
+ "value": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4294967295
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "direction": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "from-lport",
+ "to-lport"
+ ]
+ ]
+ }
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "match": {
+ "type": "string"
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32767
+ }
+ }
+ }
+ }
+ },
+ "SSL": {
+ "columns": {
+ "bootstrap_ca_cert": {
+ "type": "boolean"
+ },
+ "ca_cert": {
+ "type": "string"
+ },
+ "certificate": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "private_key": {
+ "type": "string"
+ },
+ "ssl_ciphers": {
+ "type": "string"
+ },
+ "ssl_protocols": {
+ "type": "string"
+ }
+ }
+ },
+ "Sample": {
+ "columns": {
+ "collectors": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Sample_Collector",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "metadata": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4294967295
+ },
+ "min": 1,
+ "max": 1
+ }
+ }
+ },
+ "indexes": [
+ [
+ "metadata"
+ ]
+ ]
+ },
+ "Sample_Collector": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "id": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 255
+ }
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "probability": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 65535
+ }
+ }
+ },
+ "set_id": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4294967295
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "id"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Sampling_App": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "id": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 255
+ }
+ }
+ },
+ "type": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "drop",
+ "acl-new",
+ "acl-est"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "type"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Static_MAC_Binding": {
+ "columns": {
+ "ip": {
+ "type": "string"
+ },
+ "logical_port": {
+ "type": "string"
+ },
+ "mac": {
+ "type": "string"
+ },
+ "override_dynamic_mac": {
+ "type": "boolean"
+ }
+ },
+ "indexes": [
+ [
+ "logical_port",
+ "ip"
+ ]
+ ],
+ "isRoot": true
+ }
+ }
+}`
+
+func Schema() ovsdb.DatabaseSchema {
+ var s ovsdb.DatabaseSchema
+ err := json.Unmarshal([]byte(schema), &s)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go
new file mode 100644
index 000000000..4bd1b7ed4
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go
@@ -0,0 +1,285 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const NATTable = "NAT"
+
+type (
+ NATType = string
+)
+
+var (
+ NATTypeDNAT NATType = "dnat"
+ NATTypeSNAT NATType = "snat"
+ NATTypeDNATAndSNAT NATType = "dnat_and_snat"
+)
+
+// NAT defines an object in NAT table
+type NAT struct {
+ UUID string `ovsdb:"_uuid"`
+ AllowedExtIPs *string `ovsdb:"allowed_ext_ips"`
+ ExemptedExtIPs *string `ovsdb:"exempted_ext_ips"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ ExternalIP string `ovsdb:"external_ip"`
+ ExternalMAC *string `ovsdb:"external_mac"`
+ ExternalPortRange string `ovsdb:"external_port_range"`
+ GatewayPort *string `ovsdb:"gateway_port"`
+ LogicalIP string `ovsdb:"logical_ip"`
+ LogicalPort *string `ovsdb:"logical_port"`
+ Match string `ovsdb:"match"`
+ Options map[string]string `ovsdb:"options"`
+ Priority int `ovsdb:"priority"`
+ Type NATType `ovsdb:"type"`
+}
+
+func (a *NAT) GetUUID() string {
+ return a.UUID
+}
+
+func (a *NAT) GetAllowedExtIPs() *string {
+ return a.AllowedExtIPs
+}
+
+func copyNATAllowedExtIPs(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalNATAllowedExtIPs(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *NAT) GetExemptedExtIPs() *string {
+ return a.ExemptedExtIPs
+}
+
+func copyNATExemptedExtIPs(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalNATExemptedExtIPs(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *NAT) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyNATExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalNATExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *NAT) GetExternalIP() string {
+ return a.ExternalIP
+}
+
+func (a *NAT) GetExternalMAC() *string {
+ return a.ExternalMAC
+}
+
+func copyNATExternalMAC(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalNATExternalMAC(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *NAT) GetExternalPortRange() string {
+ return a.ExternalPortRange
+}
+
+func (a *NAT) GetGatewayPort() *string {
+ return a.GatewayPort
+}
+
+func copyNATGatewayPort(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalNATGatewayPort(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *NAT) GetLogicalIP() string {
+ return a.LogicalIP
+}
+
+func (a *NAT) GetLogicalPort() *string {
+ return a.LogicalPort
+}
+
+func copyNATLogicalPort(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalNATLogicalPort(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *NAT) GetMatch() string {
+ return a.Match
+}
+
+func (a *NAT) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyNATOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalNATOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *NAT) GetPriority() int {
+ return a.Priority
+}
+
+func (a *NAT) GetType() NATType {
+ return a.Type
+}
+
+func (a *NAT) DeepCopyInto(b *NAT) {
+ *b = *a
+ b.AllowedExtIPs = copyNATAllowedExtIPs(a.AllowedExtIPs)
+ b.ExemptedExtIPs = copyNATExemptedExtIPs(a.ExemptedExtIPs)
+ b.ExternalIDs = copyNATExternalIDs(a.ExternalIDs)
+ b.ExternalMAC = copyNATExternalMAC(a.ExternalMAC)
+ b.GatewayPort = copyNATGatewayPort(a.GatewayPort)
+ b.LogicalPort = copyNATLogicalPort(a.LogicalPort)
+ b.Options = copyNATOptions(a.Options)
+}
+
+func (a *NAT) DeepCopy() *NAT {
+ b := new(NAT)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *NAT) CloneModelInto(b model.Model) {
+ c := b.(*NAT)
+ a.DeepCopyInto(c)
+}
+
+func (a *NAT) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *NAT) Equals(b *NAT) bool {
+ return a.UUID == b.UUID &&
+ equalNATAllowedExtIPs(a.AllowedExtIPs, b.AllowedExtIPs) &&
+ equalNATExemptedExtIPs(a.ExemptedExtIPs, b.ExemptedExtIPs) &&
+ equalNATExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.ExternalIP == b.ExternalIP &&
+ equalNATExternalMAC(a.ExternalMAC, b.ExternalMAC) &&
+ a.ExternalPortRange == b.ExternalPortRange &&
+ equalNATGatewayPort(a.GatewayPort, b.GatewayPort) &&
+ a.LogicalIP == b.LogicalIP &&
+ equalNATLogicalPort(a.LogicalPort, b.LogicalPort) &&
+ a.Match == b.Match &&
+ equalNATOptions(a.Options, b.Options) &&
+ a.Priority == b.Priority &&
+ a.Type == b.Type
+}
+
+func (a *NAT) EqualsModel(b model.Model) bool {
+ c := b.(*NAT)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &NAT{}
+var _ model.ComparableModel = &NAT{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go
new file mode 100644
index 000000000..bae9e20f2
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go
@@ -0,0 +1,218 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const NBGlobalTable = "NB_Global"
+
+// NBGlobal defines an object in NB_Global table
+type NBGlobal struct {
+ UUID string `ovsdb:"_uuid"`
+ Connections []string `ovsdb:"connections"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ HvCfg int `ovsdb:"hv_cfg"`
+ HvCfgTimestamp int `ovsdb:"hv_cfg_timestamp"`
+ Ipsec bool `ovsdb:"ipsec"`
+ Name string `ovsdb:"name"`
+ NbCfg int `ovsdb:"nb_cfg"`
+ NbCfgTimestamp int `ovsdb:"nb_cfg_timestamp"`
+ Options map[string]string `ovsdb:"options"`
+ SbCfg int `ovsdb:"sb_cfg"`
+ SbCfgTimestamp int `ovsdb:"sb_cfg_timestamp"`
+ SSL *string `ovsdb:"ssl"`
+}
+
+func (a *NBGlobal) GetUUID() string {
+ return a.UUID
+}
+
+func (a *NBGlobal) GetConnections() []string {
+ return a.Connections
+}
+
+func copyNBGlobalConnections(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalNBGlobalConnections(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *NBGlobal) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyNBGlobalExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalNBGlobalExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *NBGlobal) GetHvCfg() int {
+ return a.HvCfg
+}
+
+func (a *NBGlobal) GetHvCfgTimestamp() int {
+ return a.HvCfgTimestamp
+}
+
+func (a *NBGlobal) GetIpsec() bool {
+ return a.Ipsec
+}
+
+func (a *NBGlobal) GetName() string {
+ return a.Name
+}
+
+func (a *NBGlobal) GetNbCfg() int {
+ return a.NbCfg
+}
+
+func (a *NBGlobal) GetNbCfgTimestamp() int {
+ return a.NbCfgTimestamp
+}
+
+func (a *NBGlobal) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyNBGlobalOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalNBGlobalOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *NBGlobal) GetSbCfg() int {
+ return a.SbCfg
+}
+
+func (a *NBGlobal) GetSbCfgTimestamp() int {
+ return a.SbCfgTimestamp
+}
+
+func (a *NBGlobal) GetSSL() *string {
+ return a.SSL
+}
+
+func copyNBGlobalSSL(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalNBGlobalSSL(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *NBGlobal) DeepCopyInto(b *NBGlobal) {
+ *b = *a
+ b.Connections = copyNBGlobalConnections(a.Connections)
+ b.ExternalIDs = copyNBGlobalExternalIDs(a.ExternalIDs)
+ b.Options = copyNBGlobalOptions(a.Options)
+ b.SSL = copyNBGlobalSSL(a.SSL)
+}
+
+func (a *NBGlobal) DeepCopy() *NBGlobal {
+ b := new(NBGlobal)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *NBGlobal) CloneModelInto(b model.Model) {
+ c := b.(*NBGlobal)
+ a.DeepCopyInto(c)
+}
+
+func (a *NBGlobal) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *NBGlobal) Equals(b *NBGlobal) bool {
+ return a.UUID == b.UUID &&
+ equalNBGlobalConnections(a.Connections, b.Connections) &&
+ equalNBGlobalExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.HvCfg == b.HvCfg &&
+ a.HvCfgTimestamp == b.HvCfgTimestamp &&
+ a.Ipsec == b.Ipsec &&
+ a.Name == b.Name &&
+ a.NbCfg == b.NbCfg &&
+ a.NbCfgTimestamp == b.NbCfgTimestamp &&
+ equalNBGlobalOptions(a.Options, b.Options) &&
+ a.SbCfg == b.SbCfg &&
+ a.SbCfgTimestamp == b.SbCfgTimestamp &&
+ equalNBGlobalSSL(a.SSL, b.SSL)
+}
+
+func (a *NBGlobal) EqualsModel(b model.Model) bool {
+ c := b.(*NBGlobal)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &NBGlobal{}
+var _ model.ComparableModel = &NBGlobal{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go
new file mode 100644
index 000000000..bf4fa809b
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go
@@ -0,0 +1,149 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const PortGroupTable = "Port_Group"
+
+// PortGroup defines an object in Port_Group table
+type PortGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ ACLs []string `ovsdb:"acls"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Name string `ovsdb:"name"`
+ Ports []string `ovsdb:"ports"`
+}
+
+func (a *PortGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *PortGroup) GetACLs() []string {
+ return a.ACLs
+}
+
+func copyPortGroupACLs(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortGroupACLs(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortGroup) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyPortGroupExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalPortGroupExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortGroup) GetName() string {
+ return a.Name
+}
+
+func (a *PortGroup) GetPorts() []string {
+ return a.Ports
+}
+
+func copyPortGroupPorts(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortGroupPorts(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortGroup) DeepCopyInto(b *PortGroup) {
+ *b = *a
+ b.ACLs = copyPortGroupACLs(a.ACLs)
+ b.ExternalIDs = copyPortGroupExternalIDs(a.ExternalIDs)
+ b.Ports = copyPortGroupPorts(a.Ports)
+}
+
+func (a *PortGroup) DeepCopy() *PortGroup {
+ b := new(PortGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *PortGroup) CloneModelInto(b model.Model) {
+ c := b.(*PortGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *PortGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *PortGroup) Equals(b *PortGroup) bool {
+ return a.UUID == b.UUID &&
+ equalPortGroupACLs(a.ACLs, b.ACLs) &&
+ equalPortGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Name == b.Name &&
+ equalPortGroupPorts(a.Ports, b.Ports)
+}
+
+func (a *PortGroup) EqualsModel(b model.Model) bool {
+ c := b.(*PortGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &PortGroup{}
+var _ model.ComparableModel = &PortGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go
new file mode 100644
index 000000000..d25322b4b
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go
@@ -0,0 +1,180 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const QoSTable = "QoS"
+
+type (
+ QoSAction = string
+ QoSBandwidth = string
+ QoSDirection = string
+)
+
+var (
+ QoSActionDSCP QoSAction = "dscp"
+ QoSActionMark QoSAction = "mark"
+ QoSBandwidthRate QoSBandwidth = "rate"
+ QoSBandwidthBurst QoSBandwidth = "burst"
+ QoSDirectionFromLport QoSDirection = "from-lport"
+ QoSDirectionToLport QoSDirection = "to-lport"
+)
+
+// QoS defines an object in QoS table
+type QoS struct {
+ UUID string `ovsdb:"_uuid"`
+ Action map[string]int `ovsdb:"action"`
+ Bandwidth map[string]int `ovsdb:"bandwidth"`
+ Direction QoSDirection `ovsdb:"direction"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Match string `ovsdb:"match"`
+ Priority int `ovsdb:"priority"`
+}
+
+func (a *QoS) GetUUID() string {
+ return a.UUID
+}
+
+func (a *QoS) GetAction() map[string]int {
+ return a.Action
+}
+
+func copyQoSAction(a map[string]int) map[string]int {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]int, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalQoSAction(a, b map[string]int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *QoS) GetBandwidth() map[string]int {
+ return a.Bandwidth
+}
+
+func copyQoSBandwidth(a map[string]int) map[string]int {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]int, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalQoSBandwidth(a, b map[string]int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *QoS) GetDirection() QoSDirection {
+ return a.Direction
+}
+
+func (a *QoS) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyQoSExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalQoSExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *QoS) GetMatch() string {
+ return a.Match
+}
+
+func (a *QoS) GetPriority() int {
+ return a.Priority
+}
+
+func (a *QoS) DeepCopyInto(b *QoS) {
+ *b = *a
+ b.Action = copyQoSAction(a.Action)
+ b.Bandwidth = copyQoSBandwidth(a.Bandwidth)
+ b.ExternalIDs = copyQoSExternalIDs(a.ExternalIDs)
+}
+
+func (a *QoS) DeepCopy() *QoS {
+ b := new(QoS)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *QoS) CloneModelInto(b model.Model) {
+ c := b.(*QoS)
+ a.DeepCopyInto(c)
+}
+
+func (a *QoS) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *QoS) Equals(b *QoS) bool {
+ return a.UUID == b.UUID &&
+ equalQoSAction(a.Action, b.Action) &&
+ equalQoSBandwidth(a.Bandwidth, b.Bandwidth) &&
+ a.Direction == b.Direction &&
+ equalQoSExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Match == b.Match &&
+ a.Priority == b.Priority
+}
+
+func (a *QoS) EqualsModel(b model.Model) bool {
+ c := b.(*QoS)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &QoS{}
+var _ model.ComparableModel = &QoS{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go
new file mode 100644
index 000000000..639393a1e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go
@@ -0,0 +1,85 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const SampleTable = "Sample"
+
+// Sample defines an object in Sample table
+type Sample struct {
+ UUID string `ovsdb:"_uuid"`
+ Collectors []string `ovsdb:"collectors"`
+ Metadata int `ovsdb:"metadata"`
+}
+
+func (a *Sample) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Sample) GetCollectors() []string {
+ return a.Collectors
+}
+
+func copySampleCollectors(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalSampleCollectors(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Sample) GetMetadata() int {
+ return a.Metadata
+}
+
+func (a *Sample) DeepCopyInto(b *Sample) {
+ *b = *a
+ b.Collectors = copySampleCollectors(a.Collectors)
+}
+
+func (a *Sample) DeepCopy() *Sample {
+ b := new(Sample)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Sample) CloneModelInto(b model.Model) {
+ c := b.(*Sample)
+ a.DeepCopyInto(c)
+}
+
+func (a *Sample) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Sample) Equals(b *Sample) bool {
+ return a.UUID == b.UUID &&
+ equalSampleCollectors(a.Collectors, b.Collectors) &&
+ a.Metadata == b.Metadata
+}
+
+func (a *Sample) EqualsModel(b model.Model) bool {
+ c := b.(*Sample)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Sample{}
+var _ model.ComparableModel = &Sample{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go
new file mode 100644
index 000000000..50f065904
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go
@@ -0,0 +1,105 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const SampleCollectorTable = "Sample_Collector"
+
+// SampleCollector defines an object in Sample_Collector table
+type SampleCollector struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ ID int `ovsdb:"id"`
+ Name string `ovsdb:"name"`
+ Probability int `ovsdb:"probability"`
+ SetID int `ovsdb:"set_id"`
+}
+
+func (a *SampleCollector) GetUUID() string {
+ return a.UUID
+}
+
+func (a *SampleCollector) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copySampleCollectorExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalSampleCollectorExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *SampleCollector) GetID() int {
+ return a.ID
+}
+
+func (a *SampleCollector) GetName() string {
+ return a.Name
+}
+
+func (a *SampleCollector) GetProbability() int {
+ return a.Probability
+}
+
+func (a *SampleCollector) GetSetID() int {
+ return a.SetID
+}
+
+func (a *SampleCollector) DeepCopyInto(b *SampleCollector) {
+ *b = *a
+ b.ExternalIDs = copySampleCollectorExternalIDs(a.ExternalIDs)
+}
+
+func (a *SampleCollector) DeepCopy() *SampleCollector {
+ b := new(SampleCollector)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *SampleCollector) CloneModelInto(b model.Model) {
+ c := b.(*SampleCollector)
+ a.DeepCopyInto(c)
+}
+
+func (a *SampleCollector) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *SampleCollector) Equals(b *SampleCollector) bool {
+ return a.UUID == b.UUID &&
+ equalSampleCollectorExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.ID == b.ID &&
+ a.Name == b.Name &&
+ a.Probability == b.Probability &&
+ a.SetID == b.SetID
+}
+
+func (a *SampleCollector) EqualsModel(b model.Model) bool {
+ c := b.(*SampleCollector)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &SampleCollector{}
+var _ model.ComparableModel = &SampleCollector{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go
new file mode 100644
index 000000000..a152b4237
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go
@@ -0,0 +1,103 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const SamplingAppTable = "Sampling_App"
+
+type (
+ SamplingAppType = string
+)
+
+var (
+ SamplingAppTypeDrop SamplingAppType = "drop"
+ SamplingAppTypeACLNew SamplingAppType = "acl-new"
+ SamplingAppTypeACLEst SamplingAppType = "acl-est"
+)
+
+// SamplingApp defines an object in Sampling_App table
+type SamplingApp struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ ID int `ovsdb:"id"`
+ Type SamplingAppType `ovsdb:"type"`
+}
+
+func (a *SamplingApp) GetUUID() string {
+ return a.UUID
+}
+
+func (a *SamplingApp) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copySamplingAppExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalSamplingAppExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *SamplingApp) GetID() int {
+ return a.ID
+}
+
+func (a *SamplingApp) GetType() SamplingAppType {
+ return a.Type
+}
+
+func (a *SamplingApp) DeepCopyInto(b *SamplingApp) {
+ *b = *a
+ b.ExternalIDs = copySamplingAppExternalIDs(a.ExternalIDs)
+}
+
+func (a *SamplingApp) DeepCopy() *SamplingApp {
+ b := new(SamplingApp)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *SamplingApp) CloneModelInto(b model.Model) {
+ c := b.(*SamplingApp)
+ a.DeepCopyInto(c)
+}
+
+func (a *SamplingApp) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *SamplingApp) Equals(b *SamplingApp) bool {
+ return a.UUID == b.UUID &&
+ equalSamplingAppExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.ID == b.ID &&
+ a.Type == b.Type
+}
+
+func (a *SamplingApp) EqualsModel(b model.Model) bool {
+ c := b.(*SamplingApp)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &SamplingApp{}
+var _ model.ComparableModel = &SamplingApp{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go
new file mode 100644
index 000000000..ddaba5d32
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go
@@ -0,0 +1,117 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const SSLTable = "SSL"
+
+// SSL defines an object in SSL table
+type SSL struct {
+ UUID string `ovsdb:"_uuid"`
+ BootstrapCaCert bool `ovsdb:"bootstrap_ca_cert"`
+ CaCert string `ovsdb:"ca_cert"`
+ Certificate string `ovsdb:"certificate"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ PrivateKey string `ovsdb:"private_key"`
+ SSLCiphers string `ovsdb:"ssl_ciphers"`
+ SSLProtocols string `ovsdb:"ssl_protocols"`
+}
+
+func (a *SSL) GetUUID() string {
+ return a.UUID
+}
+
+func (a *SSL) GetBootstrapCaCert() bool {
+ return a.BootstrapCaCert
+}
+
+func (a *SSL) GetCaCert() string {
+ return a.CaCert
+}
+
+func (a *SSL) GetCertificate() string {
+ return a.Certificate
+}
+
+func (a *SSL) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copySSLExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalSSLExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *SSL) GetPrivateKey() string {
+ return a.PrivateKey
+}
+
+func (a *SSL) GetSSLCiphers() string {
+ return a.SSLCiphers
+}
+
+func (a *SSL) GetSSLProtocols() string {
+ return a.SSLProtocols
+}
+
+func (a *SSL) DeepCopyInto(b *SSL) {
+ *b = *a
+ b.ExternalIDs = copySSLExternalIDs(a.ExternalIDs)
+}
+
+func (a *SSL) DeepCopy() *SSL {
+ b := new(SSL)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *SSL) CloneModelInto(b model.Model) {
+ c := b.(*SSL)
+ a.DeepCopyInto(c)
+}
+
+func (a *SSL) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *SSL) Equals(b *SSL) bool {
+ return a.UUID == b.UUID &&
+ a.BootstrapCaCert == b.BootstrapCaCert &&
+ a.CaCert == b.CaCert &&
+ a.Certificate == b.Certificate &&
+ equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.PrivateKey == b.PrivateKey &&
+ a.SSLCiphers == b.SSLCiphers &&
+ a.SSLProtocols == b.SSLProtocols
+}
+
+func (a *SSL) EqualsModel(b model.Model) bool {
+ c := b.(*SSL)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &SSL{}
+var _ model.ComparableModel = &SSL{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go
new file mode 100644
index 000000000..15207e648
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go
@@ -0,0 +1,72 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const StaticMACBindingTable = "Static_MAC_Binding"
+
+// StaticMACBinding defines an object in Static_MAC_Binding table
+type StaticMACBinding struct {
+ UUID string `ovsdb:"_uuid"`
+ IP string `ovsdb:"ip"`
+ LogicalPort string `ovsdb:"logical_port"`
+ MAC string `ovsdb:"mac"`
+ OverrideDynamicMAC bool `ovsdb:"override_dynamic_mac"`
+}
+
+func (a *StaticMACBinding) GetUUID() string {
+ return a.UUID
+}
+
+func (a *StaticMACBinding) GetIP() string {
+ return a.IP
+}
+
+func (a *StaticMACBinding) GetLogicalPort() string {
+ return a.LogicalPort
+}
+
+func (a *StaticMACBinding) GetMAC() string {
+ return a.MAC
+}
+
+func (a *StaticMACBinding) GetOverrideDynamicMAC() bool {
+ return a.OverrideDynamicMAC
+}
+
+func (a *StaticMACBinding) DeepCopyInto(b *StaticMACBinding) {
+ *b = *a
+}
+
+func (a *StaticMACBinding) DeepCopy() *StaticMACBinding {
+ b := new(StaticMACBinding)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *StaticMACBinding) CloneModelInto(b model.Model) {
+ c := b.(*StaticMACBinding)
+ a.DeepCopyInto(c)
+}
+
+func (a *StaticMACBinding) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *StaticMACBinding) Equals(b *StaticMACBinding) bool {
+ return a.UUID == b.UUID &&
+ a.IP == b.IP &&
+ a.LogicalPort == b.LogicalPort &&
+ a.MAC == b.MAC &&
+ a.OverrideDynamicMAC == b.OverrideDynamicMAC
+}
+
+func (a *StaticMACBinding) EqualsModel(b model.Model) bool {
+ c := b.(*StaticMACBinding)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &StaticMACBinding{}
+var _ model.ComparableModel = &StaticMACBinding{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go
new file mode 100644
index 000000000..6349c1519
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go
@@ -0,0 +1,303 @@
+package observability
+
+import (
+ "fmt"
+ "slices"
+ "strings"
+ "sync"
+ "time"
+
+ libovsdbclient "github.com/ovn-org/libovsdb/client"
+ libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/klog/v2"
+
+ libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// OVN observability app IDs. Make sure to always add new apps at the end.
+const (
+ DropSamplingID = iota + 1
+ ACLNewTrafficSamplingID
+ ACLEstTrafficSamplingID
+)
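+
+// With iota starting at 1, these resolve to DropSamplingID=1, ACLNewTrafficSamplingID=2
+// and ACLEstTrafficSamplingID=3; appending new apps at the end keeps the existing IDs stable.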
+
+// Temporary const until we have dynamic config.
+const DefaultObservabilityCollectorSetID = 42
+
+// This is inferred from the nbdb schema; see Sample_Collector.id.
+const maxCollectorID = 255
+const collectorFeaturesExternalID = "sample-features"
+
+// collectorConfig holds the configuration for a collector.
+// It is allowed to set different probabilities for every feature.
+// collectorSetID is used to set up sampling via OVSDB.
+type collectorConfig struct {
+ collectorSetID int
+ // probability in percent, 0 to 100
+ featuresProbability map[libovsdbops.SampleFeature]int
+}
+
+type Manager struct {
+ nbClient libovsdbclient.Client
+ sampConfig *libovsdbops.SamplingConfig
+ collectorsLock sync.Mutex
+	// Every nbdb Collector has a single probability. To allow different probabilities for different features,
+	// multiple nbdb Collectors are created, one per probability.
+	// getCollectorKey() => collector.UUID
+	dbCollectors map[string]string
+	// Cleaning up unused collectors may take time and multiple retries, as all referencing samples must be removed first.
+	// Therefore, we need to save state between those retries.
+	// getCollectorKey() => collector.ID
+ unusedCollectors map[string]int
+ unusedCollectorsRetryInterval time.Duration
+ collectorsCleanupRetries int
+ // Only maxCollectorID collectors are allowed, each should have unique ID.
+ // this set is tracking already assigned IDs.
+ takenCollectorIDs sets.Set[int]
+}
+
+func NewManager(nbClient libovsdbclient.Client) *Manager {
+ return &Manager{
+ nbClient: nbClient,
+ collectorsLock: sync.Mutex{},
+ dbCollectors: make(map[string]string),
+ unusedCollectors: make(map[string]int),
+ unusedCollectorsRetryInterval: time.Minute,
+ takenCollectorIDs: sets.New[int](),
+ }
+}
+
+func (m *Manager) SamplingConfig() *libovsdbops.SamplingConfig {
+ return m.sampConfig
+}
+
+func (m *Manager) Init() error {
+ // this will be read from the kube-api in the future
+ currentConfig := &collectorConfig{
+ collectorSetID: DefaultObservabilityCollectorSetID,
+ featuresProbability: map[libovsdbops.SampleFeature]int{
+ libovsdbops.EgressFirewallSample: 100,
+ libovsdbops.NetworkPolicySample: 100,
+ libovsdbops.AdminNetworkPolicySample: 100,
+ libovsdbops.MulticastSample: 100,
+ libovsdbops.UDNIsolationSample: 100,
+ },
+ }
+
+ return m.initWithConfig(currentConfig)
+}
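+
+// A minimal usage sketch (illustrative only; nbClient is assumed to be a connected
+// libovsdbclient.Client owned by the caller):
+//
+//	obsMgr := NewManager(nbClient)
+//	if err := obsMgr.Init(); err != nil {
+//		return err
+//	}
+//	samplingConfig := obsMgr.SamplingConfig()
+//
+// SamplingConfig() exposes the resulting *libovsdbops.SamplingConfig, which is presumably
+// passed to the libovsdbops sample helpers by the individual controllers.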
+
+func (m *Manager) initWithConfig(config *collectorConfig) error {
+ if err := m.setSamplingAppIDs(); err != nil {
+ return err
+ }
+ if err := m.setDbCollectors(); err != nil {
+ return err
+ }
+
+ featuresConfig, err := m.addCollector(config)
+ if err != nil {
+ return err
+ }
+ m.sampConfig = libovsdbops.NewSamplingConfig(featuresConfig)
+
+ // now cleanup stale collectors
+ m.deleteStaleCollectorsWithRetry()
+ return nil
+}
+
+func (m *Manager) setDbCollectors() error {
+ m.collectorsLock.Lock()
+ defer m.collectorsLock.Unlock()
+ clear(m.dbCollectors)
+ collectors, err := libovsdbops.ListSampleCollectors(m.nbClient)
+ if err != nil {
+ return fmt.Errorf("error getting sample collectors: %w", err)
+ }
+ for _, collector := range collectors {
+ collectorKey := getCollectorKey(collector.SetID, collector.Probability)
+ m.dbCollectors[collectorKey] = collector.UUID
+ m.takenCollectorIDs.Insert(collector.ID)
+ // all collectors are unused, until we update existing configs
+ m.unusedCollectors[collectorKey] = collector.ID
+ }
+ return nil
+}
+
+// Stale collectors can't be deleted until all referencing Samples are deleted.
+// Samples are deleted asynchronously by the different controllers as they initialize with the new Manager.
+// deleteStaleCollectorsWithRetry therefore retries, since deletion should eventually succeed once all controllers
+// have updated their db entries to use the latest observability config.
+func (m *Manager) deleteStaleCollectorsWithRetry() {
+ if err := m.deleteStaleCollectors(); err != nil {
+ m.collectorsCleanupRetries += 1
+		// Allow retries for 1 hour, which should be enough for all handlers to complete their initial sync.
+		if m.collectorsCleanupRetries > 60 {
+			m.collectorsCleanupRetries = 0
+			klog.Errorf("Cleanup stale collectors failed after 60 retries: %v", err)
+ return
+ }
+ time.AfterFunc(m.unusedCollectorsRetryInterval, m.deleteStaleCollectorsWithRetry)
+ return
+ }
+ m.collectorsCleanupRetries = 0
+ klog.Infof("Cleanup stale collectors succeeded.")
+}
+
+func (m *Manager) deleteStaleCollectors() error {
+ m.collectorsLock.Lock()
+ defer m.collectorsLock.Unlock()
+ var lastErr error
+	for collectorKey, collectorID := range m.unusedCollectors {
+ collectorUUID := m.dbCollectors[collectorKey]
+ err := libovsdbops.DeleteSampleCollector(m.nbClient, &nbdb.SampleCollector{
+ UUID: collectorUUID,
+ })
+ if err != nil {
+ lastErr = err
+			klog.Infof("Error deleting collector with ID=%d: %v", collectorID, lastErr)
+ continue
+ }
+ delete(m.unusedCollectors, collectorKey)
+ delete(m.dbCollectors, collectorKey)
+		delete(m.takenCollectorIDs, collectorID)
+ }
+ return lastErr
+}
+
+// Cleanup must be called when observability is no longer needed.
+// It will return an error if some samples still exist in the db.
+// This is expected, and Cleanup may be retried on the next restart.
+func Cleanup(nbClient libovsdbclient.Client) error {
+ // Do the opposite of init
+ err := libovsdbops.DeleteSamplingAppsWithPredicate(nbClient, func(app *nbdb.SamplingApp) bool {
+ return true
+ })
+ if err != nil {
+ return fmt.Errorf("error deleting sampling apps: %w", err)
+ }
+
+ err = libovsdbops.DeleteSampleCollectorWithPredicate(nbClient, func(collector *nbdb.SampleCollector) bool {
+ return true
+ })
+ if err != nil {
+ return fmt.Errorf("error deleting sample collectors: %w", err)
+ }
+ return nil
+}
+
+func (m *Manager) setSamplingAppIDs() error {
+ var ops []libovsdb.Operation
+ var err error
+ for _, appConfig := range []struct {
+ id int
+ appType nbdb.SamplingAppType
+ }{
+ {
+ id: DropSamplingID,
+ appType: nbdb.SamplingAppTypeDrop,
+ },
+ {
+ id: ACLNewTrafficSamplingID,
+ appType: nbdb.SamplingAppTypeACLNew,
+ },
+ {
+ id: ACLEstTrafficSamplingID,
+ appType: nbdb.SamplingAppTypeACLEst,
+ },
+ } {
+ samplingApp := &nbdb.SamplingApp{
+ ID: appConfig.id,
+ Type: appConfig.appType,
+ }
+ ops, err = libovsdbops.CreateOrUpdateSamplingAppsOps(m.nbClient, ops, samplingApp)
+ if err != nil {
+ return fmt.Errorf("error creating or updating sampling app %s: %w", appConfig.appType, err)
+ }
+ }
+ _, err = libovsdbops.TransactAndCheck(m.nbClient, ops)
+ return err
+}
+
+func groupByProbability(c *collectorConfig) map[int][]libovsdbops.SampleFeature {
+ probabilities := make(map[int][]libovsdbops.SampleFeature)
+ for feature, percentProbability := range c.featuresProbability {
+ probability := percentToProbability(percentProbability)
+ probabilities[probability] = append(probabilities[probability], feature)
+ }
+ return probabilities
+}
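+
+// For example, a config with FeatureA and FeatureB at 100% and FeatureC at 50% groups into
+// map[65535:[FeatureA FeatureB] 32767:[FeatureC]], so only two collectors are needed
+// (feature names here are illustrative only).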
+
+func getCollectorKey(collectorID int, probability int) string {
+ return fmt.Sprintf("%d-%d", collectorID, probability)
+}
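+
+// For example, getCollectorKey(42, 65535) returns "42-65535": one key per (set ID, probability) pair.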
+
+func (m *Manager) getFreeCollectorID() (int, error) {
+ for i := 1; i <= maxCollectorID; i++ {
+ if !m.takenCollectorIDs.Has(i) {
+ return i, nil
+ }
+ }
+ return 0, fmt.Errorf("no free collector IDs")
+}
+
+func (m *Manager) addCollector(conf *collectorConfig) (map[libovsdbops.SampleFeature][]string, error) {
+ m.collectorsLock.Lock()
+ defer m.collectorsLock.Unlock()
+ sampleFeaturesConfig := make(map[libovsdbops.SampleFeature][]string)
+ probabilityConfig := groupByProbability(conf)
+
+ for probability, features := range probabilityConfig {
+ collectorKey := getCollectorKey(conf.collectorSetID, probability)
+ var collectorUUID string
+ var ok bool
+ // ensure predictable externalID
+ slices.Sort(features)
+ collectorFeatures := strings.Join(features, ",")
+ if collectorUUID, ok = m.dbCollectors[collectorKey]; !ok {
+ collectorID, err := m.getFreeCollectorID()
+ if err != nil {
+ return sampleFeaturesConfig, err
+ }
+ collector := &nbdb.SampleCollector{
+ ID: collectorID,
+ SetID: conf.collectorSetID,
+ Probability: probability,
+ ExternalIDs: map[string]string{
+ collectorFeaturesExternalID: collectorFeatures,
+ },
+ }
+ err = libovsdbops.CreateOrUpdateSampleCollector(m.nbClient, collector)
+ if err != nil {
+ return sampleFeaturesConfig, err
+ }
+ collectorUUID = collector.UUID
+ m.dbCollectors[collectorKey] = collectorUUID
+ m.takenCollectorIDs.Insert(collectorID)
+ } else {
+ // update collector's features
+ collector := &nbdb.SampleCollector{
+ UUID: collectorUUID,
+ ExternalIDs: map[string]string{
+ collectorFeaturesExternalID: collectorFeatures,
+ },
+ }
+ err := libovsdbops.UpdateSampleCollectorExternalIDs(m.nbClient, collector)
+ if err != nil {
+ return sampleFeaturesConfig, err
+ }
+ // collector is used, remove from unused Collectors
+ delete(m.unusedCollectors, collectorKey)
+ }
+ for _, feature := range features {
+ sampleFeaturesConfig[feature] = append(sampleFeaturesConfig[feature], collectorUUID)
+ }
+ }
+ return sampleFeaturesConfig, nil
+}
+
+func percentToProbability(percent int) int {
+ return 65535 * percent / 100
+}
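+
+// The collector probability column is a 16-bit value (0-65535 in the nbdb schema), so percentages
+// map as 100% -> 65535, 50% -> 32767 and 1% -> 655; integer division drops sub-percent precision.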
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore
new file mode 100644
index 000000000..734ba1eff
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore
@@ -0,0 +1 @@
+*.ovsschema
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go
new file mode 100644
index 000000000..b3b1c3c2d
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go
@@ -0,0 +1,85 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const AddressSetTable = "Address_Set"
+
+// AddressSet defines an object in Address_Set table
+type AddressSet struct {
+ UUID string `ovsdb:"_uuid"`
+ Addresses []string `ovsdb:"addresses"`
+ Name string `ovsdb:"name"`
+}
+
+func (a *AddressSet) GetUUID() string {
+ return a.UUID
+}
+
+func (a *AddressSet) GetAddresses() []string {
+ return a.Addresses
+}
+
+func copyAddressSetAddresses(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalAddressSetAddresses(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AddressSet) GetName() string {
+ return a.Name
+}
+
+func (a *AddressSet) DeepCopyInto(b *AddressSet) {
+ *b = *a
+ b.Addresses = copyAddressSetAddresses(a.Addresses)
+}
+
+func (a *AddressSet) DeepCopy() *AddressSet {
+ b := new(AddressSet)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *AddressSet) CloneModelInto(b model.Model) {
+ c := b.(*AddressSet)
+ a.DeepCopyInto(c)
+}
+
+func (a *AddressSet) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *AddressSet) Equals(b *AddressSet) bool {
+ return a.UUID == b.UUID &&
+ equalAddressSetAddresses(a.Addresses, b.Addresses) &&
+ a.Name == b.Name
+}
+
+func (a *AddressSet) EqualsModel(b model.Model) bool {
+ c := b.(*AddressSet)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &AddressSet{}
+var _ model.ComparableModel = &AddressSet{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go
new file mode 100644
index 000000000..cf27814b5
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go
@@ -0,0 +1,179 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const BFDTable = "BFD"
+
+type (
+ BFDStatus = string
+)
+
+var (
+ BFDStatusDown BFDStatus = "down"
+ BFDStatusInit BFDStatus = "init"
+ BFDStatusUp BFDStatus = "up"
+ BFDStatusAdminDown BFDStatus = "admin_down"
+)
+
+// BFD defines an object in BFD table
+type BFD struct {
+ UUID string `ovsdb:"_uuid"`
+ ChassisName string `ovsdb:"chassis_name"`
+ DetectMult int `ovsdb:"detect_mult"`
+ Disc int `ovsdb:"disc"`
+ DstIP string `ovsdb:"dst_ip"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ LogicalPort string `ovsdb:"logical_port"`
+ MinRx int `ovsdb:"min_rx"`
+ MinTx int `ovsdb:"min_tx"`
+ Options map[string]string `ovsdb:"options"`
+ SrcPort int `ovsdb:"src_port"`
+ Status BFDStatus `ovsdb:"status"`
+}
+
+func (a *BFD) GetUUID() string {
+ return a.UUID
+}
+
+func (a *BFD) GetChassisName() string {
+ return a.ChassisName
+}
+
+func (a *BFD) GetDetectMult() int {
+ return a.DetectMult
+}
+
+func (a *BFD) GetDisc() int {
+ return a.Disc
+}
+
+func (a *BFD) GetDstIP() string {
+ return a.DstIP
+}
+
+func (a *BFD) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyBFDExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBFDExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *BFD) GetLogicalPort() string {
+ return a.LogicalPort
+}
+
+func (a *BFD) GetMinRx() int {
+ return a.MinRx
+}
+
+func (a *BFD) GetMinTx() int {
+ return a.MinTx
+}
+
+func (a *BFD) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyBFDOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalBFDOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *BFD) GetSrcPort() int {
+ return a.SrcPort
+}
+
+func (a *BFD) GetStatus() BFDStatus {
+ return a.Status
+}
+
+func (a *BFD) DeepCopyInto(b *BFD) {
+ *b = *a
+ b.ExternalIDs = copyBFDExternalIDs(a.ExternalIDs)
+ b.Options = copyBFDOptions(a.Options)
+}
+
+func (a *BFD) DeepCopy() *BFD {
+ b := new(BFD)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *BFD) CloneModelInto(b model.Model) {
+ c := b.(*BFD)
+ a.DeepCopyInto(c)
+}
+
+func (a *BFD) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *BFD) Equals(b *BFD) bool {
+ return a.UUID == b.UUID &&
+ a.ChassisName == b.ChassisName &&
+ a.DetectMult == b.DetectMult &&
+ a.Disc == b.Disc &&
+ a.DstIP == b.DstIP &&
+ equalBFDExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.LogicalPort == b.LogicalPort &&
+ a.MinRx == b.MinRx &&
+ a.MinTx == b.MinTx &&
+ equalBFDOptions(a.Options, b.Options) &&
+ a.SrcPort == b.SrcPort &&
+ a.Status == b.Status
+}
+
+func (a *BFD) EqualsModel(b model.Model) bool {
+ c := b.(*BFD)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &BFD{}
+var _ model.ComparableModel = &BFD{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go
new file mode 100644
index 000000000..3526f096f
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go
@@ -0,0 +1,225 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ChassisTable = "Chassis"
+
+// Chassis defines an object in Chassis table
+type Chassis struct {
+ UUID string `ovsdb:"_uuid"`
+ Encaps []string `ovsdb:"encaps"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Hostname string `ovsdb:"hostname"`
+ Name string `ovsdb:"name"`
+ NbCfg int `ovsdb:"nb_cfg"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+ TransportZones []string `ovsdb:"transport_zones"`
+ VtepLogicalSwitches []string `ovsdb:"vtep_logical_switches"`
+}
+
+func (a *Chassis) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Chassis) GetEncaps() []string {
+ return a.Encaps
+}
+
+func copyChassisEncaps(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalChassisEncaps(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Chassis) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyChassisExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalChassisExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Chassis) GetHostname() string {
+ return a.Hostname
+}
+
+func (a *Chassis) GetName() string {
+ return a.Name
+}
+
+func (a *Chassis) GetNbCfg() int {
+ return a.NbCfg
+}
+
+func (a *Chassis) GetOtherConfig() map[string]string {
+ return a.OtherConfig
+}
+
+func copyChassisOtherConfig(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalChassisOtherConfig(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Chassis) GetTransportZones() []string {
+ return a.TransportZones
+}
+
+func copyChassisTransportZones(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalChassisTransportZones(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Chassis) GetVtepLogicalSwitches() []string {
+ return a.VtepLogicalSwitches
+}
+
+func copyChassisVtepLogicalSwitches(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalChassisVtepLogicalSwitches(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Chassis) DeepCopyInto(b *Chassis) {
+ *b = *a
+ b.Encaps = copyChassisEncaps(a.Encaps)
+ b.ExternalIDs = copyChassisExternalIDs(a.ExternalIDs)
+ b.OtherConfig = copyChassisOtherConfig(a.OtherConfig)
+ b.TransportZones = copyChassisTransportZones(a.TransportZones)
+ b.VtepLogicalSwitches = copyChassisVtepLogicalSwitches(a.VtepLogicalSwitches)
+}
+
+func (a *Chassis) DeepCopy() *Chassis {
+ b := new(Chassis)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Chassis) CloneModelInto(b model.Model) {
+ c := b.(*Chassis)
+ a.DeepCopyInto(c)
+}
+
+func (a *Chassis) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Chassis) Equals(b *Chassis) bool {
+ return a.UUID == b.UUID &&
+ equalChassisEncaps(a.Encaps, b.Encaps) &&
+ equalChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Hostname == b.Hostname &&
+ a.Name == b.Name &&
+ a.NbCfg == b.NbCfg &&
+ equalChassisOtherConfig(a.OtherConfig, b.OtherConfig) &&
+ equalChassisTransportZones(a.TransportZones, b.TransportZones) &&
+ equalChassisVtepLogicalSwitches(a.VtepLogicalSwitches, b.VtepLogicalSwitches)
+}
+
+func (a *Chassis) EqualsModel(b model.Model) bool {
+ c := b.(*Chassis)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Chassis{}
+var _ model.ComparableModel = &Chassis{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go
new file mode 100644
index 000000000..1e8c3764b
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go
@@ -0,0 +1,124 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ChassisPrivateTable = "Chassis_Private"
+
+// ChassisPrivate defines an object in Chassis_Private table
+type ChassisPrivate struct {
+ UUID string `ovsdb:"_uuid"`
+ Chassis *string `ovsdb:"chassis"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Name string `ovsdb:"name"`
+ NbCfg int `ovsdb:"nb_cfg"`
+ NbCfgTimestamp int `ovsdb:"nb_cfg_timestamp"`
+}
+
+func (a *ChassisPrivate) GetUUID() string {
+ return a.UUID
+}
+
+func (a *ChassisPrivate) GetChassis() *string {
+ return a.Chassis
+}
+
+func copyChassisPrivateChassis(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalChassisPrivateChassis(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ChassisPrivate) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyChassisPrivateExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalChassisPrivateExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ChassisPrivate) GetName() string {
+ return a.Name
+}
+
+func (a *ChassisPrivate) GetNbCfg() int {
+ return a.NbCfg
+}
+
+func (a *ChassisPrivate) GetNbCfgTimestamp() int {
+ return a.NbCfgTimestamp
+}
+
+func (a *ChassisPrivate) DeepCopyInto(b *ChassisPrivate) {
+ *b = *a
+ b.Chassis = copyChassisPrivateChassis(a.Chassis)
+ b.ExternalIDs = copyChassisPrivateExternalIDs(a.ExternalIDs)
+}
+
+func (a *ChassisPrivate) DeepCopy() *ChassisPrivate {
+ b := new(ChassisPrivate)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *ChassisPrivate) CloneModelInto(b model.Model) {
+ c := b.(*ChassisPrivate)
+ a.DeepCopyInto(c)
+}
+
+func (a *ChassisPrivate) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *ChassisPrivate) Equals(b *ChassisPrivate) bool {
+ return a.UUID == b.UUID &&
+ equalChassisPrivateChassis(a.Chassis, b.Chassis) &&
+ equalChassisPrivateExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Name == b.Name &&
+ a.NbCfg == b.NbCfg &&
+ a.NbCfgTimestamp == b.NbCfgTimestamp
+}
+
+func (a *ChassisPrivate) EqualsModel(b model.Model) bool {
+ c := b.(*ChassisPrivate)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &ChassisPrivate{}
+var _ model.ComparableModel = &ChassisPrivate{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go
new file mode 100644
index 000000000..212e772be
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go
@@ -0,0 +1,87 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ChassisTemplateVarTable = "Chassis_Template_Var"
+
+// ChassisTemplateVar defines an object in Chassis_Template_Var table
+type ChassisTemplateVar struct {
+ UUID string `ovsdb:"_uuid"`
+ Chassis string `ovsdb:"chassis"`
+ Variables map[string]string `ovsdb:"variables"`
+}
+
+func (a *ChassisTemplateVar) GetUUID() string {
+ return a.UUID
+}
+
+func (a *ChassisTemplateVar) GetChassis() string {
+ return a.Chassis
+}
+
+func (a *ChassisTemplateVar) GetVariables() map[string]string {
+ return a.Variables
+}
+
+func copyChassisTemplateVarVariables(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalChassisTemplateVarVariables(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ChassisTemplateVar) DeepCopyInto(b *ChassisTemplateVar) {
+ *b = *a
+ b.Variables = copyChassisTemplateVarVariables(a.Variables)
+}
+
+func (a *ChassisTemplateVar) DeepCopy() *ChassisTemplateVar {
+ b := new(ChassisTemplateVar)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *ChassisTemplateVar) CloneModelInto(b model.Model) {
+ c := b.(*ChassisTemplateVar)
+ a.DeepCopyInto(c)
+}
+
+func (a *ChassisTemplateVar) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *ChassisTemplateVar) Equals(b *ChassisTemplateVar) bool {
+ return a.UUID == b.UUID &&
+ a.Chassis == b.Chassis &&
+ equalChassisTemplateVarVariables(a.Variables, b.Variables)
+}
+
+func (a *ChassisTemplateVar) EqualsModel(b model.Model) bool {
+ c := b.(*ChassisTemplateVar)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &ChassisTemplateVar{}
+var _ model.ComparableModel = &ChassisTemplateVar{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go
new file mode 100644
index 000000000..8f96f5422
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go
@@ -0,0 +1,221 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ConnectionTable = "Connection"
+
+// Connection defines an object in Connection table
+type Connection struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ InactivityProbe *int `ovsdb:"inactivity_probe"`
+ IsConnected bool `ovsdb:"is_connected"`
+ MaxBackoff *int `ovsdb:"max_backoff"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+ ReadOnly bool `ovsdb:"read_only"`
+ Role string `ovsdb:"role"`
+ Status map[string]string `ovsdb:"status"`
+ Target string `ovsdb:"target"`
+}
+
+func (a *Connection) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Connection) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyConnectionExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalConnectionExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Connection) GetInactivityProbe() *int {
+ return a.InactivityProbe
+}
+
+func copyConnectionInactivityProbe(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalConnectionInactivityProbe(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Connection) GetIsConnected() bool {
+ return a.IsConnected
+}
+
+func (a *Connection) GetMaxBackoff() *int {
+ return a.MaxBackoff
+}
+
+func copyConnectionMaxBackoff(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalConnectionMaxBackoff(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Connection) GetOtherConfig() map[string]string {
+ return a.OtherConfig
+}
+
+func copyConnectionOtherConfig(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalConnectionOtherConfig(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Connection) GetReadOnly() bool {
+ return a.ReadOnly
+}
+
+func (a *Connection) GetRole() string {
+ return a.Role
+}
+
+func (a *Connection) GetStatus() map[string]string {
+ return a.Status
+}
+
+func copyConnectionStatus(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalConnectionStatus(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Connection) GetTarget() string {
+ return a.Target
+}
+
+func (a *Connection) DeepCopyInto(b *Connection) {
+ *b = *a
+ b.ExternalIDs = copyConnectionExternalIDs(a.ExternalIDs)
+ b.InactivityProbe = copyConnectionInactivityProbe(a.InactivityProbe)
+ b.MaxBackoff = copyConnectionMaxBackoff(a.MaxBackoff)
+ b.OtherConfig = copyConnectionOtherConfig(a.OtherConfig)
+ b.Status = copyConnectionStatus(a.Status)
+}
+
+func (a *Connection) DeepCopy() *Connection {
+ b := new(Connection)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Connection) CloneModelInto(b model.Model) {
+ c := b.(*Connection)
+ a.DeepCopyInto(c)
+}
+
+func (a *Connection) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Connection) Equals(b *Connection) bool {
+ return a.UUID == b.UUID &&
+ equalConnectionExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalConnectionInactivityProbe(a.InactivityProbe, b.InactivityProbe) &&
+ a.IsConnected == b.IsConnected &&
+ equalConnectionMaxBackoff(a.MaxBackoff, b.MaxBackoff) &&
+ equalConnectionOtherConfig(a.OtherConfig, b.OtherConfig) &&
+ a.ReadOnly == b.ReadOnly &&
+ a.Role == b.Role &&
+ equalConnectionStatus(a.Status, b.Status) &&
+ a.Target == b.Target
+}
+
+func (a *Connection) EqualsModel(b model.Model) bool {
+ c := b.(*Connection)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Connection{}
+var _ model.ComparableModel = &Connection{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go
new file mode 100644
index 000000000..741ffd028
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go
@@ -0,0 +1,126 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ControllerEventTable = "Controller_Event"
+
+type (
+ ControllerEventEventType = string
+)
+
+var (
+ ControllerEventEventTypeEmptyLbBackends ControllerEventEventType = "empty_lb_backends"
+)
+
+// ControllerEvent defines an object in Controller_Event table
+type ControllerEvent struct {
+ UUID string `ovsdb:"_uuid"`
+ Chassis *string `ovsdb:"chassis"`
+ EventInfo map[string]string `ovsdb:"event_info"`
+ EventType ControllerEventEventType `ovsdb:"event_type"`
+ SeqNum int `ovsdb:"seq_num"`
+}
+
+func (a *ControllerEvent) GetUUID() string {
+ return a.UUID
+}
+
+func (a *ControllerEvent) GetChassis() *string {
+ return a.Chassis
+}
+
+func copyControllerEventChassis(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalControllerEventChassis(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ControllerEvent) GetEventInfo() map[string]string {
+ return a.EventInfo
+}
+
+func copyControllerEventEventInfo(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalControllerEventEventInfo(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ControllerEvent) GetEventType() ControllerEventEventType {
+ return a.EventType
+}
+
+func (a *ControllerEvent) GetSeqNum() int {
+ return a.SeqNum
+}
+
+func (a *ControllerEvent) DeepCopyInto(b *ControllerEvent) {
+ *b = *a
+ b.Chassis = copyControllerEventChassis(a.Chassis)
+ b.EventInfo = copyControllerEventEventInfo(a.EventInfo)
+}
+
+func (a *ControllerEvent) DeepCopy() *ControllerEvent {
+ b := new(ControllerEvent)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *ControllerEvent) CloneModelInto(b model.Model) {
+ c := b.(*ControllerEvent)
+ a.DeepCopyInto(c)
+}
+
+func (a *ControllerEvent) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *ControllerEvent) Equals(b *ControllerEvent) bool {
+ return a.UUID == b.UUID &&
+ equalControllerEventChassis(a.Chassis, b.Chassis) &&
+ equalControllerEventEventInfo(a.EventInfo, b.EventInfo) &&
+ a.EventType == b.EventType &&
+ a.SeqNum == b.SeqNum
+}
+
+func (a *ControllerEvent) EqualsModel(b model.Model) bool {
+ c := b.(*ControllerEvent)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &ControllerEvent{}
+var _ model.ComparableModel = &ControllerEvent{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go
new file mode 100644
index 000000000..10247286f
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go
@@ -0,0 +1,118 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DatapathBindingTable = "Datapath_Binding"
+
+// DatapathBinding defines an object in Datapath_Binding table
+type DatapathBinding struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ LoadBalancers []string `ovsdb:"load_balancers"`
+ TunnelKey int `ovsdb:"tunnel_key"`
+}
+
+func (a *DatapathBinding) GetUUID() string {
+ return a.UUID
+}
+
+func (a *DatapathBinding) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyDatapathBindingExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDatapathBindingExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DatapathBinding) GetLoadBalancers() []string {
+ return a.LoadBalancers
+}
+
+func copyDatapathBindingLoadBalancers(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalDatapathBindingLoadBalancers(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DatapathBinding) GetTunnelKey() int {
+ return a.TunnelKey
+}
+
+func (a *DatapathBinding) DeepCopyInto(b *DatapathBinding) {
+ *b = *a
+ b.ExternalIDs = copyDatapathBindingExternalIDs(a.ExternalIDs)
+ b.LoadBalancers = copyDatapathBindingLoadBalancers(a.LoadBalancers)
+}
+
+func (a *DatapathBinding) DeepCopy() *DatapathBinding {
+ b := new(DatapathBinding)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *DatapathBinding) CloneModelInto(b model.Model) {
+ c := b.(*DatapathBinding)
+ a.DeepCopyInto(c)
+}
+
+func (a *DatapathBinding) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *DatapathBinding) Equals(b *DatapathBinding) bool {
+ return a.UUID == b.UUID &&
+ equalDatapathBindingExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalDatapathBindingLoadBalancers(a.LoadBalancers, b.LoadBalancers) &&
+ a.TunnelKey == b.TunnelKey
+}
+
+func (a *DatapathBinding) EqualsModel(b model.Model) bool {
+ c := b.(*DatapathBinding)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &DatapathBinding{}
+var _ model.ComparableModel = &DatapathBinding{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go
new file mode 100644
index 000000000..e9ec44ce2
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go
@@ -0,0 +1,82 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DHCPOptionsTable = "DHCP_Options"
+
+type (
+ DHCPOptionsType = string
+)
+
+var (
+ DHCPOptionsTypeBool DHCPOptionsType = "bool"
+ DHCPOptionsTypeUint8 DHCPOptionsType = "uint8"
+ DHCPOptionsTypeUint16 DHCPOptionsType = "uint16"
+ DHCPOptionsTypeUint32 DHCPOptionsType = "uint32"
+ DHCPOptionsTypeIpv4 DHCPOptionsType = "ipv4"
+ DHCPOptionsTypeStaticRoutes DHCPOptionsType = "static_routes"
+ DHCPOptionsTypeStr DHCPOptionsType = "str"
+ DHCPOptionsTypeHostID DHCPOptionsType = "host_id"
+ DHCPOptionsTypeDomains DHCPOptionsType = "domains"
+)
+
+// DHCPOptions defines an object in DHCP_Options table
+type DHCPOptions struct {
+ UUID string `ovsdb:"_uuid"`
+ Code int `ovsdb:"code"`
+ Name string `ovsdb:"name"`
+ Type DHCPOptionsType `ovsdb:"type"`
+}
+
+func (a *DHCPOptions) GetUUID() string {
+ return a.UUID
+}
+
+func (a *DHCPOptions) GetCode() int {
+ return a.Code
+}
+
+func (a *DHCPOptions) GetName() string {
+ return a.Name
+}
+
+func (a *DHCPOptions) GetType() DHCPOptionsType {
+ return a.Type
+}
+
+func (a *DHCPOptions) DeepCopyInto(b *DHCPOptions) {
+ *b = *a
+}
+
+func (a *DHCPOptions) DeepCopy() *DHCPOptions {
+ b := new(DHCPOptions)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *DHCPOptions) CloneModelInto(b model.Model) {
+ c := b.(*DHCPOptions)
+ a.DeepCopyInto(c)
+}
+
+func (a *DHCPOptions) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *DHCPOptions) Equals(b *DHCPOptions) bool {
+ return a.UUID == b.UUID &&
+ a.Code == b.Code &&
+ a.Name == b.Name &&
+ a.Type == b.Type
+}
+
+func (a *DHCPOptions) EqualsModel(b model.Model) bool {
+ c := b.(*DHCPOptions)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &DHCPOptions{}
+var _ model.ComparableModel = &DHCPOptions{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go
new file mode 100644
index 000000000..908d1e0ad
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go
@@ -0,0 +1,77 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DHCPv6OptionsTable = "DHCPv6_Options"
+
+type (
+ DHCPv6OptionsType = string
+)
+
+var (
+ DHCPv6OptionsTypeIpv6 DHCPv6OptionsType = "ipv6"
+ DHCPv6OptionsTypeStr DHCPv6OptionsType = "str"
+ DHCPv6OptionsTypeMAC DHCPv6OptionsType = "mac"
+ DHCPv6OptionsTypeDomain DHCPv6OptionsType = "domain"
+)
+
+// DHCPv6Options defines an object in DHCPv6_Options table
+type DHCPv6Options struct {
+ UUID string `ovsdb:"_uuid"`
+ Code int `ovsdb:"code"`
+ Name string `ovsdb:"name"`
+ Type DHCPv6OptionsType `ovsdb:"type"`
+}
+
+func (a *DHCPv6Options) GetUUID() string {
+ return a.UUID
+}
+
+func (a *DHCPv6Options) GetCode() int {
+ return a.Code
+}
+
+func (a *DHCPv6Options) GetName() string {
+ return a.Name
+}
+
+func (a *DHCPv6Options) GetType() DHCPv6OptionsType {
+ return a.Type
+}
+
+func (a *DHCPv6Options) DeepCopyInto(b *DHCPv6Options) {
+ *b = *a
+}
+
+func (a *DHCPv6Options) DeepCopy() *DHCPv6Options {
+ b := new(DHCPv6Options)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *DHCPv6Options) CloneModelInto(b model.Model) {
+ c := b.(*DHCPv6Options)
+ a.DeepCopyInto(c)
+}
+
+func (a *DHCPv6Options) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *DHCPv6Options) Equals(b *DHCPv6Options) bool {
+ return a.UUID == b.UUID &&
+ a.Code == b.Code &&
+ a.Name == b.Name &&
+ a.Type == b.Type
+}
+
+func (a *DHCPv6Options) EqualsModel(b model.Model) bool {
+ c := b.(*DHCPv6Options)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &DHCPv6Options{}
+var _ model.ComparableModel = &DHCPv6Options{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go
new file mode 100644
index 000000000..95c0a52d1
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go
@@ -0,0 +1,178 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DNSTable = "DNS"
+
+// DNS defines an object in DNS table
+type DNS struct {
+ UUID string `ovsdb:"_uuid"`
+ Datapaths []string `ovsdb:"datapaths"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Options map[string]string `ovsdb:"options"`
+ Records map[string]string `ovsdb:"records"`
+}
+
+func (a *DNS) GetUUID() string {
+ return a.UUID
+}
+
+func (a *DNS) GetDatapaths() []string {
+ return a.Datapaths
+}
+
+func copyDNSDatapaths(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalDNSDatapaths(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DNS) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyDNSExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDNSExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DNS) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyDNSOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDNSOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DNS) GetRecords() map[string]string {
+ return a.Records
+}
+
+func copyDNSRecords(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalDNSRecords(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *DNS) DeepCopyInto(b *DNS) {
+ *b = *a
+ b.Datapaths = copyDNSDatapaths(a.Datapaths)
+ b.ExternalIDs = copyDNSExternalIDs(a.ExternalIDs)
+ b.Options = copyDNSOptions(a.Options)
+ b.Records = copyDNSRecords(a.Records)
+}
+
+func (a *DNS) DeepCopy() *DNS {
+ b := new(DNS)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *DNS) CloneModelInto(b model.Model) {
+ c := b.(*DNS)
+ a.DeepCopyInto(c)
+}
+
+func (a *DNS) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *DNS) Equals(b *DNS) bool {
+ return a.UUID == b.UUID &&
+ equalDNSDatapaths(a.Datapaths, b.Datapaths) &&
+ equalDNSExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalDNSOptions(a.Options, b.Options) &&
+ equalDNSRecords(a.Records, b.Records)
+}
+
+func (a *DNS) EqualsModel(b model.Model) bool {
+ c := b.(*DNS)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &DNS{}
+var _ model.ComparableModel = &DNS{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go
new file mode 100644
index 000000000..9a2f17fba
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go
@@ -0,0 +1,109 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const EncapTable = "Encap"
+
+type (
+ EncapType = string
+)
+
+var (
+ EncapTypeGeneve EncapType = "geneve"
+ EncapTypeSTT EncapType = "stt"
+ EncapTypeVxlan EncapType = "vxlan"
+)
+
+// Encap defines an object in Encap table
+type Encap struct {
+ UUID string `ovsdb:"_uuid"`
+ ChassisName string `ovsdb:"chassis_name"`
+ IP string `ovsdb:"ip"`
+ Options map[string]string `ovsdb:"options"`
+ Type EncapType `ovsdb:"type"`
+}
+
+func (a *Encap) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Encap) GetChassisName() string {
+ return a.ChassisName
+}
+
+func (a *Encap) GetIP() string {
+ return a.IP
+}
+
+func (a *Encap) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyEncapOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalEncapOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Encap) GetType() EncapType {
+ return a.Type
+}
+
+func (a *Encap) DeepCopyInto(b *Encap) {
+ *b = *a
+ b.Options = copyEncapOptions(a.Options)
+}
+
+func (a *Encap) DeepCopy() *Encap {
+ b := new(Encap)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Encap) CloneModelInto(b model.Model) {
+ c := b.(*Encap)
+ a.DeepCopyInto(c)
+}
+
+func (a *Encap) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Encap) Equals(b *Encap) bool {
+ return a.UUID == b.UUID &&
+ a.ChassisName == b.ChassisName &&
+ a.IP == b.IP &&
+ equalEncapOptions(a.Options, b.Options) &&
+ a.Type == b.Type
+}
+
+func (a *Encap) EqualsModel(b model.Model) bool {
+ c := b.(*Encap)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Encap{}
+var _ model.ComparableModel = &Encap{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go
new file mode 100644
index 000000000..8253e7059
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go
@@ -0,0 +1,72 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const FDBTable = "FDB"
+
+// FDB defines an object in FDB table
+type FDB struct {
+ UUID string `ovsdb:"_uuid"`
+ DpKey int `ovsdb:"dp_key"`
+ MAC string `ovsdb:"mac"`
+ PortKey int `ovsdb:"port_key"`
+ Timestamp int `ovsdb:"timestamp"`
+}
+
+func (a *FDB) GetUUID() string {
+ return a.UUID
+}
+
+func (a *FDB) GetDpKey() int {
+ return a.DpKey
+}
+
+func (a *FDB) GetMAC() string {
+ return a.MAC
+}
+
+func (a *FDB) GetPortKey() int {
+ return a.PortKey
+}
+
+func (a *FDB) GetTimestamp() int {
+ return a.Timestamp
+}
+
+func (a *FDB) DeepCopyInto(b *FDB) {
+ *b = *a
+}
+
+func (a *FDB) DeepCopy() *FDB {
+ b := new(FDB)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *FDB) CloneModelInto(b model.Model) {
+ c := b.(*FDB)
+ a.DeepCopyInto(c)
+}
+
+func (a *FDB) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *FDB) Equals(b *FDB) bool {
+ return a.UUID == b.UUID &&
+ a.DpKey == b.DpKey &&
+ a.MAC == b.MAC &&
+ a.PortKey == b.PortKey &&
+ a.Timestamp == b.Timestamp
+}
+
+func (a *FDB) EqualsModel(b model.Model) bool {
+ c := b.(*FDB)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &FDB{}
+var _ model.ComparableModel = &FDB{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go
new file mode 100644
index 000000000..a84ad7fc4
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go
@@ -0,0 +1,151 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const GatewayChassisTable = "Gateway_Chassis"
+
+// GatewayChassis defines an object in Gateway_Chassis table
+type GatewayChassis struct {
+ UUID string `ovsdb:"_uuid"`
+ Chassis *string `ovsdb:"chassis"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Name string `ovsdb:"name"`
+ Options map[string]string `ovsdb:"options"`
+ Priority int `ovsdb:"priority"`
+}
+
+func (a *GatewayChassis) GetUUID() string {
+ return a.UUID
+}
+
+func (a *GatewayChassis) GetChassis() *string {
+ return a.Chassis
+}
+
+func copyGatewayChassisChassis(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalGatewayChassisChassis(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *GatewayChassis) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyGatewayChassisExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalGatewayChassisExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *GatewayChassis) GetName() string {
+ return a.Name
+}
+
+func (a *GatewayChassis) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyGatewayChassisOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalGatewayChassisOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *GatewayChassis) GetPriority() int {
+ return a.Priority
+}
+
+func (a *GatewayChassis) DeepCopyInto(b *GatewayChassis) {
+ *b = *a
+ b.Chassis = copyGatewayChassisChassis(a.Chassis)
+ b.ExternalIDs = copyGatewayChassisExternalIDs(a.ExternalIDs)
+ b.Options = copyGatewayChassisOptions(a.Options)
+}
+
+func (a *GatewayChassis) DeepCopy() *GatewayChassis {
+ b := new(GatewayChassis)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *GatewayChassis) CloneModelInto(b model.Model) {
+ c := b.(*GatewayChassis)
+ a.DeepCopyInto(c)
+}
+
+func (a *GatewayChassis) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *GatewayChassis) Equals(b *GatewayChassis) bool {
+ return a.UUID == b.UUID &&
+ equalGatewayChassisChassis(a.Chassis, b.Chassis) &&
+ equalGatewayChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Name == b.Name &&
+ equalGatewayChassisOptions(a.Options, b.Options) &&
+ a.Priority == b.Priority
+}
+
+func (a *GatewayChassis) EqualsModel(b model.Model) bool {
+ c := b.(*GatewayChassis)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &GatewayChassis{}
+var _ model.ComparableModel = &GatewayChassis{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go
new file mode 100644
index 000000000..6507d071e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go
@@ -0,0 +1,3 @@
+package sbdb
+
+//go:generate modelgen --extended -p sbdb -o . ovn-sb.ovsschema
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go
new file mode 100644
index 000000000..b0b3cebbb
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go
@@ -0,0 +1,112 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const HAChassisTable = "HA_Chassis"
+
+// HAChassis defines an object in HA_Chassis table
+type HAChassis struct {
+ UUID string `ovsdb:"_uuid"`
+ Chassis *string `ovsdb:"chassis"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Priority int `ovsdb:"priority"`
+}
+
+func (a *HAChassis) GetUUID() string {
+ return a.UUID
+}
+
+func (a *HAChassis) GetChassis() *string {
+ return a.Chassis
+}
+
+func copyHAChassisChassis(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalHAChassisChassis(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *HAChassis) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyHAChassisExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalHAChassisExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *HAChassis) GetPriority() int {
+ return a.Priority
+}
+
+func (a *HAChassis) DeepCopyInto(b *HAChassis) {
+ *b = *a
+ b.Chassis = copyHAChassisChassis(a.Chassis)
+ b.ExternalIDs = copyHAChassisExternalIDs(a.ExternalIDs)
+}
+
+func (a *HAChassis) DeepCopy() *HAChassis {
+ b := new(HAChassis)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *HAChassis) CloneModelInto(b model.Model) {
+ c := b.(*HAChassis)
+ a.DeepCopyInto(c)
+}
+
+func (a *HAChassis) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *HAChassis) Equals(b *HAChassis) bool {
+ return a.UUID == b.UUID &&
+ equalHAChassisChassis(a.Chassis, b.Chassis) &&
+ equalHAChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Priority == b.Priority
+}
+
+func (a *HAChassis) EqualsModel(b model.Model) bool {
+ c := b.(*HAChassis)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &HAChassis{}
+var _ model.ComparableModel = &HAChassis{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go
new file mode 100644
index 000000000..1cc013c70
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go
@@ -0,0 +1,149 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const HAChassisGroupTable = "HA_Chassis_Group"
+
+// HAChassisGroup defines an object in HA_Chassis_Group table
+type HAChassisGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ HaChassis []string `ovsdb:"ha_chassis"`
+ Name string `ovsdb:"name"`
+ RefChassis []string `ovsdb:"ref_chassis"`
+}
+
+func (a *HAChassisGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *HAChassisGroup) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyHAChassisGroupExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalHAChassisGroupExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *HAChassisGroup) GetHaChassis() []string {
+ return a.HaChassis
+}
+
+func copyHAChassisGroupHaChassis(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalHAChassisGroupHaChassis(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *HAChassisGroup) GetName() string {
+ return a.Name
+}
+
+func (a *HAChassisGroup) GetRefChassis() []string {
+ return a.RefChassis
+}
+
+func copyHAChassisGroupRefChassis(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalHAChassisGroupRefChassis(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *HAChassisGroup) DeepCopyInto(b *HAChassisGroup) {
+ *b = *a
+ b.ExternalIDs = copyHAChassisGroupExternalIDs(a.ExternalIDs)
+ b.HaChassis = copyHAChassisGroupHaChassis(a.HaChassis)
+ b.RefChassis = copyHAChassisGroupRefChassis(a.RefChassis)
+}
+
+func (a *HAChassisGroup) DeepCopy() *HAChassisGroup {
+ b := new(HAChassisGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *HAChassisGroup) CloneModelInto(b model.Model) {
+ c := b.(*HAChassisGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *HAChassisGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *HAChassisGroup) Equals(b *HAChassisGroup) bool {
+ return a.UUID == b.UUID &&
+ equalHAChassisGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalHAChassisGroupHaChassis(a.HaChassis, b.HaChassis) &&
+ a.Name == b.Name &&
+ equalHAChassisGroupRefChassis(a.RefChassis, b.RefChassis)
+}
+
+func (a *HAChassisGroup) EqualsModel(b model.Model) bool {
+ c := b.(*HAChassisGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &HAChassisGroup{}
+var _ model.ComparableModel = &HAChassisGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go
new file mode 100644
index 000000000..73a0bb943
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go
@@ -0,0 +1,147 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const IGMPGroupTable = "IGMP_Group"
+
+// IGMPGroup defines an object in IGMP_Group table
+type IGMPGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ Address string `ovsdb:"address"`
+ Chassis *string `ovsdb:"chassis"`
+ ChassisName string `ovsdb:"chassis_name"`
+ Datapath *string `ovsdb:"datapath"`
+ Ports []string `ovsdb:"ports"`
+ Protocol string `ovsdb:"protocol"`
+}
+
+func (a *IGMPGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *IGMPGroup) GetAddress() string {
+ return a.Address
+}
+
+func (a *IGMPGroup) GetChassis() *string {
+ return a.Chassis
+}
+
+func copyIGMPGroupChassis(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalIGMPGroupChassis(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *IGMPGroup) GetChassisName() string {
+ return a.ChassisName
+}
+
+func (a *IGMPGroup) GetDatapath() *string {
+ return a.Datapath
+}
+
+func copyIGMPGroupDatapath(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalIGMPGroupDatapath(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *IGMPGroup) GetPorts() []string {
+ return a.Ports
+}
+
+func copyIGMPGroupPorts(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalIGMPGroupPorts(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *IGMPGroup) GetProtocol() string {
+ return a.Protocol
+}
+
+func (a *IGMPGroup) DeepCopyInto(b *IGMPGroup) {
+ *b = *a
+ b.Chassis = copyIGMPGroupChassis(a.Chassis)
+ b.Datapath = copyIGMPGroupDatapath(a.Datapath)
+ b.Ports = copyIGMPGroupPorts(a.Ports)
+}
+
+func (a *IGMPGroup) DeepCopy() *IGMPGroup {
+ b := new(IGMPGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *IGMPGroup) CloneModelInto(b model.Model) {
+ c := b.(*IGMPGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *IGMPGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *IGMPGroup) Equals(b *IGMPGroup) bool {
+ return a.UUID == b.UUID &&
+ a.Address == b.Address &&
+ equalIGMPGroupChassis(a.Chassis, b.Chassis) &&
+ a.ChassisName == b.ChassisName &&
+ equalIGMPGroupDatapath(a.Datapath, b.Datapath) &&
+ equalIGMPGroupPorts(a.Ports, b.Ports) &&
+ a.Protocol == b.Protocol
+}
+
+func (a *IGMPGroup) EqualsModel(b model.Model) bool {
+ c := b.(*IGMPGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &IGMPGroup{}
+var _ model.ComparableModel = &IGMPGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go
new file mode 100644
index 000000000..493cd342d
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go
@@ -0,0 +1,228 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const IPMulticastTable = "IP_Multicast"
+
+// IPMulticast defines an object in IP_Multicast table
+type IPMulticast struct {
+ UUID string `ovsdb:"_uuid"`
+ Datapath string `ovsdb:"datapath"`
+ Enabled *bool `ovsdb:"enabled"`
+ EthSrc string `ovsdb:"eth_src"`
+ IdleTimeout *int `ovsdb:"idle_timeout"`
+ Ip4Src string `ovsdb:"ip4_src"`
+ Ip6Src string `ovsdb:"ip6_src"`
+ Querier *bool `ovsdb:"querier"`
+ QueryInterval *int `ovsdb:"query_interval"`
+ QueryMaxResp *int `ovsdb:"query_max_resp"`
+ SeqNo int `ovsdb:"seq_no"`
+ TableSize *int `ovsdb:"table_size"`
+}
+
+func (a *IPMulticast) GetUUID() string {
+ return a.UUID
+}
+
+func (a *IPMulticast) GetDatapath() string {
+ return a.Datapath
+}
+
+func (a *IPMulticast) GetEnabled() *bool {
+ return a.Enabled
+}
+
+func copyIPMulticastEnabled(a *bool) *bool {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalIPMulticastEnabled(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *IPMulticast) GetEthSrc() string {
+ return a.EthSrc
+}
+
+func (a *IPMulticast) GetIdleTimeout() *int {
+ return a.IdleTimeout
+}
+
+func copyIPMulticastIdleTimeout(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalIPMulticastIdleTimeout(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *IPMulticast) GetIp4Src() string {
+ return a.Ip4Src
+}
+
+func (a *IPMulticast) GetIp6Src() string {
+ return a.Ip6Src
+}
+
+func (a *IPMulticast) GetQuerier() *bool {
+ return a.Querier
+}
+
+func copyIPMulticastQuerier(a *bool) *bool {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalIPMulticastQuerier(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *IPMulticast) GetQueryInterval() *int {
+ return a.QueryInterval
+}
+
+func copyIPMulticastQueryInterval(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalIPMulticastQueryInterval(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *IPMulticast) GetQueryMaxResp() *int {
+ return a.QueryMaxResp
+}
+
+func copyIPMulticastQueryMaxResp(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalIPMulticastQueryMaxResp(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *IPMulticast) GetSeqNo() int {
+ return a.SeqNo
+}
+
+func (a *IPMulticast) GetTableSize() *int {
+ return a.TableSize
+}
+
+func copyIPMulticastTableSize(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalIPMulticastTableSize(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *IPMulticast) DeepCopyInto(b *IPMulticast) {
+ *b = *a
+ b.Enabled = copyIPMulticastEnabled(a.Enabled)
+ b.IdleTimeout = copyIPMulticastIdleTimeout(a.IdleTimeout)
+ b.Querier = copyIPMulticastQuerier(a.Querier)
+ b.QueryInterval = copyIPMulticastQueryInterval(a.QueryInterval)
+ b.QueryMaxResp = copyIPMulticastQueryMaxResp(a.QueryMaxResp)
+ b.TableSize = copyIPMulticastTableSize(a.TableSize)
+}
+
+func (a *IPMulticast) DeepCopy() *IPMulticast {
+ b := new(IPMulticast)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *IPMulticast) CloneModelInto(b model.Model) {
+ c := b.(*IPMulticast)
+ a.DeepCopyInto(c)
+}
+
+func (a *IPMulticast) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *IPMulticast) Equals(b *IPMulticast) bool {
+ return a.UUID == b.UUID &&
+ a.Datapath == b.Datapath &&
+ equalIPMulticastEnabled(a.Enabled, b.Enabled) &&
+ a.EthSrc == b.EthSrc &&
+ equalIPMulticastIdleTimeout(a.IdleTimeout, b.IdleTimeout) &&
+ a.Ip4Src == b.Ip4Src &&
+ a.Ip6Src == b.Ip6Src &&
+ equalIPMulticastQuerier(a.Querier, b.Querier) &&
+ equalIPMulticastQueryInterval(a.QueryInterval, b.QueryInterval) &&
+ equalIPMulticastQueryMaxResp(a.QueryMaxResp, b.QueryMaxResp) &&
+ a.SeqNo == b.SeqNo &&
+ equalIPMulticastTableSize(a.TableSize, b.TableSize)
+}
+
+func (a *IPMulticast) EqualsModel(b model.Model) bool {
+ c := b.(*IPMulticast)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &IPMulticast{}
+var _ model.ComparableModel = &IPMulticast{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go
new file mode 100644
index 000000000..bc341807e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go
@@ -0,0 +1,294 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LoadBalancerTable = "Load_Balancer"
+
+type (
+ LoadBalancerProtocol = string
+)
+
+var (
+ LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp"
+ LoadBalancerProtocolUDP LoadBalancerProtocol = "udp"
+ LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp"
+)
+
+// LoadBalancer defines an object in Load_Balancer table
+type LoadBalancer struct {
+ UUID string `ovsdb:"_uuid"`
+ DatapathGroup *string `ovsdb:"datapath_group"`
+ Datapaths []string `ovsdb:"datapaths"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ LrDatapathGroup *string `ovsdb:"lr_datapath_group"`
+ LsDatapathGroup *string `ovsdb:"ls_datapath_group"`
+ Name string `ovsdb:"name"`
+ Options map[string]string `ovsdb:"options"`
+ Protocol *LoadBalancerProtocol `ovsdb:"protocol"`
+ Vips map[string]string `ovsdb:"vips"`
+}
+
+func (a *LoadBalancer) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LoadBalancer) GetDatapathGroup() *string {
+ return a.DatapathGroup
+}
+
+func copyLoadBalancerDatapathGroup(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLoadBalancerDatapathGroup(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LoadBalancer) GetDatapaths() []string {
+ return a.Datapaths
+}
+
+func copyLoadBalancerDatapaths(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLoadBalancerDatapaths(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLoadBalancerExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) GetLrDatapathGroup() *string {
+ return a.LrDatapathGroup
+}
+
+func copyLoadBalancerLrDatapathGroup(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLoadBalancerLrDatapathGroup(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LoadBalancer) GetLsDatapathGroup() *string {
+ return a.LsDatapathGroup
+}
+
+func copyLoadBalancerLsDatapathGroup(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLoadBalancerLsDatapathGroup(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LoadBalancer) GetName() string {
+ return a.Name
+}
+
+func (a *LoadBalancer) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyLoadBalancerOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) GetProtocol() *LoadBalancerProtocol {
+ return a.Protocol
+}
+
+func copyLoadBalancerProtocol(a *LoadBalancerProtocol) *LoadBalancerProtocol {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLoadBalancerProtocol(a, b *LoadBalancerProtocol) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LoadBalancer) GetVips() map[string]string {
+ return a.Vips
+}
+
+func copyLoadBalancerVips(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLoadBalancerVips(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LoadBalancer) DeepCopyInto(b *LoadBalancer) {
+ *b = *a
+ b.DatapathGroup = copyLoadBalancerDatapathGroup(a.DatapathGroup)
+ b.Datapaths = copyLoadBalancerDatapaths(a.Datapaths)
+ b.ExternalIDs = copyLoadBalancerExternalIDs(a.ExternalIDs)
+ b.LrDatapathGroup = copyLoadBalancerLrDatapathGroup(a.LrDatapathGroup)
+ b.LsDatapathGroup = copyLoadBalancerLsDatapathGroup(a.LsDatapathGroup)
+ b.Options = copyLoadBalancerOptions(a.Options)
+ b.Protocol = copyLoadBalancerProtocol(a.Protocol)
+ b.Vips = copyLoadBalancerVips(a.Vips)
+}
+
+func (a *LoadBalancer) DeepCopy() *LoadBalancer {
+ b := new(LoadBalancer)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LoadBalancer) CloneModelInto(b model.Model) {
+ c := b.(*LoadBalancer)
+ a.DeepCopyInto(c)
+}
+
+func (a *LoadBalancer) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LoadBalancer) Equals(b *LoadBalancer) bool {
+ return a.UUID == b.UUID &&
+ equalLoadBalancerDatapathGroup(a.DatapathGroup, b.DatapathGroup) &&
+ equalLoadBalancerDatapaths(a.Datapaths, b.Datapaths) &&
+ equalLoadBalancerExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalLoadBalancerLrDatapathGroup(a.LrDatapathGroup, b.LrDatapathGroup) &&
+ equalLoadBalancerLsDatapathGroup(a.LsDatapathGroup, b.LsDatapathGroup) &&
+ a.Name == b.Name &&
+ equalLoadBalancerOptions(a.Options, b.Options) &&
+ equalLoadBalancerProtocol(a.Protocol, b.Protocol) &&
+ equalLoadBalancerVips(a.Vips, b.Vips)
+}
+
+func (a *LoadBalancer) EqualsModel(b model.Model) bool {
+ c := b.(*LoadBalancer)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LoadBalancer{}
+var _ model.ComparableModel = &LoadBalancer{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go
new file mode 100644
index 000000000..911de2eed
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go
@@ -0,0 +1,79 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LogicalDPGroupTable = "Logical_DP_Group"
+
+// LogicalDPGroup defines an object in Logical_DP_Group table
+type LogicalDPGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ Datapaths []string `ovsdb:"datapaths"`
+}
+
+func (a *LogicalDPGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LogicalDPGroup) GetDatapaths() []string {
+ return a.Datapaths
+}
+
+func copyLogicalDPGroupDatapaths(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalLogicalDPGroupDatapaths(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalDPGroup) DeepCopyInto(b *LogicalDPGroup) {
+ *b = *a
+ b.Datapaths = copyLogicalDPGroupDatapaths(a.Datapaths)
+}
+
+func (a *LogicalDPGroup) DeepCopy() *LogicalDPGroup {
+ b := new(LogicalDPGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LogicalDPGroup) CloneModelInto(b model.Model) {
+ c := b.(*LogicalDPGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *LogicalDPGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LogicalDPGroup) Equals(b *LogicalDPGroup) bool {
+ return a.UUID == b.UUID &&
+ equalLogicalDPGroupDatapaths(a.Datapaths, b.Datapaths)
+}
+
+func (a *LogicalDPGroup) EqualsModel(b model.Model) bool {
+ c := b.(*LogicalDPGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LogicalDPGroup{}
+var _ model.ComparableModel = &LogicalDPGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go
new file mode 100644
index 000000000..42af1cdf5
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go
@@ -0,0 +1,253 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const LogicalFlowTable = "Logical_Flow"
+
+type (
+ LogicalFlowPipeline = string
+)
+
+var (
+ LogicalFlowPipelineIngress LogicalFlowPipeline = "ingress"
+ LogicalFlowPipelineEgress LogicalFlowPipeline = "egress"
+)
+
+// LogicalFlow defines an object in Logical_Flow table
+type LogicalFlow struct {
+ UUID string `ovsdb:"_uuid"`
+ Actions string `ovsdb:"actions"`
+ ControllerMeter *string `ovsdb:"controller_meter"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ FlowDesc *string `ovsdb:"flow_desc"`
+ LogicalDatapath *string `ovsdb:"logical_datapath"`
+ LogicalDpGroup *string `ovsdb:"logical_dp_group"`
+ Match string `ovsdb:"match"`
+ Pipeline LogicalFlowPipeline `ovsdb:"pipeline"`
+ Priority int `ovsdb:"priority"`
+ TableID int `ovsdb:"table_id"`
+ Tags map[string]string `ovsdb:"tags"`
+}
+
+func (a *LogicalFlow) GetUUID() string {
+ return a.UUID
+}
+
+func (a *LogicalFlow) GetActions() string {
+ return a.Actions
+}
+
+func (a *LogicalFlow) GetControllerMeter() *string {
+ return a.ControllerMeter
+}
+
+func copyLogicalFlowControllerMeter(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalFlowControllerMeter(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalFlow) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyLogicalFlowExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalFlowExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalFlow) GetFlowDesc() *string {
+ return a.FlowDesc
+}
+
+func copyLogicalFlowFlowDesc(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalFlowFlowDesc(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalFlow) GetLogicalDatapath() *string {
+ return a.LogicalDatapath
+}
+
+func copyLogicalFlowLogicalDatapath(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalFlowLogicalDatapath(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalFlow) GetLogicalDpGroup() *string {
+ return a.LogicalDpGroup
+}
+
+func copyLogicalFlowLogicalDpGroup(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalLogicalFlowLogicalDpGroup(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *LogicalFlow) GetMatch() string {
+ return a.Match
+}
+
+func (a *LogicalFlow) GetPipeline() LogicalFlowPipeline {
+ return a.Pipeline
+}
+
+func (a *LogicalFlow) GetPriority() int {
+ return a.Priority
+}
+
+func (a *LogicalFlow) GetTableID() int {
+ return a.TableID
+}
+
+func (a *LogicalFlow) GetTags() map[string]string {
+ return a.Tags
+}
+
+func copyLogicalFlowTags(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalLogicalFlowTags(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *LogicalFlow) DeepCopyInto(b *LogicalFlow) {
+ *b = *a
+ b.ControllerMeter = copyLogicalFlowControllerMeter(a.ControllerMeter)
+ b.ExternalIDs = copyLogicalFlowExternalIDs(a.ExternalIDs)
+ b.FlowDesc = copyLogicalFlowFlowDesc(a.FlowDesc)
+ b.LogicalDatapath = copyLogicalFlowLogicalDatapath(a.LogicalDatapath)
+ b.LogicalDpGroup = copyLogicalFlowLogicalDpGroup(a.LogicalDpGroup)
+ b.Tags = copyLogicalFlowTags(a.Tags)
+}
+
+func (a *LogicalFlow) DeepCopy() *LogicalFlow {
+ b := new(LogicalFlow)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *LogicalFlow) CloneModelInto(b model.Model) {
+ c := b.(*LogicalFlow)
+ a.DeepCopyInto(c)
+}
+
+func (a *LogicalFlow) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *LogicalFlow) Equals(b *LogicalFlow) bool {
+ return a.UUID == b.UUID &&
+ a.Actions == b.Actions &&
+ equalLogicalFlowControllerMeter(a.ControllerMeter, b.ControllerMeter) &&
+ equalLogicalFlowExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalLogicalFlowFlowDesc(a.FlowDesc, b.FlowDesc) &&
+ equalLogicalFlowLogicalDatapath(a.LogicalDatapath, b.LogicalDatapath) &&
+ equalLogicalFlowLogicalDpGroup(a.LogicalDpGroup, b.LogicalDpGroup) &&
+ a.Match == b.Match &&
+ a.Pipeline == b.Pipeline &&
+ a.Priority == b.Priority &&
+ a.TableID == b.TableID &&
+ equalLogicalFlowTags(a.Tags, b.Tags)
+}
+
+func (a *LogicalFlow) EqualsModel(b model.Model) bool {
+ c := b.(*LogicalFlow)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &LogicalFlow{}
+var _ model.ComparableModel = &LogicalFlow{}
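
The per-field copy and equality helpers generated above exist so that DeepCopy produces fully independent map and pointer fields rather than sharing them with the source row, and so that Equals compares by value rather than by reference. A minimal sketch of how a consumer might rely on that behaviour (all field values and the UUID below are illustrative placeholders, not real rows):

package main

import (
    "fmt"

    "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

func main() {
    meter := "acl-log-meter" // illustrative meter name
    orig := &sbdb.LogicalFlow{
        UUID:            "logical-flow-uuid-placeholder",
        Match:           "ip4.dst == 10.0.0.0/24",
        Actions:         "next;",
        Pipeline:        sbdb.LogicalFlowPipelineIngress,
        Priority:        100,
        ControllerMeter: &meter,
        ExternalIDs:     map[string]string{"stage-name": "ls_in_acl"},
    }

    // DeepCopy duplicates the map and the pointed-to string, so mutating
    // the copy leaves the original row untouched.
    cp := orig.DeepCopy()
    cp.ExternalIDs["stage-name"] = "ls_out_acl"
    *cp.ControllerMeter = "other-meter"

    fmt.Println(orig.Equals(cp))                // false: copy diverged
    fmt.Println(orig.ExternalIDs["stage-name"]) // still "ls_in_acl"
    fmt.Println(orig.Equals(orig.DeepCopy()))   // true: fresh copy is identical
}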
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go
new file mode 100644
index 000000000..705431f1d
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go
@@ -0,0 +1,78 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const MACBindingTable = "MAC_Binding"
+
+// MACBinding defines an object in MAC_Binding table
+type MACBinding struct {
+ UUID string `ovsdb:"_uuid"`
+ Datapath string `ovsdb:"datapath"`
+ IP string `ovsdb:"ip"`
+ LogicalPort string `ovsdb:"logical_port"`
+ MAC string `ovsdb:"mac"`
+ Timestamp int `ovsdb:"timestamp"`
+}
+
+func (a *MACBinding) GetUUID() string {
+ return a.UUID
+}
+
+func (a *MACBinding) GetDatapath() string {
+ return a.Datapath
+}
+
+func (a *MACBinding) GetIP() string {
+ return a.IP
+}
+
+func (a *MACBinding) GetLogicalPort() string {
+ return a.LogicalPort
+}
+
+func (a *MACBinding) GetMAC() string {
+ return a.MAC
+}
+
+func (a *MACBinding) GetTimestamp() int {
+ return a.Timestamp
+}
+
+func (a *MACBinding) DeepCopyInto(b *MACBinding) {
+ *b = *a
+}
+
+func (a *MACBinding) DeepCopy() *MACBinding {
+ b := new(MACBinding)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *MACBinding) CloneModelInto(b model.Model) {
+ c := b.(*MACBinding)
+ a.DeepCopyInto(c)
+}
+
+func (a *MACBinding) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *MACBinding) Equals(b *MACBinding) bool {
+ return a.UUID == b.UUID &&
+ a.Datapath == b.Datapath &&
+ a.IP == b.IP &&
+ a.LogicalPort == b.LogicalPort &&
+ a.MAC == b.MAC &&
+ a.Timestamp == b.Timestamp
+}
+
+func (a *MACBinding) EqualsModel(b model.Model) bool {
+ c := b.(*MACBinding)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &MACBinding{}
+var _ model.ComparableModel = &MACBinding{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go
new file mode 100644
index 000000000..95c4daec2
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go
@@ -0,0 +1,100 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const MeterTable = "Meter"
+
+type (
+ MeterUnit = string
+)
+
+var (
+ MeterUnitKbps MeterUnit = "kbps"
+ MeterUnitPktps MeterUnit = "pktps"
+)
+
+// Meter defines an object in Meter table
+type Meter struct {
+ UUID string `ovsdb:"_uuid"`
+ Bands []string `ovsdb:"bands"`
+ Name string `ovsdb:"name"`
+ Unit MeterUnit `ovsdb:"unit"`
+}
+
+func (a *Meter) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Meter) GetBands() []string {
+ return a.Bands
+}
+
+func copyMeterBands(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalMeterBands(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Meter) GetName() string {
+ return a.Name
+}
+
+func (a *Meter) GetUnit() MeterUnit {
+ return a.Unit
+}
+
+func (a *Meter) DeepCopyInto(b *Meter) {
+ *b = *a
+ b.Bands = copyMeterBands(a.Bands)
+}
+
+func (a *Meter) DeepCopy() *Meter {
+ b := new(Meter)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Meter) CloneModelInto(b model.Model) {
+ c := b.(*Meter)
+ a.DeepCopyInto(c)
+}
+
+func (a *Meter) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Meter) Equals(b *Meter) bool {
+ return a.UUID == b.UUID &&
+ equalMeterBands(a.Bands, b.Bands) &&
+ a.Name == b.Name &&
+ a.Unit == b.Unit
+}
+
+func (a *Meter) EqualsModel(b model.Model) bool {
+ c := b.(*Meter)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Meter{}
+var _ model.ComparableModel = &Meter{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go
new file mode 100644
index 000000000..addb01b64
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go
@@ -0,0 +1,74 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const MeterBandTable = "Meter_Band"
+
+type (
+ MeterBandAction = string
+)
+
+var (
+ MeterBandActionDrop MeterBandAction = "drop"
+)
+
+// MeterBand defines an object in Meter_Band table
+type MeterBand struct {
+ UUID string `ovsdb:"_uuid"`
+ Action MeterBandAction `ovsdb:"action"`
+ BurstSize int `ovsdb:"burst_size"`
+ Rate int `ovsdb:"rate"`
+}
+
+func (a *MeterBand) GetUUID() string {
+ return a.UUID
+}
+
+func (a *MeterBand) GetAction() MeterBandAction {
+ return a.Action
+}
+
+func (a *MeterBand) GetBurstSize() int {
+ return a.BurstSize
+}
+
+func (a *MeterBand) GetRate() int {
+ return a.Rate
+}
+
+func (a *MeterBand) DeepCopyInto(b *MeterBand) {
+ *b = *a
+}
+
+func (a *MeterBand) DeepCopy() *MeterBand {
+ b := new(MeterBand)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *MeterBand) CloneModelInto(b model.Model) {
+ c := b.(*MeterBand)
+ a.DeepCopyInto(c)
+}
+
+func (a *MeterBand) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *MeterBand) Equals(b *MeterBand) bool {
+ return a.UUID == b.UUID &&
+ a.Action == b.Action &&
+ a.BurstSize == b.BurstSize &&
+ a.Rate == b.Rate
+}
+
+func (a *MeterBand) EqualsModel(b model.Model) bool {
+ c := b.(*MeterBand)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &MeterBand{}
+var _ model.ComparableModel = &MeterBand{}
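
In the OVSDB schema embedded in model.go further below, `Meter.bands` is declared as a set of strong UUID references to `Meter_Band` rows; in these generated structs that relationship surfaces simply as `Bands []string` holding the referenced row UUIDs. A small illustrative sketch of the convention (both UUIDs are placeholders, in practice they are assigned by ovsdb-server):

package main

import (
    "fmt"

    "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

func main() {
    // A Meter_Band row; its UUID is what an owning Meter points at.
    band := sbdb.MeterBand{
        UUID:   "meter-band-uuid-placeholder",
        Action: sbdb.MeterBandActionDrop,
        Rate:   1000, // interpreted as kbps or pktps, depending on the Meter's Unit
    }

    // The Meter carries the reference as a plain string UUID in Bands.
    meter := sbdb.Meter{
        UUID:  "meter-uuid-placeholder",
        Name:  "example-meter",
        Unit:  sbdb.MeterUnitPktps,
        Bands: []string{band.UUID},
    }

    fmt.Println(meter.GetBands(), band.GetAction())
}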
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go
new file mode 100644
index 000000000..69444ea73
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go
@@ -0,0 +1,125 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const MirrorTable = "Mirror"
+
+type (
+ MirrorFilter = string
+ MirrorType = string
+)
+
+var (
+ MirrorFilterFromLport MirrorFilter = "from-lport"
+ MirrorFilterToLport MirrorFilter = "to-lport"
+ MirrorFilterBoth MirrorFilter = "both"
+ MirrorTypeGre MirrorType = "gre"
+ MirrorTypeErspan MirrorType = "erspan"
+ MirrorTypeLocal MirrorType = "local"
+)
+
+// Mirror defines an object in Mirror table
+type Mirror struct {
+ UUID string `ovsdb:"_uuid"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Filter MirrorFilter `ovsdb:"filter"`
+ Index int `ovsdb:"index"`
+ Name string `ovsdb:"name"`
+ Sink string `ovsdb:"sink"`
+ Type MirrorType `ovsdb:"type"`
+}
+
+func (a *Mirror) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Mirror) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyMirrorExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalMirrorExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Mirror) GetFilter() MirrorFilter {
+ return a.Filter
+}
+
+func (a *Mirror) GetIndex() int {
+ return a.Index
+}
+
+func (a *Mirror) GetName() string {
+ return a.Name
+}
+
+func (a *Mirror) GetSink() string {
+ return a.Sink
+}
+
+func (a *Mirror) GetType() MirrorType {
+ return a.Type
+}
+
+func (a *Mirror) DeepCopyInto(b *Mirror) {
+ *b = *a
+ b.ExternalIDs = copyMirrorExternalIDs(a.ExternalIDs)
+}
+
+func (a *Mirror) DeepCopy() *Mirror {
+ b := new(Mirror)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Mirror) CloneModelInto(b model.Model) {
+ c := b.(*Mirror)
+ a.DeepCopyInto(c)
+}
+
+func (a *Mirror) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Mirror) Equals(b *Mirror) bool {
+ return a.UUID == b.UUID &&
+ equalMirrorExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Filter == b.Filter &&
+ a.Index == b.Index &&
+ a.Name == b.Name &&
+ a.Sink == b.Sink &&
+ a.Type == b.Type
+}
+
+func (a *Mirror) EqualsModel(b model.Model) bool {
+ c := b.(*Mirror)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Mirror{}
+var _ model.ComparableModel = &Mirror{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go
new file mode 100644
index 000000000..bc838fe49
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go
@@ -0,0 +1,1884 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import (
+ "encoding/json"
+
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb
+func FullDatabaseModel() (model.ClientDBModel, error) {
+ return model.NewClientDBModel("OVN_Southbound", map[string]model.Model{
+ "Address_Set": &AddressSet{},
+ "BFD": &BFD{},
+ "Chassis": &Chassis{},
+ "Chassis_Private": &ChassisPrivate{},
+ "Chassis_Template_Var": &ChassisTemplateVar{},
+ "Connection": &Connection{},
+ "Controller_Event": &ControllerEvent{},
+ "DHCP_Options": &DHCPOptions{},
+ "DHCPv6_Options": &DHCPv6Options{},
+ "DNS": &DNS{},
+ "Datapath_Binding": &DatapathBinding{},
+ "Encap": &Encap{},
+ "FDB": &FDB{},
+ "Gateway_Chassis": &GatewayChassis{},
+ "HA_Chassis": &HAChassis{},
+ "HA_Chassis_Group": &HAChassisGroup{},
+ "IGMP_Group": &IGMPGroup{},
+ "IP_Multicast": &IPMulticast{},
+ "Load_Balancer": &LoadBalancer{},
+ "Logical_DP_Group": &LogicalDPGroup{},
+ "Logical_Flow": &LogicalFlow{},
+ "MAC_Binding": &MACBinding{},
+ "Meter": &Meter{},
+ "Meter_Band": &MeterBand{},
+ "Mirror": &Mirror{},
+ "Multicast_Group": &MulticastGroup{},
+ "Port_Binding": &PortBinding{},
+ "Port_Group": &PortGroup{},
+ "RBAC_Permission": &RBACPermission{},
+ "RBAC_Role": &RBACRole{},
+ "SB_Global": &SBGlobal{},
+ "SSL": &SSL{},
+ "Service_Monitor": &ServiceMonitor{},
+ "Static_MAC_Binding": &StaticMACBinding{},
+ })
+}
+
+var schema = `{
+ "name": "OVN_Southbound",
+ "version": "20.37.0",
+ "tables": {
+ "Address_Set": {
+ "columns": {
+ "addresses": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "BFD": {
+ "columns": {
+ "chassis_name": {
+ "type": "string"
+ },
+ "detect_mult": {
+ "type": "integer"
+ },
+ "disc": {
+ "type": "integer"
+ },
+ "dst_ip": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "logical_port": {
+ "type": "string"
+ },
+ "min_rx": {
+ "type": "integer"
+ },
+ "min_tx": {
+ "type": "integer"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "src_port": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 49152,
+ "maxInteger": 65535
+ }
+ }
+ },
+ "status": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "down",
+ "init",
+ "up",
+ "admin_down"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "logical_port",
+ "dst_ip",
+ "src_port",
+ "disc"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Chassis": {
+ "columns": {
+ "encaps": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Encap"
+ },
+ "min": 1,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "hostname": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "nb_cfg": {
+ "type": "integer"
+ },
+ "other_config": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "transport_zones": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "vtep_logical_switches": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Chassis_Private": {
+ "columns": {
+ "chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "nb_cfg": {
+ "type": "integer"
+ },
+ "nb_cfg_timestamp": {
+ "type": "integer"
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Chassis_Template_Var": {
+ "columns": {
+ "chassis": {
+ "type": "string"
+ },
+ "variables": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "chassis"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Connection": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "inactivity_probe": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "is_connected": {
+ "type": "boolean",
+ "ephemeral": true
+ },
+ "max_backoff": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1000
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "other_config": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "read_only": {
+ "type": "boolean"
+ },
+ "role": {
+ "type": "string"
+ },
+ "status": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ },
+ "ephemeral": true
+ },
+ "target": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "target"
+ ]
+ ]
+ },
+ "Controller_Event": {
+ "columns": {
+ "chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "event_info": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "event_type": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": "empty_lb_backends"
+ }
+ }
+ },
+ "seq_num": {
+ "type": "integer"
+ }
+ },
+ "isRoot": true
+ },
+ "DHCP_Options": {
+ "columns": {
+ "code": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 254
+ }
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "bool",
+ "uint8",
+ "uint16",
+ "uint32",
+ "ipv4",
+ "static_routes",
+ "str",
+ "host_id",
+ "domains"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "DHCPv6_Options": {
+ "columns": {
+ "code": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 254
+ }
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "ipv6",
+ "str",
+ "mac",
+ "domain"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "DNS": {
+ "columns": {
+ "datapaths": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding"
+ },
+ "min": 1,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "records": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "Datapath_Binding": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "load_balancers": {
+ "type": {
+ "key": {
+ "type": "uuid"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "tunnel_key": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 16777215
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "tunnel_key"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Encap": {
+ "columns": {
+ "chassis_name": {
+ "type": "string"
+ },
+ "ip": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "type": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "geneve",
+ "stt",
+ "vxlan"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "type",
+ "ip"
+ ]
+ ]
+ },
+ "FDB": {
+ "columns": {
+ "dp_key": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 16777215
+ }
+ }
+ },
+ "mac": {
+ "type": "string"
+ },
+ "port_key": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 16777215
+ }
+ }
+ },
+ "timestamp": {
+ "type": "integer"
+ }
+ },
+ "indexes": [
+ [
+ "mac",
+ "dp_key"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Gateway_Chassis": {
+ "columns": {
+ "chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32767
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ]
+ },
+ "HA_Chassis": {
+ "columns": {
+ "chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32767
+ }
+ }
+ }
+ }
+ },
+ "HA_Chassis_Group": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ha_chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "HA_Chassis",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "ref_chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "IGMP_Group": {
+ "columns": {
+ "address": {
+ "type": "string"
+ },
+ "chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "chassis_name": {
+ "type": "string"
+ },
+ "datapath": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "ports": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Port_Binding",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "protocol": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "address",
+ "datapath",
+ "chassis"
+ ]
+ ],
+ "isRoot": true
+ },
+ "IP_Multicast": {
+ "columns": {
+ "datapath": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding",
+ "refType": "weak"
+ }
+ }
+ },
+ "enabled": {
+ "type": {
+ "key": {
+ "type": "boolean"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "eth_src": {
+ "type": "string"
+ },
+ "idle_timeout": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "ip4_src": {
+ "type": "string"
+ },
+ "ip6_src": {
+ "type": "string"
+ },
+ "querier": {
+ "type": {
+ "key": {
+ "type": "boolean"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "query_interval": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "query_max_resp": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "seq_no": {
+ "type": "integer"
+ },
+ "table_size": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "indexes": [
+ [
+ "datapath"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Load_Balancer": {
+ "columns": {
+ "datapath_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_DP_Group"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "datapaths": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "lr_datapath_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_DP_Group"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "ls_datapath_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_DP_Group"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "protocol": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "tcp",
+ "udp",
+ "sctp"
+ ]
+ ]
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "vips": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "Logical_DP_Group": {
+ "columns": {
+ "datapaths": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ }
+ },
+ "Logical_Flow": {
+ "columns": {
+ "actions": {
+ "type": "string"
+ },
+ "controller_meter": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "flow_desc": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "logical_datapath": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "logical_dp_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Logical_DP_Group"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "match": {
+ "type": "string"
+ },
+ "pipeline": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "ingress",
+ "egress"
+ ]
+ ]
+ }
+ }
+ },
+ "priority": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 65535
+ }
+ }
+ },
+ "table_id": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 32
+ }
+ }
+ },
+ "tags": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "MAC_Binding": {
+ "columns": {
+ "datapath": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding"
+ }
+ }
+ },
+ "ip": {
+ "type": "string"
+ },
+ "logical_port": {
+ "type": "string"
+ },
+ "mac": {
+ "type": "string"
+ },
+ "timestamp": {
+ "type": "integer"
+ }
+ },
+ "indexes": [
+ [
+ "logical_port",
+ "ip"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Meter": {
+ "columns": {
+ "bands": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Meter_Band",
+ "refType": "strong"
+ },
+ "min": 1,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "unit": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "kbps",
+ "pktps"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Meter_Band": {
+ "columns": {
+ "action": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": "drop"
+ }
+ }
+ },
+ "burst_size": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 4294967295
+ }
+ }
+ },
+ "rate": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4294967295
+ }
+ }
+ }
+ }
+ },
+ "Mirror": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "filter": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "from-lport",
+ "to-lport",
+ "both"
+ ]
+ ]
+ }
+ }
+ },
+ "index": {
+ "type": "integer"
+ },
+ "name": {
+ "type": "string"
+ },
+ "sink": {
+ "type": "string"
+ },
+ "type": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "gre",
+ "erspan",
+ "local"
+ ]
+ ]
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Multicast_Group": {
+ "columns": {
+ "datapath": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding"
+ }
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "ports": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Port_Binding",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "tunnel_key": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 32768,
+ "maxInteger": 65535
+ }
+ }
+ }
+ },
+ "indexes": [
+ [
+ "datapath",
+ "tunnel_key"
+ ],
+ [
+ "datapath",
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Port_Binding": {
+ "columns": {
+ "additional_chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "additional_encap": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Encap",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "datapath": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding"
+ }
+ }
+ },
+ "encap": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Encap",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "gateway_chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Gateway_Chassis",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ha_chassis_group": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "HA_Chassis_Group",
+ "refType": "strong"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "logical_port": {
+ "type": "string"
+ },
+ "mac": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "mirror_rules": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Mirror",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "nat_addresses": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "parent_port": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "port_security": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "requested_additional_chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "requested_chassis": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Chassis",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "tag": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4095
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "tunnel_key": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 1,
+ "maxInteger": 32767
+ }
+ }
+ },
+ "type": {
+ "type": "string"
+ },
+ "up": {
+ "type": {
+ "key": {
+ "type": "boolean"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "virtual_parent": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "indexes": [
+ [
+ "datapath",
+ "tunnel_key"
+ ],
+ [
+ "logical_port"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Port_Group": {
+ "columns": {
+ "name": {
+ "type": "string"
+ },
+ "ports": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ],
+ "isRoot": true
+ },
+ "RBAC_Permission": {
+ "columns": {
+ "authorization": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "insert_delete": {
+ "type": "boolean"
+ },
+ "table": {
+ "type": "string"
+ },
+ "update": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "RBAC_Role": {
+ "columns": {
+ "name": {
+ "type": "string"
+ },
+ "permissions": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "uuid",
+ "refTable": "RBAC_Permission",
+ "refType": "weak"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "SB_Global": {
+ "columns": {
+ "connections": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Connection"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ipsec": {
+ "type": "boolean"
+ },
+ "nb_cfg": {
+ "type": "integer"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ssl": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "SSL"
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "SSL": {
+ "columns": {
+ "bootstrap_ca_cert": {
+ "type": "boolean"
+ },
+ "ca_cert": {
+ "type": "string"
+ },
+ "certificate": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "private_key": {
+ "type": "string"
+ },
+ "ssl_ciphers": {
+ "type": "string"
+ },
+ "ssl_protocols": {
+ "type": "string"
+ }
+ }
+ },
+ "Service_Monitor": {
+ "columns": {
+ "chassis_name": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ip": {
+ "type": "string"
+ },
+ "logical_port": {
+ "type": "string"
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "port": {
+ "type": {
+ "key": {
+ "type": "integer",
+ "minInteger": 0,
+ "maxInteger": 65535
+ }
+ }
+ },
+ "protocol": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "tcp",
+ "udp"
+ ]
+ ]
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "src_ip": {
+ "type": "string"
+ },
+ "src_mac": {
+ "type": "string"
+ },
+ "status": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "online",
+ "offline",
+ "error"
+ ]
+ ]
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "indexes": [
+ [
+ "logical_port",
+ "ip",
+ "port",
+ "protocol"
+ ]
+ ],
+ "isRoot": true
+ },
+ "Static_MAC_Binding": {
+ "columns": {
+ "datapath": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Datapath_Binding"
+ }
+ }
+ },
+ "ip": {
+ "type": "string"
+ },
+ "logical_port": {
+ "type": "string"
+ },
+ "mac": {
+ "type": "string"
+ },
+ "override_dynamic_mac": {
+ "type": "boolean"
+ }
+ },
+ "indexes": [
+ [
+ "logical_port",
+ "ip"
+ ]
+ ],
+ "isRoot": true
+ }
+ }
+}`
+
+func Schema() ovsdb.DatabaseSchema {
+ var s ovsdb.DatabaseSchema
+ err := json.Unmarshal([]byte(schema), &s)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go
new file mode 100644
index 000000000..1af933ea6
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go
@@ -0,0 +1,97 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const MulticastGroupTable = "Multicast_Group"
+
+// MulticastGroup defines an object in Multicast_Group table
+type MulticastGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ Datapath string `ovsdb:"datapath"`
+ Name string `ovsdb:"name"`
+ Ports []string `ovsdb:"ports"`
+ TunnelKey int `ovsdb:"tunnel_key"`
+}
+
+func (a *MulticastGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *MulticastGroup) GetDatapath() string {
+ return a.Datapath
+}
+
+func (a *MulticastGroup) GetName() string {
+ return a.Name
+}
+
+func (a *MulticastGroup) GetPorts() []string {
+ return a.Ports
+}
+
+func copyMulticastGroupPorts(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalMulticastGroupPorts(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *MulticastGroup) GetTunnelKey() int {
+ return a.TunnelKey
+}
+
+func (a *MulticastGroup) DeepCopyInto(b *MulticastGroup) {
+ *b = *a
+ b.Ports = copyMulticastGroupPorts(a.Ports)
+}
+
+func (a *MulticastGroup) DeepCopy() *MulticastGroup {
+ b := new(MulticastGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *MulticastGroup) CloneModelInto(b model.Model) {
+ c := b.(*MulticastGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *MulticastGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *MulticastGroup) Equals(b *MulticastGroup) bool {
+ return a.UUID == b.UUID &&
+ a.Datapath == b.Datapath &&
+ a.Name == b.Name &&
+ equalMulticastGroupPorts(a.Ports, b.Ports) &&
+ a.TunnelKey == b.TunnelKey
+}
+
+func (a *MulticastGroup) EqualsModel(b model.Model) bool {
+ c := b.(*MulticastGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &MulticastGroup{}
+var _ model.ComparableModel = &MulticastGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go
new file mode 100644
index 000000000..b3d30f843
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go
@@ -0,0 +1,586 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const PortBindingTable = "Port_Binding"
+
+// PortBinding defines an object in Port_Binding table
+type PortBinding struct {
+ UUID string `ovsdb:"_uuid"`
+ AdditionalChassis []string `ovsdb:"additional_chassis"`
+ AdditionalEncap []string `ovsdb:"additional_encap"`
+ Chassis *string `ovsdb:"chassis"`
+ Datapath string `ovsdb:"datapath"`
+ Encap *string `ovsdb:"encap"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ GatewayChassis []string `ovsdb:"gateway_chassis"`
+ HaChassisGroup *string `ovsdb:"ha_chassis_group"`
+ LogicalPort string `ovsdb:"logical_port"`
+ MAC []string `ovsdb:"mac"`
+ MirrorRules []string `ovsdb:"mirror_rules"`
+ NatAddresses []string `ovsdb:"nat_addresses"`
+ Options map[string]string `ovsdb:"options"`
+ ParentPort *string `ovsdb:"parent_port"`
+ PortSecurity []string `ovsdb:"port_security"`
+ RequestedAdditionalChassis []string `ovsdb:"requested_additional_chassis"`
+ RequestedChassis *string `ovsdb:"requested_chassis"`
+ Tag *int `ovsdb:"tag"`
+ TunnelKey int `ovsdb:"tunnel_key"`
+ Type string `ovsdb:"type"`
+ Up *bool `ovsdb:"up"`
+ VirtualParent *string `ovsdb:"virtual_parent"`
+}
+
+func (a *PortBinding) GetUUID() string {
+ return a.UUID
+}
+
+func (a *PortBinding) GetAdditionalChassis() []string {
+ return a.AdditionalChassis
+}
+
+func copyPortBindingAdditionalChassis(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortBindingAdditionalChassis(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetAdditionalEncap() []string {
+ return a.AdditionalEncap
+}
+
+func copyPortBindingAdditionalEncap(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortBindingAdditionalEncap(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetChassis() *string {
+ return a.Chassis
+}
+
+func copyPortBindingChassis(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalPortBindingChassis(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *PortBinding) GetDatapath() string {
+ return a.Datapath
+}
+
+func (a *PortBinding) GetEncap() *string {
+ return a.Encap
+}
+
+func copyPortBindingEncap(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalPortBindingEncap(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *PortBinding) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyPortBindingExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalPortBindingExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetGatewayChassis() []string {
+ return a.GatewayChassis
+}
+
+func copyPortBindingGatewayChassis(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortBindingGatewayChassis(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetHaChassisGroup() *string {
+ return a.HaChassisGroup
+}
+
+func copyPortBindingHaChassisGroup(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalPortBindingHaChassisGroup(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *PortBinding) GetLogicalPort() string {
+ return a.LogicalPort
+}
+
+func (a *PortBinding) GetMAC() []string {
+ return a.MAC
+}
+
+func copyPortBindingMAC(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortBindingMAC(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetMirrorRules() []string {
+ return a.MirrorRules
+}
+
+func copyPortBindingMirrorRules(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortBindingMirrorRules(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetNatAddresses() []string {
+ return a.NatAddresses
+}
+
+func copyPortBindingNatAddresses(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortBindingNatAddresses(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyPortBindingOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalPortBindingOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetParentPort() *string {
+ return a.ParentPort
+}
+
+func copyPortBindingParentPort(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalPortBindingParentPort(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *PortBinding) GetPortSecurity() []string {
+ return a.PortSecurity
+}
+
+func copyPortBindingPortSecurity(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortBindingPortSecurity(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetRequestedAdditionalChassis() []string {
+ return a.RequestedAdditionalChassis
+}
+
+func copyPortBindingRequestedAdditionalChassis(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortBindingRequestedAdditionalChassis(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortBinding) GetRequestedChassis() *string {
+ return a.RequestedChassis
+}
+
+func copyPortBindingRequestedChassis(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalPortBindingRequestedChassis(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *PortBinding) GetTag() *int {
+ return a.Tag
+}
+
+func copyPortBindingTag(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalPortBindingTag(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *PortBinding) GetTunnelKey() int {
+ return a.TunnelKey
+}
+
+func (a *PortBinding) GetType() string {
+ return a.Type
+}
+
+func (a *PortBinding) GetUp() *bool {
+ return a.Up
+}
+
+func copyPortBindingUp(a *bool) *bool {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalPortBindingUp(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *PortBinding) GetVirtualParent() *string {
+ return a.VirtualParent
+}
+
+func copyPortBindingVirtualParent(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalPortBindingVirtualParent(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *PortBinding) DeepCopyInto(b *PortBinding) {
+ *b = *a
+ b.AdditionalChassis = copyPortBindingAdditionalChassis(a.AdditionalChassis)
+ b.AdditionalEncap = copyPortBindingAdditionalEncap(a.AdditionalEncap)
+ b.Chassis = copyPortBindingChassis(a.Chassis)
+ b.Encap = copyPortBindingEncap(a.Encap)
+ b.ExternalIDs = copyPortBindingExternalIDs(a.ExternalIDs)
+ b.GatewayChassis = copyPortBindingGatewayChassis(a.GatewayChassis)
+ b.HaChassisGroup = copyPortBindingHaChassisGroup(a.HaChassisGroup)
+ b.MAC = copyPortBindingMAC(a.MAC)
+ b.MirrorRules = copyPortBindingMirrorRules(a.MirrorRules)
+ b.NatAddresses = copyPortBindingNatAddresses(a.NatAddresses)
+ b.Options = copyPortBindingOptions(a.Options)
+ b.ParentPort = copyPortBindingParentPort(a.ParentPort)
+ b.PortSecurity = copyPortBindingPortSecurity(a.PortSecurity)
+ b.RequestedAdditionalChassis = copyPortBindingRequestedAdditionalChassis(a.RequestedAdditionalChassis)
+ b.RequestedChassis = copyPortBindingRequestedChassis(a.RequestedChassis)
+ b.Tag = copyPortBindingTag(a.Tag)
+ b.Up = copyPortBindingUp(a.Up)
+ b.VirtualParent = copyPortBindingVirtualParent(a.VirtualParent)
+}
+
+func (a *PortBinding) DeepCopy() *PortBinding {
+ b := new(PortBinding)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *PortBinding) CloneModelInto(b model.Model) {
+ c := b.(*PortBinding)
+ a.DeepCopyInto(c)
+}
+
+func (a *PortBinding) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *PortBinding) Equals(b *PortBinding) bool {
+ return a.UUID == b.UUID &&
+ equalPortBindingAdditionalChassis(a.AdditionalChassis, b.AdditionalChassis) &&
+ equalPortBindingAdditionalEncap(a.AdditionalEncap, b.AdditionalEncap) &&
+ equalPortBindingChassis(a.Chassis, b.Chassis) &&
+ a.Datapath == b.Datapath &&
+ equalPortBindingEncap(a.Encap, b.Encap) &&
+ equalPortBindingExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ equalPortBindingGatewayChassis(a.GatewayChassis, b.GatewayChassis) &&
+ equalPortBindingHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) &&
+ a.LogicalPort == b.LogicalPort &&
+ equalPortBindingMAC(a.MAC, b.MAC) &&
+ equalPortBindingMirrorRules(a.MirrorRules, b.MirrorRules) &&
+ equalPortBindingNatAddresses(a.NatAddresses, b.NatAddresses) &&
+ equalPortBindingOptions(a.Options, b.Options) &&
+ equalPortBindingParentPort(a.ParentPort, b.ParentPort) &&
+ equalPortBindingPortSecurity(a.PortSecurity, b.PortSecurity) &&
+ equalPortBindingRequestedAdditionalChassis(a.RequestedAdditionalChassis, b.RequestedAdditionalChassis) &&
+ equalPortBindingRequestedChassis(a.RequestedChassis, b.RequestedChassis) &&
+ equalPortBindingTag(a.Tag, b.Tag) &&
+ a.TunnelKey == b.TunnelKey &&
+ a.Type == b.Type &&
+ equalPortBindingUp(a.Up, b.Up) &&
+ equalPortBindingVirtualParent(a.VirtualParent, b.VirtualParent)
+}
+
+func (a *PortBinding) EqualsModel(b model.Model) bool {
+ c := b.(*PortBinding)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &PortBinding{}
+var _ model.ComparableModel = &PortBinding{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go
new file mode 100644
index 000000000..358e26b33
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go
@@ -0,0 +1,85 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const PortGroupTable = "Port_Group"
+
+// PortGroup defines an object in Port_Group table
+type PortGroup struct {
+ UUID string `ovsdb:"_uuid"`
+ Name string `ovsdb:"name"`
+ Ports []string `ovsdb:"ports"`
+}
+
+func (a *PortGroup) GetUUID() string {
+ return a.UUID
+}
+
+func (a *PortGroup) GetName() string {
+ return a.Name
+}
+
+func (a *PortGroup) GetPorts() []string {
+ return a.Ports
+}
+
+func copyPortGroupPorts(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalPortGroupPorts(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *PortGroup) DeepCopyInto(b *PortGroup) {
+ *b = *a
+ b.Ports = copyPortGroupPorts(a.Ports)
+}
+
+func (a *PortGroup) DeepCopy() *PortGroup {
+ b := new(PortGroup)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *PortGroup) CloneModelInto(b model.Model) {
+ c := b.(*PortGroup)
+ a.DeepCopyInto(c)
+}
+
+func (a *PortGroup) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *PortGroup) Equals(b *PortGroup) bool {
+ return a.UUID == b.UUID &&
+ a.Name == b.Name &&
+ equalPortGroupPorts(a.Ports, b.Ports)
+}
+
+func (a *PortGroup) EqualsModel(b model.Model) bool {
+ c := b.(*PortGroup)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &PortGroup{}
+var _ model.ComparableModel = &PortGroup{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go
new file mode 100644
index 000000000..9d760527e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go
@@ -0,0 +1,122 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const RBACPermissionTable = "RBAC_Permission"
+
+// RBACPermission defines an object in RBAC_Permission table
+type RBACPermission struct {
+ UUID string `ovsdb:"_uuid"`
+ Authorization []string `ovsdb:"authorization"`
+ InsertDelete bool `ovsdb:"insert_delete"`
+ Table string `ovsdb:"table"`
+ Update []string `ovsdb:"update"`
+}
+
+func (a *RBACPermission) GetUUID() string {
+ return a.UUID
+}
+
+func (a *RBACPermission) GetAuthorization() []string {
+ return a.Authorization
+}
+
+func copyRBACPermissionAuthorization(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalRBACPermissionAuthorization(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *RBACPermission) GetInsertDelete() bool {
+ return a.InsertDelete
+}
+
+func (a *RBACPermission) GetTable() string {
+ return a.Table
+}
+
+func (a *RBACPermission) GetUpdate() []string {
+ return a.Update
+}
+
+func copyRBACPermissionUpdate(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalRBACPermissionUpdate(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *RBACPermission) DeepCopyInto(b *RBACPermission) {
+ *b = *a
+ b.Authorization = copyRBACPermissionAuthorization(a.Authorization)
+ b.Update = copyRBACPermissionUpdate(a.Update)
+}
+
+func (a *RBACPermission) DeepCopy() *RBACPermission {
+ b := new(RBACPermission)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *RBACPermission) CloneModelInto(b model.Model) {
+ c := b.(*RBACPermission)
+ a.DeepCopyInto(c)
+}
+
+func (a *RBACPermission) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *RBACPermission) Equals(b *RBACPermission) bool {
+ return a.UUID == b.UUID &&
+ equalRBACPermissionAuthorization(a.Authorization, b.Authorization) &&
+ a.InsertDelete == b.InsertDelete &&
+ a.Table == b.Table &&
+ equalRBACPermissionUpdate(a.Update, b.Update)
+}
+
+func (a *RBACPermission) EqualsModel(b model.Model) bool {
+ c := b.(*RBACPermission)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &RBACPermission{}
+var _ model.ComparableModel = &RBACPermission{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go
new file mode 100644
index 000000000..ce8798645
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go
@@ -0,0 +1,87 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const RBACRoleTable = "RBAC_Role"
+
+// RBACRole defines an object in RBAC_Role table
+type RBACRole struct {
+ UUID string `ovsdb:"_uuid"`
+ Name string `ovsdb:"name"`
+ Permissions map[string]string `ovsdb:"permissions"`
+}
+
+func (a *RBACRole) GetUUID() string {
+ return a.UUID
+}
+
+func (a *RBACRole) GetName() string {
+ return a.Name
+}
+
+func (a *RBACRole) GetPermissions() map[string]string {
+ return a.Permissions
+}
+
+func copyRBACRolePermissions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalRBACRolePermissions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *RBACRole) DeepCopyInto(b *RBACRole) {
+ *b = *a
+ b.Permissions = copyRBACRolePermissions(a.Permissions)
+}
+
+func (a *RBACRole) DeepCopy() *RBACRole {
+ b := new(RBACRole)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *RBACRole) CloneModelInto(b model.Model) {
+ c := b.(*RBACRole)
+ a.DeepCopyInto(c)
+}
+
+func (a *RBACRole) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *RBACRole) Equals(b *RBACRole) bool {
+ return a.UUID == b.UUID &&
+ a.Name == b.Name &&
+ equalRBACRolePermissions(a.Permissions, b.Permissions)
+}
+
+func (a *RBACRole) EqualsModel(b model.Model) bool {
+ c := b.(*RBACRole)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &RBACRole{}
+var _ model.ComparableModel = &RBACRole{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go
new file mode 100644
index 000000000..2374478db
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go
@@ -0,0 +1,182 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const SBGlobalTable = "SB_Global"
+
+// SBGlobal defines an object in SB_Global table
+type SBGlobal struct {
+ UUID string `ovsdb:"_uuid"`
+ Connections []string `ovsdb:"connections"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ Ipsec bool `ovsdb:"ipsec"`
+ NbCfg int `ovsdb:"nb_cfg"`
+ Options map[string]string `ovsdb:"options"`
+ SSL *string `ovsdb:"ssl"`
+}
+
+func (a *SBGlobal) GetUUID() string {
+ return a.UUID
+}
+
+func (a *SBGlobal) GetConnections() []string {
+ return a.Connections
+}
+
+func copySBGlobalConnections(a []string) []string {
+ if a == nil {
+ return nil
+ }
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
+}
+
+func equalSBGlobalConnections(a, b []string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if b[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *SBGlobal) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copySBGlobalExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalSBGlobalExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *SBGlobal) GetIpsec() bool {
+ return a.Ipsec
+}
+
+func (a *SBGlobal) GetNbCfg() int {
+ return a.NbCfg
+}
+
+func (a *SBGlobal) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copySBGlobalOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalSBGlobalOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *SBGlobal) GetSSL() *string {
+ return a.SSL
+}
+
+func copySBGlobalSSL(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalSBGlobalSSL(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *SBGlobal) DeepCopyInto(b *SBGlobal) {
+ *b = *a
+ b.Connections = copySBGlobalConnections(a.Connections)
+ b.ExternalIDs = copySBGlobalExternalIDs(a.ExternalIDs)
+ b.Options = copySBGlobalOptions(a.Options)
+ b.SSL = copySBGlobalSSL(a.SSL)
+}
+
+func (a *SBGlobal) DeepCopy() *SBGlobal {
+ b := new(SBGlobal)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *SBGlobal) CloneModelInto(b model.Model) {
+ c := b.(*SBGlobal)
+ a.DeepCopyInto(c)
+}
+
+func (a *SBGlobal) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *SBGlobal) Equals(b *SBGlobal) bool {
+ return a.UUID == b.UUID &&
+ equalSBGlobalConnections(a.Connections, b.Connections) &&
+ equalSBGlobalExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.Ipsec == b.Ipsec &&
+ a.NbCfg == b.NbCfg &&
+ equalSBGlobalOptions(a.Options, b.Options) &&
+ equalSBGlobalSSL(a.SSL, b.SSL)
+}
+
+func (a *SBGlobal) EqualsModel(b model.Model) bool {
+ c := b.(*SBGlobal)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &SBGlobal{}
+var _ model.ComparableModel = &SBGlobal{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go
new file mode 100644
index 000000000..d3e118868
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go
@@ -0,0 +1,213 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const ServiceMonitorTable = "Service_Monitor"
+
+type (
+ ServiceMonitorProtocol = string
+ ServiceMonitorStatus = string
+)
+
+var (
+ ServiceMonitorProtocolTCP ServiceMonitorProtocol = "tcp"
+ ServiceMonitorProtocolUDP ServiceMonitorProtocol = "udp"
+ ServiceMonitorStatusOnline ServiceMonitorStatus = "online"
+ ServiceMonitorStatusOffline ServiceMonitorStatus = "offline"
+ ServiceMonitorStatusError ServiceMonitorStatus = "error"
+)
+
+// ServiceMonitor defines an object in Service_Monitor table
+type ServiceMonitor struct {
+ UUID string `ovsdb:"_uuid"`
+ ChassisName string `ovsdb:"chassis_name"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ IP string `ovsdb:"ip"`
+ LogicalPort string `ovsdb:"logical_port"`
+ Options map[string]string `ovsdb:"options"`
+ Port int `ovsdb:"port"`
+ Protocol *ServiceMonitorProtocol `ovsdb:"protocol"`
+ SrcIP string `ovsdb:"src_ip"`
+ SrcMAC string `ovsdb:"src_mac"`
+ Status *ServiceMonitorStatus `ovsdb:"status"`
+}
+
+func (a *ServiceMonitor) GetUUID() string {
+ return a.UUID
+}
+
+func (a *ServiceMonitor) GetChassisName() string {
+ return a.ChassisName
+}
+
+func (a *ServiceMonitor) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copyServiceMonitorExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalServiceMonitorExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ServiceMonitor) GetIP() string {
+ return a.IP
+}
+
+func (a *ServiceMonitor) GetLogicalPort() string {
+ return a.LogicalPort
+}
+
+func (a *ServiceMonitor) GetOptions() map[string]string {
+ return a.Options
+}
+
+func copyServiceMonitorOptions(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalServiceMonitorOptions(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *ServiceMonitor) GetPort() int {
+ return a.Port
+}
+
+func (a *ServiceMonitor) GetProtocol() *ServiceMonitorProtocol {
+ return a.Protocol
+}
+
+func copyServiceMonitorProtocol(a *ServiceMonitorProtocol) *ServiceMonitorProtocol {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalServiceMonitorProtocol(a, b *ServiceMonitorProtocol) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ServiceMonitor) GetSrcIP() string {
+ return a.SrcIP
+}
+
+func (a *ServiceMonitor) GetSrcMAC() string {
+ return a.SrcMAC
+}
+
+func (a *ServiceMonitor) GetStatus() *ServiceMonitorStatus {
+ return a.Status
+}
+
+func copyServiceMonitorStatus(a *ServiceMonitorStatus) *ServiceMonitorStatus {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalServiceMonitorStatus(a, b *ServiceMonitorStatus) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *ServiceMonitor) DeepCopyInto(b *ServiceMonitor) {
+ *b = *a
+ b.ExternalIDs = copyServiceMonitorExternalIDs(a.ExternalIDs)
+ b.Options = copyServiceMonitorOptions(a.Options)
+ b.Protocol = copyServiceMonitorProtocol(a.Protocol)
+ b.Status = copyServiceMonitorStatus(a.Status)
+}
+
+func (a *ServiceMonitor) DeepCopy() *ServiceMonitor {
+ b := new(ServiceMonitor)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *ServiceMonitor) CloneModelInto(b model.Model) {
+ c := b.(*ServiceMonitor)
+ a.DeepCopyInto(c)
+}
+
+func (a *ServiceMonitor) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *ServiceMonitor) Equals(b *ServiceMonitor) bool {
+ return a.UUID == b.UUID &&
+ a.ChassisName == b.ChassisName &&
+ equalServiceMonitorExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.IP == b.IP &&
+ a.LogicalPort == b.LogicalPort &&
+ equalServiceMonitorOptions(a.Options, b.Options) &&
+ a.Port == b.Port &&
+ equalServiceMonitorProtocol(a.Protocol, b.Protocol) &&
+ a.SrcIP == b.SrcIP &&
+ a.SrcMAC == b.SrcMAC &&
+ equalServiceMonitorStatus(a.Status, b.Status)
+}
+
+func (a *ServiceMonitor) EqualsModel(b model.Model) bool {
+ c := b.(*ServiceMonitor)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &ServiceMonitor{}
+var _ model.ComparableModel = &ServiceMonitor{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go
new file mode 100644
index 000000000..3fab5fd1e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go
@@ -0,0 +1,117 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const SSLTable = "SSL"
+
+// SSL defines an object in SSL table
+type SSL struct {
+ UUID string `ovsdb:"_uuid"`
+ BootstrapCaCert bool `ovsdb:"bootstrap_ca_cert"`
+ CaCert string `ovsdb:"ca_cert"`
+ Certificate string `ovsdb:"certificate"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ PrivateKey string `ovsdb:"private_key"`
+ SSLCiphers string `ovsdb:"ssl_ciphers"`
+ SSLProtocols string `ovsdb:"ssl_protocols"`
+}
+
+func (a *SSL) GetUUID() string {
+ return a.UUID
+}
+
+func (a *SSL) GetBootstrapCaCert() bool {
+ return a.BootstrapCaCert
+}
+
+func (a *SSL) GetCaCert() string {
+ return a.CaCert
+}
+
+func (a *SSL) GetCertificate() string {
+ return a.Certificate
+}
+
+func (a *SSL) GetExternalIDs() map[string]string {
+ return a.ExternalIDs
+}
+
+func copySSLExternalIDs(a map[string]string) map[string]string {
+ if a == nil {
+ return nil
+ }
+ b := make(map[string]string, len(a))
+ for k, v := range a {
+ b[k] = v
+ }
+ return b
+}
+
+func equalSSLExternalIDs(a, b map[string]string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for k, v := range a {
+ if w, ok := b[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *SSL) GetPrivateKey() string {
+ return a.PrivateKey
+}
+
+func (a *SSL) GetSSLCiphers() string {
+ return a.SSLCiphers
+}
+
+func (a *SSL) GetSSLProtocols() string {
+ return a.SSLProtocols
+}
+
+func (a *SSL) DeepCopyInto(b *SSL) {
+ *b = *a
+ b.ExternalIDs = copySSLExternalIDs(a.ExternalIDs)
+}
+
+func (a *SSL) DeepCopy() *SSL {
+ b := new(SSL)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *SSL) CloneModelInto(b model.Model) {
+ c := b.(*SSL)
+ a.DeepCopyInto(c)
+}
+
+func (a *SSL) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *SSL) Equals(b *SSL) bool {
+ return a.UUID == b.UUID &&
+ a.BootstrapCaCert == b.BootstrapCaCert &&
+ a.CaCert == b.CaCert &&
+ a.Certificate == b.Certificate &&
+ equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
+ a.PrivateKey == b.PrivateKey &&
+ a.SSLCiphers == b.SSLCiphers &&
+ a.SSLProtocols == b.SSLProtocols
+}
+
+func (a *SSL) EqualsModel(b model.Model) bool {
+ c := b.(*SSL)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &SSL{}
+var _ model.ComparableModel = &SSL{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go
new file mode 100644
index 000000000..370968f60
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go
@@ -0,0 +1,78 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const StaticMACBindingTable = "Static_MAC_Binding"
+
+// StaticMACBinding defines an object in Static_MAC_Binding table
+type StaticMACBinding struct {
+ UUID string `ovsdb:"_uuid"`
+ Datapath string `ovsdb:"datapath"`
+ IP string `ovsdb:"ip"`
+ LogicalPort string `ovsdb:"logical_port"`
+ MAC string `ovsdb:"mac"`
+ OverrideDynamicMAC bool `ovsdb:"override_dynamic_mac"`
+}
+
+func (a *StaticMACBinding) GetUUID() string {
+ return a.UUID
+}
+
+func (a *StaticMACBinding) GetDatapath() string {
+ return a.Datapath
+}
+
+func (a *StaticMACBinding) GetIP() string {
+ return a.IP
+}
+
+func (a *StaticMACBinding) GetLogicalPort() string {
+ return a.LogicalPort
+}
+
+func (a *StaticMACBinding) GetMAC() string {
+ return a.MAC
+}
+
+func (a *StaticMACBinding) GetOverrideDynamicMAC() bool {
+ return a.OverrideDynamicMAC
+}
+
+func (a *StaticMACBinding) DeepCopyInto(b *StaticMACBinding) {
+ *b = *a
+}
+
+func (a *StaticMACBinding) DeepCopy() *StaticMACBinding {
+ b := new(StaticMACBinding)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *StaticMACBinding) CloneModelInto(b model.Model) {
+ c := b.(*StaticMACBinding)
+ a.DeepCopyInto(c)
+}
+
+func (a *StaticMACBinding) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *StaticMACBinding) Equals(b *StaticMACBinding) bool {
+ return a.UUID == b.UUID &&
+ a.Datapath == b.Datapath &&
+ a.IP == b.IP &&
+ a.LogicalPort == b.LogicalPort &&
+ a.MAC == b.MAC &&
+ a.OverrideDynamicMAC == b.OverrideDynamicMAC
+}
+
+func (a *StaticMACBinding) EqualsModel(b model.Model) bool {
+ c := b.(*StaticMACBinding)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &StaticMACBinding{}
+var _ model.ComparableModel = &StaticMACBinding{}
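All of the generated sbdb models above follow the same `libovsdb.modelgen` shape: `ovsdb`-tagged fields, per-field copy and equality helpers, `DeepCopy`, and the `CloneModel`/`EqualsModel` adapters that satisfy `model.CloneableModel` and `model.ComparableModel`. A minimal sketch of how calling code can lean on those helpers (the `main` wrapper and UUID values are illustrative only, not part of this change):

```go
package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

func main() {
	role := &sbdb.RBACRole{
		UUID:        "role-uuid",
		Name:        "ovn-controller",
		Permissions: map[string]string{"Chassis": "perm-uuid"},
	}

	// DeepCopy duplicates reference fields (here, the Permissions map),
	// so mutating the clone never aliases the original.
	clone := role.DeepCopy()
	fmt.Println(role.Equals(clone)) // true

	clone.Permissions["Chassis"] = "other-uuid"
	fmt.Println(role.Equals(clone)) // false
}
```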
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go
new file mode 100644
index 000000000..d014dc3a3
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go
@@ -0,0 +1,250 @@
+package types
+
+import "time"
+
+const (
+ // Default network name
+ DefaultNetworkName = "default"
+ K8sPrefix = "k8s-"
+ HybridOverlayPrefix = "int-"
+ HybridOverlayGRSubfix = "-gr"
+
+ // K8sMgmtIntfNamePrefix is the name prefix to be used for OVS internal ports on the node, one per network
+ K8sMgmtIntfNamePrefix = "ovn-k8s-mp"
+
+ // UDNVRFDeviceSuffix vrf device suffix associated with every user defined primary network.
+ UDNVRFDeviceSuffix = "-udn-vrf"
+ // UDNVRFDevicePrefix vrf device prefix associated with every user defined primary network
+ UDNVRFDevicePrefix = "mp"
+
+ // K8sMgmtIntfName name to be used as an OVS internal port on the node
+ K8sMgmtIntfName = K8sMgmtIntfNamePrefix + "0"
+
+ // PhysicalNetworkName is the name that maps to an OVS bridge that provides
+ // access to physical/external network
+ PhysicalNetworkName = "physnet"
+ PhysicalNetworkExGwName = "exgwphysnet"
+
+ // LocalNetworkName is the name that maps to an OVS bridge that provides
+ // access to local service
+ LocalNetworkName = "locnet"
+
+ // Local Bridge used for DGP access
+ LocalBridgeName = "br-local"
+ LocalnetGatewayNextHopPort = "ovn-k8s-gw0"
+
+ // OVS Bridge Datapath types
+ DatapathUserspace = "netdev"
+
+ // types.OVNClusterRouter is the name of the distributed router
+ OVNClusterRouter = "ovn_cluster_router"
+ OVNJoinSwitch = "join"
+
+ JoinSwitchPrefix = "join_"
+ ExternalSwitchPrefix = "ext_"
+ GWRouterPrefix = "GR_"
+ GWRouterLocalLBPostfix = "_local"
+ RouterToSwitchPrefix = "rtos-"
+ InterPrefix = "inter-"
+ HybridSubnetPrefix = "hybrid-subnet-"
+ SwitchToRouterPrefix = "stor-"
+ JoinSwitchToGWRouterPrefix = "jtor-"
+ GWRouterToJoinSwitchPrefix = "rtoj-"
+ DistRouterToJoinSwitchPrefix = "dtoj-"
+ JoinSwitchToDistRouterPrefix = "jtod-"
+ EXTSwitchToGWRouterPrefix = "etor-"
+ GWRouterToExtSwitchPrefix = "rtoe-"
+ EgressGWSwitchPrefix = "exgw-"
+ PatchPortPrefix = "patch-"
+ PatchPortSuffix = "-to-br-int"
+
+ NodeLocalSwitch = "node_local_switch"
+
+ // types.OVNLayer2Switch is the name of layer2 topology switch
+ OVNLayer2Switch = "ovn_layer2_switch"
+ // types.OVNLocalnetSwitch is the name of localnet topology switch
+ OVNLocalnetSwitch = "ovn_localnet_switch"
+ // types.OVNLocalnetPort is the name of localnet topology localnet port
+ OVNLocalnetPort = "ovn_localnet_port"
+
+ TransitSwitch = "transit_switch"
+ TransitSwitchToRouterPrefix = "tstor-"
+ RouterToTransitSwitchPrefix = "rtots-"
+
+ // ACL Default Tier Priorities
+
+ // Default routed multicast allow acl rule priority
+ DefaultRoutedMcastAllowPriority = 1013
+ // Default multicast allow acl rule priority
+ DefaultMcastAllowPriority = 1012
+ // Default multicast deny acl rule priority
+ DefaultMcastDenyPriority = 1011
+ // Default allow acl rule priority
+ DefaultAllowPriority = 1001
+ // Default deny acl rule priority
+ DefaultDenyPriority = 1000
+
+ // ACL PlaceHolderACL Tier Priorities
+ PrimaryUDNAllowPriority = 1001
+ // Default deny acl rule priority
+ PrimaryUDNDenyPriority = 1000
+
+ // ACL Tiers
+ // Tier 0 is called Primary as it is evaluated before any other feature-related Tiers.
+ // Currently used for User Defined Network Feature.
+ // NOTE: When we upgrade from an OVN version without tiers to the new version with
+ // tiers, all values in the new ACL.Tier column will be set to 0.
+ PrimaryACLTier = 0
+ // Default Tier for all ACLs
+ DefaultACLTier = 2
+ // Default Tier for all ACLs belonging to Admin Network Policy
+ DefaultANPACLTier = 1
+ // Default Tier for all ACLs belonging to Baseline Admin Network Policy
+ DefaultBANPACLTier = 3
+
+ // priority of logical router policies on the OVNClusterRouter
+ EgressFirewallStartPriority = 10000
+ MinimumReservedEgressFirewallPriority = 2000
+ MGMTPortPolicyPriority = "1005"
+ NodeSubnetPolicyPriority = "1004"
+ InterNodePolicyPriority = "1003"
+ UDNHostCIDRPolicyPriority = "99"
+ HybridOverlaySubnetPriority = 1002
+ HybridOverlayReroutePriority = 501
+ DefaultNoRereoutePriority = 102
+ EgressSVCReroutePriority = 101
+ EgressIPReroutePriority = 100
+ EgressIPRerouteQoSRulePriority = 103
+ EgressLiveMigrationReroutePiority = 10
+
+ // EndpointSliceMirrorControllerName mirror EndpointSlice controller name (used as a value for the "endpointslice.kubernetes.io/managed-by" label)
+ EndpointSliceMirrorControllerName = "endpointslice-mirror-controller.k8s.ovn.org"
+ // EndpointSliceDefaultControllerName default kubernetes EndpointSlice controller name (used as a value for the "endpointslice.kubernetes.io/managed-by" label)
+ EndpointSliceDefaultControllerName = "endpointslice-controller.k8s.io"
+ // LabelSourceEndpointSlice label key used in mirrored EndpointSlice
+ // that has the value of the default EndpointSlice name
+ LabelSourceEndpointSlice = "k8s.ovn.org/source-endpointslice"
+ // LabelSourceEndpointSliceVersion label key used in mirrored EndpointSlice
+ // that has the value of the last known default EndpointSlice ResourceVersion
+ LabelSourceEndpointSliceVersion = "k8s.ovn.org/source-endpointslice-version"
+ // LabelUserDefinedEndpointSliceNetwork label key used in mirrored EndpointSlices that contains the current primary user defined network name
+ LabelUserDefinedEndpointSliceNetwork = "k8s.ovn.org/endpointslice-network"
+ // LabelUserDefinedServiceName label key used in mirrored EndpointSlices that contains the service name matching the EndpointSlice
+ LabelUserDefinedServiceName = "k8s.ovn.org/service-name"
+
+ // Packet marking
+ EgressIPNodeConnectionMark = "1008"
+ EgressIPReplyTrafficConnectionMark = 42
+
+ // primary user defined network's default join subnet value
+ // users can configure custom values using NADs
+ UserDefinedPrimaryNetworkJoinSubnetV4 = "100.65.0.0/16"
+ UserDefinedPrimaryNetworkJoinSubnetV6 = "fd99::/64"
+
+ // OpenFlow and Networking constants
+ RouteAdvertisementICMPType = 134
+ NeighborAdvertisementICMPType = 136
+
+ // Meter constants
+ OvnACLLoggingMeter = "acl-logging"
+ OvnRateLimitingMeter = "rate-limiter"
+ PacketsPerSecond = "pktps"
+ MeterAction = "drop"
+
+ // OVN-K8S annotation & taint constants
+ OvnK8sPrefix = "k8s.ovn.org"
+ // Deprecated: we used to set topology version as an annotation on the node. We don't do this anymore.
+ OvnK8sTopoAnno = OvnK8sPrefix + "/" + "topology-version"
+ OvnK8sSmallMTUTaintKey = OvnK8sPrefix + "/" + "mtu-too-small"
+
+ // name of the configmap used to synchronize status (e.g. watch for topology changes)
+ OvnK8sStatusCMName = "control-plane-status"
+ OvnK8sStatusKeyTopoVersion = "topology-version"
+
+ // Monitoring constants
+ SFlowAgent = "ovn-k8s-mp0"
+
+ // OVNKube-Node Node types
+ NodeModeFull = "full"
+ NodeModeDPU = "dpu"
+ NodeModeDPUHost = "dpu-host"
+
+ // Geneve header length for IPv4 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823)
+ GeneveHeaderLengthIPv4 = 58
+ // Geneve header length for IPv6 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823)
+ GeneveHeaderLengthIPv6 = GeneveHeaderLengthIPv4 + 20
+
+ ClusterPortGroupNameBase = "clusterPortGroup"
+ ClusterRtrPortGroupNameBase = "clusterRtrPortGroup"
+
+ OVSDBTimeout = 10 * time.Second
+ OVSDBWaitTimeout = 0
+
+ ClusterLBGroupName = "clusterLBGroup"
+ ClusterSwitchLBGroupName = "clusterSwitchLBGroup"
+ ClusterRouterLBGroupName = "clusterRouterLBGroup"
+
+ // key for network name external-id
+ NetworkExternalID = OvnK8sPrefix + "/" + "network"
+ // key for node name external-id
+ NodeExternalID = OvnK8sPrefix + "/" + "node"
+ // key for network role external-id: possible values are "default", "primary", "secondary"
+ NetworkRoleExternalID = OvnK8sPrefix + "/" + "role"
+ // key for NAD name external-id, only used for secondary logical switch port of a pod
+ // key for network name external-id
+ NADExternalID = OvnK8sPrefix + "/" + "nad"
+ // key for topology type external-id, only used for secondary network logical entities
+ TopologyExternalID = OvnK8sPrefix + "/" + "topology"
+ // key for load_balancer kind external-id
+ LoadBalancerKindExternalID = OvnK8sPrefix + "/" + "kind"
+ // key for load_balancer service external-id
+ LoadBalancerOwnerExternalID = OvnK8sPrefix + "/" + "owner"
+ // key for UDN enabled services routes
+ UDNEnabledServiceExternalID = OvnK8sPrefix + "/" + "udn-enabled-default-service"
+
+ // different secondary network topology type defined in CNI netconf
+ Layer3Topology = "layer3"
+ Layer2Topology = "layer2"
+ LocalnetTopology = "localnet"
+
+ // different types of network roles
+ // defined in CNI netconf as a user defined network
+ NetworkRolePrimary = "primary"
+ NetworkRoleSecondary = "secondary"
+ NetworkRoleDefault = "default"
+ // defined internally by ovnkube to recognize the "default"
+ // network's role as an "infrastructure-locked" network
+ // when a user defined network is the primary network for
+ // the pod, which makes the "default" network neither primary
+ // nor secondary
+ NetworkRoleInfrastructure = "infrastructure-locked"
+
+ // db index keys
+ // PrimaryIDKey is used as a primary client index
+ PrimaryIDKey = OvnK8sPrefix + "/id"
+
+ OvnDefaultZone = "global"
+
+ // EgressService "reserved" hosts - when set on an EgressService they have a special meaning
+
+ EgressServiceNoHost = "" // set on services with no allocated node
+ EgressServiceNoSNATHost = "ALL" // set on services with sourceIPBy=Network
+
+ // MaxLogicalPortTunnelKey is maximum tunnel key that can be requested for a
+ // Logical Switch or Router Port
+ MaxLogicalPortTunnelKey = 32767
+
+ // InformerSyncTimeout is used when waiting for the initial informer cache sync
+ // (i.e. all existing objects should be listed by the informer).
+ // It allows ~4 list() retries with the default reflector exponential backoff config
+ InformerSyncTimeout = 20 * time.Second
+
+ // HandlerSyncTimeout is used when waiting for initial object handler sync.
+ // (i.e. all the ADD events should be processed for the existing objects by the event handler)
+ HandlerSyncTimeout = 20 * time.Second
+
+ // GRMACBindingAgeThreshold is the lifetime in seconds of each MAC binding
+ // entry for the gateway routers. After this time, the entry is removed and
+ // may be refreshed with a new ARP request.
+ GRMACBindingAgeThreshold = "300"
+)
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go
new file mode 100644
index 000000000..566f03fa9
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go
@@ -0,0 +1,44 @@
+package types
+
+import (
+ "errors"
+ "fmt"
+
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+type SuppressedError struct {
+ Inner error
+}
+
+func (e *SuppressedError) Error() string {
+ return fmt.Sprintf("suppressed error logged: %v", e.Inner.Error())
+}
+
+func (e *SuppressedError) Unwrap() error {
+ return e.Inner
+}
+
+func NewSuppressedError(err error) error {
+ return &SuppressedError{
+ Inner: err,
+ }
+}
+
+func IsSuppressedError(err error) bool {
+ var suppressedError *SuppressedError
+ // errors.As() is not supported with Aggregate type error. Aggregate.Errors() converts an
+ // Aggregate type error into a slice of builtin error and then errors.As() can be used
+ if agg, ok := err.(kerrors.Aggregate); ok && err != nil {
+ suppress := false
+ for _, err := range agg.Errors() {
+ if errors.As(err, &suppressedError) {
+ suppress = true
+ } else {
+ return false
+ }
+ }
+ return suppress
+ }
+ return errors.As(err, &suppressedError)
+}
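`IsSuppressedError` unwraps both a plain wrapped error and a `kerrors.Aggregate`, and an aggregate only counts as suppressed when every member is. A small sketch of the intended call pattern (the error messages are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"

	ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	quiet := ovntypes.NewSuppressedError(errors.New("pod not scheduled yet"))
	fmt.Println(ovntypes.IsSuppressedError(quiet)) // true

	// An Aggregate is suppressed only if all of its members are.
	mixed := kerrors.NewAggregate([]error{quiet, errors.New("real failure")})
	fmt.Println(ovntypes.IsSuppressedError(mixed)) // false
}
```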
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go
new file mode 100644
index 000000000..2a69fd57c
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go
@@ -0,0 +1,21 @@
+package types
+
+import (
+ "fmt"
+ "strings"
+)
+
+// this file defines error messages that are used to figure out if a resource reconciliation failed
+const (
+ APBRouteErrorMsg = "failed to apply policy"
+ EgressFirewallErrorMsg = "EgressFirewall Rules not correctly applied"
+ EgressQoSErrorMsg = "EgressQoS Rules not correctly applied"
+)
+
+func GetZoneStatus(zoneID, message string) string {
+ return fmt.Sprintf("%s: %s", zoneID, message)
+}
+
+func GetZoneFromStatus(status string) string {
+ return strings.Split(status, ":")[0]
+}
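`GetZoneStatus` and `GetZoneFromStatus` are simple inverses around the `"zone: message"` convention used when reporting per-zone reconciliation failures. For example (the zone name is illustrative):

```go
package main

import (
	"fmt"

	ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
)

func main() {
	status := ovntypes.GetZoneStatus("zone-a", ovntypes.EgressFirewallErrorMsg)
	fmt.Println(status) // "zone-a: EgressFirewall Rules not correctly applied"

	// Splitting on ":" recovers the zone that reported the status.
	fmt.Println(ovntypes.GetZoneFromStatus(status)) // "zone-a"
}
```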
diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore
new file mode 100644
index 000000000..5d7e88de0
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/.gitignore
@@ -0,0 +1,36 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+cmd/*/*exe
+.idea
+
+fuzz/*.zip
diff --git a/vendor/github.com/pierrec/lz4/v4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE
new file mode 100644
index 000000000..bd899d835
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md
new file mode 100644
index 000000000..4629c9d0e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/README.md
@@ -0,0 +1,92 @@
+# lz4 : LZ4 compression in pure Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4)
+[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [one](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4/v4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/v4/cmd/lz4c
+```
+
+Usage
+
+```
+Usage of lz4c:
+ -version
+ print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+ -bc
+ enable block checksum
+ -l int
+ compression level (0=fastest)
+ -sc
+ disable stream checksum
+ -size string
+ block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+ // Compress the input string.
+ _, _ = io.Copy(zw, r)
+ _ = zw.Close() // Make sure the writer is closed
+ _ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
+
+## Contributing
+
+Contributions are very welcome for bug fixing, performance improvements...!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
new file mode 100644
index 000000000..fec8adb03
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
@@ -0,0 +1,481 @@
+package lz4block
+
+import (
+ "encoding/binary"
+ "math/bits"
+ "sync"
+
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+const (
+ // The following constants are used to setup the compression algorithm.
+ minMatch = 4 // the minimum size of the match sequence (4 bytes)
+ winSizeLog = 16 // LZ4 64Kb window size limit
+ winSize = 1 << winSizeLog
+ winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+ // hashLog determines the size of the hash table used to quickly find a previous match position.
+ // Its value influences the compression speed and memory usage, the lower the faster,
+ // but at the expense of the compression ratio.
+ // 16 seems to be the best compromise for fast compression.
+ hashLog = 16
+ htSize = 1 << hashLog
+
+ mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
+)
+
+func recoverBlock(e *error) {
+ if r := recover(); r != nil && *e == nil {
+ *e = lz4errors.ErrInvalidSourceShortBuffer
+ }
+}
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+ const prime6bytes = 227718039650203
+ return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
+func CompressBlockBound(n int) int {
+ return n + n/255 + 16
+}
+
+func UncompressBlock(src, dst, dict []byte) (int, error) {
+ if len(src) == 0 {
+ return 0, nil
+ }
+ if di := decodeBlock(dst, src, dict); di >= 0 {
+ return di, nil
+ }
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+}
+
+type Compressor struct {
+ // Offsets are at most 64kiB, so we can store only the lower 16 bits of
+ // match positions: effectively, an offset from some 64kiB block boundary.
+ //
+ // When we retrieve such an offset, we interpret it as relative to the last
+ // block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
+ // depending on which of these is inside the current window. If a table
+ // entry was generated more than 64kiB back in the input, we find out by
+ // inspecting the input stream.
+ table [htSize]uint16
+
+ // Bitmap indicating which positions in the table are in use.
+ // This allows us to quickly reset the table for reuse,
+ // without having to zero everything.
+ inUse [htSize / 32]uint32
+}
+
+// Get returns the position of a presumptive match for the hash h.
+// The match may be a false positive due to a hash collision or an old entry.
+// If si < winSize, the return value may be negative.
+func (c *Compressor) get(h uint32, si int) int {
+ h &= htSize - 1
+ i := 0
+ if c.inUse[h/32]&(1<<(h%32)) != 0 {
+ i = int(c.table[h])
+ }
+ i += si &^ winMask
+ if i >= si {
+ // Try previous 64kiB block (negative when in first block).
+ i -= winSize
+ }
+ return i
+}
+
+func (c *Compressor) put(h uint32, si int) {
+ h &= htSize - 1
+ c.table[h] = uint16(si)
+ c.inUse[h/32] |= 1 << (h % 32)
+}
+
+func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} }
+
+var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}
+
+func CompressBlock(src, dst []byte) (int, error) {
+ c := compressorPool.Get().(*Compressor)
+ n, err := c.CompressBlock(src, dst)
+ compressorPool.Put(c)
+ return n, err
+}
+
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+ // Zero out reused table to avoid non-deterministic output (issue #65).
+ c.reset()
+
+ // Return 0, nil only if the destination buffer size is < CompressBlockBound.
+ isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+ // This significantly speeds up incompressible data and usually has very small impact on compression.
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+ const adaptSkipLog = 7
+
+ // si: Current position of the search.
+ // anchor: Position of the current literals.
+ var si, di, anchor int
+ sn := len(src) - mfLimit
+ if sn <= 0 {
+ goto lastLiterals
+ }
+
+ // Fast scan strategy: the hash table only stores the last 4 bytes sequences.
+ for si < sn {
+ // Hash the next 6 bytes (sequence)...
+ match := binary.LittleEndian.Uint64(src[si:])
+ h := blockHash(match)
+ h2 := blockHash(match >> 8)
+
+ // We check a match at s, s+1 and s+2 and pick the first one we get.
+ // Checking 3 only requires us to load the source once.
+ ref := c.get(h, si)
+ ref2 := c.get(h2, si+1)
+ c.put(h, si)
+ c.put(h2, si+1)
+
+ offset := si - ref
+
+ if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
+ // No match. Start calculating another hash.
+ // The processor can usually do this out-of-order.
+ h = blockHash(match >> 16)
+ ref3 := c.get(h, si+2)
+
+ // Check the second match at si+1
+ si += 1
+ offset = si - ref2
+
+ if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+ // No match. Check the third match at si+2
+ si += 1
+ offset = si - ref3
+ c.put(h, si)
+
+ if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
+ // Skip one extra byte (at si+3) before we check 3 matches again.
+ si += 2 + (si-anchor)>>adaptSkipLog
+ continue
+ }
+ }
+ }
+
+ // Match found.
+ lLen := si - anchor // Literal length.
+ // We already matched 4 bytes.
+ mLen := 4
+
+ // Extend backwards if we can, reducing literals.
+ tOff := si - offset - 1
+ for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+ si--
+ tOff--
+ lLen--
+ mLen++
+ }
+
+ // Add the match length, so we continue search at the end.
+ // Use mLen to store the offset base.
+ si, mLen = si+mLen, si+minMatch
+
+ // Find the longest match by looking by batches of 8 bytes.
+ for si+8 <= sn {
+ x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+ if x == 0 {
+ si += 8
+ } else {
+ // Stop at the first non-zero byte.
+ si += bits.TrailingZeros64(x) >> 3
+ break
+ }
+ }
+
+ mLen = si - mLen
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // Encode literals length.
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ di++
+ l := lLen - 0xF
+ for ; l >= 0xFF && di < len(dst); l -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di] = byte(l)
+ }
+ di++
+
+ // Literals.
+ if di+lLen > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+ di += lLen + 2
+ anchor = si
+
+ // Encode offset.
+ if di > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // Encode match length part 2.
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di] = byte(mLen)
+ di++
+ }
+ // Check if we can load next values.
+ if si >= sn {
+ break
+ }
+ // Hash match end-2
+ h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+ c.put(h, si-2)
+ }
+
+lastLiterals:
+ if isNotCompressible && anchor == 0 {
+ // Incompressible.
+ return 0, nil
+ }
+
+ // Last literals.
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ di++
+ for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di] = byte(lLen)
+ }
+ di++
+
+ // Write the last literals.
+ if isNotCompressible && di >= anchor {
+ // Incompressible.
+ return 0, nil
+ }
+ if di+len(src)-anchor > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+ return di, nil
+}
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+ const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+ return x * hasher >> (32 - winSizeLog)
+}
+
+type CompressorHC struct {
+ // hashTable: stores the last position found for a given hash
+ // chainTable: stores previous positions for a given hash
+ hashTable, chainTable [htSize]int
+ needsReset bool
+}
+
+var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}
+
+func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
+ c := compressorHCPool.Get().(*CompressorHC)
+ n, err := c.CompressBlock(src, dst, depth)
+ compressorHCPool.Put(c)
+ return n, err
+}
+
+func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
+ if c.needsReset {
+ // Zero out reused table to avoid non-deterministic output (issue #65).
+ c.hashTable = [htSize]int{}
+ c.chainTable = [htSize]int{}
+ }
+ c.needsReset = true // Only false on first call.
+
+ defer recoverBlock(&err)
+
+ // Return 0, nil only if the destination buffer size is < CompressBlockBound.
+ isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+ // This significantly speeds up incompressible data and usually has very small impact on compression.
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+ const adaptSkipLog = 7
+
+ var si, di, anchor int
+ sn := len(src) - mfLimit
+ if sn <= 0 {
+ goto lastLiterals
+ }
+
+ if depth == 0 {
+ depth = winSize
+ }
+
+ for si < sn {
+ // Hash the next 4 bytes (sequence).
+ match := binary.LittleEndian.Uint32(src[si:])
+ h := blockHashHC(match)
+
+ // Follow the chain until out of window and give the longest match.
+ mLen := 0
+ offset := 0
+ for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
+ // The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+ // must match to improve on the match length.
+ if src[next+mLen] != src[si+mLen] {
+ continue
+ }
+ ml := 0
+ // Compare the current position with a previous with the same hash.
+ for ml < sn-si {
+ x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+ if x == 0 {
+ ml += 8
+ } else {
+ // Stop at the first non-zero byte.
+ ml += bits.TrailingZeros64(x) >> 3
+ break
+ }
+ }
+ if ml < minMatch || ml <= mLen {
+ // Match too small (<minMatch) or no longer than the current best match.
+ continue
+ }
+
+ // Match found.
+ // Update hash/chain tables with overlapping bytes:
+ // si already hashed, add everything from si+1 up to the match length.
+ winStart := si + 1
+ if ws := si + mLen - winSize; ws > winStart {
+ winStart = ws
+ }
+ for si, ml := winStart, si+mLen; si < ml; {
+ match >>= 8
+ match |= uint32(src[si+3]) << 24
+ h := blockHashHC(match)
+ c.chainTable[si&winMask] = c.hashTable[h]
+ c.hashTable[h] = si
+ si++
+ }
+
+ lLen := si - anchor
+ si += mLen
+ mLen -= minMatch // Match length does not include minMatch.
+
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // Encode literals length.
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ di++
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(l)
+ }
+ di++
+
+ // Literals.
+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+ di += lLen
+ anchor = si
+
+ // Encode offset.
+ di += 2
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // Encode match length part 2.
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(mLen)
+ di++
+ }
+ }
+
+ if isNotCompressible && anchor == 0 {
+ // Incompressible.
+ return 0, nil
+ }
+
+ // Last literals.
+lastLiterals:
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ di++
+ lLen -= 0xF
+ for ; lLen >= 0xFF; lLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(lLen)
+ }
+ di++
+
+ // Write the last literals.
+ if isNotCompressible && di >= anchor {
+ // Incompressible.
+ return 0, nil
+ }
+ di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+ return di, nil
+}
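`CompressBlock` and `UncompressBlock` form the block-level round trip: size the destination with `CompressBlockBound` so a short buffer is not mistaken for incompressible input, compress, then decompress into a buffer of the original length. The sketch below calls the functions exactly as declared in this file; note that `internal/lz4block` is an internal package, so real application code would go through the public `lz4` package instead.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	// Internal package: importable only from within the lz4 module itself.
	"github.com/pierrec/lz4/v4/internal/lz4block"
)

func main() {
	src := bytes.Repeat([]byte("hello lz4 block "), 64)

	// Worst-case destination size; with a smaller dst, CompressBlock reports
	// incompressible input as "0, nil" instead of compressing it.
	dst := make([]byte, lz4block.CompressBlockBound(len(src)))
	n, err := lz4block.CompressBlock(src, dst)
	if err != nil {
		log.Fatal(err)
	}

	out := make([]byte, len(src))
	m, err := lz4block.UncompressBlock(dst[:n], out, nil) // nil: no dictionary
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m == len(src) && bytes.Equal(out[:m], src)) // true
}
```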
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
new file mode 100644
index 000000000..a1bfa99e4
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
@@ -0,0 +1,90 @@
+// Package lz4block provides LZ4 BlockSize types and pools of buffers.
+package lz4block
+
+import "sync"
+
+const (
+ Block64Kb uint32 = 1 << (16 + iota*2)
+ Block256Kb
+ Block1Mb
+ Block4Mb
+)
+
+// In legacy mode all blocks are compressed regardless
+// of the compressed size: use the bound size.
+var Block8Mb = uint32(CompressBlockBound(8 << 20))
+
+var (
+ BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
+ BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
+ BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
+ BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
+ BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }}
+)
+
+func Index(b uint32) BlockSizeIndex {
+ switch b {
+ case Block64Kb:
+ return 4
+ case Block256Kb:
+ return 5
+ case Block1Mb:
+ return 6
+ case Block4Mb:
+ return 7
+ case Block8Mb: // only valid in legacy mode
+ return 3
+ }
+ return 0
+}
+
+func IsValid(b uint32) bool {
+ return Index(b) > 0
+}
+
+type BlockSizeIndex uint8
+
+func (b BlockSizeIndex) IsValid() bool {
+ switch b {
+ case 4, 5, 6, 7:
+ return true
+ }
+ return false
+}
+
+func (b BlockSizeIndex) Get() []byte {
+ var buf interface{}
+ switch b {
+ case 4:
+ buf = BlockPool64K.Get()
+ case 5:
+ buf = BlockPool256K.Get()
+ case 6:
+ buf = BlockPool1M.Get()
+ case 7:
+ buf = BlockPool4M.Get()
+ case 3:
+ buf = BlockPool8M.Get()
+ }
+ return buf.([]byte)
+}
+
+func Put(buf []byte) {
+ // Safeguard: do not allow invalid buffers.
+ switch c := cap(buf); uint32(c) {
+ case Block64Kb:
+ BlockPool64K.Put(buf[:c])
+ case Block256Kb:
+ BlockPool256K.Put(buf[:c])
+ case Block1Mb:
+ BlockPool1M.Put(buf[:c])
+ case Block4Mb:
+ BlockPool4M.Put(buf[:c])
+ case Block8Mb:
+ BlockPool8M.Put(buf[:c])
+ }
+}
+
+type CompressionLevel uint32
+
+const Fast CompressionLevel = 0
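The pools in `blocks.go` hand out fixed-size buffers keyed by a `BlockSizeIndex`, so frame readers and writers can recycle block buffers instead of allocating one per block. A minimal usage sketch (same internal-package caveat as above):

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4/v4/internal/lz4block"
)

func main() {
	// Map the 64 KiB frame block size to its pool index.
	idx := lz4block.Index(lz4block.Block64Kb)
	fmt.Println(idx.IsValid()) // true

	buf := idx.Get()      // borrowed from BlockPool64K
	fmt.Println(len(buf)) // 65536

	// Put only recycles buffers whose capacity matches a known block size.
	lz4block.Put(buf)
}
```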
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
new file mode 100644
index 000000000..1d00133fa
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
@@ -0,0 +1,448 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// AX scratch
+// BX scratch
+// CX literal and match lengths
+// DX token, match offset
+//
+// DI &dst
+// SI &src
+// R8 &dst + len(dst)
+// R9 &src + len(src)
+// R11 &dst
+// R12 short output end
+// R13 short input end
+// R14 &dict
+// R15 len(dict)
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOSPLIT, $48-80
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, R11
+ MOVQ dst_len+8(FP), R8
+ ADDQ DI, R8
+
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R9
+ CMPQ R9, $0
+ JE err_corrupt
+ ADDQ SI, R9
+
+ MOVQ dict_base+48(FP), R14
+ MOVQ dict_len+56(FP), R15
+
+ // shortcut ends
+ // short output end
+ MOVQ R8, R12
+ SUBQ $32, R12
+ // short input end
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+ XORL CX, CX
+
+loop:
+ // token := uint32(src[si])
+ MOVBLZX (SI), DX
+ INCQ SI
+
+ // lit_len = token >> 4
+ // if lit_len > 0
+ // CX = lit_len
+ MOVL DX, CX
+ SHRL $4, CX
+
+ // if lit_len != 0xF
+ CMPL CX, $0xF
+ JEQ lit_len_loop
+ CMPQ DI, R12
+ JAE copy_literal
+ CMPQ SI, R13
+ JAE copy_literal
+
+ // copy shortcut
+
+ // A two-stage shortcut for the most common case:
+ // 1) If the literal length is 0..14, and there is enough space,
+ // enter the shortcut and copy 16 bytes on behalf of the literals
+ // (in the fast mode, only 8 bytes can be safely copied this way).
+ // 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ // manner; but we ensure that there's enough space in the output for
+ // those 18 bytes earlier, upon entering the shortcut (in other words,
+ // there is a combined check for both stages).
+
+ // copy literal
+ MOVOU (SI), X0
+ MOVOU X0, (DI)
+ ADDQ CX, DI
+ ADDQ CX, SI
+
+ MOVL DX, CX
+ ANDL $0xF, CX
+
+ // The second stage: prepare for match copying, decode full info.
+ // If it doesn't work out, the info won't be wasted.
+ // offset := uint16(data[:2])
+ MOVWLZX (SI), DX
+ TESTL DX, DX
+ JE err_corrupt
+ ADDQ $2, SI
+ JC err_short_buf
+
+ MOVQ DI, AX
+ SUBQ DX, AX
+ JC err_corrupt
+ CMPQ AX, DI
+ JA err_short_buf
+
+ // if we can't do the second stage then jump straight to read the
+ // match length, we already have the offset.
+ CMPL CX, $0xF
+ JEQ match_len_loop_pre
+ CMPL DX, $8
+ JLT match_len_loop_pre
+ CMPQ AX, R11
+ JB match_len_loop_pre
+
+ // memcpy(op + 0, match + 0, 8);
+ MOVQ (AX), BX
+ MOVQ BX, (DI)
+ // memcpy(op + 8, match + 8, 8);
+ MOVQ 8(AX), BX
+ MOVQ BX, 8(DI)
+ // memcpy(op +16, match +16, 2);
+ MOVW 16(AX), BX
+ MOVW BX, 16(DI)
+
+ LEAQ const_minMatch(DI)(CX*1), DI
+
+ // shortcut complete, load next token
+ JMP loopcheck
+
+ // Read the rest of the literal length:
+ // do { BX = src[si++]; lit_len += BX } while (BX == 0xFF).
+lit_len_loop:
+ CMPQ SI, R9
+ JAE err_short_buf
+
+ MOVBLZX (SI), BX
+ INCQ SI
+ ADDQ BX, CX
+
+ CMPB BX, $0xFF
+ JE lit_len_loop
+
+copy_literal:
+ // bounds check src and dst
+ MOVQ SI, AX
+ ADDQ CX, AX
+ JC err_short_buf
+ CMPQ AX, R9
+ JA err_short_buf
+
+ MOVQ DI, BX
+ ADDQ CX, BX
+ JC err_short_buf
+ CMPQ BX, R8
+ JA err_short_buf
+
+ // Copy literals of <=48 bytes through the XMM registers.
+ CMPQ CX, $48
+ JGT memmove_lit
+
+ // if len(dst[di:]) < 48
+ MOVQ R8, AX
+ SUBQ DI, AX
+ CMPQ AX, $48
+ JLT memmove_lit
+
+ // if len(src[si:]) < 48
+ MOVQ R9, BX
+ SUBQ SI, BX
+ CMPQ BX, $48
+ JLT memmove_lit
+
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU 32(SI), X2
+ MOVOU X0, (DI)
+ MOVOU X1, 16(DI)
+ MOVOU X2, 32(DI)
+
+ ADDQ CX, SI
+ ADDQ CX, DI
+
+ JMP finish_lit_copy
+
+memmove_lit:
+ // memmove(to, from, len)
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+
+ // Spill registers. Increment SI, DI now so we don't need to save CX.
+ ADDQ CX, DI
+ ADDQ CX, SI
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVL DX, 40(SP)
+
+ CALL runtime·memmove(SB)
+
+ // restore registers
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVL 40(SP), DX
+
+ // recalc initial values
+ MOVQ dst_base+0(FP), R8
+ MOVQ R8, R11
+ ADDQ dst_len+8(FP), R8
+ MOVQ src_base+24(FP), R9
+ ADDQ src_len+32(FP), R9
+ MOVQ dict_base+48(FP), R14
+ MOVQ dict_len+56(FP), R15
+ MOVQ R8, R12
+ SUBQ $32, R12
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+finish_lit_copy:
+ // CX := mLen
+ // free up DX to use for offset
+ MOVL DX, CX
+ ANDL $0xF, CX
+
+ CMPQ SI, R9
+ JAE end
+
+ // offset
+ // si += 2
+ // DX := int(src[si-2]) | int(src[si-1])<<8
+ ADDQ $2, SI
+ JC err_short_buf
+ CMPQ SI, R9
+ JA err_short_buf
+ MOVWQZX -2(SI), DX
+
+ // 0 offset is invalid
+ TESTL DX, DX
+ JEQ err_corrupt
+
+match_len_loop_pre:
+ // if mlen != 0xF
+ CMPB CX, $0xF
+ JNE copy_match
+
+ // do { BX = src[si++]; mlen += BX } while (BX == 0xFF).
+match_len_loop:
+ CMPQ SI, R9
+ JAE err_short_buf
+
+ MOVBLZX (SI), BX
+ INCQ SI
+ ADDQ BX, CX
+
+ CMPB BX, $0xFF
+ JE match_len_loop
+
+copy_match:
+ ADDQ $const_minMatch, CX
+
+ // check we have match_len bytes left in dst
+ // di+match_len < len(dst)
+ MOVQ DI, AX
+ ADDQ CX, AX
+ JC err_short_buf
+ CMPQ AX, R8
+ JA err_short_buf
+
+ // DX = offset
+ // CX = match_len
+ // BX = &dst + (di - offset)
+ MOVQ DI, BX
+ SUBQ DX, BX
+
+ // check BX is within dst
+ // if BX < &dst
+ JC copy_match_from_dict
+ CMPQ BX, R11
+ JBE copy_match_from_dict
+
+ // if offset + match_len < di
+ LEAQ (BX)(CX*1), AX
+ CMPQ DI, AX
+ JA copy_interior_match
+
+ // AX := len(dst[:di])
+ // MOVQ DI, AX
+ // SUBQ R11, AX
+
+ // copy 16 bytes at a time
+ // if di-offset < 16 copy 16-(di-offset) bytes to di
+ // then do the remaining
+
+copy_match_loop:
+ // for match_len >= 0
+ // dst[di] = dst[i]
+ // di++
+ // i++
+ MOVB (BX), AX
+ MOVB AX, (DI)
+ INCQ DI
+ INCQ BX
+ DECQ CX
+ JNZ copy_match_loop
+
+ JMP loopcheck
+
+copy_interior_match:
+ CMPQ CX, $16
+ JGT memmove_match
+
+ // if len(dst[di:]) < 16
+ MOVQ R8, AX
+ SUBQ DI, AX
+ CMPQ AX, $16
+ JLT memmove_match
+
+ MOVOU (BX), X0
+ MOVOU X0, (DI)
+
+ ADDQ CX, DI
+ XORL CX, CX
+ JMP loopcheck
+
+copy_match_from_dict:
+ // CX = match_len
+ // BX = &dst + (di - offset)
+
+ // AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
+ MOVQ R11, AX
+ SUBQ BX, AX
+
+ // BX = len(dict) - dict_bytes_available
+ MOVQ R15, BX
+ SUBQ AX, BX
+ JS err_short_dict
+
+ ADDQ R14, BX
+
+ // if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy
+ CMPQ CX, AX
+ JLT memmove_match
+
+ // The match stretches over the dictionary and our block
+ // 1) copy what comes from the dictionary
+ // AX = dict_bytes_available = copy_size
+ // BX = &dict_end - copy_size
+ // CX = match_len
+
+ // memmove(to, from, len)
+ MOVQ DI, 0(SP)
+ MOVQ BX, 8(SP)
+ MOVQ AX, 16(SP)
+ // store extra stuff we want to recover
+ // spill
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // restore registers
+ MOVQ 16(SP), AX // copy_size
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX // match_len
+
+ // recalc initial values
+ MOVQ dst_base+0(FP), R8
+ MOVQ R8, R11 // TODO: make these sensible numbers
+ ADDQ dst_len+8(FP), R8
+ MOVQ src_base+24(FP), R9
+ ADDQ src_len+32(FP), R9
+ MOVQ dict_base+48(FP), R14
+ MOVQ dict_len+56(FP), R15
+ MOVQ R8, R12
+ SUBQ $32, R12
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+ // di+=copy_size
+ ADDQ AX, DI
+
+ // 2) copy the rest from the current block
+ // CX = match_len - copy_size = rest_size
+ SUBQ AX, CX
+ MOVQ R11, BX
+
+ // check if we have a copy overlap
+ // AX = &dst + rest_size
+ MOVQ CX, AX
+ ADDQ BX, AX
+ // if &dst + rest_size > di, copy byte by byte
+ CMPQ AX, DI
+
+ JA copy_match_loop
+
+memmove_match:
+ // memmove(to, from, len)
+ MOVQ DI, 0(SP)
+ MOVQ BX, 8(SP)
+ MOVQ CX, 16(SP)
+
+ // Spill registers. Increment DI now so we don't need to save CX.
+ ADDQ CX, DI
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+
+ CALL runtime·memmove(SB)
+
+ // restore registers
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+
+ // recalc initial values
+ MOVQ dst_base+0(FP), R8
+ MOVQ R8, R11 // TODO: make these sensible numbers
+ ADDQ dst_len+8(FP), R8
+ MOVQ src_base+24(FP), R9
+ ADDQ src_len+32(FP), R9
+ MOVQ R8, R12
+ SUBQ $32, R12
+ MOVQ R9, R13
+ SUBQ $16, R13
+ MOVQ dict_base+48(FP), R14
+ MOVQ dict_len+56(FP), R15
+ XORL CX, CX
+
+loopcheck:
+ // for si < len(src)
+ CMPQ SI, R9
+ JB loop
+
+end:
+ // Remaining length must be zero.
+ TESTQ CX, CX
+ JNE err_corrupt
+
+ SUBQ R11, DI
+ MOVQ DI, ret+72(FP)
+ RET
+
+err_corrupt:
+ MOVQ $-1, ret+72(FP)
+ RET
+
+err_short_buf:
+ MOVQ $-2, ret+72(FP)
+ RET
+
+err_short_dict:
+ MOVQ $-3, ret+72(FP)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
new file mode 100644
index 000000000..20b21fcf1
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
@@ -0,0 +1,231 @@
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst R0
+#define dstorig R1
+#define src R2
+#define dstend R3
+#define srcend R4
+#define match R5 // Match address.
+#define dictend R6
+#define token R7
+#define len R8 // Literal and match lengths.
+#define offset R7 // Match offset; overlaps with token.
+#define tmp1 R9
+#define tmp2 R11
+#define tmp3 R12
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40
+ MOVW dst_base +0(FP), dst
+ MOVW dst_len +4(FP), dstend
+ MOVW src_base +12(FP), src
+ MOVW src_len +16(FP), srcend
+
+ CMP $0, srcend
+ BEQ shortSrc
+
+ ADD dst, dstend
+ ADD src, srcend
+
+ MOVW dst, dstorig
+
+loop:
+ // Read token. Extract literal length.
+ MOVBU.P 1(src), token
+ MOVW token >> 4, len
+ CMP $15, len
+ BNE readLitlenDone
+
+readLitlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADD.S tmp1, len
+ BVS shortDst
+ CMP $255, tmp1
+ BEQ readLitlenLoop
+
+readLitlenDone:
+ CMP $0, len
+ BEQ copyLiteralDone
+
+ // Bounds check dst+len and src+len.
+ ADD.S dst, len, tmp1
+ ADD.CC.S src, len, tmp2
+ BCS shortSrc
+ CMP dstend, tmp1
+ //BHI shortDst // Uncomment for distinct error codes.
+ CMP.LS srcend, tmp2
+ BHI shortSrc
+
+ // Copy literal.
+ CMP $4, len
+ BLO copyLiteralFinish
+
+ // Copy 0-3 bytes until src is aligned.
+ TST $1, src
+ MOVBU.NE.P 1(src), tmp1
+ MOVB.NE.P tmp1, 1(dst)
+ SUB.NE $1, len
+
+ TST $2, src
+ MOVHU.NE.P 2(src), tmp2
+ MOVB.NE.P tmp2, 1(dst)
+ MOVW.NE tmp2 >> 8, tmp1
+ MOVB.NE.P tmp1, 1(dst)
+ SUB.NE $2, len
+
+ B copyLiteralLoopCond
+
+copyLiteralLoop:
+ // Aligned load, unaligned write.
+ MOVW.P 4(src), tmp1
+ MOVW tmp1 >> 8, tmp2
+ MOVB tmp2, 1(dst)
+ MOVW tmp1 >> 16, tmp3
+ MOVB tmp3, 2(dst)
+ MOVW tmp1 >> 24, tmp2
+ MOVB tmp2, 3(dst)
+ MOVB.P tmp1, 4(dst)
+copyLiteralLoopCond:
+ // Loop until len-4 < 0.
+ SUB.S $4, len
+ BPL copyLiteralLoop
+
+copyLiteralFinish:
+ // Copy remaining 0-3 bytes.
+ // At this point, len may be < 0, but len&3 is still accurate.
+ TST $1, len
+ MOVB.NE.P 1(src), tmp3
+ MOVB.NE.P tmp3, 1(dst)
+ TST $2, len
+ MOVB.NE.P 2(src), tmp1
+ MOVB.NE.P tmp1, 2(dst)
+ MOVB.NE -1(src), tmp2
+ MOVB.NE tmp2, -1(dst)
+
+copyLiteralDone:
+ // Initial part of match length.
+ // This frees up the token register for reuse as offset.
+ AND $15, token, len
+
+ CMP src, srcend
+ BEQ end
+
+ // Read offset.
+ ADD.S $2, src
+ BCS shortSrc
+ CMP srcend, src
+ BHI shortSrc
+ MOVBU -2(src), offset
+ MOVBU -1(src), tmp1
+ ORR.S tmp1 << 8, offset
+ BEQ corrupt
+
+ // Read rest of match length.
+ CMP $15, len
+ BNE readMatchlenDone
+
+readMatchlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADD.S tmp1, len
+ BVS shortDst
+ CMP $255, tmp1
+ BEQ readMatchlenLoop
+
+readMatchlenDone:
+ // Bounds check dst+len+minMatch.
+ ADD.S dst, len, tmp1
+ ADD.CC.S $const_minMatch, tmp1
+ BCS shortDst
+ CMP dstend, tmp1
+ BHI shortDst
+
+ RSB dst, offset, match
+ CMP dstorig, match
+ BGE copyMatch4
+
+ // match < dstorig means the match starts in the dictionary,
+ // at len(dict) - offset + (dst - dstorig).
+ MOVW dict_base+24(FP), match
+ MOVW dict_len +28(FP), dictend
+
+ ADD $const_minMatch, len
+
+ RSB dst, dstorig, tmp1
+ RSB dictend, offset, tmp2
+ ADD.S tmp2, tmp1
+ BMI shortDict
+ ADD match, dictend
+ ADD tmp1, match
+
+copyDict:
+ MOVBU.P 1(match), tmp1
+ MOVB.P tmp1, 1(dst)
+ SUB.S $1, len
+ CMP.NE match, dictend
+ BNE copyDict
+
+ // If the match extends beyond the dictionary, the rest is at dstorig.
+ CMP $0, len
+ BEQ copyMatchDone
+ MOVW dstorig, match
+ B copyMatch
+
+ // Copy a regular match.
+ // Since len+minMatch is at least four, we can do a 4× unrolled
+ // byte copy loop. Using MOVW instead of four byte loads is faster,
+ // but to remain portable we'd have to align match first, which is
+ // too expensive. By alternating loads and stores, we also handle
+ // the case offset < 4.
+copyMatch4:
+ SUB.S $4, len
+ MOVBU.P 4(match), tmp1
+ MOVB.P tmp1, 4(dst)
+ MOVBU -3(match), tmp2
+ MOVB tmp2, -3(dst)
+ MOVBU -2(match), tmp3
+ MOVB tmp3, -2(dst)
+ MOVBU -1(match), tmp1
+ MOVB tmp1, -1(dst)
+ BPL copyMatch4
+
+ // Restore len, which is now negative.
+ ADD.S $4, len
+ BEQ copyMatchDone
+
+copyMatch:
+ // Finish with a byte-at-a-time copy.
+ SUB.S $1, len
+ MOVBU.P 1(match), tmp2
+ MOVB.P tmp2, 1(dst)
+ BNE copyMatch
+
+copyMatchDone:
+ CMP src, srcend
+ BNE loop
+
+end:
+ CMP $0, len
+ BNE corrupt
+ SUB dstorig, dst, tmp1
+ MOVW tmp1, ret+36(FP)
+ RET
+
+ // The error cases have distinct labels so we can put different
+ // return codes here when debugging, or if the error returns need to
+ // be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+ MOVW $-1, tmp1
+ MOVW tmp1, ret+36(FP)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
new file mode 100644
index 000000000..c43e8a8d2
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
@@ -0,0 +1,230 @@
+// +build gc
+// +build !noasm
+
+// This implementation assumes that strict alignment checking is turned off.
+// The Go compiler makes the same assumption.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst R0
+#define dstorig R1
+#define src R2
+#define dstend R3
+#define dstend16 R4 // dstend - 16
+#define srcend R5
+#define srcend16 R6 // srcend - 16
+#define match R7 // Match address.
+#define dict R8
+#define dictlen R9
+#define dictend R10
+#define token R11
+#define len R12 // Literal and match lengths.
+#define lenRem R13
+#define offset R14 // Match offset.
+#define tmp1 R15
+#define tmp2 R16
+#define tmp3 R17
+#define tmp4 R19
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80
+ LDP dst_base+0(FP), (dst, dstend)
+ ADD dst, dstend
+ MOVD dst, dstorig
+
+ LDP src_base+24(FP), (src, srcend)
+ CBZ srcend, shortSrc
+ ADD src, srcend
+
+ // dstend16 = max(dstend-16, 0) and similarly for srcend16.
+ SUBS $16, dstend, dstend16
+ CSEL LO, ZR, dstend16, dstend16
+ SUBS $16, srcend, srcend16
+ CSEL LO, ZR, srcend16, srcend16
+
+ LDP dict_base+48(FP), (dict, dictlen)
+ ADD dict, dictlen, dictend
+
+loop:
+ // Read token. Extract literal length.
+ MOVBU.P 1(src), token
+ LSR $4, token, len
+ CMP $15, len
+ BNE readLitlenDone
+
+readLitlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADDS tmp1, len
+ BVS shortDst
+ CMP $255, tmp1
+ BEQ readLitlenLoop
+
+readLitlenDone:
+ CBZ len, copyLiteralDone
+
+ // Bounds check dst+len and src+len.
+ ADDS dst, len, tmp1
+ BCS shortSrc
+ ADDS src, len, tmp2
+ BCS shortSrc
+ CMP dstend, tmp1
+ BHI shortDst
+ CMP srcend, tmp2
+ BHI shortSrc
+
+ // Copy literal.
+ SUBS $16, len
+ BLO copyLiteralShort
+
+copyLiteralLoop:
+ LDP.P 16(src), (tmp1, tmp2)
+ STP.P (tmp1, tmp2), 16(dst)
+ SUBS $16, len
+ BPL copyLiteralLoop
+
+ // Copy (final part of) literal of length 0-15.
+ // If we have >=16 bytes left in src and dst, just copy 16 bytes.
+copyLiteralShort:
+ CMP dstend16, dst
+ CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO).
+ BHS copyLiteralShortEnd
+
+ AND $15, len
+
+ LDP (src), (tmp1, tmp2)
+ ADD len, src
+ STP (tmp1, tmp2), (dst)
+ ADD len, dst
+
+ B copyLiteralDone
+
+ // Safe but slow copy near the end of src, dst.
+copyLiteralShortEnd:
+ TBZ $3, len, 3(PC)
+ MOVD.P 8(src), tmp1
+ MOVD.P tmp1, 8(dst)
+ TBZ $2, len, 3(PC)
+ MOVW.P 4(src), tmp2
+ MOVW.P tmp2, 4(dst)
+ TBZ $1, len, 3(PC)
+ MOVH.P 2(src), tmp3
+ MOVH.P tmp3, 2(dst)
+ TBZ $0, len, 3(PC)
+ MOVBU.P 1(src), tmp4
+ MOVB.P tmp4, 1(dst)
+
+copyLiteralDone:
+ // Initial part of match length.
+ AND $15, token, len
+
+ CMP src, srcend
+ BEQ end
+
+ // Read offset.
+ ADDS $2, src
+ BCS shortSrc
+ CMP srcend, src
+ BHI shortSrc
+ MOVHU -2(src), offset
+ CBZ offset, corrupt
+
+ // Read rest of match length.
+ CMP $15, len
+ BNE readMatchlenDone
+
+readMatchlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADDS tmp1, len
+ BVS shortDst
+ CMP $255, tmp1
+ BEQ readMatchlenLoop
+
+readMatchlenDone:
+ ADD $const_minMatch, len
+
+ // Bounds check dst+len.
+ ADDS dst, len, tmp2
+ BCS shortDst
+ CMP dstend, tmp2
+ BHI shortDst
+
+ SUB offset, dst, match
+ CMP dstorig, match
+ BHS copyMatchTry8
+
+ // match < dstorig means the match starts in the dictionary,
+ // at len(dict) - offset + (dst - dstorig).
+ SUB dstorig, dst, tmp1
+ SUB offset, dictlen, tmp2
+ ADDS tmp2, tmp1
+ BMI shortDict
+ ADD dict, tmp1, match
+
+copyDict:
+ MOVBU.P 1(match), tmp3
+ MOVB.P tmp3, 1(dst)
+ SUBS $1, len
+ CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag.
+ BNE copyDict
+
+ CBZ len, copyMatchDone
+
+ // If the match extends beyond the dictionary, the rest is at dstorig.
+ // Recompute the offset for the next check.
+ MOVD dstorig, match
+ SUB dstorig, dst, offset
+
+copyMatchTry8:
+ // Copy doublewords if both len and offset are at least eight.
+ // A 16-at-a-time loop doesn't provide a further speedup.
+ CMP $8, len
+ CCMP HS, offset, $8, $0
+ BLO copyMatchLoop1
+
+ AND $7, len, lenRem
+ SUB $8, len
+copyMatchLoop8:
+ MOVD.P 8(match), tmp1
+ MOVD.P tmp1, 8(dst)
+ SUBS $8, len
+ BPL copyMatchLoop8
+
+ MOVD (match)(len), tmp2 // match+len == match+lenRem-8.
+ ADD lenRem, dst
+ MOVD $0, len
+ MOVD tmp2, -8(dst)
+ B copyMatchDone
+
+copyMatchLoop1:
+ // Byte-at-a-time copy for small offsets.
+ MOVBU.P 1(match), tmp2
+ MOVB.P tmp2, 1(dst)
+ SUBS $1, len
+ BNE copyMatchLoop1
+
+copyMatchDone:
+ CMP src, srcend
+ BNE loop
+
+end:
+ CBNZ len, corrupt
+ SUB dstorig, dst, tmp1
+ MOVD tmp1, ret+72(FP)
+ RET
+
+ // The error cases have distinct labels so we can put different
+ // return codes here when debugging, or if the error returns need to
+ // be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+ MOVD $-1, tmp1
+ MOVD tmp1, ret+72(FP)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
new file mode 100644
index 000000000..8d9023d10
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
@@ -0,0 +1,10 @@
+//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm
+// +build amd64 arm arm64
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package lz4block
+
+//go:noescape
+func decodeBlock(dst, src, dict []byte) int
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
new file mode 100644
index 000000000..9f568fbb1
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
@@ -0,0 +1,139 @@
+//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm
+// +build !amd64,!arm,!arm64 appengine !gc noasm
+
+package lz4block
+
+import (
+ "encoding/binary"
+)
+
+func decodeBlock(dst, src, dict []byte) (ret int) {
+ // Restrict capacities so we don't read or write out of bounds.
+ dst = dst[:len(dst):len(dst)]
+ src = src[:len(src):len(src)]
+
+ const hasError = -2
+
+ if len(src) == 0 {
+ return hasError
+ }
+
+ defer func() {
+ if recover() != nil {
+ ret = hasError
+ }
+ }()
+
+ var si, di uint
+ for si < uint(len(src)) {
+ // Literals and match lengths (token).
+ b := uint(src[si])
+ si++
+
+ // Literals.
+ if lLen := b >> 4; lLen > 0 {
+ switch {
+ case lLen < 0xF && si+16 < uint(len(src)):
+ // Shortcut 1
+ // if we have enough room in src and dst, and the literals length
+ // is small enough (0..14) then copy all 16 bytes, even if not all
+ // are part of the literals.
+ copy(dst[di:], src[si:si+16])
+ si += lLen
+ di += lLen
+ if mLen := b & 0xF; mLen < 0xF {
+ // Shortcut 2
+ // if the match length (4..18) fits within the literals, then copy
+ // all 18 bytes, even if not all are part of the literals.
+ mLen += 4
+ if offset := u16(src[si:]); mLen <= offset && offset < di {
+ i := di - offset
+ // The remaining buffer may not hold 18 bytes.
+ // See https://github.com/pierrec/lz4/issues/51.
+ if end := i + 18; end <= uint(len(dst)) {
+ copy(dst[di:], dst[i:end])
+ si += 2
+ di += mLen
+ continue
+ }
+ }
+ }
+ case lLen == 0xF:
+ for {
+ x := uint(src[si])
+ if lLen += x; int(lLen) < 0 {
+ return hasError
+ }
+ si++
+ if x != 0xFF {
+ break
+ }
+ }
+ fallthrough
+ default:
+ copy(dst[di:di+lLen], src[si:si+lLen])
+ si += lLen
+ di += lLen
+ }
+ }
+
+ mLen := b & 0xF
+ if si == uint(len(src)) && mLen == 0 {
+ break
+ } else if si >= uint(len(src)) {
+ return hasError
+ }
+
+ offset := u16(src[si:])
+ if offset == 0 {
+ return hasError
+ }
+ si += 2
+
+ // Match.
+ mLen += minMatch
+ if mLen == minMatch+0xF {
+ for {
+ x := uint(src[si])
+ if mLen += x; int(mLen) < 0 {
+ return hasError
+ }
+ si++
+ if x != 0xFF {
+ break
+ }
+ }
+ }
+
+ // Copy the match.
+ if di < offset {
+ // The match is beyond our block, meaning the first part
+ // is in the dictionary.
+ fromDict := dict[uint(len(dict))+di-offset:]
+ n := uint(copy(dst[di:di+mLen], fromDict))
+ di += n
+ if mLen -= n; mLen == 0 {
+ continue
+ }
+ // We copied n = offset-di bytes from the dictionary,
+ // then set di = di+n = offset, so the following code
+ // copies from dst[di-offset:] = dst[0:].
+ }
+
+ expanded := dst[di-offset:]
+ if mLen > offset {
+ // Efficiently copy the match dst[di-offset:di] into the dst slice.
+ bytesToCopy := offset * (mLen / offset)
+ for n := offset; n <= bytesToCopy+offset; n *= 2 {
+ copy(expanded[n:], expanded[:n])
+ }
+ di += bytesToCopy
+ mLen -= bytesToCopy
+ }
+ di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
+ }
+
+ return int(di)
+}
+
+func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) }
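The match-expansion loop above (the `bytesToCopy` doubling) is the subtle part of the portable decoder: when the match length exceeds the offset, the already-written prefix is copied over itself, doubling the copied span on each pass. A minimal standalone sketch of that idea, with illustrative names that are not part of the vendored package:

package main

import "fmt"

// expandOverlap appends `length` bytes to dst by repeating its last `offset`
// bytes, doubling the copied span on each pass, like the decoder's match loop.
func expandOverlap(dst []byte, offset, length int) []byte {
	start := len(dst) - offset
	dst = append(dst, make([]byte, length)...)
	expanded := dst[start:]
	for n := offset; n < offset+length; {
		n += copy(expanded[n:], expanded[:n])
	}
	return dst
}

func main() {
	// "ab" (offset 2) extended by 7 bytes yields "ababababa".
	fmt.Println(string(expandOverlap([]byte("ab"), 2, 7)))
}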
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
new file mode 100644
index 000000000..710ea4281
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
@@ -0,0 +1,19 @@
+package lz4errors
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+ ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short"
+ ErrInvalidFrame Error = "lz4: bad magic number"
+ ErrInternalUnhandledState Error = "lz4: unhandled state"
+ ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum"
+ ErrInvalidBlockChecksum Error = "lz4: invalid block checksum"
+ ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum"
+ ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
+ ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object"
+ ErrOptionInvalidBlockSize Error = "lz4: invalid block size"
+ ErrOptionNotApplicable Error = "lz4: option not applicable"
+ ErrWriterNotClosed Error = "lz4: writer not closed"
+)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
new file mode 100644
index 000000000..459086f09
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
@@ -0,0 +1,350 @@
+package lz4stream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+type Blocks struct {
+ Block *FrameDataBlock
+ Blocks chan chan *FrameDataBlock
+ mu sync.Mutex
+ err error
+}
+
+func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
+ if num == 1 {
+ b.Blocks = nil
+ b.Block = NewFrameDataBlock(f)
+ return
+ }
+ b.Block = nil
+ if cap(b.Blocks) != num {
+ b.Blocks = make(chan chan *FrameDataBlock, num)
+ }
+ // goroutine managing concurrent block compression goroutines.
+ go func() {
+ // Process next block compression item.
+ for c := range b.Blocks {
+ // Read the next compressed block result.
+ // Waiting here ensures that the blocks are output in the order they were sent.
+ // The incoming channel is always closed as it indicates to the caller that
+ // the block has been processed.
+ block := <-c
+ if block == nil {
+ // Notify the block compression routine that we are done with its result.
+ // This is used when a sentinel block is sent to terminate the compression.
+ close(c)
+ return
+ }
+ // Do not attempt to write the block upon any previous failure.
+ if b.err == nil {
+ // Write the block.
+ if err := block.Write(f, dst); err != nil {
+ // Keep the first error.
+ b.err = err
+ // All pending compression goroutines need to shut down, so we need to keep going.
+ }
+ }
+ close(c)
+ }
+ }()
+}
+
+func (b *Blocks) close(f *Frame, num int) error {
+ if num == 1 {
+ if b.Block != nil {
+ b.Block.Close(f)
+ }
+ err := b.err
+ b.err = nil
+ return err
+ }
+ if b.Blocks == nil {
+ err := b.err
+ b.err = nil
+ return err
+ }
+ c := make(chan *FrameDataBlock)
+ b.Blocks <- c
+ c <- nil
+ <-c
+ err := b.err
+ b.err = nil
+ return err
+}
+
+// ErrorR returns any error set while uncompressing a stream.
+func (b *Blocks) ErrorR() error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.err
+}
+
+// initR returns a channel that streams the uncompressed blocks if in concurrent
+// mode and no error. When the channel is closed, check for any error with b.ErrorR.
+//
+// If not in concurrent mode, the uncompressed block is b.Block and the returned error
+// needs to be checked.
+func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
+ size := f.Descriptor.Flags.BlockSizeIndex()
+ if num == 1 {
+ b.Blocks = nil
+ b.Block = NewFrameDataBlock(f)
+ return nil, nil
+ }
+ b.Block = nil
+ blocks := make(chan chan []byte, num)
+ // data receives the uncompressed blocks.
+ data := make(chan []byte)
+ // Read blocks from the source sequentially
+ // and uncompress them concurrently.
+
+ // In legacy mode, accrue the uncompressed sizes in cum.
+ var cum uint32
+ go func() {
+ var cumx uint32
+ var err error
+ for b.ErrorR() == nil {
+ block := NewFrameDataBlock(f)
+ cumx, err = block.Read(f, src, 0)
+ if err != nil {
+ block.Close(f)
+ break
+ }
+ // Recheck for an error as reading may be slow and uncompressing is expensive.
+ if b.ErrorR() != nil {
+ block.Close(f)
+ break
+ }
+ c := make(chan []byte)
+ blocks <- c
+ go func() {
+ defer block.Close(f)
+ data, err := block.Uncompress(f, size.Get(), nil, false)
+ if err != nil {
+ b.closeR(err)
+ // Close the block channel to indicate an error.
+ close(c)
+ } else {
+ c <- data
+ }
+ }()
+ }
+ // End the collection loop and the data channel.
+ c := make(chan []byte)
+ blocks <- c
+ c <- nil // signal the collection loop that we are done
+ <-c // wait for the collect loop to complete
+ if f.isLegacy() && cum == cumx {
+ err = io.EOF
+ }
+ b.closeR(err)
+ close(data)
+ }()
+ // Collect the uncompressed blocks and make them available
+ // on the returned channel.
+ go func(leg bool) {
+ defer close(blocks)
+ skipBlocks := false
+ for c := range blocks {
+ buf, ok := <-c
+ if !ok {
+ // A closed channel indicates an error.
+ // All remaining channels should be discarded.
+ skipBlocks = true
+ continue
+ }
+ if buf == nil {
+ // Signal to end the loop.
+ close(c)
+ return
+ }
+ if skipBlocks {
+ // A previous error has occurred, skipping remaining channels.
+ continue
+ }
+ // Perform checksum now as the blocks are received in order.
+ if f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(buf)
+ }
+ if leg {
+ cum += uint32(len(buf))
+ }
+ data <- buf
+ close(c)
+ }
+ }(f.isLegacy())
+ return data, nil
+}
+
+// closeR safely sets the error on b if not already set.
+func (b *Blocks) closeR(err error) {
+ b.mu.Lock()
+ if b.err == nil {
+ b.err = err
+ }
+ b.mu.Unlock()
+}
+
+func NewFrameDataBlock(f *Frame) *FrameDataBlock {
+ buf := f.Descriptor.Flags.BlockSizeIndex().Get()
+ return &FrameDataBlock{Data: buf, data: buf}
+}
+
+type FrameDataBlock struct {
+ Size DataBlockSize
+ Data []byte // compressed or uncompressed data (.data or .src)
+ Checksum uint32
+ data []byte // buffer for compressed data
+ src []byte // uncompressed data
+ err error // used in concurrent mode
+}
+
+func (b *FrameDataBlock) Close(f *Frame) {
+ b.Size = 0
+ b.Checksum = 0
+ b.err = nil
+ if b.data != nil {
+ // Block was not already closed.
+ lz4block.Put(b.data)
+ b.Data = nil
+ b.data = nil
+ b.src = nil
+ }
+}
+
+// Block compression errors are ignored since the buffer is sized appropriately.
+func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
+ data := b.data
+ if f.isLegacy() {
+ // In legacy mode, the buffer is sized according to CompressBlockBound,
+ // but only 8Mb is buffered for compression.
+ src = src[:8<<20]
+ } else {
+ data = data[:len(src)] // trigger the incompressible flag in CompressBlock
+ }
+ var n int
+ switch level {
+ case lz4block.Fast:
+ n, _ = lz4block.CompressBlock(src, data)
+ default:
+ n, _ = lz4block.CompressBlockHC(src, data, level)
+ }
+ if n == 0 {
+ b.Size.UncompressedSet(true)
+ b.Data = src
+ } else {
+ b.Size.UncompressedSet(false)
+ b.Data = data[:n]
+ }
+ b.Size.sizeSet(len(b.Data))
+ b.src = src // keep track of the source for content checksum
+
+ if f.Descriptor.Flags.BlockChecksum() {
+ b.Checksum = xxh32.ChecksumZero(src)
+ }
+ return b
+}
+
+func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
+ // Write is called in the same order as blocks are compressed,
+ // so content checksum must be done here.
+ if f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(b.src)
+ }
+ buf := f.buf[:]
+ binary.LittleEndian.PutUint32(buf, uint32(b.Size))
+ if _, err := dst.Write(buf[:4]); err != nil {
+ return err
+ }
+
+ if _, err := dst.Write(b.Data); err != nil {
+ return err
+ }
+
+ if b.Checksum == 0 {
+ return nil
+ }
+ binary.LittleEndian.PutUint32(buf, b.Checksum)
+ _, err := dst.Write(buf[:4])
+ return err
+}
+
+// Read updates b with the next block data, size and checksum if available.
+func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
+ x, err := f.readUint32(src)
+ if err != nil {
+ return 0, err
+ }
+ if f.isLegacy() {
+ switch x {
+ case frameMagicLegacy:
+ // Concatenated legacy frame.
+ return b.Read(f, src, cum)
+ case cum:
+   // Only works in non-concurrent mode; in concurrent mode
+   // it is handled separately.
+ // Linux kernel format appends the total uncompressed size at the end.
+ return 0, io.EOF
+ }
+ } else if x == 0 {
+ // Marker for end of stream.
+ return 0, io.EOF
+ }
+ b.Size = DataBlockSize(x)
+
+ size := b.Size.size()
+ if size > cap(b.data) {
+ return x, lz4errors.ErrOptionInvalidBlockSize
+ }
+ b.data = b.data[:size]
+ if _, err := io.ReadFull(src, b.data); err != nil {
+ return x, err
+ }
+ if f.Descriptor.Flags.BlockChecksum() {
+ sum, err := f.readUint32(src)
+ if err != nil {
+ return 0, err
+ }
+ b.Checksum = sum
+ }
+ return x, nil
+}
+
+func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) {
+ if b.Size.Uncompressed() {
+ n := copy(dst, b.data)
+ dst = dst[:n]
+ } else {
+ n, err := lz4block.UncompressBlock(b.data, dst, dict)
+ if err != nil {
+ return nil, err
+ }
+ dst = dst[:n]
+ }
+ if f.Descriptor.Flags.BlockChecksum() {
+ if c := xxh32.ChecksumZero(dst); c != b.Checksum {
+ err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
+ return nil, err
+ }
+ }
+ if sum && f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(dst)
+ }
+ return dst, nil
+}
+
+func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
+ if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
+ return
+ }
+ x = binary.LittleEndian.Uint32(f.buf[:4])
+ return
+}
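The `Blocks.initW` goroutine above relies on a channel-of-channels pattern: each compression job enqueues its own result channel before starting, and a single collector drains those channels in enqueue order, so blocks are written in the order they were submitted even though they compress concurrently. A hedged, standalone sketch of the same pattern with invented names:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Each job gets its own result channel; the collector reads the channels
	// in the order they were enqueued, so output order matches input order.
	jobs := make(chan chan string, 4)
	done := make(chan struct{})

	go func() {
		for c := range jobs {
			fmt.Println(<-c) // blocks until this particular job has finished
		}
		close(done)
	}()

	for _, word := range []string{"alpha", "beta", "gamma"} {
		c := make(chan string, 1)
		jobs <- c
		go func(w string) { c <- strings.ToUpper(w) }(word)
	}
	close(jobs)
	<-done
}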
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
new file mode 100644
index 000000000..18192a943
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
@@ -0,0 +1,204 @@
+// Package lz4stream provides the types that support reading and writing LZ4 data streams.
+package lz4stream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+//go:generate go run gen.go
+
+const (
+ frameMagic uint32 = 0x184D2204
+ frameSkipMagic uint32 = 0x184D2A50
+ frameMagicLegacy uint32 = 0x184C2102
+)
+
+func NewFrame() *Frame {
+ return &Frame{}
+}
+
+type Frame struct {
+ buf [15]byte // holds the magic number and frame descriptor: at most 4+2+8+1 = 15 bytes
+ Magic uint32
+ Descriptor FrameDescriptor
+ Blocks Blocks
+ Checksum uint32
+ checksum xxh32.XXHZero
+}
+
+// Reset allows reusing the Frame.
+// The Descriptor configuration is not modified.
+func (f *Frame) Reset(num int) {
+ f.Magic = 0
+ f.Descriptor.Checksum = 0
+ f.Descriptor.ContentSize = 0
+ _ = f.Blocks.close(f, num)
+ f.Checksum = 0
+}
+
+func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
+ if legacy {
+ f.Magic = frameMagicLegacy
+ idx := lz4block.Index(lz4block.Block8Mb)
+ f.Descriptor.Flags.BlockSizeIndexSet(idx)
+ } else {
+ f.Magic = frameMagic
+ f.Descriptor.initW()
+ }
+ f.Blocks.initW(f, dst, num)
+ f.checksum.Reset()
+}
+
+func (f *Frame) CloseW(dst io.Writer, num int) error {
+ if err := f.Blocks.close(f, num); err != nil {
+ return err
+ }
+ if f.isLegacy() {
+ return nil
+ }
+ buf := f.buf[:0]
+ // End mark (data block size of uint32(0)).
+ buf = append(buf, 0, 0, 0, 0)
+ if f.Descriptor.Flags.ContentChecksum() {
+ buf = f.checksum.Sum(buf)
+ }
+ _, err := dst.Write(buf)
+ return err
+}
+
+func (f *Frame) isLegacy() bool {
+ return f.Magic == frameMagicLegacy
+}
+
+func (f *Frame) ParseHeaders(src io.Reader) error {
+ if f.Magic > 0 {
+ // Header already read.
+ return nil
+ }
+
+newFrame:
+ var err error
+ if f.Magic, err = f.readUint32(src); err != nil {
+ return err
+ }
+ switch m := f.Magic; {
+ case m == frameMagic || m == frameMagicLegacy:
+ // All 16 values of frameSkipMagic are valid.
+ case m>>8 == frameSkipMagic>>8:
+ skip, err := f.readUint32(src)
+ if err != nil {
+ return err
+ }
+ if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
+ return err
+ }
+ goto newFrame
+ default:
+ return lz4errors.ErrInvalidFrame
+ }
+ if err := f.Descriptor.initR(f, src); err != nil {
+ return err
+ }
+ f.checksum.Reset()
+ return nil
+}
+
+func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
+ return f.Blocks.initR(f, num, src)
+}
+
+func (f *Frame) CloseR(src io.Reader) (err error) {
+ if f.isLegacy() {
+ return nil
+ }
+ if !f.Descriptor.Flags.ContentChecksum() {
+ return nil
+ }
+ if f.Checksum, err = f.readUint32(src); err != nil {
+ return err
+ }
+ if c := f.checksum.Sum32(); c != f.Checksum {
+ return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
+ }
+ return nil
+}
+
+type FrameDescriptor struct {
+ Flags DescriptorFlags
+ ContentSize uint64
+ Checksum uint8
+}
+
+func (fd *FrameDescriptor) initW() {
+ fd.Flags.VersionSet(1)
+ fd.Flags.BlockIndependenceSet(true)
+}
+
+func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
+ if fd.Checksum > 0 {
+ // Header already written.
+ return nil
+ }
+
+ buf := f.buf[:4]
+ // Write the magic number here even though it belongs to the Frame.
+ binary.LittleEndian.PutUint32(buf, f.Magic)
+ if !f.isLegacy() {
+ buf = buf[:4+2]
+ binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))
+
+ if fd.Flags.Size() {
+ buf = buf[:4+2+8]
+ binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
+ }
+ fd.Checksum = descriptorChecksum(buf[4:])
+ buf = append(buf, fd.Checksum)
+ }
+
+ _, err := dst.Write(buf)
+ return err
+}
+
+func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
+ if f.isLegacy() {
+ idx := lz4block.Index(lz4block.Block8Mb)
+ f.Descriptor.Flags.BlockSizeIndexSet(idx)
+ return nil
+ }
+ // Read the flags and the checksum, hoping that there is no content size.
+ buf := f.buf[:3]
+ if _, err := io.ReadFull(src, buf); err != nil {
+ return err
+ }
+ descr := binary.LittleEndian.Uint16(buf)
+ fd.Flags = DescriptorFlags(descr)
+ if fd.Flags.Size() {
+ // Append the 8 missing bytes.
+ buf = buf[:3+8]
+ if _, err := io.ReadFull(src, buf[3:]); err != nil {
+ return err
+ }
+ fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
+ }
+ fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
+ buf = buf[:len(buf)-1] // all descriptor fields except checksum
+ if c := descriptorChecksum(buf); fd.Checksum != c {
+ return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
+ }
+ // Validate the elements that can be.
+ if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
+ return lz4errors.ErrOptionInvalidBlockSize
+ }
+ return nil
+}
+
+func descriptorChecksum(buf []byte) byte {
+ return byte(xxh32.ChecksumZero(buf) >> 8)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
new file mode 100644
index 000000000..d33a6be95
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
@@ -0,0 +1,103 @@
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+package lz4stream
+
+import "github.com/pierrec/lz4/v4/internal/lz4block"
+
+// DescriptorFlags is defined as follows:
+// field bits
+// ----- ----
+// _ 2
+// ContentChecksum 1
+// Size 1
+// BlockChecksum 1
+// BlockIndependence 1
+// Version 2
+// _ 4
+// BlockSizeIndex 3
+// _ 1
+type DescriptorFlags uint16
+
+// Getters.
+func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 }
+func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 }
+func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 }
+func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
+func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) }
+func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex {
+ return lz4block.BlockSizeIndex(x >> 12 & 0x7)
+}
+
+// Setters.
+func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags {
+ const b = 1 << 2
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags {
+ const b = 1 << 3
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags {
+ const b = 1 << 4
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags {
+ const b = 1 << 5
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags {
+ *x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6)
+ return x
+}
+func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags {
+ *x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12)
+ return x
+}
+
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+// DataBlockSize is defined as follows:
+// field bits
+// ----- ----
+// size 31
+// Uncompressed 1
+type DataBlockSize uint32
+
+// Getters.
+func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) }
+func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }
+
+// Setters.
+func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
+ *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
+ return x
+}
+func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
+ const b = 1 << 31
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
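As a hedged illustration of the bit layout documented above (not part of the generated file): the version field occupies bits 6-7 and the block size index bits 12-14 of the uint16, and index 7 corresponds to 4 MB blocks in the LZ4 frame specification.

package main

import "fmt"

func main() {
	var flags uint16
	flags = flags&^(0x3<<6) | 1<<6   // VersionSet(1): bits 6-7
	flags = flags&^(0x7<<12) | 7<<12 // BlockSizeIndexSet(7): bits 12-14, 7 = 4 MB blocks
	fmt.Printf("flags=%#04x version=%d blockSizeIndex=%d\n",
		flags, flags>>6&0x3, flags>>12&0x7)
}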
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
new file mode 100644
index 000000000..651d10c10
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,212 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
+// (ported from the reference implementation https://github.com/Cyan4973/xxHash/)
+package xxh32
+
+import (
+ "encoding/binary"
+)
+
+const (
+ prime1 uint32 = 2654435761
+ prime2 uint32 = 2246822519
+ prime3 uint32 = 3266489917
+ prime4 uint32 = 668265263
+ prime5 uint32 = 374761393
+
+ primeMask = 0xFFFFFFFF
+ prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+ prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+ v [4]uint32
+ totalLen uint64
+ buf [16]byte
+ bufused int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+ h32 := xxh.Sum32()
+ return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+ xxh.v[0] = prime1plus2
+ xxh.v[1] = prime2
+ xxh.v[2] = 0
+ xxh.v[3] = prime1minus
+ xxh.totalLen = 0
+ xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+ return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+ return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+ if xxh.totalLen == 0 {
+ xxh.Reset()
+ }
+ n := len(input)
+ m := xxh.bufused
+
+ xxh.totalLen += uint64(n)
+
+ r := len(xxh.buf) - m
+ if n < r {
+ copy(xxh.buf[m:], input)
+ xxh.bufused += len(input)
+ return n, nil
+ }
+
+ var buf *[16]byte
+ if m != 0 {
+ // some data left from previous update
+ buf = &xxh.buf
+ c := copy(buf[m:], input)
+ n -= c
+ input = input[c:]
+ }
+ update(&xxh.v, buf, input)
+ xxh.bufused = copy(xxh.buf[:], input[n-n%16:])
+
+ return n, nil
+}
+
+// Portable version of update. This updates v by processing all of buf
+// (if not nil) and all full 16-byte blocks of input.
+func updateGo(v *[4]uint32, buf *[16]byte, input []byte) {
+ // Causes compiler to work directly from registers instead of stack:
+ v1, v2, v3, v4 := v[0], v[1], v[2], v[3]
+
+ if buf != nil {
+ v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
+ }
+
+ for ; len(input) >= 16; input = input[16:] {
+ sub := input[:16] //BCE hint for compiler
+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+ }
+ v[0], v[1], v[2], v[3] = v1, v2, v3, v4
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+ h32 := uint32(xxh.totalLen)
+ if h32 >= 16 {
+ h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3])
+ } else {
+ h32 += prime5
+ }
+
+ p := 0
+ n := xxh.bufused
+ buf := xxh.buf
+ for n := n - 4; p <= n; p += 4 {
+ h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+ h32 = rol17(h32) * prime4
+ }
+ for ; p < n; p++ {
+ h32 += uint32(buf[p]) * prime5
+ h32 = rol11(h32) * prime1
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime2
+ h32 ^= h32 >> 13
+ h32 *= prime3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
+// Portable version of ChecksumZero.
+func checksumZeroGo(input []byte) uint32 {
+ n := len(input)
+ h32 := uint32(n)
+
+ if n < 16 {
+ h32 += prime5
+ } else {
+ v1 := prime1plus2
+ v2 := prime2
+ v3 := uint32(0)
+ v4 := prime1minus
+ p := 0
+ for n := n - 16; p <= n; p += 16 {
+ sub := input[p:][:16] //BCE hint for compiler
+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+ }
+ input = input[p:]
+ n -= p
+ h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ }
+
+ p := 0
+ for n := n - 4; p <= n; p += 4 {
+ h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+ h32 = rol17(h32) * prime4
+ }
+ for p < n {
+ h32 += uint32(input[p]) * prime5
+ h32 = rol11(h32) * prime1
+ p++
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime2
+ h32 ^= h32 >> 13
+ h32 *= prime3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
+func rol1(u uint32) uint32 {
+ return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+ return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+ return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+ return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+ return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+ return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+ return u<<18 | u>>14
+}
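A hedged sketch, written as if it were an in-package test (the package is internal, so code outside the module cannot import it): the streaming XXHZero hash and the one-shot ChecksumZero are expected to agree on the same input.

package xxh32

import "testing"

// TestStreamingMatchesOneShot checks that feeding data through XXHZero in two
// Write calls produces the same digest as the one-shot ChecksumZero helper.
func TestStreamingMatchesOneShot(t *testing.T) {
	data := []byte("the quick brown fox jumps over the lazy dog")
	var h XXHZero
	_, _ = h.Write(data[:10])
	_, _ = h.Write(data[10:])
	if got, want := h.Sum32(), ChecksumZero(data); got != want {
		t.Fatalf("streaming sum %08x, one-shot sum %08x", got, want)
	}
}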
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
new file mode 100644
index 000000000..0978b2665
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
@@ -0,0 +1,11 @@
+// +build !noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+//
+//go:noescape
+func ChecksumZero(input []byte) uint32
+
+//go:noescape
+func update(v *[4]uint32, buf *[16]byte, input []byte)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
new file mode 100644
index 000000000..c18ffd574
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
@@ -0,0 +1,251 @@
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define p R0
+#define n R1
+#define h R2
+#define v1 R2 // Alias for h.
+#define v2 R3
+#define v3 R4
+#define v4 R5
+#define x1 R6
+#define x2 R7
+#define x3 R8
+#define x4 R9
+
+// We need the primes in registers. The 16-byte loop only uses prime{1,2}.
+#define prime1r R11
+#define prime2r R12
+#define prime3r R3 // The rest can alias v{2-4}.
+#define prime4r R4
+#define prime5r R5
+
+// Update round macros. These read from and increment p.
+
+#define round16aligned \
+ MOVM.IA.W (p), [x1, x2, x3, x4] \
+ \
+ MULA x1, prime2r, v1, v1 \
+ MULA x2, prime2r, v2, v2 \
+ MULA x3, prime2r, v3, v3 \
+ MULA x4, prime2r, v4, v4 \
+ \
+ MOVW v1 @> 19, v1 \
+ MOVW v2 @> 19, v2 \
+ MOVW v3 @> 19, v3 \
+ MOVW v4 @> 19, v4 \
+ \
+ MUL prime1r, v1 \
+ MUL prime1r, v2 \
+ MUL prime1r, v3 \
+ MUL prime1r, v4 \
+
+#define round16unaligned \
+ MOVBU.P 16(p), x1 \
+ MOVBU -15(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -14(p), x3 \
+ MOVBU -13(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v1, v1 \
+ MOVW v1 @> 19, v1 \
+ MUL prime1r, v1 \
+ \
+ MOVBU -12(p), x1 \
+ MOVBU -11(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -10(p), x3 \
+ MOVBU -9(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v2, v2 \
+ MOVW v2 @> 19, v2 \
+ MUL prime1r, v2 \
+ \
+ MOVBU -8(p), x1 \
+ MOVBU -7(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -6(p), x3 \
+ MOVBU -5(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v3, v3 \
+ MOVW v3 @> 19, v3 \
+ MUL prime1r, v3 \
+ \
+ MOVBU -4(p), x1 \
+ MOVBU -3(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -2(p), x3 \
+ MOVBU -1(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v4, v4 \
+ MOVW v4 @> 19, v4 \
+ MUL prime1r, v4 \
+
+
+// func ChecksumZero([]byte) uint32
+TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16
+ MOVW input_base+0(FP), p
+ MOVW input_len+4(FP), n
+
+ MOVW $const_prime1, prime1r
+ MOVW $const_prime2, prime2r
+
+ // Set up h for n < 16. It's tempting to say {ADD prime5, n, h}
+ // here, but that's a pseudo-op that generates a load through R11.
+ MOVW $const_prime5, prime5r
+ ADD prime5r, n, h
+ CMP $0, n
+ BEQ end
+
+ // We let n go negative so we can do comparisons with SUB.S
+ // instead of separate CMP.
+ SUB.S $16, n
+ BMI loop16done
+
+ ADD prime1r, prime2r, v1
+ MOVW prime2r, v2
+ MOVW $0, v3
+ RSB $0, prime1r, v4
+
+ TST $3, p
+ BNE loop16unaligned
+
+loop16aligned:
+ SUB.S $16, n
+ round16aligned
+ BPL loop16aligned
+ B loop16finish
+
+loop16unaligned:
+ SUB.S $16, n
+ round16unaligned
+ BPL loop16unaligned
+
+loop16finish:
+ MOVW v1 @> 31, h
+ ADD v2 @> 25, h
+ ADD v3 @> 20, h
+ ADD v4 @> 14, h
+
+ // h += len(input) with v2 as temporary.
+ MOVW input_len+4(FP), v2
+ ADD v2, h
+
+loop16done:
+ ADD $16, n // Restore number of bytes left.
+
+ SUB.S $4, n
+ MOVW $const_prime3, prime3r
+ BMI loop4done
+ MOVW $const_prime4, prime4r
+
+ TST $3, p
+ BNE loop4unaligned
+
+loop4aligned:
+ SUB.S $4, n
+
+ MOVW.P 4(p), x1
+ MULA prime3r, x1, h, h
+ MOVW h @> 15, h
+ MUL prime4r, h
+
+ BPL loop4aligned
+ B loop4done
+
+loop4unaligned:
+ SUB.S $4, n
+
+ MOVBU.P 4(p), x1
+ MOVBU -3(p), x2
+ ORR x2 << 8, x1
+ MOVBU -2(p), x3
+ ORR x3 << 16, x1
+ MOVBU -1(p), x4
+ ORR x4 << 24, x1
+
+ MULA prime3r, x1, h, h
+ MOVW h @> 15, h
+ MUL prime4r, h
+
+ BPL loop4unaligned
+
+loop4done:
+ ADD.S $4, n // Restore number of bytes left.
+ BEQ end
+
+ MOVW $const_prime5, prime5r
+
+loop1:
+ SUB.S $1, n
+
+ MOVBU.P 1(p), x1
+ MULA prime5r, x1, h, h
+ MOVW h @> 21, h
+ MUL prime1r, h
+
+ BNE loop1
+
+end:
+ MOVW $const_prime3, prime3r
+ EOR h >> 15, h
+ MUL prime2r, h
+ EOR h >> 13, h
+ MUL prime3r, h
+ EOR h >> 16, h
+
+ MOVW h, ret+12(FP)
+ RET
+
+
+// func update(v *[4]uint32, buf *[16]byte, p []byte)
+TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20
+ MOVW v+0(FP), p
+ MOVM.IA (p), [v1, v2, v3, v4]
+
+ MOVW $const_prime1, prime1r
+ MOVW $const_prime2, prime2r
+
+ // Process buf, if not nil.
+ MOVW buf+4(FP), p
+ CMP $0, p
+ BEQ noBuffered
+
+ round16aligned
+
+noBuffered:
+ MOVW input_base +8(FP), p
+ MOVW input_len +12(FP), n
+
+ SUB.S $16, n
+ BMI end
+
+ TST $3, p
+ BNE loop16unaligned
+
+loop16aligned:
+ SUB.S $16, n
+ round16aligned
+ BPL loop16aligned
+ B end
+
+loop16unaligned:
+ SUB.S $16, n
+ round16unaligned
+ BPL loop16unaligned
+
+end:
+ MOVW v+0(FP), p
+ MOVM.IA [v1, v2, v3, v4], (p)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
new file mode 100644
index 000000000..c96b59b8c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
@@ -0,0 +1,10 @@
+// +build !arm noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) }
+
+func update(v *[4]uint32, buf *[16]byte, input []byte) {
+ updateGo(v, buf, input)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go
new file mode 100644
index 000000000..a62022e08
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/lz4.go
@@ -0,0 +1,157 @@
+// Package lz4 implements reading and writing lz4 compressed data.
+//
+// The package supports both the LZ4 stream format,
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// and the LZ4 block format, defined at
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html.
+//
+// See https://github.com/lz4/lz4 for the reference C implementation.
+package lz4
+
+import (
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+func _() {
+ // Safety checks for duplicated elements.
+ var x [1]struct{}
+ _ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
+ _ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
+ _ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
+ _ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
+ _ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
+}
+
+// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
+func CompressBlockBound(n int) int {
+ return lz4block.CompressBlockBound(n)
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+ return lz4block.UncompressBlock(src, dst, nil)
+}
+
+// UncompressBlockWithDict uncompresses the source buffer into the destination one using a
+// dictionary, and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlockWithDict(src, dst, dict []byte) (int, error) {
+ return lz4block.UncompressBlock(src, dst, dict)
+}
+
+// A Compressor compresses data into the LZ4 block format.
+// It uses a fast compression algorithm.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type Compressor struct{ c lz4block.Compressor }
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+ return c.c.CompressBlock(src, dst)
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The argument hashTable is scratch space for a hash table used by the
+// compressor. If provided, it should have length at least 1<<16. If it is
+// shorter (or nil), CompressBlock allocates its own hash table.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+
+// CompressBlock is equivalent to Compressor.CompressBlock.
+// The final argument is ignored and should be set to nil.
+//
+// This function is deprecated. Use a Compressor instead.
+func CompressBlock(src, dst []byte, _ []int) (int, error) {
+ return lz4block.CompressBlock(src, dst)
+}
+
+// A CompressorHC compresses data into the LZ4 block format.
+// Its compression ratio is potentially better than that of a Compressor,
+// but it is also slower and requires more memory.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type CompressorHC struct {
+ // Level is the maximum search depth for compression.
+ // Values <= 0 mean no maximum.
+ Level CompressionLevel
+ c lz4block.CompressorHC
+}
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
+ return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
+}
+
+// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
+// The final two arguments are ignored and should be set to nil.
+//
+// This function is deprecated. Use a CompressorHC instead.
+func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
+ return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
+}
+
+const (
+ // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed
+ // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+ ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
+ // ErrInvalidFrame is returned when reading an invalid LZ4 archive.
+ ErrInvalidFrame = lz4errors.ErrInvalidFrame
+ // ErrInternalUnhandledState is an internal error.
+ ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
+ // ErrInvalidHeaderChecksum is returned when reading a frame.
+ ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
+ // ErrInvalidBlockChecksum is returned when reading a frame.
+ ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
+ // ErrInvalidFrameChecksum is returned when reading a frame.
+ ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
+ // ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
+ ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
+ // ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
+ ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
+ // ErrOptionInvalidBlockSize is returned when an invalid block size is provided.
+ ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
+ // ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
+ ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
+ // ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
+ ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
+)
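A hedged usage sketch of the block-level API declared above: compress a buffer with Compressor.CompressBlock, then round-trip it with UncompressBlock, sizing the buffers as the doc comments require (CompressBlockBound for the compressed side, the original length for the uncompressed side).

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := bytes.Repeat([]byte("lz4 block round trip "), 100)

	// Compressed side: CompressBlockBound(len(src)) guarantees success.
	var c lz4.Compressor
	compressed := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, compressed)
	if err != nil {
		log.Fatal(err)
	}
	if n == 0 {
		log.Fatal("data reported as incompressible")
	}

	// Uncompressed side: the destination must hold the original size.
	restored := make([]byte, len(src))
	m, err := lz4.UncompressBlock(compressed[:n], restored)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d -> %d -> %d bytes, round trip ok: %v\n",
		len(src), n, m, bytes.Equal(src, restored[:m]))
}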
diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go
new file mode 100644
index 000000000..46a873803
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options.go
@@ -0,0 +1,214 @@
+package lz4
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
+
+type (
+ applier interface {
+ Apply(...Option) error
+ private()
+ }
+ // Option defines the parameters to setup an LZ4 Writer or Reader.
+ Option func(applier) error
+)
+
+// String returns a string representation of the option with its parameter(s).
+func (o Option) String() string {
+ return o(nil).Error()
+}
+
+// Default options.
+var (
+ DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
+ DefaultChecksumOption = ChecksumOption(true)
+ DefaultConcurrency = ConcurrencyOption(1)
+ defaultOnBlockDone = OnBlockDoneOption(nil)
+)
+
+const (
+ Block64Kb BlockSize = 1 << (16 + iota*2)
+ Block256Kb
+ Block1Mb
+ Block4Mb
+)
+
+// BlockSize defines the size of the blocks to be compressed.
+type BlockSize uint32
+
+// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
+func BlockSizeOption(size BlockSize) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("BlockSizeOption(%s)", size)
+ return lz4errors.Error(s)
+ case *Writer:
+ size := uint32(size)
+ if !lz4block.IsValid(size) {
+ return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+ }
+ w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// BlockChecksumOption enables or disables block checksum (default=false).
+func BlockChecksumOption(flag bool) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// ChecksumOption enables/disables all blocks or content checksum (default=true).
+func ChecksumOption(flag bool) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("ChecksumOption(%v)", flag)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// SizeOption sets the size of the original uncompressed data (default=0). It is useful to know the size of the
+// whole uncompressed data stream.
+func SizeOption(size uint64) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("SizeOption(%d)", size)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.SizeSet(size > 0)
+ w.frame.Descriptor.ContentSize = size
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// ConcurrencyOption sets the number of goroutines used for compression.
+// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
+func ConcurrencyOption(n int) Option {
+ if n <= 0 {
+ n = runtime.GOMAXPROCS(0)
+ }
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("ConcurrencyOption(%d)", n)
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.num = n
+ return nil
+ case *Reader:
+ rw.num = n
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// CompressionLevel defines the level of compression to use. Higher levels compress better, but more slowly.
+type CompressionLevel uint32
+
+const (
+ Fast CompressionLevel = 0
+ Level1 CompressionLevel = 1 << (8 + iota)
+ Level2
+ Level3
+ Level4
+ Level5
+ Level6
+ Level7
+ Level8
+ Level9
+)
+
+// CompressionLevelOption defines the compression level (default=Fast).
+func CompressionLevelOption(level CompressionLevel) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("CompressionLevelOption(%s)", level)
+ return lz4errors.Error(s)
+ case *Writer:
+ switch level {
+ case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+ default:
+ return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+ }
+ w.level = lz4block.CompressionLevel(level)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+func onBlockDone(int) {}
+
+// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when it has been compressed,
+// for a Reader, it is when it has been uncompressed.
+func OnBlockDoneOption(handler func(size int)) Option {
+ if handler == nil {
+ handler = onBlockDone
+ }
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.handler = handler
+ return nil
+ case *Reader:
+ rw.handler = handler
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// LegacyOption provides support for writing LZ4 frames in the legacy format.
+//
+// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
+//
+// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
+// the compressed stream is followed by the original (uncompressed) size of
+// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
+// This is also supported as a special case.
+func LegacyOption(legacy bool) Option {
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("LegacyOption(%v)", legacy)
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.legacy = legacy
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
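A hedged sketch of applying these options through a Writer. NewWriter and the Writer type are defined elsewhere in this vendored package, so this only illustrates how Apply composes Option values; the specific option choices are arbitrary.

package main

import (
	"bytes"
	"log"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var buf bytes.Buffer
	zw := lz4.NewWriter(&buf)
	if err := zw.Apply(
		lz4.BlockSizeOption(lz4.Block256Kb),
		lz4.CompressionLevelOption(lz4.Level5),
		lz4.ConcurrencyOption(-1), // <= 0 means use runtime.GOMAXPROCS(0)
	); err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("hello, lz4 frame")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed frame: %d bytes", buf.Len())
}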
diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go
new file mode 100644
index 000000000..2de814909
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Block64Kb-65536]
+ _ = x[Block256Kb-262144]
+ _ = x[Block1Mb-1048576]
+ _ = x[Block4Mb-4194304]
+}
+
+const (
+ _BlockSize_name_0 = "Block64Kb"
+ _BlockSize_name_1 = "Block256Kb"
+ _BlockSize_name_2 = "Block1Mb"
+ _BlockSize_name_3 = "Block4Mb"
+)
+
+func (i BlockSize) String() string {
+ switch {
+ case i == 65536:
+ return _BlockSize_name_0
+ case i == 262144:
+ return _BlockSize_name_1
+ case i == 1048576:
+ return _BlockSize_name_2
+ case i == 4194304:
+ return _BlockSize_name_3
+ default:
+ return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Fast-0]
+ _ = x[Level1-512]
+ _ = x[Level2-1024]
+ _ = x[Level3-2048]
+ _ = x[Level4-4096]
+ _ = x[Level5-8192]
+ _ = x[Level6-16384]
+ _ = x[Level7-32768]
+ _ = x[Level8-65536]
+ _ = x[Level9-131072]
+}
+
+const (
+ _CompressionLevel_name_0 = "Fast"
+ _CompressionLevel_name_1 = "Level1"
+ _CompressionLevel_name_2 = "Level2"
+ _CompressionLevel_name_3 = "Level3"
+ _CompressionLevel_name_4 = "Level4"
+ _CompressionLevel_name_5 = "Level5"
+ _CompressionLevel_name_6 = "Level6"
+ _CompressionLevel_name_7 = "Level7"
+ _CompressionLevel_name_8 = "Level8"
+ _CompressionLevel_name_9 = "Level9"
+)
+
+func (i CompressionLevel) String() string {
+ switch {
+ case i == 0:
+ return _CompressionLevel_name_0
+ case i == 512:
+ return _CompressionLevel_name_1
+ case i == 1024:
+ return _CompressionLevel_name_2
+ case i == 2048:
+ return _CompressionLevel_name_3
+ case i == 4096:
+ return _CompressionLevel_name_4
+ case i == 8192:
+ return _CompressionLevel_name_5
+ case i == 16384:
+ return _CompressionLevel_name_6
+ case i == 32768:
+ return _CompressionLevel_name_7
+ case i == 65536:
+ return _CompressionLevel_name_8
+ case i == 131072:
+ return _CompressionLevel_name_9
+ default:
+ return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go
new file mode 100644
index 000000000..275daad7c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/reader.go
@@ -0,0 +1,275 @@
+package lz4
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var readerStates = []aState{
+ noState: newState,
+ errorState: newState,
+ newState: readState,
+ readState: closedState,
+ closedState: newState,
+}
+
+// NewReader returns a new LZ4 frame decoder.
+func NewReader(r io.Reader) *Reader {
+ return newReader(r, false)
+}
+
+func newReader(r io.Reader, legacy bool) *Reader {
+ zr := &Reader{frame: lz4stream.NewFrame()}
+ zr.state.init(readerStates)
+ _ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
+ zr.Reset(r)
+ return zr
+}
+
+// Reader allows reading an LZ4 stream.
+type Reader struct {
+ state _State
+ src io.Reader // source reader
+ num int // concurrency level
+ frame *lz4stream.Frame // frame being read
+ data []byte // block buffer allocated in non concurrent mode
+ reads chan []byte // pending data
+ idx int // size of pending data
+ handler func(int)
+ cum uint32
+ dict []byte
+}
+
+func (*Reader) private() {}
+
+func (r *Reader) Apply(options ...Option) (err error) {
+ defer r.state.check(&err)
+ switch r.state.state {
+ case newState:
+ case errorState:
+ return r.state.err
+ default:
+ return lz4errors.ErrOptionClosedOrError
+ }
+ for _, o := range options {
+ if err = o(r); err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Size returns the size of the underlying uncompressed data, if set in the stream.
+func (r *Reader) Size() int {
+ switch r.state.state {
+ case readState, closedState:
+ if r.frame.Descriptor.Flags.Size() {
+ return int(r.frame.Descriptor.ContentSize)
+ }
+ }
+ return 0
+}
+
+func (r *Reader) isNotConcurrent() bool {
+ return r.num == 1
+}
+
+func (r *Reader) init() error {
+ err := r.frame.ParseHeaders(r.src)
+ if err != nil {
+ return err
+ }
+ if !r.frame.Descriptor.Flags.BlockIndependence() {
+ // We can't decompress dependent blocks concurrently.
+ // Instead of throwing an error to the user, silently drop concurrency
+ r.num = 1
+ }
+ data, err := r.frame.InitR(r.src, r.num)
+ if err != nil {
+ return err
+ }
+ r.reads = data
+ r.idx = 0
+ size := r.frame.Descriptor.Flags.BlockSizeIndex()
+ r.data = size.Get()
+ r.cum = 0
+ return nil
+}
+
+func (r *Reader) Read(buf []byte) (n int, err error) {
+ defer r.state.check(&err)
+ switch r.state.state {
+ case readState:
+ case closedState, errorState:
+ return 0, r.state.err
+ case newState:
+ // First initialization.
+ if err = r.init(); r.state.next(err) {
+ return
+ }
+ default:
+ return 0, r.state.fail()
+ }
+ for len(buf) > 0 {
+ var bn int
+ if r.idx == 0 {
+ if r.isNotConcurrent() {
+ bn, err = r.read(buf)
+ } else {
+ lz4block.Put(r.data)
+ r.data = <-r.reads
+ if len(r.data) == 0 {
+ // No uncompressed data: something went wrong or we are done.
+ err = r.frame.Blocks.ErrorR()
+ }
+ }
+ switch err {
+ case nil:
+ case io.EOF:
+ if er := r.frame.CloseR(r.src); er != nil {
+ err = er
+ }
+ lz4block.Put(r.data)
+ r.data = nil
+ return
+ default:
+ return
+ }
+ }
+ if bn == 0 {
+ // Fill buf with buffered data.
+ bn = copy(buf, r.data[r.idx:])
+ r.idx += bn
+ if r.idx == len(r.data) {
+ // All data read, get ready for the next Read.
+ r.idx = 0
+ }
+ }
+ buf = buf[bn:]
+ n += bn
+ r.handler(bn)
+ }
+ return
+}
+
+// read uncompresses the next block as follows:
+// - if buf has enough room, the block is uncompressed into it directly
+//   and the length of the used space is returned
+// - else, the uncompressed data is stored in r.data and 0 is returned
+func (r *Reader) read(buf []byte) (int, error) {
+ block := r.frame.Blocks.Block
+ _, err := block.Read(r.frame, r.src, r.cum)
+ if err != nil {
+ return 0, err
+ }
+ var direct bool
+ dst := r.data[:cap(r.data)]
+ if len(buf) >= len(dst) {
+ // Uncompress directly into buf.
+ direct = true
+ dst = buf
+ }
+ dst, err = block.Uncompress(r.frame, dst, r.dict, true)
+ if err != nil {
+ return 0, err
+ }
+ if !r.frame.Descriptor.Flags.BlockIndependence() {
+ if len(r.dict)+len(dst) > 128*1024 {
+ preserveSize := 64*1024 - len(dst)
+ if preserveSize < 0 {
+ preserveSize = 0
+ }
+ r.dict = r.dict[len(r.dict)-preserveSize:]
+ }
+ r.dict = append(r.dict, dst...)
+ }
+ r.cum += uint32(len(dst))
+ if direct {
+ return len(dst), nil
+ }
+ r.data = dst
+ return 0, nil
+}
+
+// Reset clears the state of the Reader r such that it is equivalent to its
+// initial state from NewReader, but instead reading from reader.
+// No access to reader is performed.
+func (r *Reader) Reset(reader io.Reader) {
+ if r.data != nil {
+ lz4block.Put(r.data)
+ r.data = nil
+ }
+ r.frame.Reset(r.num)
+ r.state.reset()
+ r.src = reader
+ r.reads = nil
+}
+
+// WriteTo efficiently uncompresses the data from the Reader underlying source to w.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ switch r.state.state {
+ case closedState, errorState:
+ return 0, r.state.err
+ case newState:
+ if err = r.init(); r.state.next(err) {
+ return
+ }
+ default:
+ return 0, r.state.fail()
+ }
+ defer r.state.nextd(&err)
+
+ var data []byte
+ if r.isNotConcurrent() {
+ size := r.frame.Descriptor.Flags.BlockSizeIndex()
+ data = size.Get()
+ defer lz4block.Put(data)
+ }
+ for {
+ var bn int
+ var dst []byte
+ if r.isNotConcurrent() {
+ bn, err = r.read(data)
+ dst = data[:bn]
+ } else {
+ lz4block.Put(dst)
+ dst = <-r.reads
+ bn = len(dst)
+ if bn == 0 {
+ // No uncompressed data: something went wrong or we are done.
+ err = r.frame.Blocks.ErrorR()
+ }
+ }
+ switch err {
+ case nil:
+ case io.EOF:
+ err = r.frame.CloseR(r.src)
+ return
+ default:
+ return
+ }
+ r.handler(bn)
+ bn, err = w.Write(dst)
+ n += int64(bn)
+ if err != nil {
+ return
+ }
+ }
+}
+
+// ValidFrameHeader returns a bool indicating whether the given byte slice matches an LZ4 frame header.
+func ValidFrameHeader(in []byte) (bool, error) {
+ f := lz4stream.NewFrame()
+ err := f.ParseHeaders(bytes.NewReader(in))
+ if err == nil {
+ return true, nil
+ }
+ if err == lz4errors.ErrInvalidFrame {
+ return false, nil
+ }
+ return false, err
+}
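The `Reader` above combines the frame parser with `io.Reader` and `io.WriterTo`. A minimal decompression sketch, assuming only the exported API shown in this diff (`NewReader` plus `io.Copy`, which picks up `WriteTo`); the file name is illustrative:

```go
package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	f, err := os.Open("data.lz4") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	// io.Copy uses zr.WriteTo, streaming uncompressed blocks to stdout.
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		panic(err)
	}
}
```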
diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go
new file mode 100644
index 000000000..d94f04d05
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state.go
@@ -0,0 +1,75 @@
+package lz4
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
+
+const (
+ noState aState = iota // uninitialized reader
+ errorState // unrecoverable error encountered
+ newState // instantiated object
+ readState // reading data
+ writeState // writing data
+ closedState // all done
+)
+
+type (
+ aState uint8
+ _State struct {
+ states []aState
+ state aState
+ err error
+ }
+)
+
+func (s *_State) init(states []aState) {
+ s.states = states
+ s.state = states[0]
+}
+
+func (s *_State) reset() {
+ s.state = s.states[0]
+ s.err = nil
+}
+
+// next sets the state to the next one unless it is passed a non-nil error.
+// It returns whether or not it is in error.
+func (s *_State) next(err error) bool {
+ if err != nil {
+ s.err = fmt.Errorf("%s: %w", s.state, err)
+ s.state = errorState
+ return true
+ }
+ s.state = s.states[s.state]
+ return false
+}
+
+// nextd is like next but for defers.
+func (s *_State) nextd(errp *error) bool {
+ return errp != nil && s.next(*errp)
+}
+
+// check records *errp if s is not already in error; the state moves to errorState unless the error is io.EOF.
+func (s *_State) check(errp *error) {
+ if s.state == errorState || errp == nil {
+ return
+ }
+ if err := *errp; err != nil {
+ s.err = fmt.Errorf("%w[%s]", err, s.state)
+ if !errors.Is(err, io.EOF) {
+ s.state = errorState
+ }
+ }
+}
+
+func (s *_State) fail() error {
+ s.state = errorState
+ s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
+ return s.err
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go
new file mode 100644
index 000000000..75fb82892
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[noState-0]
+ _ = x[errorState-1]
+ _ = x[newState-2]
+ _ = x[readState-3]
+ _ = x[writeState-4]
+ _ = x[closedState-5]
+}
+
+const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState"
+
+var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55}
+
+func (i aState) String() string {
+ if i >= aState(len(_aState_index)-1) {
+ return "aState(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _aState_name[_aState_index[i]:_aState_index[i+1]]
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go
new file mode 100644
index 000000000..77699f2b5
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/writer.go
@@ -0,0 +1,238 @@
+package lz4
+
+import (
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var writerStates = []aState{
+ noState: newState,
+ newState: writeState,
+ writeState: closedState,
+ closedState: newState,
+ errorState: newState,
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+func NewWriter(w io.Writer) *Writer {
+ zw := &Writer{frame: lz4stream.NewFrame()}
+ zw.state.init(writerStates)
+ _ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
+ zw.Reset(w)
+ return zw
+}
+
+// Writer allows writing an LZ4 stream.
+type Writer struct {
+ state _State
+ src io.Writer // destination writer
+ level lz4block.CompressionLevel // how hard to try
+ num int // concurrency level
+ frame *lz4stream.Frame // frame being built
+ data []byte // pending data
+ idx int // size of pending data
+ handler func(int)
+ legacy bool
+}
+
+func (*Writer) private() {}
+
+func (w *Writer) Apply(options ...Option) (err error) {
+ defer w.state.check(&err)
+ switch w.state.state {
+ case newState:
+ case errorState:
+ return w.state.err
+ default:
+ return lz4errors.ErrOptionClosedOrError
+ }
+ w.Reset(w.src)
+ for _, o := range options {
+ if err = o(w); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (w *Writer) isNotConcurrent() bool {
+ return w.num == 1
+}
+
+// init sets up the Writer when in newState. It does not change the Writer state.
+func (w *Writer) init() error {
+ w.frame.InitW(w.src, w.num, w.legacy)
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ w.data = size.Get()
+ w.idx = 0
+ return w.frame.Descriptor.Write(w.frame, w.src)
+}
+
+func (w *Writer) Write(buf []byte) (n int, err error) {
+ defer w.state.check(&err)
+ switch w.state.state {
+ case writeState:
+ case closedState, errorState:
+ return 0, w.state.err
+ case newState:
+ if err = w.init(); w.state.next(err) {
+ return
+ }
+ default:
+ return 0, w.state.fail()
+ }
+
+ zn := len(w.data)
+ for len(buf) > 0 {
+ if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn {
+ // Avoid a copy as there is enough data for a block.
+ if err = w.write(buf[:zn], false); err != nil {
+ return
+ }
+ n += zn
+ buf = buf[zn:]
+ continue
+ }
+ // Accumulate the data to be compressed.
+ m := copy(w.data[w.idx:], buf)
+ n += m
+ w.idx += m
+ buf = buf[m:]
+
+ if w.idx < len(w.data) {
+ // Buffer not filled.
+ return
+ }
+
+ // Buffer full.
+ if err = w.write(w.data, true); err != nil {
+ return
+ }
+ if !w.isNotConcurrent() {
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ w.data = size.Get()
+ }
+ w.idx = 0
+ }
+ return
+}
+
+func (w *Writer) write(data []byte, safe bool) error {
+ if w.isNotConcurrent() {
+ block := w.frame.Blocks.Block
+ err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
+ w.handler(len(block.Data))
+ return err
+ }
+ c := make(chan *lz4stream.FrameDataBlock)
+ w.frame.Blocks.Blocks <- c
+ go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
+ b := lz4stream.NewFrameDataBlock(w.frame)
+ c <- b.Compress(w.frame, data, w.level)
+ <-c
+ w.handler(len(b.Data))
+ b.Close(w.frame)
+ if safe {
+ // safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed
+ lz4block.Put(data)
+ }
+ }(c, data, safe)
+
+ return nil
+}
+
+// Flush any buffered data to the underlying writer immediately.
+func (w *Writer) Flush() (err error) {
+ switch w.state.state {
+ case writeState:
+ case errorState:
+ return w.state.err
+ default:
+ return nil
+ }
+
+ if w.idx > 0 {
+ // Flush pending data, disable w.data freeing as it is done later on.
+ if err = w.write(w.data[:w.idx], false); err != nil {
+ return err
+ }
+ w.idx = 0
+ }
+ return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying writer
+// without closing it.
+func (w *Writer) Close() error {
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ err := w.frame.CloseW(w.src, w.num)
+ // It is now safe to free the buffer.
+ if w.data != nil {
+ lz4block.Put(w.data)
+ w.data = nil
+ }
+ return err
+}
+
+// Reset clears the state of the Writer w such that it is equivalent to its
+// initial state from NewWriter, but instead writing to writer.
+// Reset keeps the previous options unless overwritten by the supplied ones.
+// No access to writer is performed.
+//
+// w.Close must be called before Reset or pending data may be dropped.
+func (w *Writer) Reset(writer io.Writer) {
+ w.frame.Reset(w.num)
+ w.state.reset()
+ w.src = writer
+}
+
+// ReadFrom efficiently reads from r and compresses the data into the Writer destination.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ switch w.state.state {
+ case closedState, errorState:
+ return 0, w.state.err
+ case newState:
+ if err = w.init(); w.state.next(err) {
+ return
+ }
+ default:
+ return 0, w.state.fail()
+ }
+ defer w.state.check(&err)
+
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ var done bool
+ var rn int
+ data := size.Get()
+ if w.isNotConcurrent() {
+ // Keep the same buffer for the whole process.
+ defer lz4block.Put(data)
+ }
+ for !done {
+ rn, err = io.ReadFull(r, data)
+ switch err {
+ case nil:
+ case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+ done = true
+ default:
+ return
+ }
+ n += int64(rn)
+ err = w.write(data[:rn], true)
+ if err != nil {
+ return
+ }
+ w.handler(rn)
+ if !done && !w.isNotConcurrent() {
+			// The buffer will be returned automatically by goroutines (safe=true),
+			// so get a new one for the next round.
+ data = size.Get()
+ }
+ }
+ return
+}
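For the compression side, the symmetric sketch using only `NewWriter`, `io.Copy` (which uses the `ReadFrom` above), and `Close`:

```go
package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	// Compress stdin into an LZ4 frame on stdout.
	zw := lz4.NewWriter(os.Stdout)
	if _, err := io.Copy(zw, os.Stdin); err != nil {
		panic(err)
	}
	// Close flushes pending data and writes the frame footer.
	if err := zw.Close(); err != nil {
		panic(err)
	}
}
```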
diff --git a/vendor/github.com/pion/dtls/v2/.editorconfig b/vendor/github.com/pion/dtls/v2/.editorconfig
new file mode 100644
index 000000000..d2b32061a
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/.editorconfig
@@ -0,0 +1,21 @@
+# http://editorconfig.org/
+
+root = true
+
+[*]
+charset = utf-8
+insert_final_newline = true
+trim_trailing_whitespace = true
+end_of_line = lf
+
+[*.go]
+indent_style = tab
+indent_size = 4
+
+[{*.yml,*.yaml}]
+indent_style = space
+indent_size = 2
+
+# Makefiles always use tabs for indentation
+[Makefile]
+indent_style = tab
diff --git a/vendor/github.com/pion/dtls/v2/.gitignore b/vendor/github.com/pion/dtls/v2/.gitignore
new file mode 100644
index 000000000..f977e7485
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/.gitignore
@@ -0,0 +1,25 @@
+### JetBrains IDE ###
+#####################
+.idea/
+
+### Emacs Temporary Files ###
+#############################
+*~
+
+### Folders ###
+###############
+bin/
+vendor/
+node_modules/
+
+### Files ###
+#############
+*.ivf
+*.ogg
+tags
+cover.out
+*.sw[poe]
+*.wasm
+examples/sfu-ws/cert.pem
+examples/sfu-ws/key.pem
+wasm_exec.js
diff --git a/vendor/github.com/pion/dtls/v2/.golangci.yml b/vendor/github.com/pion/dtls/v2/.golangci.yml
new file mode 100644
index 000000000..48696f16b
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/.golangci.yml
@@ -0,0 +1,116 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ misspell:
+ locale: US
+ exhaustive:
+ default-signifies-exhaustive: true
+ gomodguard:
+ blocked:
+ modules:
+ - github.com/pkg/errors:
+ recommendations:
+ - errors
+
+linters:
+ enable:
+ - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
+ - bidichk # Checks for dangerous unicode character sequences
+ - bodyclose # checks whether HTTP response body is closed successfully
+    - contextcheck # checks whether a function uses a non-inherited context
+ - decorder # check declaration order and count of types, constants, variables and functions
+ - depguard # Go linter that checks if package imports are in a list of acceptable packages
+ - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
+ - dupl # Tool for code clone detection
+ - durationcheck # check for two durations multiplied together
+ - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
+    - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions where the check for the returned error can be omitted.
+ - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.
+    - errorlint # errorlint is a linter that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
+ - exhaustive # check exhaustiveness of enum switch statements
+ - exportloopref # checks for pointers to enclosing loop variables
+ - forcetypeassert # finds forced type assertions
+ - gci # Gci control golang package import order and make it always deterministic.
+ - gochecknoglobals # Checks that no globals are present in Go code
+ - gochecknoinits # Checks that no init functions are present in Go code
+ - gocognit # Computes and checks the cognitive complexity of functions
+ - goconst # Finds repeated strings that could be replaced by a constant
+ - gocritic # The most opinionated Go source code linter
+ - godox # Tool for detection of FIXME, TODO and other comment keywords
+ - goerr113 # Golang linter to check the errors handling expressions
+ - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
+ - gofumpt # Gofumpt checks whether code was gofumpt-ed.
+    - goheader # Checks if the file header matches the pattern
+ - goimports # Goimports does everything that gofmt does. Additionally it checks unused imports
+ - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
+ - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations.
+ - goprintffuncname # Checks that printf-like functions are named with `f` at the end
+ - gosec # Inspects source code for security problems
+ - gosimple # Linter for Go source code that specializes in simplifying a code
+ - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
+ - grouper # An analyzer to analyze expression groups.
+ - importas # Enforces consistent import aliases
+ - ineffassign # Detects when assignments to existing variables are not used
+ - misspell # Finds commonly misspelled English words in comments
+ - nakedret # Finds naked returns in functions greater than a specified function length
+ - nilerr # Finds the code that returns nil even if it checks that the error is not nil.
+ - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value.
+ - noctx # noctx finds sending http request without context.Context
+ - predeclared # find code that shadows one of Go's predeclared identifiers
+ - revive # golint replacement, finds style mistakes
+ - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks
+ - stylecheck # Stylecheck is a replacement for golint
+ - tagliatelle # Checks the struct tags.
+ - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17
+ - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
+ - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
+ - unconvert # Remove unnecessary type conversions
+ - unparam # Reports unused function parameters
+ - unused # Checks Go code for unused constants, variables, functions and types
+ - wastedassign # wastedassign finds wasted assignment statements
+ - whitespace # Tool for detection of leading and trailing whitespace
+ disable:
+ - containedctx # containedctx is a linter that detects struct contained context.Context field
+ - cyclop # checks function and package cyclomatic complexity
+ - exhaustivestruct # Checks if all struct's fields are initialized
+ - forbidigo # Forbids identifiers
+ - funlen # Tool for detection of long functions
+ - gocyclo # Computes and checks the cyclomatic complexity of functions
+ - godot # Check if comments end in a period
+ - gomnd # An analyzer to detect magic numbers.
+ - ifshort # Checks that your code uses short syntax for if-statements whenever possible
+ - ireturn # Accept Interfaces, Return Concrete Types
+ - lll # Reports long lines
+ - maintidx # maintidx measures the maintainability index of each function.
+ - makezero # Finds slice declarations with non-zero initial length
+ - maligned # Tool to detect Go structs that would take less memory if their fields were sorted
+ - nestif # Reports deeply nested if statements
+ - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
+ - nolintlint # Reports ill-formed or insufficient nolint directives
+ - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
+ - prealloc # Finds slice declarations that could potentially be preallocated
+ - promlinter # Check Prometheus metrics naming via promlint
+ - rowserrcheck # checks whether Err of rows is checked successfully
+ - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed.
+ - testpackage # linter that makes you use a separate _test package
+ - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
+ - varnamelen # checks that the length of a variable's name matches its scope
+ - wrapcheck # Checks that errors returned from external packages are wrapped
+ - wsl # Whitespace Linter - Forces you to use empty lines!
+
+issues:
+ exclude-use-default: false
+ exclude-rules:
+ # Allow complex tests, better to be self contained
+ - path: _test\.go
+ linters:
+ - gocognit
+
+ # Allow complex main function in examples
+ - path: examples
+ text: "of func `main` is high"
+ linters:
+ - gocognit
+
+run:
+ skip-dirs-use-default: false
diff --git a/vendor/github.com/pion/dtls/v2/.goreleaser.yml b/vendor/github.com/pion/dtls/v2/.goreleaser.yml
new file mode 100644
index 000000000..2caa5fbd3
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/.goreleaser.yml
@@ -0,0 +1,2 @@
+builds:
+- skip: true
diff --git a/vendor/github.com/pion/dtls/v2/AUTHORS.txt b/vendor/github.com/pion/dtls/v2/AUTHORS.txt
new file mode 100644
index 000000000..2f5448e7f
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/AUTHORS.txt
@@ -0,0 +1,55 @@
+# Thank you to everyone that made Pion possible. If you are interested in contributing
+# we would love to have you https://github.com/pion/webrtc/wiki/Contributing
+#
+# This file is auto generated, using git to list all individual contributors.
+# see https://github.com/pion/.goassets/blob/master/scripts/generate-authors.sh for the scripting
+Aleksandr Razumov
+alvarowolfx
+Arlo Breault
+Atsushi Watanabe
+backkem
+bjdgyc
+boks1971
+Bragadeesh
+Carson Hoffman
+Cecylia Bocovich
+Chris Hiszpanski
+Daniele Sluijters
+folbrich
+Hayden James
+Hugo Arregui
+Hugo Arregui
+igolaizola <11333576+igolaizola@users.noreply.github.com>
+Jeffrey Stoke
+Jeroen de Bruijn
+Jeroen de Bruijn
+Jim Wert
+jinleileiking
+Jozef Kralik
+Julien Salleyron
+Juliusz Chroboczek
+Kegan Dougal
+Kevin Wang
+Lander Noterman
+Len
+Lukas Lihotzki
+ManuelBk <26275612+ManuelBk@users.noreply.github.com>
+Michael Zabka
+Michiel De Backker
+Rachel Chen
+Robert Eperjesi
+Ryan Gordon
+Sam Lancia
+Sean DuBois
+Sean DuBois
+Shelikhoo
+Stefan Tatschner
+Steffen Vogel
+Vadim
+Vadim Filimonov
+wmiao
+ZHENK
+吕海涛
+
+# List of contributors not appearing in Git history
+
diff --git a/vendor/github.com/pion/dtls/v2/LICENSE b/vendor/github.com/pion/dtls/v2/LICENSE
new file mode 100644
index 000000000..ab602974d
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pion/dtls/v2/README.md b/vendor/github.com/pion/dtls/v2/README.md
new file mode 100644
index 000000000..01631a5fc
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/README.md
@@ -0,0 +1,139 @@
+# Pion DTLS
+
+A Go implementation of DTLS
+
+Native [DTLS 1.2][rfc6347] implementation in the Go programming language.
+
+A long term goal is a professional security review, and maybe an inclusion in stdlib.
+
+[rfc6347]: https://tools.ietf.org/html/rfc6347
+
+### Goals/Progress
+This will only be targeting DTLS 1.2, and the most modern/common cipher suites.
+We would love contributions that fall under the 'Planned Features' and any bug fixes!
+
+#### Current features
+* DTLS 1.2 Client/Server
+* Key Exchange via ECDHE(curve25519, nistp256, nistp384) and PSK
+* Packet loss and re-ordering is handled during handshaking
+* Key export ([RFC 5705][rfc5705])
+* Serialization and Resumption of sessions
+* Extended Master Secret extension ([RFC 7627][rfc7627])
+* ALPN extension ([RFC 7301][rfc7301])
+
+[rfc5705]: https://tools.ietf.org/html/rfc5705
+[rfc7627]: https://tools.ietf.org/html/rfc7627
+[rfc7301]: https://tools.ietf.org/html/rfc7301
+
+#### Supported ciphers
+
+##### ECDHE
+
+* TLS_ECDHE_ECDSA_WITH_AES_128_CCM ([RFC 6655][rfc6655])
+* TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 ([RFC 6655][rfc6655])
+* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 ([RFC 5289][rfc5289])
+* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ([RFC 5289][rfc5289])
+* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ([RFC 5289][rfc5289])
+* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ([RFC 5289][rfc5289])
+* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA ([RFC 8422][rfc8422])
+* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA ([RFC 8422][rfc8422])
+
+##### PSK
+
+* TLS_PSK_WITH_AES_128_CCM ([RFC 6655][rfc6655])
+* TLS_PSK_WITH_AES_128_CCM_8 ([RFC 6655][rfc6655])
+* TLS_PSK_WITH_AES_256_CCM_8 ([RFC 6655][rfc6655])
+* TLS_PSK_WITH_AES_128_GCM_SHA256 ([RFC 5487][rfc5487])
+* TLS_PSK_WITH_AES_128_CBC_SHA256 ([RFC 5487][rfc5487])
+
+##### ECDHE & PSK
+
+* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 ([RFC 5489][rfc5489])
+
+[rfc5289]: https://tools.ietf.org/html/rfc5289
+[rfc8422]: https://tools.ietf.org/html/rfc8422
+[rfc6655]: https://tools.ietf.org/html/rfc6655
+[rfc5487]: https://tools.ietf.org/html/rfc5487
+[rfc5489]: https://tools.ietf.org/html/rfc5489
+
+#### Planned Features
+* Chacha20Poly1305
+
+#### Excluded Features
+* DTLS 1.0
+* Renegotiation
+* Compression
+
+### Using
+
+This library needs at least Go 1.13, and you should have [Go modules
+enabled](https://github.com/golang/go/wiki/Modules).
+
+#### Pion DTLS
+For a DTLS 1.2 Server that listens on 127.0.0.1:4444
+```sh
+go run examples/listen/selfsign/main.go
+```
+
+For a DTLS 1.2 Client that connects to 127.0.0.1:4444
+```sh
+go run examples/dial/selfsign/main.go
+```
+
+#### OpenSSL
+Pion DTLS can connect to itself and OpenSSL.
+```
+ // Generate a certificate
+ openssl ecparam -out key.pem -name prime256v1 -genkey
+ openssl req -new -sha256 -key key.pem -out server.csr
+ openssl x509 -req -sha256 -days 365 -in server.csr -signkey key.pem -out cert.pem
+
+ // Use with examples/dial/selfsign/main.go
+ openssl s_server -dtls1_2 -cert cert.pem -key key.pem -accept 4444
+
+ // Use with examples/listen/selfsign/main.go
+ openssl s_client -dtls1_2 -connect 127.0.0.1:4444 -debug -cert cert.pem -key key.pem
+```
+
+### Using with PSK
+Pion DTLS also comes with examples that do key exchange via PSK
+
+
+#### Pion DTLS
+```sh
+go run examples/listen/psk/main.go
+```
+
+```sh
+go run examples/dial/psk/main.go
+```
+
+#### OpenSSL
+```
+ // Use with examples/dial/psk/main.go
+ openssl s_server -dtls1_2 -accept 4444 -nocert -psk abc123 -cipher PSK-AES128-CCM8
+
+ // Use with examples/listen/psk/main.go
+ openssl s_client -dtls1_2 -connect 127.0.0.1:4444 -psk abc123 -cipher PSK-AES128-CCM8
+```
+
+### Contributing
+Check out the **[contributing wiki](https://github.com/pion/webrtc/wiki/Contributing)** to join the group of amazing people making this project possible:
+
+### License
+MIT License - see [LICENSE](LICENSE) for full text
diff --git a/vendor/github.com/pion/dtls/v2/certificate.go b/vendor/github.com/pion/dtls/v2/certificate.go
new file mode 100644
index 000000000..7211ae092
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/certificate.go
@@ -0,0 +1,154 @@
+package dtls
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "strings"
+)
+
+// ClientHelloInfo contains information from a ClientHello message in order to
+// guide application logic in the GetCertificate callback.
+type ClientHelloInfo struct {
+ // ServerName indicates the name of the server requested by the client
+ // in order to support virtual hosting. ServerName is only set if the
+ // client is using SNI (see RFC 4366, Section 3.1).
+ ServerName string
+
+ // CipherSuites lists the CipherSuites supported by the client (e.g.
+ // TLS_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).
+ CipherSuites []CipherSuiteID
+}
+
+// CertificateRequestInfo contains information from a server's
+// CertificateRequest message, which is used to demand a certificate and proof
+// of control from a client.
+type CertificateRequestInfo struct {
+ // AcceptableCAs contains zero or more, DER-encoded, X.501
+ // Distinguished Names. These are the names of root or intermediate CAs
+ // that the server wishes the returned certificate to be signed by. An
+ // empty slice indicates that the server has no preference.
+ AcceptableCAs [][]byte
+}
+
+// SupportsCertificate returns nil if the provided certificate is supported by
+// the server that sent the CertificateRequest. Otherwise, it returns an error
+// describing the reason for the incompatibility.
+// NOTE: original src: https://github.com/golang/go/blob/29b9a328d268d53833d2cc063d1d8b4bf6852675/src/crypto/tls/common.go#L1273
+func (cri *CertificateRequestInfo) SupportsCertificate(c *tls.Certificate) error {
+ if len(cri.AcceptableCAs) == 0 {
+ return nil
+ }
+
+ for j, cert := range c.Certificate {
+ x509Cert := c.Leaf
+ // Parse the certificate if this isn't the leaf node, or if
+ // chain.Leaf was nil.
+ if j != 0 || x509Cert == nil {
+ var err error
+ if x509Cert, err = x509.ParseCertificate(cert); err != nil {
+ return fmt.Errorf("failed to parse certificate #%d in the chain: %w", j, err)
+ }
+ }
+
+ for _, ca := range cri.AcceptableCAs {
+ if bytes.Equal(x509Cert.RawIssuer, ca) {
+ return nil
+ }
+ }
+ }
+ return errNotAcceptableCertificateChain
+}
+
+func (c *handshakeConfig) setNameToCertificateLocked() {
+ nameToCertificate := make(map[string]*tls.Certificate)
+ for i := range c.localCertificates {
+ cert := &c.localCertificates[i]
+ x509Cert := cert.Leaf
+ if x509Cert == nil {
+ var parseErr error
+ x509Cert, parseErr = x509.ParseCertificate(cert.Certificate[0])
+ if parseErr != nil {
+ continue
+ }
+ }
+ if len(x509Cert.Subject.CommonName) > 0 {
+ nameToCertificate[strings.ToLower(x509Cert.Subject.CommonName)] = cert
+ }
+ for _, san := range x509Cert.DNSNames {
+ nameToCertificate[strings.ToLower(san)] = cert
+ }
+ }
+ c.nameToCertificate = nameToCertificate
+}
+
+func (c *handshakeConfig) getCertificate(clientHelloInfo *ClientHelloInfo) (*tls.Certificate, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.localGetCertificate != nil &&
+ (len(c.localCertificates) == 0 || len(clientHelloInfo.ServerName) > 0) {
+ cert, err := c.localGetCertificate(clientHelloInfo)
+ if cert != nil || err != nil {
+ return cert, err
+ }
+ }
+
+ if c.nameToCertificate == nil {
+ c.setNameToCertificateLocked()
+ }
+
+ if len(c.localCertificates) == 0 {
+ return nil, errNoCertificates
+ }
+
+ if len(c.localCertificates) == 1 {
+ // There's only one choice, so no point doing any work.
+ return &c.localCertificates[0], nil
+ }
+
+ if len(clientHelloInfo.ServerName) == 0 {
+ return &c.localCertificates[0], nil
+ }
+
+ name := strings.TrimRight(strings.ToLower(clientHelloInfo.ServerName), ".")
+
+ if cert, ok := c.nameToCertificate[name]; ok {
+ return cert, nil
+ }
+
+ // try replacing labels in the name with wildcards until we get a
+ // match.
+ labels := strings.Split(name, ".")
+ for i := range labels {
+ labels[i] = "*"
+ candidate := strings.Join(labels, ".")
+ if cert, ok := c.nameToCertificate[candidate]; ok {
+ return cert, nil
+ }
+ }
+
+ // If nothing matches, return the first certificate.
+ return &c.localCertificates[0], nil
+}
+
+// NOTE: original src: https://github.com/golang/go/blob/29b9a328d268d53833d2cc063d1d8b4bf6852675/src/crypto/tls/handshake_client.go#L974
+func (c *handshakeConfig) getClientCertificate(cri *CertificateRequestInfo) (*tls.Certificate, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.localGetClientCertificate != nil {
+ return c.localGetClientCertificate(cri)
+ }
+
+ for i := range c.localCertificates {
+ chain := c.localCertificates[i]
+ if err := cri.SupportsCertificate(&chain); err != nil {
+ continue
+ }
+ return &chain, nil
+ }
+
+ // No acceptable certificate found. Don't send a certificate.
+ return new(tls.Certificate), nil
+}
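`ClientHelloInfo` is consumed by the `Config.GetCertificate` hook defined later in this diff; a minimal SNI-based selector sketch (the host name and the way the certificates are obtained are illustrative):

```go
package dtlsexample

import (
	"crypto/tls"

	"github.com/pion/dtls/v2"
)

// buildServerConfig returns a Config whose GetCertificate picks a
// certificate based on the SNI value carried in the ClientHelloInfo.
func buildServerConfig(defaultCert, exampleCert tls.Certificate) *dtls.Config {
	return &dtls.Config{
		GetCertificate: func(info *dtls.ClientHelloInfo) (*tls.Certificate, error) {
			if info.ServerName == "example.org" { // illustrative host name
				return &exampleCert, nil
			}
			return &defaultCert, nil
		},
	}
}
```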
diff --git a/vendor/github.com/pion/dtls/v2/cipher_suite.go b/vendor/github.com/pion/dtls/v2/cipher_suite.go
new file mode 100644
index 000000000..ddea8f6bb
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/cipher_suite.go
@@ -0,0 +1,273 @@
+package dtls
+
+import (
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/tls"
+ "fmt"
+ "hash"
+
+ "github.com/pion/dtls/v2/internal/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// CipherSuiteID is an ID for our supported CipherSuites
+type CipherSuiteID = ciphersuite.ID
+
+// Supported Cipher Suites
+const (
+ // AES-128-CCM
+ TLS_ECDHE_ECDSA_WITH_AES_128_CCM CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_CCM //nolint:revive,stylecheck
+ TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 //nolint:revive,stylecheck
+
+ // AES-128-GCM-SHA256
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 //nolint:revive,stylecheck
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 //nolint:revive,stylecheck
+
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 //nolint:revive,stylecheck
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 //nolint:revive,stylecheck
+
+ // AES-256-CBC-SHA
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA //nolint:revive,stylecheck
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA //nolint:revive,stylecheck
+
+ TLS_PSK_WITH_AES_128_CCM CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CCM //nolint:revive,stylecheck
+ TLS_PSK_WITH_AES_128_CCM_8 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CCM_8 //nolint:revive,stylecheck
+ TLS_PSK_WITH_AES_256_CCM_8 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_256_CCM_8 //nolint:revive,stylecheck
+ TLS_PSK_WITH_AES_128_GCM_SHA256 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_GCM_SHA256 //nolint:revive,stylecheck
+ TLS_PSK_WITH_AES_128_CBC_SHA256 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CBC_SHA256 //nolint:revive,stylecheck
+
+ TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 CipherSuiteID = ciphersuite.TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 //nolint:revive,stylecheck
+)
+
+// CipherSuiteAuthenticationType controls what authentication method is used during the handshake for a CipherSuite
+type CipherSuiteAuthenticationType = ciphersuite.AuthenticationType
+
+// AuthenticationType Enums
+const (
+ CipherSuiteAuthenticationTypeCertificate CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypeCertificate
+ CipherSuiteAuthenticationTypePreSharedKey CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypePreSharedKey
+ CipherSuiteAuthenticationTypeAnonymous CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypeAnonymous
+)
+
+// CipherSuiteKeyExchangeAlgorithm controls what exchange algorithm is used during the handshake for a CipherSuite
+type CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithm
+
+// CipherSuiteKeyExchangeAlgorithm Bitmask
+const (
+ CipherSuiteKeyExchangeAlgorithmNone CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmNone
+ CipherSuiteKeyExchangeAlgorithmPsk CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmPsk
+ CipherSuiteKeyExchangeAlgorithmEcdhe CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmEcdhe
+)
+
+var _ = allCipherSuites() // Necessary until this function isn't only used by Go 1.14
+
+// CipherSuite is an interface that all DTLS CipherSuites must satisfy
+type CipherSuite interface {
+ // String of CipherSuite, only used for logging
+ String() string
+
+ // ID of CipherSuite.
+ ID() CipherSuiteID
+
+ // What type of Certificate does this CipherSuite use
+ CertificateType() clientcertificate.Type
+
+ // What Hash function is used during verification
+ HashFunc() func() hash.Hash
+
+	// AuthenticationType controls what authentication method is used during the handshake
+ AuthenticationType() CipherSuiteAuthenticationType
+
+	// KeyExchangeAlgorithm controls what exchange algorithm is used during the handshake
+ KeyExchangeAlgorithm() CipherSuiteKeyExchangeAlgorithm
+
+	// ECC (Elliptic Curve Cryptography) determines whether ECC extensions will be sent during the handshake.
+ // https://datatracker.ietf.org/doc/html/rfc4492#page-10
+ ECC() bool
+
+ // Called when keying material has been generated, should initialize the internal cipher
+ Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error
+ IsInitialized() bool
+ Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error)
+ Decrypt(in []byte) ([]byte, error)
+}
+
+// CipherSuiteName provides the same functionality as tls.CipherSuiteName
+// that appeared first in Go 1.14.
+//
+// Our implementation differs slightly in that it takes in a CipherSuiteID,
+// like the rest of our library, instead of a uint16 like crypto/tls.
+func CipherSuiteName(id CipherSuiteID) string {
+ suite := cipherSuiteForID(id, nil)
+ if suite != nil {
+ return suite.String()
+ }
+ return fmt.Sprintf("0x%04X", uint16(id))
+}
+
+// Taken from https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
+// A cipherSuite is a specific combination of key agreement, cipher and MAC
+// function.
+func cipherSuiteForID(id CipherSuiteID, customCiphers func() []CipherSuite) CipherSuite {
+ switch id { //nolint:exhaustive
+ case TLS_ECDHE_ECDSA_WITH_AES_128_CCM:
+ return ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm()
+ case TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8:
+ return ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm8()
+ case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ return &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}
+ case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+ return &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{}
+ case TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+ return &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{}
+ case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ return &ciphersuite.TLSEcdheRsaWithAes256CbcSha{}
+ case TLS_PSK_WITH_AES_128_CCM:
+ return ciphersuite.NewTLSPskWithAes128Ccm()
+ case TLS_PSK_WITH_AES_128_CCM_8:
+ return ciphersuite.NewTLSPskWithAes128Ccm8()
+ case TLS_PSK_WITH_AES_256_CCM_8:
+ return ciphersuite.NewTLSPskWithAes256Ccm8()
+ case TLS_PSK_WITH_AES_128_GCM_SHA256:
+ return &ciphersuite.TLSPskWithAes128GcmSha256{}
+ case TLS_PSK_WITH_AES_128_CBC_SHA256:
+ return &ciphersuite.TLSPskWithAes128CbcSha256{}
+ case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:
+ return &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{}
+ case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:
+ return &ciphersuite.TLSEcdheRsaWithAes256GcmSha384{}
+ case TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256:
+ return ciphersuite.NewTLSEcdhePskWithAes128CbcSha256()
+ }
+
+ if customCiphers != nil {
+ for _, c := range customCiphers() {
+ if c.ID() == id {
+ return c
+ }
+ }
+ }
+
+ return nil
+}
+
+// CipherSuites we support in order of preference
+func defaultCipherSuites() []CipherSuite {
+ return []CipherSuite{
+ &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{},
+ &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{},
+ &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{},
+ &ciphersuite.TLSEcdheRsaWithAes256CbcSha{},
+ &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{},
+ &ciphersuite.TLSEcdheRsaWithAes256GcmSha384{},
+ }
+}
+
+func allCipherSuites() []CipherSuite {
+ return []CipherSuite{
+ ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm(),
+ ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm8(),
+ &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{},
+ &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{},
+ &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{},
+ &ciphersuite.TLSEcdheRsaWithAes256CbcSha{},
+ ciphersuite.NewTLSPskWithAes128Ccm(),
+ ciphersuite.NewTLSPskWithAes128Ccm8(),
+ ciphersuite.NewTLSPskWithAes256Ccm8(),
+ &ciphersuite.TLSPskWithAes128GcmSha256{},
+ &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{},
+ &ciphersuite.TLSEcdheRsaWithAes256GcmSha384{},
+ }
+}
+
+func cipherSuiteIDs(cipherSuites []CipherSuite) []uint16 {
+ rtrn := []uint16{}
+ for _, c := range cipherSuites {
+ rtrn = append(rtrn, uint16(c.ID()))
+ }
+ return rtrn
+}
+
+func parseCipherSuites(userSelectedSuites []CipherSuiteID, customCipherSuites func() []CipherSuite, includeCertificateSuites, includePSKSuites bool) ([]CipherSuite, error) {
+ cipherSuitesForIDs := func(ids []CipherSuiteID) ([]CipherSuite, error) {
+ cipherSuites := []CipherSuite{}
+ for _, id := range ids {
+ c := cipherSuiteForID(id, nil)
+ if c == nil {
+ return nil, &invalidCipherSuiteError{id}
+ }
+ cipherSuites = append(cipherSuites, c)
+ }
+ return cipherSuites, nil
+ }
+
+ var (
+ cipherSuites []CipherSuite
+ err error
+ i int
+ )
+ if userSelectedSuites != nil {
+ cipherSuites, err = cipherSuitesForIDs(userSelectedSuites)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ cipherSuites = defaultCipherSuites()
+ }
+
+ // Put CustomCipherSuites before ID selected suites
+ if customCipherSuites != nil {
+ cipherSuites = append(customCipherSuites(), cipherSuites...)
+ }
+
+ var foundCertificateSuite, foundPSKSuite, foundAnonymousSuite bool
+ for _, c := range cipherSuites {
+ switch {
+ case includeCertificateSuites && c.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate:
+ foundCertificateSuite = true
+ case includePSKSuites && c.AuthenticationType() == CipherSuiteAuthenticationTypePreSharedKey:
+ foundPSKSuite = true
+ case c.AuthenticationType() == CipherSuiteAuthenticationTypeAnonymous:
+ foundAnonymousSuite = true
+ default:
+ continue
+ }
+ cipherSuites[i] = c
+ i++
+ }
+
+ switch {
+ case includeCertificateSuites && !foundCertificateSuite && !foundAnonymousSuite:
+ return nil, errNoAvailableCertificateCipherSuite
+ case includePSKSuites && !foundPSKSuite:
+ return nil, errNoAvailablePSKCipherSuite
+ case i == 0:
+ return nil, errNoAvailableCipherSuites
+ }
+
+ return cipherSuites[:i], nil
+}
+
+func filterCipherSuitesForCertificate(cert *tls.Certificate, cipherSuites []CipherSuite) []CipherSuite {
+ if cert == nil || cert.PrivateKey == nil {
+ return cipherSuites
+ }
+ var certType clientcertificate.Type
+ switch cert.PrivateKey.(type) {
+ case ed25519.PrivateKey, *ecdsa.PrivateKey:
+ certType = clientcertificate.ECDSASign
+ case *rsa.PrivateKey:
+ certType = clientcertificate.RSASign
+ }
+
+ filtered := []CipherSuite{}
+ for _, c := range cipherSuites {
+ if c.AuthenticationType() != CipherSuiteAuthenticationTypeCertificate || certType == c.CertificateType() {
+ filtered = append(filtered, c)
+ }
+ }
+ return filtered
+}
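`CipherSuiteName` mirrors `tls.CipherSuiteName` but is keyed by `CipherSuiteID`; a quick sketch of the lookup and its hex fallback, using only identifiers defined in this file:

```go
package main

import (
	"fmt"

	"github.com/pion/dtls/v2"
)

func main() {
	// A known ID resolves to its IANA-style name.
	fmt.Println(dtls.CipherSuiteName(dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256))
	// Unknown IDs fall back to a hex form such as "0x1234".
	fmt.Println(dtls.CipherSuiteName(dtls.CipherSuiteID(0x1234)))
}
```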
diff --git a/vendor/github.com/pion/dtls/v2/cipher_suite_go114.go b/vendor/github.com/pion/dtls/v2/cipher_suite_go114.go
new file mode 100644
index 000000000..5c63c0913
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/cipher_suite_go114.go
@@ -0,0 +1,41 @@
+//go:build go1.14
+// +build go1.14
+
+package dtls
+
+import (
+ "crypto/tls"
+)
+
+// VersionDTLS12 is the DTLS version in the same style as
+// VersionTLSXX from crypto/tls
+const VersionDTLS12 = 0xfefd
+
+// Convert from our cipherSuite interface to a tls.CipherSuite struct
+func toTLSCipherSuite(c CipherSuite) *tls.CipherSuite {
+ return &tls.CipherSuite{
+ ID: uint16(c.ID()),
+ Name: c.String(),
+ SupportedVersions: []uint16{VersionDTLS12},
+ Insecure: false,
+ }
+}
+
+// CipherSuites returns a list of cipher suites currently implemented by this
+// package, excluding those with security issues, which are returned by
+// InsecureCipherSuites.
+func CipherSuites() []*tls.CipherSuite {
+ suites := allCipherSuites()
+ res := make([]*tls.CipherSuite, len(suites))
+ for i, c := range suites {
+ res[i] = toTLSCipherSuite(c)
+ }
+ return res
+}
+
+// InsecureCipherSuites returns a list of cipher suites currently implemented by
+// this package and which have security issues.
+func InsecureCipherSuites() []*tls.CipherSuite {
+ var res []*tls.CipherSuite
+ return res
+}
diff --git a/vendor/github.com/pion/dtls/v2/codecov.yml b/vendor/github.com/pion/dtls/v2/codecov.yml
new file mode 100644
index 000000000..085200a48
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/codecov.yml
@@ -0,0 +1,20 @@
+#
+# DO NOT EDIT THIS FILE
+#
+# It is automatically copied from https://github.com/pion/.goassets repository.
+#
+
+coverage:
+ status:
+ project:
+ default:
+ # Allow decreasing 2% of total coverage to avoid noise.
+ threshold: 2%
+ patch:
+ default:
+ target: 70%
+ only_pulls: true
+
+ignore:
+ - "examples/*"
+ - "examples/**/*"
diff --git a/vendor/github.com/pion/dtls/v2/compression_method.go b/vendor/github.com/pion/dtls/v2/compression_method.go
new file mode 100644
index 000000000..693eb7a52
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/compression_method.go
@@ -0,0 +1,9 @@
+package dtls
+
+import "github.com/pion/dtls/v2/pkg/protocol"
+
+func defaultCompressionMethods() []*protocol.CompressionMethod {
+ return []*protocol.CompressionMethod{
+ {},
+ }
+}
diff --git a/vendor/github.com/pion/dtls/v2/config.go b/vendor/github.com/pion/dtls/v2/config.go
new file mode 100644
index 000000000..13a172251
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/config.go
@@ -0,0 +1,250 @@
+package dtls
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "io"
+ "time"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/logging"
+)
+
+const keyLogLabelTLS12 = "CLIENT_RANDOM"
+
+// Config is used to configure a DTLS client or server.
+// After a Config is passed to a DTLS function it must not be modified.
+type Config struct {
+ // Certificates contains certificate chain to present to the other side of the connection.
+ // Server MUST set this if PSK is non-nil
+	// client SHOULD set this so CertificateRequests can be handled if PSK is non-nil
+ Certificates []tls.Certificate
+
+ // CipherSuites is a list of supported cipher suites.
+ // If CipherSuites is nil, a default list is used
+ CipherSuites []CipherSuiteID
+
+ // CustomCipherSuites is a list of CipherSuites that can be
+	// provided by the user. This allows users to use ciphers that are reserved
+ // for private usage.
+ CustomCipherSuites func() []CipherSuite
+
+ // SignatureSchemes contains the signature and hash schemes that the peer requests to verify.
+ SignatureSchemes []tls.SignatureScheme
+
+ // SRTPProtectionProfiles are the supported protection profiles
+ // Clients will send this via use_srtp and assert that the server properly responds
+ // Servers will assert that clients send one of these profiles and will respond as needed
+ SRTPProtectionProfiles []SRTPProtectionProfile
+
+ // ClientAuth determines the server's policy for
+ // TLS Client Authentication. The default is NoClientCert.
+ ClientAuth ClientAuthType
+
+	// ExtendedMasterSecret determines if the "Extended Master Secret" extension
+ // should be disabled, requested, or required (default requested).
+ ExtendedMasterSecret ExtendedMasterSecretType
+
+ // FlightInterval controls how often we send outbound handshake messages
+ // defaults to time.Second
+ FlightInterval time.Duration
+
+ // PSK sets the pre-shared key used by this DTLS connection
+ // If PSK is non-nil only PSK CipherSuites will be used
+ PSK PSKCallback
+ PSKIdentityHint []byte
+
+ // InsecureSkipVerify controls whether a client verifies the
+ // server's certificate chain and host name.
+ // If InsecureSkipVerify is true, TLS accepts any certificate
+ // presented by the server and any host name in that certificate.
+ // In this mode, TLS is susceptible to man-in-the-middle attacks.
+ // This should be used only for testing.
+ InsecureSkipVerify bool
+
+ // InsecureHashes allows the use of hashing algorithms that are known
+ // to be vulnerable.
+ InsecureHashes bool
+
+ // VerifyPeerCertificate, if not nil, is called after normal
+ // certificate verification by either a client or server. It
+ // receives the certificate provided by the peer and also a flag
+	// that tells if normal verification has succeeded. If it returns a
+ // non-nil error, the handshake is aborted and that error results.
+ //
+ // If normal verification fails then the handshake will abort before
+ // considering this callback. If normal verification is disabled by
+ // setting InsecureSkipVerify, or (for a server) when ClientAuth is
+ // RequestClientCert or RequireAnyClientCert, then this callback will
+ // be considered but the verifiedChains will always be nil.
+ VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
+
+ // VerifyConnection, if not nil, is called after normal certificate
+ // verification/PSK and after VerifyPeerCertificate by either a TLS client
+ // or server. If it returns a non-nil error, the handshake is aborted
+ // and that error results.
+ //
+ // If normal verification fails then the handshake will abort before
+ // considering this callback. This callback will run for all connections
+ // regardless of InsecureSkipVerify or ClientAuth settings.
+ VerifyConnection func(*State) error
+
+ // RootCAs defines the set of root certificate authorities
+ // that one peer uses when verifying the other peer's certificates.
+ // If RootCAs is nil, TLS uses the host's root CA set.
+ RootCAs *x509.CertPool
+
+ // ClientCAs defines the set of root certificate authorities
+ // that servers use if required to verify a client certificate
+ // by the policy in ClientAuth.
+ ClientCAs *x509.CertPool
+
+ // ServerName is used to verify the hostname on the returned
+ // certificates unless InsecureSkipVerify is given.
+ ServerName string
+
+ LoggerFactory logging.LoggerFactory
+
+ // ConnectContextMaker is a function to make a context used in Dial(),
+ // Client(), Server(), and Accept(). If nil, the default ConnectContextMaker
+	// is used. It can be implemented as follows:
+ //
+ // func ConnectContextMaker() (context.Context, func()) {
+ // return context.WithTimeout(context.Background(), 30*time.Second)
+ // }
+ ConnectContextMaker func() (context.Context, func())
+
+ // MTU is the length at which handshake messages will be fragmented to
+ // fit within the maximum transmission unit (default is 1200 bytes)
+ MTU int
+
+ // ReplayProtectionWindow is the size of the replay attack protection window.
+ // Duplication of the sequence number is checked in this window size.
+	// Packets with a sequence number older than this window, relative to the latest
+	// accepted packet, are discarded. (default is 64)
+ ReplayProtectionWindow int
+
+ // KeyLogWriter optionally specifies a destination for TLS master secrets
+ // in NSS key log format that can be used to allow external programs
+ // such as Wireshark to decrypt TLS connections.
+ // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
+ // Use of KeyLogWriter compromises security and should only be
+ // used for debugging.
+ KeyLogWriter io.Writer
+
+ // SessionStore is the container to store session for resumption.
+ SessionStore SessionStore
+
+ // List of application protocols the peer supports, for ALPN
+ SupportedProtocols []string
+
+ // List of Elliptic Curves to use
+ //
+ // If an ECC ciphersuite is configured and EllipticCurves is empty
+ // it will default to X25519, P-256, P-384 in this specific order.
+ EllipticCurves []elliptic.Curve
+
+ // GetCertificate returns a Certificate based on the given
+ // ClientHelloInfo. It will only be called if the client supplies SNI
+ // information or if Certificates is empty.
+ //
+ // If GetCertificate is nil or returns nil, then the certificate is
+ // retrieved from NameToCertificate. If NameToCertificate is nil, the
+ // best element of Certificates will be used.
+ GetCertificate func(*ClientHelloInfo) (*tls.Certificate, error)
+
+ // GetClientCertificate, if not nil, is called when a server requests a
+ // certificate from a client. If set, the contents of Certificates will
+ // be ignored.
+ //
+ // If GetClientCertificate returns an error, the handshake will be
+ // aborted and that error will be returned. Otherwise
+ // GetClientCertificate must return a non-nil Certificate. If
+ // Certificate.Certificate is empty then no certificate will be sent to
+ // the server. If this is unacceptable to the server then it may abort
+ // the handshake.
+ GetClientCertificate func(*CertificateRequestInfo) (*tls.Certificate, error)
+
+	// InsecureSkipVerifyHello, if true and when acting as a server, allows a client to
+	// skip the hello verify phase and receive a ServerHello after the initial ClientHello.
+	// This has implications for DoS attack resistance.
+ InsecureSkipVerifyHello bool
+}
+
+func defaultConnectContextMaker() (context.Context, func()) {
+ return context.WithTimeout(context.Background(), 30*time.Second)
+}
+
+func (c *Config) connectContextMaker() (context.Context, func()) {
+ if c.ConnectContextMaker == nil {
+ return defaultConnectContextMaker()
+ }
+ return c.ConnectContextMaker()
+}
+
+func (c *Config) includeCertificateSuites() bool {
+ return c.PSK == nil || len(c.Certificates) > 0 || c.GetCertificate != nil || c.GetClientCertificate != nil
+}
+
+const defaultMTU = 1200 // bytes
+
+var defaultCurves = []elliptic.Curve{elliptic.X25519, elliptic.P256, elliptic.P384} //nolint:gochecknoglobals
+
+// PSKCallback is called once we have the remote's PSKIdentityHint.
+// If the remote provided none it will be nil
+type PSKCallback func([]byte) ([]byte, error)
+
+// ClientAuthType declares the policy the server will follow for
+// TLS Client Authentication.
+type ClientAuthType int
+
+// ClientAuthType enums
+const (
+ NoClientCert ClientAuthType = iota
+ RequestClientCert
+ RequireAnyClientCert
+ VerifyClientCertIfGiven
+ RequireAndVerifyClientCert
+)
+
+// ExtendedMasterSecretType declares the policy the client and server
+// will follow for the Extended Master Secret extension
+type ExtendedMasterSecretType int
+
+// ExtendedMasterSecretType enums
+const (
+ RequestExtendedMasterSecret ExtendedMasterSecretType = iota
+ RequireExtendedMasterSecret
+ DisableExtendedMasterSecret
+)
+
+func validateConfig(config *Config) error {
+ switch {
+ case config == nil:
+ return errNoConfigProvided
+ case config.PSKIdentityHint != nil && config.PSK == nil:
+ return errIdentityNoPSK
+ }
+
+ for _, cert := range config.Certificates {
+ if cert.Certificate == nil {
+ return errInvalidCertificate
+ }
+ if cert.PrivateKey != nil {
+ switch cert.PrivateKey.(type) {
+ case ed25519.PrivateKey:
+ case *ecdsa.PrivateKey:
+ case *rsa.PrivateKey:
+ default:
+ return errInvalidPrivateKey
+ }
+ }
+ }
+
+ _, err := parseCipherSuites(config.CipherSuites, config.CustomCipherSuites, config.includeCertificateSuites(), config.PSK != nil)
+ return err
+}
diff --git a/vendor/github.com/pion/dtls/v2/conn.go b/vendor/github.com/pion/dtls/v2/conn.go
new file mode 100644
index 000000000..bea31c2b6
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/conn.go
@@ -0,0 +1,1024 @@
+package dtls
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/pion/dtls/v2/internal/closer"
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/crypto/signaturehash"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+ "github.com/pion/logging"
+ "github.com/pion/transport/v2/connctx"
+ "github.com/pion/transport/v2/deadline"
+ "github.com/pion/transport/v2/replaydetector"
+)
+
+const (
+ initialTickerInterval = time.Second
+ cookieLength = 20
+ sessionLength = 32
+ defaultNamedCurve = elliptic.X25519
+ inboundBufferSize = 8192
+ // Default replay protection window is specified by RFC 6347 Section 4.1.2.6
+ defaultReplayProtectionWindow = 64
+)
+
+func invalidKeyingLabels() map[string]bool {
+ return map[string]bool{
+ "client finished": true,
+ "server finished": true,
+ "master secret": true,
+ "key expansion": true,
+ }
+}
+
+// Conn represents a DTLS connection
+type Conn struct {
+ lock sync.RWMutex // Internal lock (must not be public)
+ nextConn connctx.ConnCtx // Embedded Conn, typically a udpconn we read/write from
+ fragmentBuffer *fragmentBuffer // out-of-order and missing fragment handling
+ handshakeCache *handshakeCache // caching of handshake messages for verifyData generation
+ decrypted chan interface{} // Decrypted Application Data or error, pull by calling `Read`
+
+ state State // Internal state
+
+ maximumTransmissionUnit int
+
+ handshakeCompletedSuccessfully atomic.Value
+
+ encryptedPackets [][]byte
+
+ connectionClosedByUser bool
+ closeLock sync.Mutex
+ closed *closer.Closer
+ handshakeLoopsFinished sync.WaitGroup
+
+ readDeadline *deadline.Deadline
+ writeDeadline *deadline.Deadline
+
+ log logging.LeveledLogger
+
+ reading chan struct{}
+ handshakeRecv chan chan struct{}
+ cancelHandshaker func()
+ cancelHandshakeReader func()
+
+ fsm *handshakeFSM
+
+ replayProtectionWindow uint
+}
+
+func createConn(ctx context.Context, nextConn net.Conn, config *Config, isClient bool, initialState *State) (*Conn, error) {
+ err := validateConfig(config)
+ if err != nil {
+ return nil, err
+ }
+
+ if nextConn == nil {
+ return nil, errNilNextConn
+ }
+
+ cipherSuites, err := parseCipherSuites(config.CipherSuites, config.CustomCipherSuites, config.includeCertificateSuites(), config.PSK != nil)
+ if err != nil {
+ return nil, err
+ }
+
+ signatureSchemes, err := signaturehash.ParseSignatureSchemes(config.SignatureSchemes, config.InsecureHashes)
+ if err != nil {
+ return nil, err
+ }
+
+ workerInterval := initialTickerInterval
+ if config.FlightInterval != 0 {
+ workerInterval = config.FlightInterval
+ }
+
+ loggerFactory := config.LoggerFactory
+ if loggerFactory == nil {
+ loggerFactory = logging.NewDefaultLoggerFactory()
+ }
+
+ logger := loggerFactory.NewLogger("dtls")
+
+ mtu := config.MTU
+ if mtu <= 0 {
+ mtu = defaultMTU
+ }
+
+ replayProtectionWindow := config.ReplayProtectionWindow
+ if replayProtectionWindow <= 0 {
+ replayProtectionWindow = defaultReplayProtectionWindow
+ }
+
+ c := &Conn{
+ nextConn: connctx.New(nextConn),
+ fragmentBuffer: newFragmentBuffer(),
+ handshakeCache: newHandshakeCache(),
+ maximumTransmissionUnit: mtu,
+
+ decrypted: make(chan interface{}, 1),
+ log: logger,
+
+ readDeadline: deadline.New(),
+ writeDeadline: deadline.New(),
+
+ reading: make(chan struct{}, 1),
+ handshakeRecv: make(chan chan struct{}),
+ closed: closer.NewCloser(),
+ cancelHandshaker: func() {},
+
+ replayProtectionWindow: uint(replayProtectionWindow),
+
+ state: State{
+ isClient: isClient,
+ },
+ }
+
+ c.setRemoteEpoch(0)
+ c.setLocalEpoch(0)
+
+ serverName := config.ServerName
+ // Do not allow the use of an IP address literal as an SNI value.
+ // See RFC 6066, Section 3.
+ if net.ParseIP(serverName) != nil {
+ serverName = ""
+ }
+
+ curves := config.EllipticCurves
+ if len(curves) == 0 {
+ curves = defaultCurves
+ }
+
+ hsCfg := &handshakeConfig{
+ localPSKCallback: config.PSK,
+ localPSKIdentityHint: config.PSKIdentityHint,
+ localCipherSuites: cipherSuites,
+ localSignatureSchemes: signatureSchemes,
+ extendedMasterSecret: config.ExtendedMasterSecret,
+ localSRTPProtectionProfiles: config.SRTPProtectionProfiles,
+ serverName: serverName,
+ supportedProtocols: config.SupportedProtocols,
+ clientAuth: config.ClientAuth,
+ localCertificates: config.Certificates,
+ insecureSkipVerify: config.InsecureSkipVerify,
+ verifyPeerCertificate: config.VerifyPeerCertificate,
+ verifyConnection: config.VerifyConnection,
+ rootCAs: config.RootCAs,
+ clientCAs: config.ClientCAs,
+ customCipherSuites: config.CustomCipherSuites,
+ retransmitInterval: workerInterval,
+ log: logger,
+ initialEpoch: 0,
+ keyLogWriter: config.KeyLogWriter,
+ sessionStore: config.SessionStore,
+ ellipticCurves: curves,
+ localGetCertificate: config.GetCertificate,
+ localGetClientCertificate: config.GetClientCertificate,
+ insecureSkipHelloVerify: config.InsecureSkipVerifyHello,
+ }
+
+ // rfc5246#section-7.4.3
+ // In addition, the hash and signature algorithms MUST be compatible
+ // with the key in the server's end-entity certificate.
+ if !isClient {
+ cert, err := hsCfg.getCertificate(&ClientHelloInfo{})
+ if err != nil && !errors.Is(err, errNoCertificates) {
+ return nil, err
+ }
+ hsCfg.localCipherSuites = filterCipherSuitesForCertificate(cert, cipherSuites)
+ }
+
+ var initialFlight flightVal
+ var initialFSMState handshakeState
+
+ if initialState != nil {
+ if c.state.isClient {
+ initialFlight = flight5
+ } else {
+ initialFlight = flight6
+ }
+ initialFSMState = handshakeFinished
+
+ c.state = *initialState
+ } else {
+ if c.state.isClient {
+ initialFlight = flight1
+ } else {
+ initialFlight = flight0
+ }
+ initialFSMState = handshakePreparing
+ }
+ // Do handshake
+ if err := c.handshake(ctx, hsCfg, initialFlight, initialFSMState); err != nil {
+ return nil, err
+ }
+
+ c.log.Trace("Handshake Completed")
+
+ return c, nil
+}
+
+// Dial connects to the given network address and establishes a DTLS connection on top.
+// The connection handshake will time out using ConnectContextMaker in the Config.
+// If you want to specify the timeout duration, use DialWithContext() instead.
+func Dial(network string, raddr *net.UDPAddr, config *Config) (*Conn, error) {
+ ctx, cancel := config.connectContextMaker()
+ defer cancel()
+
+ return DialWithContext(ctx, network, raddr, config)
+}
+
+// Client establishes a DTLS connection over an existing connection.
+// The connection handshake will time out using ConnectContextMaker in the Config.
+// If you want to specify the timeout duration, use ClientWithContext() instead.
+func Client(conn net.Conn, config *Config) (*Conn, error) {
+ ctx, cancel := config.connectContextMaker()
+ defer cancel()
+
+ return ClientWithContext(ctx, conn, config)
+}
+
+// Server listens for incoming DTLS connections.
+// The connection handshake will time out using ConnectContextMaker in the Config.
+// If you want to specify the timeout duration, use ServerWithContext() instead.
+func Server(conn net.Conn, config *Config) (*Conn, error) {
+ ctx, cancel := config.connectContextMaker()
+ defer cancel()
+
+ return ServerWithContext(ctx, conn, config)
+}
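+
+// Illustrative sketch only, not part of the upstream source: a PSK-based client
+// might dial a server as follows; the address and key bytes are placeholders.
+//
+//	config := &Config{
+//		PSK: func(hint []byte) ([]byte, error) {
+//			return []byte{0xAB, 0xC1, 0x23}, nil
+//		},
+//		PSKIdentityHint: []byte("pion"),
+//		CipherSuites:    []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8},
+//	}
+//	conn, err := Dial("udp", &net.UDPAddr{IP: net.ParseIP("192.0.2.1"), Port: 4444}, config)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer conn.Close()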
+
+// DialWithContext connects to the given network address and establishes a DTLS connection on top.
+func DialWithContext(ctx context.Context, network string, raddr *net.UDPAddr, config *Config) (*Conn, error) {
+ pConn, err := net.DialUDP(network, nil, raddr)
+ if err != nil {
+ return nil, err
+ }
+ return ClientWithContext(ctx, pConn, config)
+}
+
+// ClientWithContext establishes a DTLS connection over an existing connection.
+func ClientWithContext(ctx context.Context, conn net.Conn, config *Config) (*Conn, error) {
+ switch {
+ case config == nil:
+ return nil, errNoConfigProvided
+ case config.PSK != nil && config.PSKIdentityHint == nil:
+ return nil, errPSKAndIdentityMustBeSetForClient
+ }
+
+ return createConn(ctx, conn, config, true, nil)
+}
+
+// ServerWithContext listens for incoming DTLS connections.
+func ServerWithContext(ctx context.Context, conn net.Conn, config *Config) (*Conn, error) {
+ if config == nil {
+ return nil, errNoConfigProvided
+ }
+
+ return createConn(ctx, conn, config, false, nil)
+}
+
+// Read reads data from the connection.
+func (c *Conn) Read(p []byte) (n int, err error) {
+ if !c.isHandshakeCompletedSuccessfully() {
+ return 0, errHandshakeInProgress
+ }
+
+ select {
+ case <-c.readDeadline.Done():
+ return 0, errDeadlineExceeded
+ default:
+ }
+
+ for {
+ select {
+ case <-c.readDeadline.Done():
+ return 0, errDeadlineExceeded
+ case out, ok := <-c.decrypted:
+ if !ok {
+ return 0, io.EOF
+ }
+ switch val := out.(type) {
+ case ([]byte):
+ if len(p) < len(val) {
+ return 0, errBufferTooSmall
+ }
+ copy(p, val)
+ return len(val), nil
+ case (error):
+ return 0, val
+ }
+ }
+ }
+}
+
+// Write writes len(p) bytes from p to the DTLS connection
+func (c *Conn) Write(p []byte) (int, error) {
+ if c.isConnectionClosed() {
+ return 0, ErrConnClosed
+ }
+
+ select {
+ case <-c.writeDeadline.Done():
+ return 0, errDeadlineExceeded
+ default:
+ }
+
+ if !c.isHandshakeCompletedSuccessfully() {
+ return 0, errHandshakeInProgress
+ }
+
+ return len(p), c.writePackets(c.writeDeadline, []*packet{
+ {
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Epoch: c.state.getLocalEpoch(),
+ Version: protocol.Version1_2,
+ },
+ Content: &protocol.ApplicationData{
+ Data: p,
+ },
+ },
+ shouldEncrypt: true,
+ },
+ })
+}
+
+// Close closes the connection.
+func (c *Conn) Close() error {
+ err := c.close(true) //nolint:contextcheck
+ c.handshakeLoopsFinished.Wait()
+ return err
+}
+
+// ConnectionState returns basic DTLS details about the connection.
+// Note that this replaced the `Export` function of v1.
+func (c *Conn) ConnectionState() State {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return *c.state.clone()
+}
+
+// SelectedSRTPProtectionProfile returns the selected SRTPProtectionProfile
+func (c *Conn) SelectedSRTPProtectionProfile() (SRTPProtectionProfile, bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+ if c.state.srtpProtectionProfile == 0 {
+ return 0, false
+ }
+
+ return c.state.srtpProtectionProfile, true
+}
+
+func (c *Conn) writePackets(ctx context.Context, pkts []*packet) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ var rawPackets [][]byte
+
+ for _, p := range pkts {
+ if h, ok := p.record.Content.(*handshake.Handshake); ok {
+ handshakeRaw, err := p.record.Marshal()
+ if err != nil {
+ return err
+ }
+
+ c.log.Tracef("[handshake:%v] -> %s (epoch: %d, seq: %d)",
+ srvCliStr(c.state.isClient), h.Header.Type.String(),
+ p.record.Header.Epoch, h.Header.MessageSequence)
+ c.handshakeCache.push(handshakeRaw[recordlayer.HeaderSize:], p.record.Header.Epoch, h.Header.MessageSequence, h.Header.Type, c.state.isClient)
+
+ rawHandshakePackets, err := c.processHandshakePacket(p, h)
+ if err != nil {
+ return err
+ }
+ rawPackets = append(rawPackets, rawHandshakePackets...)
+ } else {
+ rawPacket, err := c.processPacket(p)
+ if err != nil {
+ return err
+ }
+ rawPackets = append(rawPackets, rawPacket)
+ }
+ }
+ if len(rawPackets) == 0 {
+ return nil
+ }
+ compactedRawPackets := c.compactRawPackets(rawPackets)
+
+ for _, compactedRawPackets := range compactedRawPackets {
+ if _, err := c.nextConn.WriteContext(ctx, compactedRawPackets); err != nil {
+ return netError(err)
+ }
+ }
+
+ return nil
+}
+
+func (c *Conn) compactRawPackets(rawPackets [][]byte) [][]byte {
+ combinedRawPackets := make([][]byte, 0)
+ currentCombinedRawPacket := make([]byte, 0)
+
+ for _, rawPacket := range rawPackets {
+ if len(currentCombinedRawPacket) > 0 && len(currentCombinedRawPacket)+len(rawPacket) >= c.maximumTransmissionUnit {
+ combinedRawPackets = append(combinedRawPackets, currentCombinedRawPacket)
+ currentCombinedRawPacket = []byte{}
+ }
+ currentCombinedRawPacket = append(currentCombinedRawPacket, rawPacket...)
+ }
+
+ combinedRawPackets = append(combinedRawPackets, currentCombinedRawPacket)
+
+ return combinedRawPackets
+}
+
+func (c *Conn) processPacket(p *packet) ([]byte, error) {
+ epoch := p.record.Header.Epoch
+ for len(c.state.localSequenceNumber) <= int(epoch) {
+ c.state.localSequenceNumber = append(c.state.localSequenceNumber, uint64(0))
+ }
+ seq := atomic.AddUint64(&c.state.localSequenceNumber[epoch], 1) - 1
+ if seq > recordlayer.MaxSequenceNumber {
+ // RFC 6347 Section 4.1
+ // The implementation must either abandon an association or rehandshake
+ // prior to allowing the sequence number to wrap.
+ return nil, errSequenceNumberOverflow
+ }
+ p.record.Header.SequenceNumber = seq
+
+ rawPacket, err := p.record.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ if p.shouldEncrypt {
+ var err error
+ rawPacket, err = c.state.cipherSuite.Encrypt(p.record, rawPacket)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return rawPacket, nil
+}
+
+func (c *Conn) processHandshakePacket(p *packet, h *handshake.Handshake) ([][]byte, error) {
+ rawPackets := make([][]byte, 0)
+
+ handshakeFragments, err := c.fragmentHandshake(h)
+ if err != nil {
+ return nil, err
+ }
+ epoch := p.record.Header.Epoch
+ for len(c.state.localSequenceNumber) <= int(epoch) {
+ c.state.localSequenceNumber = append(c.state.localSequenceNumber, uint64(0))
+ }
+
+ for _, handshakeFragment := range handshakeFragments {
+ seq := atomic.AddUint64(&c.state.localSequenceNumber[epoch], 1) - 1
+ if seq > recordlayer.MaxSequenceNumber {
+ return nil, errSequenceNumberOverflow
+ }
+
+ recordlayerHeader := &recordlayer.Header{
+ Version: p.record.Header.Version,
+ ContentType: p.record.Header.ContentType,
+ ContentLen: uint16(len(handshakeFragment)),
+ Epoch: p.record.Header.Epoch,
+ SequenceNumber: seq,
+ }
+
+ rawPacket, err := recordlayerHeader.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ p.record.Header = *recordlayerHeader
+
+ rawPacket = append(rawPacket, handshakeFragment...)
+ if p.shouldEncrypt {
+ var err error
+ rawPacket, err = c.state.cipherSuite.Encrypt(p.record, rawPacket)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ rawPackets = append(rawPackets, rawPacket)
+ }
+
+ return rawPackets, nil
+}
+
+func (c *Conn) fragmentHandshake(h *handshake.Handshake) ([][]byte, error) {
+ content, err := h.Message.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ fragmentedHandshakes := make([][]byte, 0)
+
+ contentFragments := splitBytes(content, c.maximumTransmissionUnit)
+ if len(contentFragments) == 0 {
+ contentFragments = [][]byte{
+ {},
+ }
+ }
+
+ offset := 0
+ for _, contentFragment := range contentFragments {
+ contentFragmentLen := len(contentFragment)
+
+ headerFragment := &handshake.Header{
+ Type: h.Header.Type,
+ Length: h.Header.Length,
+ MessageSequence: h.Header.MessageSequence,
+ FragmentOffset: uint32(offset),
+ FragmentLength: uint32(contentFragmentLen),
+ }
+
+ offset += contentFragmentLen
+
+ fragmentedHandshake, err := headerFragment.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ fragmentedHandshake = append(fragmentedHandshake, contentFragment...)
+ fragmentedHandshakes = append(fragmentedHandshakes, fragmentedHandshake)
+ }
+
+ return fragmentedHandshakes, nil
+}
+
+var poolReadBuffer = sync.Pool{ //nolint:gochecknoglobals
+ New: func() interface{} {
+ b := make([]byte, inboundBufferSize)
+ return &b
+ },
+}
+
+func (c *Conn) readAndBuffer(ctx context.Context) error {
+ bufptr, ok := poolReadBuffer.Get().(*[]byte)
+ if !ok {
+ return errFailedToAccessPoolReadBuffer
+ }
+ defer poolReadBuffer.Put(bufptr)
+
+ b := *bufptr
+ i, err := c.nextConn.ReadContext(ctx, b)
+ if err != nil {
+ return netError(err)
+ }
+
+ pkts, err := recordlayer.UnpackDatagram(b[:i])
+ if err != nil {
+ return err
+ }
+
+ var hasHandshake bool
+ for _, p := range pkts {
+ hs, alert, err := c.handleIncomingPacket(ctx, p, true)
+ if alert != nil {
+ if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil {
+ if err == nil {
+ err = alertErr
+ }
+ }
+ }
+ if hs {
+ hasHandshake = true
+ }
+
+ var e *alertError
+ if errors.As(err, &e) {
+ if e.IsFatalOrCloseNotify() {
+ return e
+ }
+ } else if err != nil {
+ return err
+ }
+ }
+ if hasHandshake {
+ done := make(chan struct{})
+ select {
+ case c.handshakeRecv <- done:
+ // The other party may retransmit the flight,
+ // so we should respond even if it is not a new message.
+ <-done
+ case <-c.fsm.Done():
+ }
+ }
+ return nil
+}
+
+func (c *Conn) handleQueuedPackets(ctx context.Context) error {
+ pkts := c.encryptedPackets
+ c.encryptedPackets = nil
+
+ for _, p := range pkts {
+ _, alert, err := c.handleIncomingPacket(ctx, p, false) // don't re-enqueue
+ if alert != nil {
+ if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil {
+ if err == nil {
+ err = alertErr
+ }
+ }
+ }
+ var e *alertError
+ if errors.As(err, &e) {
+ if e.IsFatalOrCloseNotify() {
+ return e
+ }
+ } else if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *Conn) handleIncomingPacket(ctx context.Context, buf []byte, enqueue bool) (bool, *alert.Alert, error) { //nolint:gocognit
+ h := &recordlayer.Header{}
+ if err := h.Unmarshal(buf); err != nil {
+ // Decode error must be silently discarded
+ // [RFC6347 Section-4.1.2.7]
+ c.log.Debugf("discarded broken packet: %v", err)
+ return false, nil, nil
+ }
+
+ // Validate epoch
+ remoteEpoch := c.state.getRemoteEpoch()
+ if h.Epoch > remoteEpoch {
+ if h.Epoch > remoteEpoch+1 {
+ c.log.Debugf("discarded future packet (epoch: %d, seq: %d)",
+ h.Epoch, h.SequenceNumber,
+ )
+ return false, nil, nil
+ }
+ if enqueue {
+ c.log.Debug("received packet of next epoch, queuing packet")
+ c.encryptedPackets = append(c.encryptedPackets, buf)
+ }
+ return false, nil, nil
+ }
+
+ // Anti-replay protection
+ for len(c.state.replayDetector) <= int(h.Epoch) {
+ c.state.replayDetector = append(c.state.replayDetector,
+ replaydetector.New(c.replayProtectionWindow, recordlayer.MaxSequenceNumber),
+ )
+ }
+ markPacketAsValid, ok := c.state.replayDetector[int(h.Epoch)].Check(h.SequenceNumber)
+ if !ok {
+ c.log.Debugf("discarded duplicated packet (epoch: %d, seq: %d)",
+ h.Epoch, h.SequenceNumber,
+ )
+ return false, nil, nil
+ }
+
+ // Decrypt
+ if h.Epoch != 0 {
+ if c.state.cipherSuite == nil || !c.state.cipherSuite.IsInitialized() {
+ if enqueue {
+ c.encryptedPackets = append(c.encryptedPackets, buf)
+ c.log.Debug("handshake not finished, queuing packet")
+ }
+ return false, nil, nil
+ }
+
+ var err error
+ buf, err = c.state.cipherSuite.Decrypt(buf)
+ if err != nil {
+ c.log.Debugf("%s: decrypt failed: %s", srvCliStr(c.state.isClient), err)
+ return false, nil, nil
+ }
+ }
+
+ isHandshake, err := c.fragmentBuffer.push(append([]byte{}, buf...))
+ if err != nil {
+ // Decode error must be silently discarded
+ // [RFC6347 Section-4.1.2.7]
+ c.log.Debugf("defragment failed: %s", err)
+ return false, nil, nil
+ } else if isHandshake {
+ markPacketAsValid()
+ for out, epoch := c.fragmentBuffer.pop(); out != nil; out, epoch = c.fragmentBuffer.pop() {
+ header := &handshake.Header{}
+ if err := header.Unmarshal(out); err != nil {
+ c.log.Debugf("%s: handshake parse failed: %s", srvCliStr(c.state.isClient), err)
+ continue
+ }
+ c.handshakeCache.push(out, epoch, header.MessageSequence, header.Type, !c.state.isClient)
+ }
+
+ return true, nil, nil
+ }
+
+ r := &recordlayer.RecordLayer{}
+ if err := r.Unmarshal(buf); err != nil {
+ return false, &alert.Alert{Level: alert.Fatal, Description: alert.DecodeError}, err
+ }
+
+ switch content := r.Content.(type) {
+ case *alert.Alert:
+ c.log.Tracef("%s: <- %s", srvCliStr(c.state.isClient), content.String())
+ var a *alert.Alert
+ if content.Description == alert.CloseNotify {
+ // Respond with a close_notify [RFC5246 Section 7.2.1]
+ a = &alert.Alert{Level: alert.Warning, Description: alert.CloseNotify}
+ }
+ markPacketAsValid()
+ return false, a, &alertError{content}
+ case *protocol.ChangeCipherSpec:
+ if c.state.cipherSuite == nil || !c.state.cipherSuite.IsInitialized() {
+ if enqueue {
+ c.encryptedPackets = append(c.encryptedPackets, buf)
+ c.log.Debugf("CipherSuite not initialized, queuing packet")
+ }
+ return false, nil, nil
+ }
+
+ newRemoteEpoch := h.Epoch + 1
+ c.log.Tracef("%s: <- ChangeCipherSpec (epoch: %d)", srvCliStr(c.state.isClient), newRemoteEpoch)
+
+ if c.state.getRemoteEpoch()+1 == newRemoteEpoch {
+ c.setRemoteEpoch(newRemoteEpoch)
+ markPacketAsValid()
+ }
+ case *protocol.ApplicationData:
+ if h.Epoch == 0 {
+ return false, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, errApplicationDataEpochZero
+ }
+
+ markPacketAsValid()
+
+ select {
+ case c.decrypted <- content.Data:
+ case <-c.closed.Done():
+ case <-ctx.Done():
+ }
+
+ default:
+ return false, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, fmt.Errorf("%w: %d", errUnhandledContextType, content.ContentType())
+ }
+ return false, nil, nil
+}
+
+func (c *Conn) recvHandshake() <-chan chan struct{} {
+ return c.handshakeRecv
+}
+
+func (c *Conn) notify(ctx context.Context, level alert.Level, desc alert.Description) error {
+ if level == alert.Fatal && len(c.state.SessionID) > 0 {
+ // According to the RFC, we need to delete the stored session.
+ // https://datatracker.ietf.org/doc/html/rfc5246#section-7.2
+ if ss := c.fsm.cfg.sessionStore; ss != nil {
+ c.log.Tracef("clean invalid session: %s", c.state.SessionID)
+ if err := ss.Del(c.sessionKey()); err != nil {
+ return err
+ }
+ }
+ }
+ return c.writePackets(ctx, []*packet{
+ {
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Epoch: c.state.getLocalEpoch(),
+ Version: protocol.Version1_2,
+ },
+ Content: &alert.Alert{
+ Level: level,
+ Description: desc,
+ },
+ },
+ shouldEncrypt: c.isHandshakeCompletedSuccessfully(),
+ },
+ })
+}
+
+func (c *Conn) setHandshakeCompletedSuccessfully() {
+ c.handshakeCompletedSuccessfully.Store(struct{ bool }{true})
+}
+
+func (c *Conn) isHandshakeCompletedSuccessfully() bool {
+ boolean, _ := c.handshakeCompletedSuccessfully.Load().(struct{ bool })
+ return boolean.bool
+}
+
+func (c *Conn) handshake(ctx context.Context, cfg *handshakeConfig, initialFlight flightVal, initialState handshakeState) error { //nolint:gocognit
+ c.fsm = newHandshakeFSM(&c.state, c.handshakeCache, cfg, initialFlight)
+
+ done := make(chan struct{})
+ ctxRead, cancelRead := context.WithCancel(context.Background())
+ c.cancelHandshakeReader = cancelRead
+ cfg.onFlightState = func(f flightVal, s handshakeState) {
+ if s == handshakeFinished && !c.isHandshakeCompletedSuccessfully() {
+ c.setHandshakeCompletedSuccessfully()
+ close(done)
+ }
+ }
+
+ ctxHs, cancel := context.WithCancel(context.Background())
+ c.cancelHandshaker = cancel
+
+ firstErr := make(chan error, 1)
+
+ c.handshakeLoopsFinished.Add(2)
+
+ // The handshake routine should stay alive until the connection is closed.
+ // The other party may request retransmission of the last flight to cope with packet drop.
+ go func() {
+ defer c.handshakeLoopsFinished.Done()
+ err := c.fsm.Run(ctxHs, c, initialState)
+ if !errors.Is(err, context.Canceled) {
+ select {
+ case firstErr <- err:
+ default:
+ }
+ }
+ }()
+ go func() {
+ defer func() {
+ // Escaping the read loop.
+ // It's safe to close the decrypted channel now.
+ close(c.decrypted)
+
+ // Force stop handshaker when the underlying connection is closed.
+ cancel()
+ }()
+ defer c.handshakeLoopsFinished.Done()
+ for {
+ if err := c.readAndBuffer(ctxRead); err != nil {
+ var e *alertError
+ if errors.As(err, &e) {
+ if !e.IsFatalOrCloseNotify() {
+ if c.isHandshakeCompletedSuccessfully() {
+ // Pass the error to Read()
+ select {
+ case c.decrypted <- err:
+ case <-c.closed.Done():
+ case <-ctxRead.Done():
+ }
+ }
+ continue // non-fatal alert must not stop read loop
+ }
+ } else {
+ switch {
+ case errors.Is(err, context.DeadlineExceeded), errors.Is(err, context.Canceled), errors.Is(err, io.EOF):
+ default:
+ if c.isHandshakeCompletedSuccessfully() {
+ // Keep read loop and pass the read error to Read()
+ select {
+ case c.decrypted <- err:
+ case <-c.closed.Done():
+ case <-ctxRead.Done():
+ }
+ continue // non-fatal read errors must not stop the read loop
+ }
+ }
+ }
+
+ select {
+ case firstErr <- err:
+ default:
+ }
+
+ if e != nil {
+ if e.IsFatalOrCloseNotify() {
+ _ = c.close(false) //nolint:contextcheck
+ }
+ }
+ if !c.isConnectionClosed() && errors.Is(err, context.Canceled) {
+ c.log.Trace("handshake timed out - closing underlying connection")
+ _ = c.close(false) //nolint:contextcheck
+ }
+ return
+ }
+ }
+ }()
+
+ select {
+ case err := <-firstErr:
+ cancelRead()
+ cancel()
+ c.handshakeLoopsFinished.Wait()
+ return c.translateHandshakeCtxError(err)
+ case <-ctx.Done():
+ cancelRead()
+ cancel()
+ c.handshakeLoopsFinished.Wait()
+ return c.translateHandshakeCtxError(ctx.Err())
+ case <-done:
+ return nil
+ }
+}
+
+func (c *Conn) translateHandshakeCtxError(err error) error {
+ if err == nil {
+ return nil
+ }
+ if errors.Is(err, context.Canceled) && c.isHandshakeCompletedSuccessfully() {
+ return nil
+ }
+ return &HandshakeError{Err: err}
+}
+
+func (c *Conn) close(byUser bool) error {
+ c.cancelHandshaker()
+ c.cancelHandshakeReader()
+
+ if c.isHandshakeCompletedSuccessfully() && byUser {
+ // Discard error from notify() to return non-error on the first user call of Close()
+ // even if the underlying connection is already closed.
+ _ = c.notify(context.Background(), alert.Warning, alert.CloseNotify)
+ }
+
+ c.closeLock.Lock()
+ // Don't return ErrConnClosed on the user's first call to Close().
+ closedByUser := c.connectionClosedByUser
+ if byUser {
+ c.connectionClosedByUser = true
+ }
+ isClosed := c.isConnectionClosed()
+ c.closed.Close()
+ c.closeLock.Unlock()
+
+ if closedByUser {
+ return ErrConnClosed
+ }
+
+ if isClosed {
+ return nil
+ }
+
+ return c.nextConn.Close()
+}
+
+func (c *Conn) isConnectionClosed() bool {
+ select {
+ case <-c.closed.Done():
+ return true
+ default:
+ return false
+ }
+}
+
+func (c *Conn) setLocalEpoch(epoch uint16) {
+ c.state.localEpoch.Store(epoch)
+}
+
+func (c *Conn) setRemoteEpoch(epoch uint16) {
+ c.state.remoteEpoch.Store(epoch)
+}
+
+// LocalAddr implements net.Conn.LocalAddr
+func (c *Conn) LocalAddr() net.Addr {
+ return c.nextConn.LocalAddr()
+}
+
+// RemoteAddr implements net.Conn.RemoteAddr
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.nextConn.RemoteAddr()
+}
+
+func (c *Conn) sessionKey() []byte {
+ if c.state.isClient {
+ // As ServerName can look like 0.example.com, it's better to add
+ // a delimiter character that is not allowed in
+ // either an address or a domain name.
+ return []byte(c.nextConn.RemoteAddr().String() + "_" + c.fsm.cfg.serverName)
+ }
+ return c.state.SessionID
+}
+
+// SetDeadline implements net.Conn.SetDeadline
+func (c *Conn) SetDeadline(t time.Time) error {
+ c.readDeadline.Set(t)
+ return c.SetWriteDeadline(t)
+}
+
+// SetReadDeadline implements net.Conn.SetReadDeadline
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ c.readDeadline.Set(t)
+ // The read deadline is fully managed by this layer.
+ // Don't set a read deadline on the underlying connection.
+ return nil
+}
+
+// SetWriteDeadline implements net.Conn.SetWriteDeadline
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ c.writeDeadline.Set(t)
+ // Write deadline is also fully managed by this layer.
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/crypto.go b/vendor/github.com/pion/dtls/v2/crypto.go
new file mode 100644
index 000000000..885a9a56b
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/crypto.go
@@ -0,0 +1,225 @@
+package dtls
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/binary"
+ "math/big"
+ "time"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/crypto/hash"
+)
+
+type ecdsaSignature struct {
+ R, S *big.Int
+}
+
+func valueKeyMessage(clientRandom, serverRandom, publicKey []byte, namedCurve elliptic.Curve) []byte {
+ serverECDHParams := make([]byte, 4)
+ serverECDHParams[0] = 3 // named curve
+ binary.BigEndian.PutUint16(serverECDHParams[1:], uint16(namedCurve))
+ serverECDHParams[3] = byte(len(publicKey))
+
+ plaintext := []byte{}
+ plaintext = append(plaintext, clientRandom...)
+ plaintext = append(plaintext, serverRandom...)
+ plaintext = append(plaintext, serverECDHParams...)
+ plaintext = append(plaintext, publicKey...)
+
+ return plaintext
+}
+
+// If the client provided a "signature_algorithms" extension, then all
+// certificates provided by the server MUST be signed by a
+// hash/signature algorithm pair that appears in that extension
+//
+// https://tools.ietf.org/html/rfc5246#section-7.4.2
+func generateKeySignature(clientRandom, serverRandom, publicKey []byte, namedCurve elliptic.Curve, privateKey crypto.PrivateKey, hashAlgorithm hash.Algorithm) ([]byte, error) {
+ msg := valueKeyMessage(clientRandom, serverRandom, publicKey, namedCurve)
+ switch p := privateKey.(type) {
+ case ed25519.PrivateKey:
+ // https://crypto.stackexchange.com/a/55483
+ return p.Sign(rand.Reader, msg, crypto.Hash(0))
+ case *ecdsa.PrivateKey:
+ hashed := hashAlgorithm.Digest(msg)
+ return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash())
+ case *rsa.PrivateKey:
+ hashed := hashAlgorithm.Digest(msg)
+ return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash())
+ }
+
+ return nil, errKeySignatureGenerateUnimplemented
+}
+
+func verifyKeySignature(message, remoteKeySignature []byte, hashAlgorithm hash.Algorithm, rawCertificates [][]byte) error { //nolint:dupl
+ if len(rawCertificates) == 0 {
+ return errLengthMismatch
+ }
+ certificate, err := x509.ParseCertificate(rawCertificates[0])
+ if err != nil {
+ return err
+ }
+
+ switch p := certificate.PublicKey.(type) {
+ case ed25519.PublicKey:
+ if ok := ed25519.Verify(p, message, remoteKeySignature); !ok {
+ return errKeySignatureMismatch
+ }
+ return nil
+ case *ecdsa.PublicKey:
+ ecdsaSig := &ecdsaSignature{}
+ if _, err := asn1.Unmarshal(remoteKeySignature, ecdsaSig); err != nil {
+ return err
+ }
+ if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+ return errInvalidECDSASignature
+ }
+ hashed := hashAlgorithm.Digest(message)
+ if !ecdsa.Verify(p, hashed, ecdsaSig.R, ecdsaSig.S) {
+ return errKeySignatureMismatch
+ }
+ return nil
+ case *rsa.PublicKey:
+ switch certificate.SignatureAlgorithm {
+ case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
+ hashed := hashAlgorithm.Digest(message)
+ return rsa.VerifyPKCS1v15(p, hashAlgorithm.CryptoHash(), hashed, remoteKeySignature)
+ default:
+ return errKeySignatureVerifyUnimplemented
+ }
+ }
+
+ return errKeySignatureVerifyUnimplemented
+}
+
+// If the server has sent a CertificateRequest message, the client MUST send the Certificate
+// message. The ClientKeyExchange message is now sent, and the content
+// of that message will depend on the public key algorithm selected
+// between the ClientHello and the ServerHello. If the client has sent
+// a certificate with signing ability, a digitally-signed
+// CertificateVerify message is sent to explicitly verify possession of
+// the private key in the certificate.
+// https://tools.ietf.org/html/rfc5246#section-7.3
+func generateCertificateVerify(handshakeBodies []byte, privateKey crypto.PrivateKey, hashAlgorithm hash.Algorithm) ([]byte, error) {
+ if p, ok := privateKey.(ed25519.PrivateKey); ok {
+ // https://pkg.go.dev/crypto/ed25519#PrivateKey.Sign
+ // Sign signs the given message with priv. Ed25519 performs two passes over
+ // messages to be signed and therefore cannot handle pre-hashed messages.
+ return p.Sign(rand.Reader, handshakeBodies, crypto.Hash(0))
+ }
+
+ h := sha256.New()
+ if _, err := h.Write(handshakeBodies); err != nil {
+ return nil, err
+ }
+ hashed := h.Sum(nil)
+
+ switch p := privateKey.(type) {
+ case *ecdsa.PrivateKey:
+ return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash())
+ case *rsa.PrivateKey:
+ return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash())
+ }
+
+ return nil, errInvalidSignatureAlgorithm
+}
+
+func verifyCertificateVerify(handshakeBodies []byte, hashAlgorithm hash.Algorithm, remoteKeySignature []byte, rawCertificates [][]byte) error { //nolint:dupl
+ if len(rawCertificates) == 0 {
+ return errLengthMismatch
+ }
+ certificate, err := x509.ParseCertificate(rawCertificates[0])
+ if err != nil {
+ return err
+ }
+
+ switch p := certificate.PublicKey.(type) {
+ case ed25519.PublicKey:
+ if ok := ed25519.Verify(p, handshakeBodies, remoteKeySignature); !ok {
+ return errKeySignatureMismatch
+ }
+ return nil
+ case *ecdsa.PublicKey:
+ ecdsaSig := &ecdsaSignature{}
+ if _, err := asn1.Unmarshal(remoteKeySignature, ecdsaSig); err != nil {
+ return err
+ }
+ if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+ return errInvalidECDSASignature
+ }
+ hash := hashAlgorithm.Digest(handshakeBodies)
+ if !ecdsa.Verify(p, hash, ecdsaSig.R, ecdsaSig.S) {
+ return errKeySignatureMismatch
+ }
+ return nil
+ case *rsa.PublicKey:
+ switch certificate.SignatureAlgorithm {
+ case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
+ hash := hashAlgorithm.Digest(handshakeBodies)
+ return rsa.VerifyPKCS1v15(p, hashAlgorithm.CryptoHash(), hash, remoteKeySignature)
+ default:
+ return errKeySignatureVerifyUnimplemented
+ }
+ }
+
+ return errKeySignatureVerifyUnimplemented
+}
+
+func loadCerts(rawCertificates [][]byte) ([]*x509.Certificate, error) {
+ if len(rawCertificates) == 0 {
+ return nil, errLengthMismatch
+ }
+
+ certs := make([]*x509.Certificate, 0, len(rawCertificates))
+ for _, rawCert := range rawCertificates {
+ cert, err := x509.ParseCertificate(rawCert)
+ if err != nil {
+ return nil, err
+ }
+ certs = append(certs, cert)
+ }
+ return certs, nil
+}
+
+func verifyClientCert(rawCertificates [][]byte, roots *x509.CertPool) (chains [][]*x509.Certificate, err error) {
+ certificate, err := loadCerts(rawCertificates)
+ if err != nil {
+ return nil, err
+ }
+ intermediateCAPool := x509.NewCertPool()
+ for _, cert := range certificate[1:] {
+ intermediateCAPool.AddCert(cert)
+ }
+ opts := x509.VerifyOptions{
+ Roots: roots,
+ CurrentTime: time.Now(),
+ Intermediates: intermediateCAPool,
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+ }
+ return certificate[0].Verify(opts)
+}
+
+func verifyServerCert(rawCertificates [][]byte, roots *x509.CertPool, serverName string) (chains [][]*x509.Certificate, err error) {
+ certificate, err := loadCerts(rawCertificates)
+ if err != nil {
+ return nil, err
+ }
+ intermediateCAPool := x509.NewCertPool()
+ for _, cert := range certificate[1:] {
+ intermediateCAPool.AddCert(cert)
+ }
+ opts := x509.VerifyOptions{
+ Roots: roots,
+ CurrentTime: time.Now(),
+ DNSName: serverName,
+ Intermediates: intermediateCAPool,
+ }
+ return certificate[0].Verify(opts)
+}
diff --git a/vendor/github.com/pion/dtls/v2/dtls.go b/vendor/github.com/pion/dtls/v2/dtls.go
new file mode 100644
index 000000000..125b904e5
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/dtls.go
@@ -0,0 +1,2 @@
+// Package dtls implements Datagram Transport Layer Security (DTLS) 1.2
+package dtls
diff --git a/vendor/github.com/pion/dtls/v2/errors.go b/vendor/github.com/pion/dtls/v2/errors.go
new file mode 100644
index 000000000..df16a21de
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/errors.go
@@ -0,0 +1,154 @@
+package dtls
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+)
+
+// Typed errors
+var (
+ ErrConnClosed = &FatalError{Err: errors.New("conn is closed")} //nolint:goerr113
+
+ errDeadlineExceeded = &TimeoutError{Err: fmt.Errorf("read/write timeout: %w", context.DeadlineExceeded)}
+ errInvalidContentType = &TemporaryError{Err: errors.New("invalid content type")} //nolint:goerr113
+
+ errBufferTooSmall = &TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113
+ errContextUnsupported = &TemporaryError{Err: errors.New("context is not supported for ExportKeyingMaterial")} //nolint:goerr113
+ errHandshakeInProgress = &TemporaryError{Err: errors.New("handshake is in progress")} //nolint:goerr113
+ errReservedExportKeyingMaterial = &TemporaryError{Err: errors.New("ExportKeyingMaterial can not be used with a reserved label")} //nolint:goerr113
+ errApplicationDataEpochZero = &TemporaryError{Err: errors.New("ApplicationData with epoch of 0")} //nolint:goerr113
+ errUnhandledContextType = &TemporaryError{Err: errors.New("unhandled contentType")} //nolint:goerr113
+
+ errCertificateVerifyNoCertificate = &FatalError{Err: errors.New("client sent certificate verify but we have no certificate to verify")} //nolint:goerr113
+ errCipherSuiteNoIntersection = &FatalError{Err: errors.New("client+server do not support any shared cipher suites")} //nolint:goerr113
+ errClientCertificateNotVerified = &FatalError{Err: errors.New("client sent certificate but did not verify it")} //nolint:goerr113
+ errClientCertificateRequired = &FatalError{Err: errors.New("server required client verification, but got none")} //nolint:goerr113
+ errClientNoMatchingSRTPProfile = &FatalError{Err: errors.New("server responded with SRTP Profile we do not support")} //nolint:goerr113
+ errClientRequiredButNoServerEMS = &FatalError{Err: errors.New("client required Extended Master Secret extension, but server does not support it")} //nolint:goerr113
+ errCookieMismatch = &FatalError{Err: errors.New("client+server cookie does not match")} //nolint:goerr113
+ errIdentityNoPSK = &FatalError{Err: errors.New("PSK Identity Hint provided but PSK is nil")} //nolint:goerr113
+ errInvalidCertificate = &FatalError{Err: errors.New("no certificate provided")} //nolint:goerr113
+ errInvalidCipherSuite = &FatalError{Err: errors.New("invalid or unknown cipher suite")} //nolint:goerr113
+ errInvalidECDSASignature = &FatalError{Err: errors.New("ECDSA signature contained zero or negative values")} //nolint:goerr113
+ errInvalidPrivateKey = &FatalError{Err: errors.New("invalid private key type")} //nolint:goerr113
+ errInvalidSignatureAlgorithm = &FatalError{Err: errors.New("invalid signature algorithm")} //nolint:goerr113
+ errKeySignatureMismatch = &FatalError{Err: errors.New("expected and actual key signature do not match")} //nolint:goerr113
+ errNilNextConn = &FatalError{Err: errors.New("Conn can not be created with a nil nextConn")} //nolint:goerr113
+ errNoAvailableCipherSuites = &FatalError{Err: errors.New("connection can not be created, no CipherSuites satisfy this Config")} //nolint:goerr113
+ errNoAvailablePSKCipherSuite = &FatalError{Err: errors.New("connection can not be created, pre-shared key present but no compatible CipherSuite")} //nolint:goerr113
+ errNoAvailableCertificateCipherSuite = &FatalError{Err: errors.New("connection can not be created, certificate present but no compatible CipherSuite")} //nolint:goerr113
+ errNoAvailableSignatureSchemes = &FatalError{Err: errors.New("connection can not be created, no SignatureScheme satisfy this Config")} //nolint:goerr113
+ errNoCertificates = &FatalError{Err: errors.New("no certificates configured")} //nolint:goerr113
+ errNoConfigProvided = &FatalError{Err: errors.New("no config provided")} //nolint:goerr113
+ errNoSupportedEllipticCurves = &FatalError{Err: errors.New("client requested zero or more elliptic curves that are not supported by the server")} //nolint:goerr113
+ errUnsupportedProtocolVersion = &FatalError{Err: errors.New("unsupported protocol version")} //nolint:goerr113
+ errPSKAndIdentityMustBeSetForClient = &FatalError{Err: errors.New("PSK and PSK Identity Hint must both be set for client")} //nolint:goerr113
+ errRequestedButNoSRTPExtension = &FatalError{Err: errors.New("SRTP support was requested but server did not respond with use_srtp extension")} //nolint:goerr113
+ errServerNoMatchingSRTPProfile = &FatalError{Err: errors.New("client requested SRTP but we have no matching profiles")} //nolint:goerr113
+ errServerRequiredButNoClientEMS = &FatalError{Err: errors.New("server requires the Extended Master Secret extension, but the client does not support it")} //nolint:goerr113
+ errVerifyDataMismatch = &FatalError{Err: errors.New("expected and actual verify data does not match")} //nolint:goerr113
+ errNotAcceptableCertificateChain = &FatalError{Err: errors.New("certificate chain is not signed by an acceptable CA")} //nolint:goerr113
+
+ errInvalidFlight = &InternalError{Err: errors.New("invalid flight number")} //nolint:goerr113
+ errKeySignatureGenerateUnimplemented = &InternalError{Err: errors.New("unable to generate key signature, unimplemented")} //nolint:goerr113
+ errKeySignatureVerifyUnimplemented = &InternalError{Err: errors.New("unable to verify key signature, unimplemented")} //nolint:goerr113
+ errLengthMismatch = &InternalError{Err: errors.New("data length and declared length do not match")} //nolint:goerr113
+ errSequenceNumberOverflow = &InternalError{Err: errors.New("sequence number overflow")} //nolint:goerr113
+ errInvalidFSMTransition = &InternalError{Err: errors.New("invalid state machine transition")} //nolint:goerr113
+ errFailedToAccessPoolReadBuffer = &InternalError{Err: errors.New("failed to access pool read buffer")} //nolint:goerr113
+ errFragmentBufferOverflow = &InternalError{Err: errors.New("fragment buffer overflow")} //nolint:goerr113
+)
+
+// FatalError indicates that the DTLS connection is no longer available.
+// It is mainly caused by an incorrect configuration of the server or client.
+type FatalError = protocol.FatalError
+
+// InternalError indicates an internal error caused by the implementation; the DTLS connection is no longer available.
+// It is mainly caused by bugs or attempts to use unimplemented features.
+type InternalError = protocol.InternalError
+
+// TemporaryError indicates that the DTLS connection is still available, but the request failed temporarily.
+type TemporaryError = protocol.TemporaryError
+
+// TimeoutError indicates that the request timed out.
+type TimeoutError = protocol.TimeoutError
+
+// HandshakeError indicates that the handshake failed.
+type HandshakeError = protocol.HandshakeError
+
+// invalidCipherSuiteError indicates an attempt to use an unsupported cipher suite.
+type invalidCipherSuiteError struct {
+ id CipherSuiteID
+}
+
+func (e *invalidCipherSuiteError) Error() string {
+ return fmt.Sprintf("CipherSuite with id(%d) is not valid", e.id)
+}
+
+func (e *invalidCipherSuiteError) Is(err error) bool {
+ var other *invalidCipherSuiteError
+ if errors.As(err, &other) {
+ return e.id == other.id
+ }
+ return false
+}
+
+// alertError wraps a DTLS alert notification as an error
+type alertError struct {
+ *alert.Alert
+}
+
+func (e *alertError) Error() string {
+ return fmt.Sprintf("alert: %s", e.Alert.String())
+}
+
+func (e *alertError) IsFatalOrCloseNotify() bool {
+ return e.Level == alert.Fatal || e.Description == alert.CloseNotify
+}
+
+func (e *alertError) Is(err error) bool {
+ var other *alertError
+ if errors.As(err, &other) {
+ return e.Level == other.Level && e.Description == other.Description
+ }
+ return false
+}
+
+// netError translates an error from underlying Conn to corresponding net.Error.
+func netError(err error) error {
+ switch {
+ case errors.Is(err, io.EOF), errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded):
+ // Return io.EOF and context errors as is.
+ return err
+ }
+
+ var (
+ ne net.Error
+ opError *net.OpError
+ se *os.SyscallError
+ )
+
+ if errors.As(err, &opError) {
+ if errors.As(opError, &se) {
+ if se.Timeout() {
+ return &TimeoutError{Err: err}
+ }
+ if isOpErrorTemporary(se) {
+ return &TemporaryError{Err: err}
+ }
+ }
+ }
+
+ if errors.As(err, &ne) {
+ return err
+ }
+
+ return &FatalError{Err: err}
+}
diff --git a/vendor/github.com/pion/dtls/v2/errors_errno.go b/vendor/github.com/pion/dtls/v2/errors_errno.go
new file mode 100644
index 000000000..ddb0ebbc0
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/errors_errno.go
@@ -0,0 +1,19 @@
+//go:build aix || darwin || dragonfly || freebsd || linux || nacl || nacljs || netbsd || openbsd || solaris || windows
+// +build aix darwin dragonfly freebsd linux nacl nacljs netbsd openbsd solaris windows
+
+// For systems having syscall.Errno.
+// Update build targets with the following command:
+// $ grep -R ECONN $(go env GOROOT)/src/syscall/zerrors_*.go \
+// | tr "." "_" | cut -d"_" -f"2" | sort | uniq
+
+package dtls
+
+import (
+ "errors"
+ "os"
+ "syscall"
+)
+
+func isOpErrorTemporary(err *os.SyscallError) bool {
+ return errors.Is(err.Err, syscall.ECONNREFUSED)
+}
diff --git a/vendor/github.com/pion/dtls/v2/errors_noerrno.go b/vendor/github.com/pion/dtls/v2/errors_noerrno.go
new file mode 100644
index 000000000..ad1bf8523
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/errors_noerrno.go
@@ -0,0 +1,15 @@
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !nacl && !nacljs && !netbsd && !openbsd && !solaris && !windows
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!nacl,!nacljs,!netbsd,!openbsd,!solaris,!windows
+
+// For systems without syscall.Errno.
+// Build targets must be inverse of errors_errno.go
+
+package dtls
+
+import (
+ "os"
+)
+
+func isOpErrorTemporary(err *os.SyscallError) bool {
+ return false
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight.go b/vendor/github.com/pion/dtls/v2/flight.go
new file mode 100644
index 000000000..bed3f8c9a
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight.go
@@ -0,0 +1,101 @@
+package dtls
+
+/*
+ DTLS messages are grouped into a series of message flights, according
+ to the diagrams below. Although each flight of messages may consist
+ of a number of messages, they should be viewed as monolithic for the
+ purpose of timeout and retransmission.
+ https://tools.ietf.org/html/rfc4347#section-4.2.4
+
+ Message flights for full handshake:
+
+ Client Server
+ ------ ------
+ Waiting Flight 0
+
+ ClientHello --------> Flight 1
+
+ <------- HelloVerifyRequest Flight 2
+
+ ClientHello --------> Flight 3
+
+ ServerHello \
+ Certificate* \
+ ServerKeyExchange* Flight 4
+ CertificateRequest* /
+ <-------- ServerHelloDone /
+
+ Certificate* \
+ ClientKeyExchange \
+ CertificateVerify* Flight 5
+ [ChangeCipherSpec] /
+ Finished --------> /
+
+ [ChangeCipherSpec] \ Flight 6
+ <-------- Finished /
+
+ Message flights for session-resuming handshake (no cookie exchange):
+
+ Client Server
+ ------ ------
+ Waiting Flight 0
+
+ ClientHello --------> Flight 1
+
+ ServerHello \
+ [ChangeCipherSpec] Flight 4b
+ <-------- Finished /
+
+ [ChangeCipherSpec] \ Flight 5b
+ Finished --------> /
+
+ [ChangeCipherSpec] \ Flight 6
+ <-------- Finished /
+*/
+
+type flightVal uint8
+
+const (
+ flight0 flightVal = iota + 1
+ flight1
+ flight2
+ flight3
+ flight4
+ flight4b
+ flight5
+ flight5b
+ flight6
+)
+
+func (f flightVal) String() string {
+ switch f {
+ case flight0:
+ return "Flight 0"
+ case flight1:
+ return "Flight 1"
+ case flight2:
+ return "Flight 2"
+ case flight3:
+ return "Flight 3"
+ case flight4:
+ return "Flight 4"
+ case flight4b:
+ return "Flight 4b"
+ case flight5:
+ return "Flight 5"
+ case flight5b:
+ return "Flight 5b"
+ case flight6:
+ return "Flight 6"
+ default:
+ return "Invalid Flight"
+ }
+}
+
+func (f flightVal) isLastSendFlight() bool {
+ return f == flight6 || f == flight5b
+}
+
+func (f flightVal) isLastRecvFlight() bool {
+ return f == flight5 || f == flight4b
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight0handler.go b/vendor/github.com/pion/dtls/v2/flight0handler.go
new file mode 100644
index 000000000..fa4720e0e
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight0handler.go
@@ -0,0 +1,135 @@
+package dtls
+
+import (
+ "context"
+ "crypto/rand"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/extension"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+)
+
+func flight0Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) {
+ seq, msgs, ok := cache.fullPullMap(0, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+ state.handshakeRecvSequence = seq
+
+ var clientHello *handshake.MessageClientHello
+
+ // Validate type
+ if clientHello, ok = msgs[handshake.TypeClientHello].(*handshake.MessageClientHello); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+
+ if !clientHello.Version.Equal(protocol.Version1_2) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion
+ }
+
+ state.remoteRandom = clientHello.Random
+
+ cipherSuites := []CipherSuite{}
+ for _, id := range clientHello.CipherSuiteIDs {
+ if c := cipherSuiteForID(CipherSuiteID(id), cfg.customCipherSuites); c != nil {
+ cipherSuites = append(cipherSuites, c)
+ }
+ }
+
+ if state.cipherSuite, ok = findMatchingCipherSuite(cipherSuites, cfg.localCipherSuites); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errCipherSuiteNoIntersection
+ }
+
+ for _, val := range clientHello.Extensions {
+ switch e := val.(type) {
+ case *extension.SupportedEllipticCurves:
+ if len(e.EllipticCurves) == 0 {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoSupportedEllipticCurves
+ }
+ state.namedCurve = e.EllipticCurves[0]
+ case *extension.UseSRTP:
+ profile, ok := findMatchingSRTPProfile(e.ProtectionProfiles, cfg.localSRTPProtectionProfiles)
+ if !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errServerNoMatchingSRTPProfile
+ }
+ state.srtpProtectionProfile = profile
+ case *extension.UseExtendedMasterSecret:
+ if cfg.extendedMasterSecret != DisableExtendedMasterSecret {
+ state.extendedMasterSecret = true
+ }
+ case *extension.ServerName:
+ state.serverName = e.ServerName // remote server name
+ case *extension.ALPN:
+ state.peerSupportedProtocols = e.ProtocolNameList
+ }
+ }
+
+ if cfg.extendedMasterSecret == RequireExtendedMasterSecret && !state.extendedMasterSecret {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errServerRequiredButNoClientEMS
+ }
+
+ if state.localKeypair == nil {
+ var err error
+ state.localKeypair, err = elliptic.GenerateKeypair(state.namedCurve)
+ if err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err
+ }
+ }
+
+ nextFlight := flight2
+
+ if cfg.insecureSkipHelloVerify {
+ nextFlight = flight4
+ }
+
+ return handleHelloResume(clientHello.SessionID, state, cfg, nextFlight)
+}
+
+func handleHelloResume(sessionID []byte, state *State, cfg *handshakeConfig, next flightVal) (flightVal, *alert.Alert, error) {
+ if len(sessionID) > 0 && cfg.sessionStore != nil {
+ if s, err := cfg.sessionStore.Get(sessionID); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ } else if s.ID != nil {
+ cfg.log.Tracef("[handshake] resume session: %x", sessionID)
+
+ state.SessionID = sessionID
+ state.masterSecret = s.Secret
+
+ if err := state.initCipherSuite(); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+
+ clientRandom := state.localRandom.MarshalFixed()
+ cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret)
+
+ return flight4b, nil, nil
+ }
+ }
+ return next, nil, nil
+}
+
+func flight0Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) {
+ // Initialize
+ if !cfg.insecureSkipHelloVerify {
+ state.cookie = make([]byte, cookieLength)
+ if _, err := rand.Read(state.cookie); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ var zeroEpoch uint16
+ state.localEpoch.Store(zeroEpoch)
+ state.remoteEpoch.Store(zeroEpoch)
+ state.namedCurve = defaultNamedCurve
+
+ if err := state.localRandom.Populate(); err != nil {
+ return nil, nil, err
+ }
+
+ return nil, nil, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight1handler.go b/vendor/github.com/pion/dtls/v2/flight1handler.go
new file mode 100644
index 000000000..7cfcea8fb
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight1handler.go
@@ -0,0 +1,138 @@
+package dtls
+
+import (
+ "context"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/extension"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+func flight1Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) {
+ // HelloVerifyRequest can be skipped by the server,
+ // so allow ServerHello during flight1 also
+ seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeHelloVerifyRequest, cfg.initialEpoch, false, true},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, true},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+
+ if _, ok := msgs[handshake.TypeServerHello]; ok {
+ // Flight1 and flight2 were skipped.
+ // Parse as flight3.
+ return flight3Parse(ctx, c, state, cache, cfg)
+ }
+
+ if h, ok := msgs[handshake.TypeHelloVerifyRequest].(*handshake.MessageHelloVerifyRequest); ok {
+ // DTLS 1.2 clients must not assume that the server will use the protocol version
+ // specified in HelloVerifyRequest message. RFC 6347 Section 4.2.1
+ if !h.Version.Equal(protocol.Version1_0) && !h.Version.Equal(protocol.Version1_2) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion
+ }
+ state.cookie = append([]byte{}, h.Cookie...)
+ state.handshakeRecvSequence = seq
+ return flight3, nil, nil
+ }
+
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+}
+
+func flight1Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) {
+ var zeroEpoch uint16
+ state.localEpoch.Store(zeroEpoch)
+ state.remoteEpoch.Store(zeroEpoch)
+ state.namedCurve = defaultNamedCurve
+ state.cookie = nil
+
+ if err := state.localRandom.Populate(); err != nil {
+ return nil, nil, err
+ }
+
+ extensions := []extension.Extension{
+ &extension.SupportedSignatureAlgorithms{
+ SignatureHashAlgorithms: cfg.localSignatureSchemes,
+ },
+ &extension.RenegotiationInfo{
+ RenegotiatedConnection: 0,
+ },
+ }
+
+ var setEllipticCurveCryptographyClientHelloExtensions bool
+ for _, c := range cfg.localCipherSuites {
+ if c.ECC() {
+ setEllipticCurveCryptographyClientHelloExtensions = true
+ break
+ }
+ }
+
+ if setEllipticCurveCryptographyClientHelloExtensions {
+ extensions = append(extensions, []extension.Extension{
+ &extension.SupportedEllipticCurves{
+ EllipticCurves: cfg.ellipticCurves,
+ },
+ &extension.SupportedPointFormats{
+ PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed},
+ },
+ }...)
+ }
+
+ if len(cfg.localSRTPProtectionProfiles) > 0 {
+ extensions = append(extensions, &extension.UseSRTP{
+ ProtectionProfiles: cfg.localSRTPProtectionProfiles,
+ })
+ }
+
+ if cfg.extendedMasterSecret == RequestExtendedMasterSecret ||
+ cfg.extendedMasterSecret == RequireExtendedMasterSecret {
+ extensions = append(extensions, &extension.UseExtendedMasterSecret{
+ Supported: true,
+ })
+ }
+
+ if len(cfg.serverName) > 0 {
+ extensions = append(extensions, &extension.ServerName{ServerName: cfg.serverName})
+ }
+
+ if len(cfg.supportedProtocols) > 0 {
+ extensions = append(extensions, &extension.ALPN{ProtocolNameList: cfg.supportedProtocols})
+ }
+
+ if cfg.sessionStore != nil {
+ cfg.log.Tracef("[handshake] try to resume session")
+ if s, err := cfg.sessionStore.Get(c.sessionKey()); err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ } else if s.ID != nil {
+ cfg.log.Tracef("[handshake] get saved session: %x", s.ID)
+
+ state.SessionID = s.ID
+ state.masterSecret = s.Secret
+ }
+ }
+
+ return []*packet{
+ {
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageClientHello{
+ Version: protocol.Version1_2,
+ SessionID: state.SessionID,
+ Cookie: state.cookie,
+ Random: state.localRandom,
+ CipherSuiteIDs: cipherSuiteIDs(cfg.localCipherSuites),
+ CompressionMethods: defaultCompressionMethods(),
+ Extensions: extensions,
+ },
+ },
+ },
+ },
+ }, nil, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight2handler.go b/vendor/github.com/pion/dtls/v2/flight2handler.go
new file mode 100644
index 000000000..9a1bf34f7
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight2handler.go
@@ -0,0 +1,61 @@
+package dtls
+
+import (
+ "bytes"
+ "context"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+func flight2Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) {
+ seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ )
+ if !ok {
+ // Client may retransmit the first ClientHello when HelloVerifyRequest is dropped.
+ // Parse as flight 0 in this case.
+ return flight0Parse(ctx, c, state, cache, cfg)
+ }
+ state.handshakeRecvSequence = seq
+
+ var clientHello *handshake.MessageClientHello
+
+ // Validate type
+ if clientHello, ok = msgs[handshake.TypeClientHello].(*handshake.MessageClientHello); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+
+ if !clientHello.Version.Equal(protocol.Version1_2) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion
+ }
+
+ if len(clientHello.Cookie) == 0 {
+ return 0, nil, nil
+ }
+ if !bytes.Equal(state.cookie, clientHello.Cookie) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.AccessDenied}, errCookieMismatch
+ }
+ return flight4, nil, nil
+}
+
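+// flight2Generate builds the server's HelloVerifyRequest carrying the stateless
+// cookie, and resets the handshake send sequence for the retransmitted ClientHello.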
+func flight2Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) {
+ state.handshakeSendSequence = 0
+ return []*packet{
+ {
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageHelloVerifyRequest{
+ Version: protocol.Version1_2,
+ Cookie: state.cookie,
+ },
+ },
+ },
+ },
+ }, nil, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight3handler.go b/vendor/github.com/pion/dtls/v2/flight3handler.go
new file mode 100644
index 000000000..0a8c5d952
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight3handler.go
@@ -0,0 +1,288 @@
+package dtls
+
+import (
+ "bytes"
+ "context"
+
+ "github.com/pion/dtls/v2/internal/ciphersuite/types"
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/extension"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+func flight3Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { //nolint:gocognit
+ // Clients may receive multiple HelloVerifyRequest messages with different cookies.
+ // Clients SHOULD handle this by sending a new ClientHello with a cookie in response
+ // to the new HelloVerifyRequest. RFC 6347 Section 4.2.1
+ seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeHelloVerifyRequest, cfg.initialEpoch, false, true},
+ )
+ if ok {
+ if h, msgOk := msgs[handshake.TypeHelloVerifyRequest].(*handshake.MessageHelloVerifyRequest); msgOk {
+ // DTLS 1.2 clients must not assume that the server will use the protocol version
+ // specified in HelloVerifyRequest message. RFC 6347 Section 4.2.1
+ if !h.Version.Equal(protocol.Version1_0) && !h.Version.Equal(protocol.Version1_2) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion
+ }
+ state.cookie = append([]byte{}, h.Cookie...)
+ state.handshakeRecvSequence = seq
+ return flight3, nil, nil
+ }
+ }
+
+ _, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ )
+ if !ok {
+ // Don't have enough messages. Keep reading
+ return 0, nil, nil
+ }
+
+ if h, msgOk := msgs[handshake.TypeServerHello].(*handshake.MessageServerHello); msgOk {
+ if !h.Version.Equal(protocol.Version1_2) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion
+ }
+ for _, v := range h.Extensions {
+ switch e := v.(type) {
+ case *extension.UseSRTP:
+ profile, found := findMatchingSRTPProfile(e.ProtectionProfiles, cfg.localSRTPProtectionProfiles)
+ if !found {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, errClientNoMatchingSRTPProfile
+ }
+ state.srtpProtectionProfile = profile
+ case *extension.UseExtendedMasterSecret:
+ if cfg.extendedMasterSecret != DisableExtendedMasterSecret {
+ state.extendedMasterSecret = true
+ }
+ case *extension.ALPN:
+ if len(e.ProtocolNameList) > 1 { // This list should contain exactly one entry; the zero case is handled when unmarshalling
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, extension.ErrALPNInvalidFormat // Meh, internal error?
+ }
+ state.NegotiatedProtocol = e.ProtocolNameList[0]
+ }
+ }
+ if cfg.extendedMasterSecret == RequireExtendedMasterSecret && !state.extendedMasterSecret {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errClientRequiredButNoServerEMS
+ }
+ if len(cfg.localSRTPProtectionProfiles) > 0 && state.srtpProtectionProfile == 0 {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errRequestedButNoSRTPExtension
+ }
+
+ remoteCipherSuite := cipherSuiteForID(CipherSuiteID(*h.CipherSuiteID), cfg.customCipherSuites)
+ if remoteCipherSuite == nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errCipherSuiteNoIntersection
+ }
+
+ selectedCipherSuite, found := findMatchingCipherSuite([]CipherSuite{remoteCipherSuite}, cfg.localCipherSuites)
+ if !found {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite
+ }
+
+ state.cipherSuite = selectedCipherSuite
+ state.remoteRandom = h.Random
+ cfg.log.Tracef("[handshake] use cipher suite: %s", selectedCipherSuite.String())
+
+ if len(h.SessionID) > 0 && bytes.Equal(state.SessionID, h.SessionID) {
+ return handleResumption(ctx, c, state, cache, cfg)
+ }
+
+ if len(state.SessionID) > 0 {
+ cfg.log.Tracef("[handshake] clean old session : %s", state.SessionID)
+ if err := cfg.sessionStore.Del(state.SessionID); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ if cfg.sessionStore == nil {
+ state.SessionID = []byte{}
+ } else {
+ state.SessionID = h.SessionID
+ }
+
+ state.masterSecret = []byte{}
+ }
+
+ if cfg.localPSKCallback != nil {
+ seq, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, true},
+ handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false},
+ )
+ } else {
+ seq, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, true},
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, true},
+ handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false},
+ )
+ }
+ if !ok {
+ // Don't have enough messages. Keep reading
+ return 0, nil, nil
+ }
+ state.handshakeRecvSequence = seq
+
+ if h, ok := msgs[handshake.TypeCertificate].(*handshake.MessageCertificate); ok {
+ state.PeerCertificates = h.Certificate
+ } else if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errInvalidCertificate
+ }
+
+ if h, ok := msgs[handshake.TypeServerKeyExchange].(*handshake.MessageServerKeyExchange); ok {
+ alertPtr, err := handleServerKeyExchange(c, state, cfg, h)
+ if err != nil {
+ return 0, alertPtr, err
+ }
+ }
+
+ if _, ok := msgs[handshake.TypeCertificateRequest].(*handshake.MessageCertificateRequest); ok {
+ state.remoteRequestedCertificate = true
+ }
+
+ return flight5, nil, nil
+}
+
+func handleResumption(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) {
+ if err := state.initCipherSuite(); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+
+ // Now, encrypted packets can be handled
+ if err := c.handleQueuedPackets(ctx); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+
+ _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+
+ var finished *handshake.MessageFinished
+ if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+ plainText := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ )
+
+ expectedVerifyData, err := prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc())
+ if err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ if !bytes.Equal(expectedVerifyData, finished.VerifyData) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch
+ }
+
+ clientRandom := state.localRandom.MarshalFixed()
+ cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret)
+
+ return flight5b, nil, nil
+}
+
+func handleServerKeyExchange(_ flightConn, state *State, cfg *handshakeConfig, h *handshake.MessageServerKeyExchange) (*alert.Alert, error) {
+ var err error
+ if state.cipherSuite == nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite
+ }
+ if cfg.localPSKCallback != nil {
+ var psk []byte
+ if psk, err = cfg.localPSKCallback(h.IdentityHint); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ state.IdentityHint = h.IdentityHint
+ switch state.cipherSuite.KeyExchangeAlgorithm() {
+ case types.KeyExchangeAlgorithmPsk:
+ state.preMasterSecret = prf.PSKPreMasterSecret(psk)
+ case (types.KeyExchangeAlgorithmEcdhe | types.KeyExchangeAlgorithmPsk):
+ if state.localKeypair, err = elliptic.GenerateKeypair(h.NamedCurve); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ state.preMasterSecret, err = prf.EcdhePSKPreMasterSecret(psk, h.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve)
+ if err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ default:
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite
+ }
+ } else {
+ if state.localKeypair, err = elliptic.GenerateKeypair(h.NamedCurve); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+
+ if state.preMasterSecret, err = prf.PreMasterSecret(h.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ return nil, nil //nolint:nilnil
+}
+
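+// flight3Generate re-sends the ClientHello, now carrying the cookie received in
+// the HelloVerifyRequest, along with the client's extensions.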
+func flight3Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) {
+ extensions := []extension.Extension{
+ &extension.SupportedSignatureAlgorithms{
+ SignatureHashAlgorithms: cfg.localSignatureSchemes,
+ },
+ &extension.RenegotiationInfo{
+ RenegotiatedConnection: 0,
+ },
+ }
+ if state.namedCurve != 0 {
+ extensions = append(extensions, []extension.Extension{
+ &extension.SupportedEllipticCurves{
+ EllipticCurves: []elliptic.Curve{elliptic.X25519, elliptic.P256, elliptic.P384},
+ },
+ &extension.SupportedPointFormats{
+ PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed},
+ },
+ }...)
+ }
+
+ if len(cfg.localSRTPProtectionProfiles) > 0 {
+ extensions = append(extensions, &extension.UseSRTP{
+ ProtectionProfiles: cfg.localSRTPProtectionProfiles,
+ })
+ }
+
+ if cfg.extendedMasterSecret == RequestExtendedMasterSecret ||
+ cfg.extendedMasterSecret == RequireExtendedMasterSecret {
+ extensions = append(extensions, &extension.UseExtendedMasterSecret{
+ Supported: true,
+ })
+ }
+
+ if len(cfg.serverName) > 0 {
+ extensions = append(extensions, &extension.ServerName{ServerName: cfg.serverName})
+ }
+
+ if len(cfg.supportedProtocols) > 0 {
+ extensions = append(extensions, &extension.ALPN{ProtocolNameList: cfg.supportedProtocols})
+ }
+
+ return []*packet{
+ {
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageClientHello{
+ Version: protocol.Version1_2,
+ SessionID: state.SessionID,
+ Cookie: state.cookie,
+ Random: state.localRandom,
+ CipherSuiteIDs: cipherSuiteIDs(cfg.localCipherSuites),
+ CompressionMethods: defaultCompressionMethods(),
+ Extensions: extensions,
+ },
+ },
+ },
+ },
+ }, nil, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight4bhandler.go b/vendor/github.com/pion/dtls/v2/flight4bhandler.go
new file mode 100644
index 000000000..46515c57f
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight4bhandler.go
@@ -0,0 +1,141 @@
+package dtls
+
+import (
+ "bytes"
+ "context"
+
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/extension"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+func flight4bParse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) {
+ _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+
+ var finished *handshake.MessageFinished
+ if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+
+ plainText := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false},
+ )
+
+ expectedVerifyData, err := prf.VerifyDataClient(state.masterSecret, plainText, state.cipherSuite.HashFunc())
+ if err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ if !bytes.Equal(expectedVerifyData, finished.VerifyData) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch
+ }
+
+ // The other party may retransmit the last flight. Keep the state at flight4b.
+ return flight4b, nil, nil
+}
+
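+// flight4bGenerate builds the server's abbreviated flight for session resumption:
+// ServerHello, ChangeCipherSpec, and an encrypted Finished message.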
+func flight4bGenerate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) {
+ var pkts []*packet
+
+ extensions := []extension.Extension{&extension.RenegotiationInfo{
+ RenegotiatedConnection: 0,
+ }}
+ if (cfg.extendedMasterSecret == RequestExtendedMasterSecret ||
+ cfg.extendedMasterSecret == RequireExtendedMasterSecret) && state.extendedMasterSecret {
+ extensions = append(extensions, &extension.UseExtendedMasterSecret{
+ Supported: true,
+ })
+ }
+ if state.srtpProtectionProfile != 0 {
+ extensions = append(extensions, &extension.UseSRTP{
+ ProtectionProfiles: []SRTPProtectionProfile{state.srtpProtectionProfile},
+ })
+ }
+
+ selectedProto, err := extension.ALPNProtocolSelection(cfg.supportedProtocols, state.peerSupportedProtocols)
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.NoApplicationProtocol}, err
+ }
+ if selectedProto != "" {
+ extensions = append(extensions, &extension.ALPN{
+ ProtocolNameList: []string{selectedProto},
+ })
+ state.NegotiatedProtocol = selectedProto
+ }
+
+ cipherSuiteID := uint16(state.cipherSuite.ID())
+ serverHello := &handshake.Handshake{
+ Message: &handshake.MessageServerHello{
+ Version: protocol.Version1_2,
+ Random: state.localRandom,
+ SessionID: state.SessionID,
+ CipherSuiteID: &cipherSuiteID,
+ CompressionMethod: defaultCompressionMethods()[0],
+ Extensions: extensions,
+ },
+ }
+
+ serverHello.Header.MessageSequence = uint16(state.handshakeSendSequence)
+
+ if len(state.localVerifyData) == 0 {
+ plainText := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ )
+ raw, err := serverHello.Marshal()
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ plainText = append(plainText, raw...)
+
+ state.localVerifyData, err = prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc())
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: serverHello,
+ },
+ },
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &protocol.ChangeCipherSpec{},
+ },
+ },
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ Epoch: 1,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageFinished{
+ VerifyData: state.localVerifyData,
+ },
+ },
+ },
+ shouldEncrypt: true,
+ resetLocalSequenceNumber: true,
+ },
+ )
+
+ return pkts, nil, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight4handler.go b/vendor/github.com/pion/dtls/v2/flight4handler.go
new file mode 100644
index 000000000..9792e0a92
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight4handler.go
@@ -0,0 +1,399 @@
+package dtls
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/x509"
+
+ "github.com/pion/dtls/v2/internal/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/crypto/signaturehash"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/extension"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+func flight4Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { //nolint:gocognit
+ seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, true},
+ handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, true},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+
+ // Validate type
+ var clientKeyExchange *handshake.MessageClientKeyExchange
+ if clientKeyExchange, ok = msgs[handshake.TypeClientKeyExchange].(*handshake.MessageClientKeyExchange); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+
+ if h, hasCert := msgs[handshake.TypeCertificate].(*handshake.MessageCertificate); hasCert {
+ state.PeerCertificates = h.Certificate
+ // If the client offers its certificate, just disable session resumption.
+ // Otherwise, we would have to store the certificate identification and expiry time,
+ // and check whether the certificate has expired, been revoked, or changed.
+ //
+ // https://curl.se/docs/CVE-2016-5419.html
+ state.SessionID = nil
+ }
+
+ if h, hasCertVerify := msgs[handshake.TypeCertificateVerify].(*handshake.MessageCertificateVerify); hasCertVerify {
+ if state.PeerCertificates == nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errCertificateVerifyNoCertificate
+ }
+
+ plainText := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false},
+ )
+
+ // Verify that the pair of hash and signature algorithms is in the allowed list.
+ var validSignatureScheme bool
+ for _, ss := range cfg.localSignatureSchemes {
+ if ss.Hash == h.HashAlgorithm && ss.Signature == h.SignatureAlgorithm {
+ validSignatureScheme = true
+ break
+ }
+ }
+ if !validSignatureScheme {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoAvailableSignatureSchemes
+ }
+
+ if err := verifyCertificateVerify(plainText, h.HashAlgorithm, h.Signature, state.PeerCertificates); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ var chains [][]*x509.Certificate
+ var err error
+ var verified bool
+ if cfg.clientAuth >= VerifyClientCertIfGiven {
+ if chains, err = verifyClientCert(state.PeerCertificates, cfg.clientCAs); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ verified = true
+ }
+ if cfg.verifyPeerCertificate != nil {
+ if err := cfg.verifyPeerCertificate(state.PeerCertificates, chains); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ }
+ state.peerCertificatesVerified = verified
+ } else if state.PeerCertificates != nil {
+ // A certificate was received, but we haven't seen a CertificateVerify yet;
+ // keep reading until we receive one.
+ return 0, nil, nil
+ }
+
+ if !state.cipherSuite.IsInitialized() {
+ serverRandom := state.localRandom.MarshalFixed()
+ clientRandom := state.remoteRandom.MarshalFixed()
+
+ var err error
+ var preMasterSecret []byte
+ if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypePreSharedKey {
+ var psk []byte
+ if psk, err = cfg.localPSKCallback(clientKeyExchange.IdentityHint); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ state.IdentityHint = clientKeyExchange.IdentityHint
+ switch state.cipherSuite.KeyExchangeAlgorithm() {
+ case CipherSuiteKeyExchangeAlgorithmPsk:
+ preMasterSecret = prf.PSKPreMasterSecret(psk)
+ case (CipherSuiteKeyExchangeAlgorithmPsk | CipherSuiteKeyExchangeAlgorithmEcdhe):
+ if preMasterSecret, err = prf.EcdhePSKPreMasterSecret(psk, clientKeyExchange.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ default:
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, errInvalidCipherSuite
+ }
+ } else {
+ preMasterSecret, err = prf.PreMasterSecret(clientKeyExchange.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve)
+ if err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err
+ }
+ }
+
+ if state.extendedMasterSecret {
+ var sessionHash []byte
+ sessionHash, err = cache.sessionHash(state.cipherSuite.HashFunc(), cfg.initialEpoch)
+ if err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+
+ state.masterSecret, err = prf.ExtendedMasterSecret(preMasterSecret, sessionHash, state.cipherSuite.HashFunc())
+ if err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ } else {
+ state.masterSecret, err = prf.MasterSecret(preMasterSecret, clientRandom[:], serverRandom[:], state.cipherSuite.HashFunc())
+ if err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ if err := state.cipherSuite.Init(state.masterSecret, clientRandom[:], serverRandom[:], false); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret)
+ }
+
+ if len(state.SessionID) > 0 {
+ s := Session{
+ ID: state.SessionID,
+ Secret: state.masterSecret,
+ }
+ cfg.log.Tracef("[handshake] save new session: %x", s.ID)
+ if err := cfg.sessionStore.Set(state.SessionID, s); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ // Now, encrypted packets can be handled
+ if err := c.handleQueuedPackets(ctx); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+
+ seq, msgs, ok = cache.fullPullMap(seq, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+ state.handshakeRecvSequence = seq
+
+ if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+
+ if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeAnonymous {
+ if cfg.verifyConnection != nil {
+ if err := cfg.verifyConnection(state.clone()); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ }
+ return flight6, nil, nil
+ }
+
+ switch cfg.clientAuth {
+ case RequireAnyClientCert:
+ if state.PeerCertificates == nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errClientCertificateRequired
+ }
+ case VerifyClientCertIfGiven:
+ if state.PeerCertificates != nil && !state.peerCertificatesVerified {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, errClientCertificateNotVerified
+ }
+ case RequireAndVerifyClientCert:
+ if state.PeerCertificates == nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errClientCertificateRequired
+ }
+ if !state.peerCertificatesVerified {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, errClientCertificateNotVerified
+ }
+ case NoClientCert, RequestClientCert:
+ // go to flight6
+ }
+ if cfg.verifyConnection != nil {
+ if err := cfg.verifyConnection(state.clone()); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ }
+
+ return flight6, nil, nil
+}
+
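+// flight4Generate builds the server's full flight 4: ServerHello, then for
+// certificate-based suites a Certificate, ServerKeyExchange, and optional
+// CertificateRequest, or for PSK suites an optional ServerKeyExchange with the
+// identity hint, always terminated by ServerHelloDone.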
+func flight4Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) {
+ extensions := []extension.Extension{&extension.RenegotiationInfo{
+ RenegotiatedConnection: 0,
+ }}
+ if (cfg.extendedMasterSecret == RequestExtendedMasterSecret ||
+ cfg.extendedMasterSecret == RequireExtendedMasterSecret) && state.extendedMasterSecret {
+ extensions = append(extensions, &extension.UseExtendedMasterSecret{
+ Supported: true,
+ })
+ }
+ if state.srtpProtectionProfile != 0 {
+ extensions = append(extensions, &extension.UseSRTP{
+ ProtectionProfiles: []SRTPProtectionProfile{state.srtpProtectionProfile},
+ })
+ }
+ if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate {
+ extensions = append(extensions, &extension.SupportedPointFormats{
+ PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed},
+ })
+ }
+
+ selectedProto, err := extension.ALPNProtocolSelection(cfg.supportedProtocols, state.peerSupportedProtocols)
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.NoApplicationProtocol}, err
+ }
+ if selectedProto != "" {
+ extensions = append(extensions, &extension.ALPN{
+ ProtocolNameList: []string{selectedProto},
+ })
+ state.NegotiatedProtocol = selectedProto
+ }
+
+ var pkts []*packet
+ cipherSuiteID := uint16(state.cipherSuite.ID())
+
+ if cfg.sessionStore != nil {
+ state.SessionID = make([]byte, sessionLength)
+ if _, err := rand.Read(state.SessionID); err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ pkts = append(pkts, &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageServerHello{
+ Version: protocol.Version1_2,
+ Random: state.localRandom,
+ SessionID: state.SessionID,
+ CipherSuiteID: &cipherSuiteID,
+ CompressionMethod: defaultCompressionMethods()[0],
+ Extensions: extensions,
+ },
+ },
+ },
+ })
+
+ switch {
+ case state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate:
+ certificate, err := cfg.getCertificate(&ClientHelloInfo{
+ ServerName: state.serverName,
+ CipherSuites: []ciphersuite.ID{state.cipherSuite.ID()},
+ })
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, err
+ }
+
+ pkts = append(pkts, &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageCertificate{
+ Certificate: certificate.Certificate,
+ },
+ },
+ },
+ })
+
+ serverRandom := state.localRandom.MarshalFixed()
+ clientRandom := state.remoteRandom.MarshalFixed()
+
+ // Find compatible signature scheme
+ signatureHashAlgo, err := signaturehash.SelectSignatureScheme(cfg.localSignatureSchemes, certificate.PrivateKey)
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, err
+ }
+
+ signature, err := generateKeySignature(clientRandom[:], serverRandom[:], state.localKeypair.PublicKey, state.namedCurve, certificate.PrivateKey, signatureHashAlgo.Hash)
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ state.localKeySignature = signature
+
+ pkts = append(pkts, &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageServerKeyExchange{
+ EllipticCurveType: elliptic.CurveTypeNamedCurve,
+ NamedCurve: state.namedCurve,
+ PublicKey: state.localKeypair.PublicKey,
+ HashAlgorithm: signatureHashAlgo.Hash,
+ SignatureAlgorithm: signatureHashAlgo.Signature,
+ Signature: state.localKeySignature,
+ },
+ },
+ },
+ })
+
+ if cfg.clientAuth > NoClientCert {
+ // An empty list of certificateAuthorities signals to
+ // the client that it may send any certificate in response
+ // to our request. When we know the CAs we trust, then
+ // we can send them down, so that the client can choose
+ // an appropriate certificate to give to us.
+ var certificateAuthorities [][]byte
+ if cfg.clientCAs != nil {
+ // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool and it's ok if certificate authorities is empty.
+ certificateAuthorities = cfg.clientCAs.Subjects()
+ }
+ pkts = append(pkts, &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageCertificateRequest{
+ CertificateTypes: []clientcertificate.Type{clientcertificate.RSASign, clientcertificate.ECDSASign},
+ SignatureHashAlgorithms: cfg.localSignatureSchemes,
+ CertificateAuthoritiesNames: certificateAuthorities,
+ },
+ },
+ },
+ })
+ }
+ case cfg.localPSKIdentityHint != nil || state.cipherSuite.KeyExchangeAlgorithm().Has(CipherSuiteKeyExchangeAlgorithmEcdhe):
+ // To help the client in selecting which identity to use, the server
+ // can provide a "PSK identity hint" in the ServerKeyExchange message.
+ // If no hint is provided and the cipher suite doesn't use an elliptic curve,
+ // the ServerKeyExchange message is omitted.
+ //
+ // https://tools.ietf.org/html/rfc4279#section-2
+ srvExchange := &handshake.MessageServerKeyExchange{
+ IdentityHint: cfg.localPSKIdentityHint,
+ }
+ if state.cipherSuite.KeyExchangeAlgorithm().Has(CipherSuiteKeyExchangeAlgorithmEcdhe) {
+ srvExchange.EllipticCurveType = elliptic.CurveTypeNamedCurve
+ srvExchange.NamedCurve = state.namedCurve
+ srvExchange.PublicKey = state.localKeypair.PublicKey
+ }
+ pkts = append(pkts, &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: srvExchange,
+ },
+ },
+ })
+ }
+
+ pkts = append(pkts, &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageServerHelloDone{},
+ },
+ },
+ })
+
+ return pkts, nil, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight5bhandler.go b/vendor/github.com/pion/dtls/v2/flight5bhandler.go
new file mode 100644
index 000000000..bd330d515
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight5bhandler.go
@@ -0,0 +1,75 @@
+package dtls
+
+import (
+ "context"
+
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+func flight5bParse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) {
+ _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-1, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+
+ if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+
+ // The other party may retransmit the last flight. Keep the state at flight5b.
+ return flight5b, nil, nil
+}
+
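+// flight5bGenerate builds the client's resumption flight: ChangeCipherSpec followed
+// by an encrypted Finished message.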
+func flight5bGenerate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { //nolint:gocognit
+ var pkts []*packet
+
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &protocol.ChangeCipherSpec{},
+ },
+ })
+
+ if len(state.localVerifyData) == 0 {
+ plainText := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false},
+ )
+
+ var err error
+ state.localVerifyData, err = prf.VerifyDataClient(state.masterSecret, plainText, state.cipherSuite.HashFunc())
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ Epoch: 1,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageFinished{
+ VerifyData: state.localVerifyData,
+ },
+ },
+ },
+ shouldEncrypt: true,
+ resetLocalSequenceNumber: true,
+ })
+
+ return pkts, nil, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight5handler.go b/vendor/github.com/pion/dtls/v2/flight5handler.go
new file mode 100644
index 000000000..5a60973d7
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight5handler.go
@@ -0,0 +1,354 @@
+package dtls
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/x509"
+
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/crypto/signaturehash"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+func flight5Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) {
+ _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+
+ var finished *handshake.MessageFinished
+ if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+ plainText := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false},
+ )
+
+ expectedVerifyData, err := prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc())
+ if err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ if !bytes.Equal(expectedVerifyData, finished.VerifyData) {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch
+ }
+
+ if len(state.SessionID) > 0 {
+ s := Session{
+ ID: state.SessionID,
+ Secret: state.masterSecret,
+ }
+ cfg.log.Tracef("[handshake] save new session: %x", s.ID)
+ if err := cfg.sessionStore.Set(c.sessionKey(), s); err != nil {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ return flight5, nil, nil
+}
+
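+// flight5Generate builds the client's flight 5: an optional Certificate (when the
+// server requested one), ClientKeyExchange, an optional CertificateVerify, then
+// ChangeCipherSpec and an encrypted Finished. The cipher suite is initialized from
+// the server's key exchange before the Finished verify data is computed.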
+func flight5Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { //nolint:gocognit
+ var privateKey crypto.PrivateKey
+ var pkts []*packet
+ if state.remoteRequestedCertificate {
+ _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-2, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false})
+ if !ok {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errClientCertificateRequired
+ }
+ reqInfo := CertificateRequestInfo{}
+ if r, ok := msgs[handshake.TypeCertificateRequest].(*handshake.MessageCertificateRequest); ok {
+ reqInfo.AcceptableCAs = r.CertificateAuthoritiesNames
+ } else {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errClientCertificateRequired
+ }
+ certificate, err := cfg.getClientCertificate(&reqInfo)
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, err
+ }
+ if certificate == nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errNotAcceptableCertificateChain
+ }
+ if certificate.Certificate != nil {
+ privateKey = certificate.PrivateKey
+ }
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageCertificate{
+ Certificate: certificate.Certificate,
+ },
+ },
+ },
+ })
+ }
+
+ clientKeyExchange := &handshake.MessageClientKeyExchange{}
+ if cfg.localPSKCallback == nil {
+ clientKeyExchange.PublicKey = state.localKeypair.PublicKey
+ } else {
+ clientKeyExchange.IdentityHint = cfg.localPSKIdentityHint
+ }
+ if state != nil && state.localKeypair != nil && len(state.localKeypair.PublicKey) > 0 {
+ clientKeyExchange.PublicKey = state.localKeypair.PublicKey
+ }
+
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: clientKeyExchange,
+ },
+ },
+ })
+
+ serverKeyExchangeData := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false},
+ )
+
+ serverKeyExchange := &handshake.MessageServerKeyExchange{}
+
+ // The ServerKeyExchange message is optional for PSK cipher suites.
+ if len(serverKeyExchangeData) == 0 {
+ alertPtr, err := handleServerKeyExchange(c, state, cfg, &handshake.MessageServerKeyExchange{})
+ if err != nil {
+ return nil, alertPtr, err
+ }
+ } else {
+ rawHandshake := &handshake.Handshake{
+ KeyExchangeAlgorithm: state.cipherSuite.KeyExchangeAlgorithm(),
+ }
+ err := rawHandshake.Unmarshal(serverKeyExchangeData)
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, err
+ }
+
+ switch h := rawHandshake.Message.(type) {
+ case *handshake.MessageServerKeyExchange:
+ serverKeyExchange = h
+ default:
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, errInvalidContentType
+ }
+ }
+
+ // Serialize the packets queued above (not yet sent) so they can be included in the handshake transcript
+ merged := []byte{}
+ seqPred := uint16(state.handshakeSendSequence)
+ for _, p := range pkts {
+ h, ok := p.record.Content.(*handshake.Handshake)
+ if !ok {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, errInvalidContentType
+ }
+ h.Header.MessageSequence = seqPred
+ seqPred++
+ raw, err := h.Marshal()
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ merged = append(merged, raw...)
+ }
+
+ if alertPtr, err := initalizeCipherSuite(state, cache, cfg, serverKeyExchange, merged); err != nil {
+ return nil, alertPtr, err
+ }
+
+ // If the client has sent a certificate with signing ability, a digitally-signed
+ // CertificateVerify message is sent to explicitly verify possession of the
+ // private key in the certificate.
+ if state.remoteRequestedCertificate && privateKey != nil {
+ plainText := append(cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false},
+ ), merged...)
+
+ // Find compatible signature scheme
+ signatureHashAlgo, err := signaturehash.SelectSignatureScheme(cfg.localSignatureSchemes, privateKey)
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, err
+ }
+
+ certVerify, err := generateCertificateVerify(plainText, privateKey, signatureHashAlgo.Hash)
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ state.localCertificatesVerify = certVerify
+
+ p := &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageCertificateVerify{
+ HashAlgorithm: signatureHashAlgo.Hash,
+ SignatureAlgorithm: signatureHashAlgo.Signature,
+ Signature: state.localCertificatesVerify,
+ },
+ },
+ },
+ }
+ pkts = append(pkts, p)
+
+ h, ok := p.record.Content.(*handshake.Handshake)
+ if !ok {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, errInvalidContentType
+ }
+ h.Header.MessageSequence = seqPred
+ // seqPred++ // this is the last use of seqPred
+ raw, err := h.Marshal()
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ merged = append(merged, raw...)
+ }
+
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &protocol.ChangeCipherSpec{},
+ },
+ })
+
+ if len(state.localVerifyData) == 0 {
+ plainText := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false},
+ )
+
+ var err error
+ state.localVerifyData, err = prf.VerifyDataClient(state.masterSecret, append(plainText, merged...), state.cipherSuite.HashFunc())
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ Epoch: 1,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageFinished{
+ VerifyData: state.localVerifyData,
+ },
+ },
+ },
+ shouldEncrypt: true,
+ resetLocalSequenceNumber: true,
+ })
+
+ return pkts, nil, nil
+}
+
+func initalizeCipherSuite(state *State, cache *handshakeCache, cfg *handshakeConfig, h *handshake.MessageServerKeyExchange, sendingPlainText []byte) (*alert.Alert, error) { //nolint:gocognit
+ if state.cipherSuite.IsInitialized() {
+ return nil, nil //nolint
+ }
+
+ clientRandom := state.localRandom.MarshalFixed()
+ serverRandom := state.remoteRandom.MarshalFixed()
+
+ var err error
+
+ if state.extendedMasterSecret {
+ var sessionHash []byte
+ sessionHash, err = cache.sessionHash(state.cipherSuite.HashFunc(), cfg.initialEpoch, sendingPlainText)
+ if err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+
+ state.masterSecret, err = prf.ExtendedMasterSecret(state.preMasterSecret, sessionHash, state.cipherSuite.HashFunc())
+ if err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err
+ }
+ } else {
+ state.masterSecret, err = prf.MasterSecret(state.preMasterSecret, clientRandom[:], serverRandom[:], state.cipherSuite.HashFunc())
+ if err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate {
+ // Verify that the pair of hash and signature algorithms is in the allowed list.
+ var validSignatureScheme bool
+ for _, ss := range cfg.localSignatureSchemes {
+ if ss.Hash == h.HashAlgorithm && ss.Signature == h.SignatureAlgorithm {
+ validSignatureScheme = true
+ break
+ }
+ }
+ if !validSignatureScheme {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoAvailableSignatureSchemes
+ }
+
+ expectedMsg := valueKeyMessage(clientRandom[:], serverRandom[:], h.PublicKey, h.NamedCurve)
+ if err = verifyKeySignature(expectedMsg, h.Signature, h.HashAlgorithm, state.PeerCertificates); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ var chains [][]*x509.Certificate
+ if !cfg.insecureSkipVerify {
+ if chains, err = verifyServerCert(state.PeerCertificates, cfg.rootCAs, cfg.serverName); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ }
+ if cfg.verifyPeerCertificate != nil {
+ if err = cfg.verifyPeerCertificate(state.PeerCertificates, chains); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ }
+ }
+ if cfg.verifyConnection != nil {
+ if err = cfg.verifyConnection(state.clone()); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err
+ }
+ }
+
+ if err = state.cipherSuite.Init(state.masterSecret, clientRandom[:], serverRandom[:], true); err != nil {
+ return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+
+ cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret)
+
+ return nil, nil //nolint
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight6handler.go b/vendor/github.com/pion/dtls/v2/flight6handler.go
new file mode 100644
index 000000000..d3d62f05b
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight6handler.go
@@ -0,0 +1,82 @@
+package dtls
+
+import (
+ "context"
+
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+func flight6Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) {
+ _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-1, state.cipherSuite,
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false},
+ )
+ if !ok {
+ // No valid message received. Keep reading
+ return 0, nil, nil
+ }
+
+ if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok {
+ return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil
+ }
+
+ // The other party may retransmit the last flight. Keep the state at flight6.
+ return flight6, nil, nil
+}
+
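+// flight6Generate builds the server's final flight: ChangeCipherSpec followed by an
+// encrypted Finished message.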
+func flight6Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) {
+ var pkts []*packet
+
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ },
+ Content: &protocol.ChangeCipherSpec{},
+ },
+ })
+
+ if len(state.localVerifyData) == 0 {
+ plainText := cache.pullAndMerge(
+ handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false},
+ handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false},
+ )
+
+ var err error
+ state.localVerifyData, err = prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc())
+ if err != nil {
+ return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err
+ }
+ }
+
+ pkts = append(pkts,
+ &packet{
+ record: &recordlayer.RecordLayer{
+ Header: recordlayer.Header{
+ Version: protocol.Version1_2,
+ Epoch: 1,
+ },
+ Content: &handshake.Handshake{
+ Message: &handshake.MessageFinished{
+ VerifyData: state.localVerifyData,
+ },
+ },
+ },
+ shouldEncrypt: true,
+ resetLocalSequenceNumber: true,
+ },
+ )
+ return pkts, nil, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/flighthandler.go b/vendor/github.com/pion/dtls/v2/flighthandler.go
new file mode 100644
index 000000000..f899ffa5b
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flighthandler.go
@@ -0,0 +1,65 @@
+package dtls
+
+import (
+ "context"
+
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+)
+
+// Parse received handshakes and return next flightVal
+type flightParser func(context.Context, flightConn, *State, *handshakeCache, *handshakeConfig) (flightVal, *alert.Alert, error)
+
+// Generate flights
+type flightGenerator func(flightConn, *State, *handshakeCache, *handshakeConfig) ([]*packet, *alert.Alert, error)
+
+func (f flightVal) getFlightParser() (flightParser, error) {
+ switch f {
+ case flight0:
+ return flight0Parse, nil
+ case flight1:
+ return flight1Parse, nil
+ case flight2:
+ return flight2Parse, nil
+ case flight3:
+ return flight3Parse, nil
+ case flight4:
+ return flight4Parse, nil
+ case flight4b:
+ return flight4bParse, nil
+ case flight5:
+ return flight5Parse, nil
+ case flight5b:
+ return flight5bParse, nil
+ case flight6:
+ return flight6Parse, nil
+ default:
+ return nil, errInvalidFlight
+ }
+}
+
+func (f flightVal) getFlightGenerator() (gen flightGenerator, retransmit bool, err error) {
+ switch f {
+ case flight0:
+ return flight0Generate, true, nil
+ case flight1:
+ return flight1Generate, true, nil
+ case flight2:
+ // https://tools.ietf.org/html/rfc6347#section-3.2.1
+ // HelloVerifyRequests must not be retransmitted.
+ return flight2Generate, false, nil
+ case flight3:
+ return flight3Generate, true, nil
+ case flight4:
+ return flight4Generate, true, nil
+ case flight4b:
+ return flight4bGenerate, true, nil
+ case flight5:
+ return flight5Generate, true, nil
+ case flight5b:
+ return flight5bGenerate, true, nil
+ case flight6:
+ return flight6Generate, true, nil
+ default:
+ return nil, false, errInvalidFlight
+ }
+}
diff --git a/vendor/github.com/pion/dtls/v2/fragment_buffer.go b/vendor/github.com/pion/dtls/v2/fragment_buffer.go
new file mode 100644
index 000000000..c86a7e8f9
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/fragment_buffer.go
@@ -0,0 +1,129 @@
+package dtls
+
+import (
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// 2 megabytes
+const fragmentBufferMaxSize = 2000000
+
+type fragment struct {
+ recordLayerHeader recordlayer.Header
+ handshakeHeader handshake.Header
+ data []byte
+}
+
+type fragmentBuffer struct {
+	// cache maps a message sequence number to the fragments received for that handshake message
+ cache map[uint16][]*fragment
+
+ currentMessageSequenceNumber uint16
+}
+
+func newFragmentBuffer() *fragmentBuffer {
+ return &fragmentBuffer{cache: map[uint16][]*fragment{}}
+}
+
+// current total size of buffer
+func (f *fragmentBuffer) size() int {
+ size := 0
+ for i := range f.cache {
+ for j := range f.cache[i] {
+ size += len(f.cache[i][j].data)
+ }
+ }
+ return size
+}
+
+// push attempts to insert a DTLS packet into the fragmentBuffer.
+// When it returns true, the handshake fragments were inserted and the caller should not process the buffer further.
+// A returned error is fatal, and the DTLS connection should be stopped.
+func (f *fragmentBuffer) push(buf []byte) (bool, error) {
+ if f.size()+len(buf) >= fragmentBufferMaxSize {
+ return false, errFragmentBufferOverflow
+ }
+
+ frag := new(fragment)
+ if err := frag.recordLayerHeader.Unmarshal(buf); err != nil {
+ return false, err
+ }
+
+	// If the fragment isn't a handshake, there is nothing for us to handle
+ if frag.recordLayerHeader.ContentType != protocol.ContentTypeHandshake {
+ return false, nil
+ }
+
+ for buf = buf[recordlayer.HeaderSize:]; len(buf) != 0; frag = new(fragment) {
+ if err := frag.handshakeHeader.Unmarshal(buf); err != nil {
+ return false, err
+ }
+
+ if _, ok := f.cache[frag.handshakeHeader.MessageSequence]; !ok {
+ f.cache[frag.handshakeHeader.MessageSequence] = []*fragment{}
+ }
+
+		// The end index would normally be the header length plus the declared handshake length,
+		// but when the handshake is fragmented this record only carries part of it, so clamp to the buffer
+ end := int(handshake.HeaderLength + frag.handshakeHeader.Length)
+ if size := len(buf); end > size {
+ end = size
+ }
+
+		// Discard the headers; pop re-creates a single header from the first fragment when rebuilding the message
+ frag.data = append([]byte{}, buf[handshake.HeaderLength:end]...)
+ f.cache[frag.handshakeHeader.MessageSequence] = append(f.cache[frag.handshakeHeader.MessageSequence], frag)
+ buf = buf[end:]
+ }
+
+ return true, nil
+}
+
+func (f *fragmentBuffer) pop() (content []byte, epoch uint16) {
+ frags, ok := f.cache[f.currentMessageSequenceNumber]
+ if !ok {
+ return nil, 0
+ }
+
+ // Go doesn't support recursive lambdas
+ var appendMessage func(targetOffset uint32) bool
+
+ rawMessage := []byte{}
+ appendMessage = func(targetOffset uint32) bool {
+ for _, f := range frags {
+ if f.handshakeHeader.FragmentOffset == targetOffset {
+ fragmentEnd := (f.handshakeHeader.FragmentOffset + f.handshakeHeader.FragmentLength)
+ if fragmentEnd != f.handshakeHeader.Length && f.handshakeHeader.FragmentLength != 0 {
+ if !appendMessage(fragmentEnd) {
+ return false
+ }
+ }
+
+ rawMessage = append(f.data, rawMessage...)
+ return true
+ }
+ }
+ return false
+ }
+
+	// Recursively collect the fragments, starting from offset 0
+ if !appendMessage(0) {
+ return nil, 0
+ }
+
+ firstHeader := frags[0].handshakeHeader
+ firstHeader.FragmentOffset = 0
+ firstHeader.FragmentLength = firstHeader.Length
+
+ rawHeader, err := firstHeader.Marshal()
+ if err != nil {
+ return nil, 0
+ }
+
+ messageEpoch := frags[0].recordLayerHeader.Epoch
+
+ delete(f.cache, f.currentMessageSequenceNumber)
+ f.currentMessageSequenceNumber++
+ return append(rawHeader, rawMessage...), messageEpoch
+}
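The pop routine above rebuilds a fragmented handshake message by chasing fragment offsets until the declared length is covered, then re-marshals a single header in front of the reassembled payload. A minimal, self-contained sketch of that reassembly idea follows; it is not the vendored API, and the frag type and its fields are purely illustrative.

package main

import "fmt"

// frag is a simplified stand-in for a DTLS handshake fragment.
type frag struct {
	offset uint32
	length uint32 // total message length, as carried in every fragment header
	data   []byte
}

// reassemble concatenates fragments starting at offset 0 until the
// declared message length is covered, returning nil if a gap remains.
func reassemble(frags []frag) []byte {
	if len(frags) == 0 {
		return nil
	}
	total := frags[0].length
	out := make([]byte, 0, total)
	for next := uint32(0); next < total; {
		found := false
		for _, f := range frags {
			if f.offset == next {
				out = append(out, f.data...)
				next += uint32(len(f.data))
				found = true
				break
			}
		}
		if !found {
			return nil // missing fragment, keep waiting for retransmission
		}
	}
	return out
}

func main() {
	msg := []byte("hello dtls")
	frags := []frag{
		{offset: 5, length: uint32(len(msg)), data: msg[5:]},
		{offset: 0, length: uint32(len(msg)), data: msg[:5]},
	}
	fmt.Printf("%s\n", reassemble(frags)) // hello dtls
}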
diff --git a/vendor/github.com/pion/dtls/v2/handshake_cache.go b/vendor/github.com/pion/dtls/v2/handshake_cache.go
new file mode 100644
index 000000000..27d246597
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/handshake_cache.go
@@ -0,0 +1,169 @@
+package dtls
+
+import (
+ "sync"
+
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+)
+
+type handshakeCacheItem struct {
+ typ handshake.Type
+ isClient bool
+ epoch uint16
+ messageSequence uint16
+ data []byte
+}
+
+type handshakeCachePullRule struct {
+ typ handshake.Type
+ epoch uint16
+ isClient bool
+ optional bool
+}
+
+type handshakeCache struct {
+ cache []*handshakeCacheItem
+ mu sync.Mutex
+}
+
+func newHandshakeCache() *handshakeCache {
+ return &handshakeCache{}
+}
+
+func (h *handshakeCache) push(data []byte, epoch, messageSequence uint16, typ handshake.Type, isClient bool) {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ h.cache = append(h.cache, &handshakeCacheItem{
+ data: append([]byte{}, data...),
+ epoch: epoch,
+ messageSequence: messageSequence,
+ typ: typ,
+ isClient: isClient,
+ })
+}
+
+// pull returns a list of handshakes that match the requested rules.
+// The list contains nil entries for rules that can't be satisfied.
+// Multiple entries may match a rule, but only the latest match (highest message sequence) is returned, for example a ClientHello with cookies.
+func (h *handshakeCache) pull(rules ...handshakeCachePullRule) []*handshakeCacheItem {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ out := make([]*handshakeCacheItem, len(rules))
+ for i, r := range rules {
+ for _, c := range h.cache {
+ if c.typ == r.typ && c.isClient == r.isClient && c.epoch == r.epoch {
+ switch {
+ case out[i] == nil:
+ out[i] = c
+ case out[i].messageSequence < c.messageSequence:
+ out[i] = c
+ }
+ }
+ }
+ }
+
+ return out
+}
+
+// fullPullMap pulls all handshakes from rules[0] through rules[len(rules)-1] and returns them as a map.
+func (h *handshakeCache) fullPullMap(startSeq int, cipherSuite CipherSuite, rules ...handshakeCachePullRule) (int, map[handshake.Type]handshake.Message, bool) {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ ci := make(map[handshake.Type]*handshakeCacheItem)
+ for _, r := range rules {
+ var item *handshakeCacheItem
+ for _, c := range h.cache {
+ if c.typ == r.typ && c.isClient == r.isClient && c.epoch == r.epoch {
+ switch {
+ case item == nil:
+ item = c
+ case item.messageSequence < c.messageSequence:
+ item = c
+ }
+ }
+ }
+ if !r.optional && item == nil {
+ // Missing mandatory message.
+ return startSeq, nil, false
+ }
+ ci[r.typ] = item
+ }
+ out := make(map[handshake.Type]handshake.Message)
+ seq := startSeq
+ for _, r := range rules {
+ t := r.typ
+ i := ci[t]
+ if i == nil {
+ continue
+ }
+ var keyExchangeAlgorithm CipherSuiteKeyExchangeAlgorithm
+ if cipherSuite != nil {
+ keyExchangeAlgorithm = cipherSuite.KeyExchangeAlgorithm()
+ }
+ rawHandshake := &handshake.Handshake{
+ KeyExchangeAlgorithm: keyExchangeAlgorithm,
+ }
+ if err := rawHandshake.Unmarshal(i.data); err != nil {
+ return startSeq, nil, false
+ }
+ if uint16(seq) != rawHandshake.Header.MessageSequence {
+			// There is a gap: some messages have not arrived yet.
+ return startSeq, nil, false
+ }
+ seq++
+ out[t] = rawHandshake.Message
+ }
+ return seq, out, true
+}
+
+// pullAndMerge calls pull and then merges the results, ignoring any nil entries
+func (h *handshakeCache) pullAndMerge(rules ...handshakeCachePullRule) []byte {
+ merged := []byte{}
+
+ for _, p := range h.pull(rules...) {
+ if p != nil {
+ merged = append(merged, p.data...)
+ }
+ }
+ return merged
+}
+
+// sessionHash returns the session hash for Extended Master Secret support
+// https://tools.ietf.org/html/draft-ietf-tls-session-hash-06#section-4
+func (h *handshakeCache) sessionHash(hf prf.HashFunc, epoch uint16, additional ...[]byte) ([]byte, error) {
+ merged := []byte{}
+
+ // Order defined by https://tools.ietf.org/html/rfc5246#section-7.3
+ handshakeBuffer := h.pull(
+ handshakeCachePullRule{handshake.TypeClientHello, epoch, true, false},
+ handshakeCachePullRule{handshake.TypeServerHello, epoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, epoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerKeyExchange, epoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificateRequest, epoch, false, false},
+ handshakeCachePullRule{handshake.TypeServerHelloDone, epoch, false, false},
+ handshakeCachePullRule{handshake.TypeCertificate, epoch, true, false},
+ handshakeCachePullRule{handshake.TypeClientKeyExchange, epoch, true, false},
+ )
+
+ for _, p := range handshakeBuffer {
+ if p == nil {
+ continue
+ }
+
+ merged = append(merged, p.data...)
+ }
+ for _, a := range additional {
+ merged = append(merged, a...)
+ }
+
+ hash := hf()
+ if _, err := hash.Write(merged); err != nil {
+ return []byte{}, err
+ }
+
+ return hash.Sum(nil), nil
+}
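For every pull rule, the cache keeps only the most recent matching message (highest message sequence), which is how a retransmitted ClientHello carrying a cookie supersedes the initial one. The sketch below illustrates that selection rule with hypothetical types; it is not the vendored code.

package main

import "fmt"

type item struct {
	typ      string
	isClient bool
	epoch    uint16
	seq      uint16
	data     []byte
}

type rule struct {
	typ      string
	epoch    uint16
	isClient bool
}

// pull returns, for every rule, the latest cached item that matches it (or nil).
func pull(cache []item, rules []rule) []*item {
	out := make([]*item, len(rules))
	for i, r := range rules {
		for j := range cache {
			c := &cache[j]
			if c.typ == r.typ && c.isClient == r.isClient && c.epoch == r.epoch {
				if out[i] == nil || out[i].seq < c.seq {
					out[i] = c
				}
			}
		}
	}
	return out
}

func main() {
	cache := []item{
		{typ: "ClientHello", isClient: true, epoch: 0, seq: 0, data: []byte("no cookie")},
		{typ: "ClientHello", isClient: true, epoch: 0, seq: 1, data: []byte("with cookie")},
	}
	got := pull(cache, []rule{{typ: "ClientHello", epoch: 0, isClient: true}})
	fmt.Println(string(got[0].data)) // with cookie
}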
diff --git a/vendor/github.com/pion/dtls/v2/handshaker.go b/vendor/github.com/pion/dtls/v2/handshaker.go
new file mode 100644
index 000000000..e68802c0b
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/handshaker.go
@@ -0,0 +1,347 @@
+package dtls
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/crypto/signaturehash"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/logging"
+)
+
+// [RFC6347 Section-4.2.4]
+// +-----------+
+// +---> | PREPARING | <--------------------+
+// | +-----------+ |
+// | | |
+// | | Buffer next flight |
+// | | |
+// | \|/ |
+// | +-----------+ |
+// | | SENDING |<------------------+ | Send
+// | +-----------+ | | HelloRequest
+// Receive | | | |
+// next | | Send flight | | or
+// flight | +--------+ | |
+// | | | Set retransmit timer | | Receive
+// | | \|/ | | HelloRequest
+// | | +-----------+ | | Send
+// +--)--| WAITING |-------------------+ | ClientHello
+// | | +-----------+ Timer expires | |
+// | | | | |
+// | | +------------------------+ |
+// Receive | | Send Read retransmit |
+// last | | last |
+// flight | | flight |
+// | | |
+// \|/\|/ |
+// +-----------+ |
+// | FINISHED | -------------------------------+
+// +-----------+
+// | /|\
+// | |
+// +---+
+// Read retransmit
+// Retransmit last flight
+
+type handshakeState uint8
+
+const (
+ handshakeErrored handshakeState = iota
+ handshakePreparing
+ handshakeSending
+ handshakeWaiting
+ handshakeFinished
+)
+
+func (s handshakeState) String() string {
+ switch s {
+ case handshakeErrored:
+ return "Errored"
+ case handshakePreparing:
+ return "Preparing"
+ case handshakeSending:
+ return "Sending"
+ case handshakeWaiting:
+ return "Waiting"
+ case handshakeFinished:
+ return "Finished"
+ default:
+ return "Unknown"
+ }
+}
+
+type handshakeFSM struct {
+ currentFlight flightVal
+ flights []*packet
+ retransmit bool
+ state *State
+ cache *handshakeCache
+ cfg *handshakeConfig
+ closed chan struct{}
+}
+
+type handshakeConfig struct {
+ localPSKCallback PSKCallback
+ localPSKIdentityHint []byte
+ localCipherSuites []CipherSuite // Available CipherSuites
+ localSignatureSchemes []signaturehash.Algorithm // Available signature schemes
+	extendedMasterSecret        ExtendedMasterSecretType  // Policy for the Extended Master Secret extension
+ localSRTPProtectionProfiles []SRTPProtectionProfile // Available SRTPProtectionProfiles, if empty no SRTP support
+ serverName string
+ supportedProtocols []string
+	clientAuth                  ClientAuthType            // Policy for requesting a client certificate when acting as a server
+ localCertificates []tls.Certificate
+ nameToCertificate map[string]*tls.Certificate
+ insecureSkipVerify bool
+ verifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
+ verifyConnection func(*State) error
+ sessionStore SessionStore
+ rootCAs *x509.CertPool
+ clientCAs *x509.CertPool
+ retransmitInterval time.Duration
+ customCipherSuites func() []CipherSuite
+ ellipticCurves []elliptic.Curve
+ insecureSkipHelloVerify bool
+
+ onFlightState func(flightVal, handshakeState)
+ log logging.LeveledLogger
+ keyLogWriter io.Writer
+
+ localGetCertificate func(*ClientHelloInfo) (*tls.Certificate, error)
+ localGetClientCertificate func(*CertificateRequestInfo) (*tls.Certificate, error)
+
+ initialEpoch uint16
+
+ mu sync.Mutex
+}
+
+type flightConn interface {
+ notify(ctx context.Context, level alert.Level, desc alert.Description) error
+ writePackets(context.Context, []*packet) error
+ recvHandshake() <-chan chan struct{}
+ setLocalEpoch(epoch uint16)
+ handleQueuedPackets(context.Context) error
+ sessionKey() []byte
+}
+
+func (c *handshakeConfig) writeKeyLog(label string, clientRandom, secret []byte) {
+ if c.keyLogWriter == nil {
+ return
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ _, err := c.keyLogWriter.Write([]byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret)))
+ if err != nil {
+ c.log.Debugf("failed to write key log file: %s", err)
+ }
+}
+
+func srvCliStr(isClient bool) string {
+ if isClient {
+ return "client"
+ }
+ return "server"
+}
+
+func newHandshakeFSM(
+ s *State, cache *handshakeCache, cfg *handshakeConfig,
+ initialFlight flightVal,
+) *handshakeFSM {
+ return &handshakeFSM{
+ currentFlight: initialFlight,
+ state: s,
+ cache: cache,
+ cfg: cfg,
+ closed: make(chan struct{}),
+ }
+}
+
+func (s *handshakeFSM) Run(ctx context.Context, c flightConn, initialState handshakeState) error {
+ state := initialState
+ defer func() {
+ close(s.closed)
+ }()
+ for {
+ s.cfg.log.Tracef("[handshake:%s] %s: %s", srvCliStr(s.state.isClient), s.currentFlight.String(), state.String())
+ if s.cfg.onFlightState != nil {
+ s.cfg.onFlightState(s.currentFlight, state)
+ }
+ var err error
+ switch state {
+ case handshakePreparing:
+ state, err = s.prepare(ctx, c)
+ case handshakeSending:
+ state, err = s.send(ctx, c)
+ case handshakeWaiting:
+ state, err = s.wait(ctx, c)
+ case handshakeFinished:
+ state, err = s.finish(ctx, c)
+ default:
+ return errInvalidFSMTransition
+ }
+ if err != nil {
+ return err
+ }
+ }
+}
+
+func (s *handshakeFSM) Done() <-chan struct{} {
+ return s.closed
+}
+
+func (s *handshakeFSM) prepare(ctx context.Context, c flightConn) (handshakeState, error) {
+ s.flights = nil
+ // Prepare flights
+ var (
+ a *alert.Alert
+ err error
+ pkts []*packet
+ )
+ gen, retransmit, errFlight := s.currentFlight.getFlightGenerator()
+ if errFlight != nil {
+ err = errFlight
+ a = &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}
+ } else {
+ pkts, a, err = gen(c, s.state, s.cache, s.cfg)
+ s.retransmit = retransmit
+ }
+ if a != nil {
+ if alertErr := c.notify(ctx, a.Level, a.Description); alertErr != nil {
+ if err != nil {
+ err = alertErr
+ }
+ }
+ }
+ if err != nil {
+ return handshakeErrored, err
+ }
+
+ s.flights = pkts
+ epoch := s.cfg.initialEpoch
+ nextEpoch := epoch
+ for _, p := range s.flights {
+ p.record.Header.Epoch += epoch
+ if p.record.Header.Epoch > nextEpoch {
+ nextEpoch = p.record.Header.Epoch
+ }
+ if h, ok := p.record.Content.(*handshake.Handshake); ok {
+ h.Header.MessageSequence = uint16(s.state.handshakeSendSequence)
+ s.state.handshakeSendSequence++
+ }
+ }
+ if epoch != nextEpoch {
+ s.cfg.log.Tracef("[handshake:%s] -> changeCipherSpec (epoch: %d)", srvCliStr(s.state.isClient), nextEpoch)
+ c.setLocalEpoch(nextEpoch)
+ }
+ return handshakeSending, nil
+}
+
+func (s *handshakeFSM) send(ctx context.Context, c flightConn) (handshakeState, error) {
+ // Send flights
+ if err := c.writePackets(ctx, s.flights); err != nil {
+ return handshakeErrored, err
+ }
+
+ if s.currentFlight.isLastSendFlight() {
+ return handshakeFinished, nil
+ }
+ return handshakeWaiting, nil
+}
+
+func (s *handshakeFSM) wait(ctx context.Context, c flightConn) (handshakeState, error) { //nolint:gocognit
+ parse, errFlight := s.currentFlight.getFlightParser()
+ if errFlight != nil {
+ if alertErr := c.notify(ctx, alert.Fatal, alert.InternalError); alertErr != nil {
+ if errFlight != nil {
+ return handshakeErrored, alertErr
+ }
+ }
+ return handshakeErrored, errFlight
+ }
+
+ retransmitTimer := time.NewTimer(s.cfg.retransmitInterval)
+ for {
+ select {
+ case done := <-c.recvHandshake():
+ nextFlight, alert, err := parse(ctx, c, s.state, s.cache, s.cfg)
+ close(done)
+ if alert != nil {
+ if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil {
+ if err != nil {
+ err = alertErr
+ }
+ }
+ }
+ if err != nil {
+ return handshakeErrored, err
+ }
+ if nextFlight == 0 {
+ break
+ }
+ s.cfg.log.Tracef("[handshake:%s] %s -> %s", srvCliStr(s.state.isClient), s.currentFlight.String(), nextFlight.String())
+ if nextFlight.isLastRecvFlight() && s.currentFlight == nextFlight {
+ return handshakeFinished, nil
+ }
+ s.currentFlight = nextFlight
+ return handshakePreparing, nil
+
+ case <-retransmitTimer.C:
+ if !s.retransmit {
+ return handshakeWaiting, nil
+ }
+ return handshakeSending, nil
+ case <-ctx.Done():
+ return handshakeErrored, ctx.Err()
+ }
+ }
+}
+
+func (s *handshakeFSM) finish(ctx context.Context, c flightConn) (handshakeState, error) {
+ parse, errFlight := s.currentFlight.getFlightParser()
+ if errFlight != nil {
+ if alertErr := c.notify(ctx, alert.Fatal, alert.InternalError); alertErr != nil {
+ if errFlight != nil {
+ return handshakeErrored, alertErr
+ }
+ }
+ return handshakeErrored, errFlight
+ }
+
+ retransmitTimer := time.NewTimer(s.cfg.retransmitInterval)
+ select {
+ case done := <-c.recvHandshake():
+ nextFlight, alert, err := parse(ctx, c, s.state, s.cache, s.cfg)
+ close(done)
+ if alert != nil {
+ if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil {
+ if err != nil {
+ err = alertErr
+ }
+ }
+ }
+ if err != nil {
+ return handshakeErrored, err
+ }
+ if nextFlight == 0 {
+ break
+ }
+ if nextFlight.isLastRecvFlight() && s.currentFlight == nextFlight {
+ return handshakeFinished, nil
+ }
+ <-retransmitTimer.C
+ // Retransmit last flight
+ return handshakeSending, nil
+
+ case <-ctx.Done():
+ return handshakeErrored, ctx.Err()
+ }
+ return handshakeFinished, nil
+}
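The FSM above cycles Preparing → Sending → Waiting and drops back to Sending whenever the retransmit timer fires before the peer's next flight arrives. The following compressed, self-contained sketch shows that control flow only; it omits alerts, epochs, flight parsing, and everything else the real handshaker does.

package main

import (
	"fmt"
	"time"
)

type state int

const (
	preparing state = iota
	sending
	waiting
	finished
)

// run drives a toy handshake: recv delivers the peer's flights,
// retransmitEvery triggers a resend while we are still waiting.
func run(recv <-chan string, retransmitEvery time.Duration, lastFlight string) {
	st := preparing
	for {
		switch st {
		case preparing:
			// build the next flight here
			st = sending
		case sending:
			fmt.Println("send flight")
			st = waiting
		case waiting:
			select {
			case flight := <-recv:
				if flight == lastFlight {
					st = finished
				} else {
					st = preparing
				}
			case <-time.After(retransmitEvery):
				st = sending // timer expired, retransmit the last flight
			}
		case finished:
			fmt.Println("handshake done")
			return
		}
	}
}

func main() {
	recv := make(chan string, 1)
	go func() {
		time.Sleep(30 * time.Millisecond) // force at least one retransmission
		recv <- "finished"
	}()
	run(recv, 10*time.Millisecond, "finished")
}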
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_128_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_128_ccm.go
new file mode 100644
index 000000000..afff7365b
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_128_ccm.go
@@ -0,0 +1,30 @@
+package ciphersuite
+
+import (
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+)
+
+// Aes128Ccm is a base class used by multiple AES-CCM Ciphers
+type Aes128Ccm struct {
+ AesCcm
+}
+
+func newAes128Ccm(clientCertificateType clientcertificate.Type, id ID, psk bool, cryptoCCMTagLen ciphersuite.CCMTagLen, keyExchangeAlgorithm KeyExchangeAlgorithm, ecc bool) *Aes128Ccm {
+ return &Aes128Ccm{
+ AesCcm: AesCcm{
+ clientCertificateType: clientCertificateType,
+ id: id,
+ psk: psk,
+ cryptoCCMTagLen: cryptoCCMTagLen,
+ keyExchangeAlgorithm: keyExchangeAlgorithm,
+ ecc: ecc,
+ },
+ }
+}
+
+// Init initializes the internal Cipher with keying material
+func (c *Aes128Ccm) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error {
+ const prfKeyLen = 16
+ return c.AesCcm.Init(masterSecret, clientRandom, serverRandom, isClient, prfKeyLen)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_256_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_256_ccm.go
new file mode 100644
index 000000000..d56ffc5c5
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_256_ccm.go
@@ -0,0 +1,30 @@
+package ciphersuite
+
+import (
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+)
+
+// Aes256Ccm is a base class used by multiple AES-CCM Ciphers
+type Aes256Ccm struct {
+ AesCcm
+}
+
+func newAes256Ccm(clientCertificateType clientcertificate.Type, id ID, psk bool, cryptoCCMTagLen ciphersuite.CCMTagLen, keyExchangeAlgorithm KeyExchangeAlgorithm, ecc bool) *Aes256Ccm {
+ return &Aes256Ccm{
+ AesCcm: AesCcm{
+ clientCertificateType: clientCertificateType,
+ id: id,
+ psk: psk,
+ cryptoCCMTagLen: cryptoCCMTagLen,
+ keyExchangeAlgorithm: keyExchangeAlgorithm,
+ ecc: ecc,
+ },
+ }
+}
+
+// Init initializes the internal Cipher with keying material
+func (c *Aes256Ccm) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error {
+ const prfKeyLen = 32
+ return c.AesCcm.Init(masterSecret, clientRandom, serverRandom, isClient, prfKeyLen)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_ccm.go
new file mode 100644
index 000000000..224d3906c
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_ccm.go
@@ -0,0 +1,110 @@
+package ciphersuite
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "sync/atomic"
+
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// AesCcm is a base class used by multiple AES-CCM Ciphers
+type AesCcm struct {
+ ccm atomic.Value // *cryptoCCM
+ clientCertificateType clientcertificate.Type
+ id ID
+ psk bool
+ keyExchangeAlgorithm KeyExchangeAlgorithm
+ cryptoCCMTagLen ciphersuite.CCMTagLen
+ ecc bool
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *AesCcm) CertificateType() clientcertificate.Type {
+ return c.clientCertificateType
+}
+
+// ID returns the ID of the CipherSuite
+func (c *AesCcm) ID() ID {
+ return c.id
+}
+
+func (c *AesCcm) String() string {
+ return c.id.String()
+}
+
+// ECC reports whether this CipherSuite uses Elliptic Curve Cryptography
+func (c *AesCcm) ECC() bool {
+ return c.ecc
+}
+
+// KeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake
+func (c *AesCcm) KeyExchangeAlgorithm() KeyExchangeAlgorithm {
+ return c.keyExchangeAlgorithm
+}
+
+// HashFunc returns the hashing func for this CipherSuite
+func (c *AesCcm) HashFunc() func() hash.Hash {
+ return sha256.New
+}
+
+// AuthenticationType controls what authentication method is used during the handshake
+func (c *AesCcm) AuthenticationType() AuthenticationType {
+ if c.psk {
+ return AuthenticationTypePreSharedKey
+ }
+ return AuthenticationTypeCertificate
+}
+
+// IsInitialized returns if the CipherSuite has keying material and can
+// encrypt/decrypt packets
+func (c *AesCcm) IsInitialized() bool {
+ return c.ccm.Load() != nil
+}
+
+// Init initializes the internal Cipher with keying material
+func (c *AesCcm) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool, prfKeyLen int) error {
+ const (
+ prfMacLen = 0
+ prfIvLen = 4
+ )
+
+ keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc())
+ if err != nil {
+ return err
+ }
+
+ var ccm *ciphersuite.CCM
+ if isClient {
+ ccm, err = ciphersuite.NewCCM(c.cryptoCCMTagLen, keys.ClientWriteKey, keys.ClientWriteIV, keys.ServerWriteKey, keys.ServerWriteIV)
+ } else {
+ ccm, err = ciphersuite.NewCCM(c.cryptoCCMTagLen, keys.ServerWriteKey, keys.ServerWriteIV, keys.ClientWriteKey, keys.ClientWriteIV)
+ }
+ c.ccm.Store(ccm)
+
+ return err
+}
+
+// Encrypt encrypts a single TLS RecordLayer
+func (c *AesCcm) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.ccm.Load().(*ciphersuite.CCM)
+ if !ok {
+ return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Encrypt(pkt, raw)
+}
+
+// Decrypt decrypts a single TLS RecordLayer
+func (c *AesCcm) Decrypt(raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.ccm.Load().(*ciphersuite.CCM)
+ if !ok {
+ return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Decrypt(raw)
+}
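AesCcm publishes its cipher state through an atomic.Value, so Encrypt, Decrypt, and IsInitialized can run concurrently with Init without ever observing a half-built cipher. A stripped-down sketch of the same pattern follows (illustrative types only, no real AEAD).

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// suite mimics the pattern above: the cipher state is published through an
// atomic.Value so readers never observe a half-initialized cipher.
type suite struct {
	state atomic.Value // *cipherState
}

type cipherState struct{ key []byte }

func (s *suite) Init(key []byte) {
	s.state.Store(&cipherState{key: append([]byte{}, key...)})
}

func (s *suite) Encrypt(plaintext []byte) ([]byte, error) {
	cs, ok := s.state.Load().(*cipherState)
	if !ok {
		return nil, errors.New("cipher suite not initialized")
	}
	// A real suite would AEAD-seal here; we just prefix the key to show the state is available.
	return append(append([]byte{}, cs.key...), plaintext...), nil
}

func main() {
	s := &suite{}
	if _, err := s.Encrypt([]byte("x")); err != nil {
		fmt.Println(err) // cipher suite not initialized
	}
	s.Init([]byte{0x01})
	out, _ := s.Encrypt([]byte("x"))
	fmt.Printf("%x\n", out)
}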
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/ciphersuite.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/ciphersuite.go
new file mode 100644
index 000000000..fbfadc119
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/ciphersuite.go
@@ -0,0 +1,95 @@
+// Package ciphersuite provides TLS Ciphers as registered with the IANA https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4
+package ciphersuite
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/pion/dtls/v2/internal/ciphersuite/types"
+ "github.com/pion/dtls/v2/pkg/protocol"
+)
+
+var errCipherSuiteNotInit = &protocol.TemporaryError{Err: errors.New("CipherSuite has not been initialized")} //nolint:goerr113
+
+// ID is an ID for our supported CipherSuites
+type ID uint16
+
+func (i ID) String() string {
+ switch i {
+ case TLS_ECDHE_ECDSA_WITH_AES_128_CCM:
+ return "TLS_ECDHE_ECDSA_WITH_AES_128_CCM"
+ case TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8:
+ return "TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8"
+ case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ return "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
+ case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+ return "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
+ case TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+ return "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
+ case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ return "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
+ case TLS_PSK_WITH_AES_128_CCM:
+ return "TLS_PSK_WITH_AES_128_CCM"
+ case TLS_PSK_WITH_AES_128_CCM_8:
+ return "TLS_PSK_WITH_AES_128_CCM_8"
+ case TLS_PSK_WITH_AES_256_CCM_8:
+ return "TLS_PSK_WITH_AES_256_CCM_8"
+ case TLS_PSK_WITH_AES_128_GCM_SHA256:
+ return "TLS_PSK_WITH_AES_128_GCM_SHA256"
+ case TLS_PSK_WITH_AES_128_CBC_SHA256:
+ return "TLS_PSK_WITH_AES_128_CBC_SHA256"
+ case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:
+ return "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+ case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:
+ return "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
+ case TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256:
+ return "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256"
+ default:
+ return fmt.Sprintf("unknown(%v)", uint16(i))
+ }
+}
+
+// Supported Cipher Suites
+const (
+ // AES-128-CCM
+ TLS_ECDHE_ECDSA_WITH_AES_128_CCM ID = 0xc0ac //nolint:revive,stylecheck
+ TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 ID = 0xc0ae //nolint:revive,stylecheck
+
+ // AES-128-GCM-SHA256
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 ID = 0xc02b //nolint:revive,stylecheck
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ID = 0xc02f //nolint:revive,stylecheck
+
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ID = 0xc02c //nolint:revive,stylecheck
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ID = 0xc030 //nolint:revive,stylecheck
+ // AES-256-CBC-SHA
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA ID = 0xc00a //nolint:revive,stylecheck
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA ID = 0xc014 //nolint:revive,stylecheck
+
+ TLS_PSK_WITH_AES_128_CCM ID = 0xc0a4 //nolint:revive,stylecheck
+ TLS_PSK_WITH_AES_128_CCM_8 ID = 0xc0a8 //nolint:revive,stylecheck
+ TLS_PSK_WITH_AES_256_CCM_8 ID = 0xc0a9 //nolint:revive,stylecheck
+ TLS_PSK_WITH_AES_128_GCM_SHA256 ID = 0x00a8 //nolint:revive,stylecheck
+ TLS_PSK_WITH_AES_128_CBC_SHA256 ID = 0x00ae //nolint:revive,stylecheck
+
+ TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 ID = 0xC037 //nolint:revive,stylecheck
+)
+
+// AuthenticationType controls what authentication method is used during the handshake
+type AuthenticationType = types.AuthenticationType
+
+// AuthenticationType Enums
+const (
+ AuthenticationTypeCertificate AuthenticationType = types.AuthenticationTypeCertificate
+ AuthenticationTypePreSharedKey AuthenticationType = types.AuthenticationTypePreSharedKey
+ AuthenticationTypeAnonymous AuthenticationType = types.AuthenticationTypeAnonymous
+)
+
+// KeyExchangeAlgorithm controls what exchange algorithm was chosen.
+type KeyExchangeAlgorithm = types.KeyExchangeAlgorithm
+
+// KeyExchangeAlgorithm Bitmask
+const (
+ KeyExchangeAlgorithmNone KeyExchangeAlgorithm = types.KeyExchangeAlgorithmNone
+ KeyExchangeAlgorithmPsk KeyExchangeAlgorithm = types.KeyExchangeAlgorithmPsk
+ KeyExchangeAlgorithmEcdhe KeyExchangeAlgorithm = types.KeyExchangeAlgorithmEcdhe
+)
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm.go
new file mode 100644
index 000000000..91189e139
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm.go
@@ -0,0 +1,11 @@
+package ciphersuite
+
+import (
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+)
+
+// NewTLSEcdheEcdsaWithAes128Ccm constructs a TLS_ECDHE_ECDSA_WITH_AES_128_CCM Cipher
+func NewTLSEcdheEcdsaWithAes128Ccm() *Aes128Ccm {
+ return newAes128Ccm(clientcertificate.ECDSASign, TLS_ECDHE_ECDSA_WITH_AES_128_CCM, false, ciphersuite.CCMTagLength, KeyExchangeAlgorithmEcdhe, true)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm8.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm8.go
new file mode 100644
index 000000000..81368f4ba
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm8.go
@@ -0,0 +1,11 @@
+package ciphersuite
+
+import (
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+)
+
+// NewTLSEcdheEcdsaWithAes128Ccm8 creates a new TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 CipherSuite
+func NewTLSEcdheEcdsaWithAes128Ccm8() *Aes128Ccm {
+ return newAes128Ccm(clientcertificate.ECDSASign, TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8, false, ciphersuite.CCMTagLength8, KeyExchangeAlgorithmEcdhe, true)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_gcm_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_gcm_sha256.go
new file mode 100644
index 000000000..3d1d5e21f
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_gcm_sha256.go
@@ -0,0 +1,105 @@
+package ciphersuite
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "sync/atomic"
+
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// TLSEcdheEcdsaWithAes128GcmSha256 represents a TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 CipherSuite
+type TLSEcdheEcdsaWithAes128GcmSha256 struct {
+ gcm atomic.Value // *cryptoGCM
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) CertificateType() clientcertificate.Type {
+ return clientcertificate.ECDSASign
+}
+
+// KeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) KeyExchangeAlgorithm() KeyExchangeAlgorithm {
+ return KeyExchangeAlgorithmEcdhe
+}
+
+// ECC reports whether this CipherSuite uses Elliptic Curve Cryptography
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) ECC() bool {
+ return true
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) ID() ID {
+ return TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+}
+
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) String() string {
+ return "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
+}
+
+// HashFunc returns the hashing func for this CipherSuite
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) HashFunc() func() hash.Hash {
+ return sha256.New
+}
+
+// AuthenticationType controls what authentication method is used during the handshake
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) AuthenticationType() AuthenticationType {
+ return AuthenticationTypeCertificate
+}
+
+// IsInitialized returns if the CipherSuite has keying material and can
+// encrypt/decrypt packets
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) IsInitialized() bool {
+ return c.gcm.Load() != nil
+}
+
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) init(masterSecret, clientRandom, serverRandom []byte, isClient bool, prfMacLen, prfKeyLen, prfIvLen int, hashFunc func() hash.Hash) error {
+ keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, hashFunc)
+ if err != nil {
+ return err
+ }
+
+ var gcm *ciphersuite.GCM
+ if isClient {
+ gcm, err = ciphersuite.NewGCM(keys.ClientWriteKey, keys.ClientWriteIV, keys.ServerWriteKey, keys.ServerWriteIV)
+ } else {
+ gcm, err = ciphersuite.NewGCM(keys.ServerWriteKey, keys.ServerWriteIV, keys.ClientWriteKey, keys.ClientWriteIV)
+ }
+ c.gcm.Store(gcm)
+ return err
+}
+
+// Init initializes the internal Cipher with keying material
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error {
+ const (
+ prfMacLen = 0
+ prfKeyLen = 16
+ prfIvLen = 4
+ )
+
+ return c.init(masterSecret, clientRandom, serverRandom, isClient, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc())
+}
+
+// Encrypt encrypts a single TLS RecordLayer
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.gcm.Load().(*ciphersuite.GCM)
+ if !ok {
+ return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Encrypt(pkt, raw)
+}
+
+// Decrypt decrypts a single TLS RecordLayer
+func (c *TLSEcdheEcdsaWithAes128GcmSha256) Decrypt(raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.gcm.Load().(*ciphersuite.GCM)
+ if !ok {
+ return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Decrypt(raw)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_cbc_sha.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_cbc_sha.go
new file mode 100644
index 000000000..a1e21fe9f
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_cbc_sha.go
@@ -0,0 +1,111 @@
+package ciphersuite
+
+import (
+ "crypto/sha1" //nolint: gosec,gci
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "sync/atomic"
+
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// TLSEcdheEcdsaWithAes256CbcSha represents a TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA CipherSuite
+type TLSEcdheEcdsaWithAes256CbcSha struct {
+ cbc atomic.Value // *cryptoCBC
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *TLSEcdheEcdsaWithAes256CbcSha) CertificateType() clientcertificate.Type {
+ return clientcertificate.ECDSASign
+}
+
+// KeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake
+func (c *TLSEcdheEcdsaWithAes256CbcSha) KeyExchangeAlgorithm() KeyExchangeAlgorithm {
+ return KeyExchangeAlgorithmEcdhe
+}
+
+// ECC reports whether this CipherSuite uses Elliptic Curve Cryptography
+func (c *TLSEcdheEcdsaWithAes256CbcSha) ECC() bool {
+ return true
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSEcdheEcdsaWithAes256CbcSha) ID() ID {
+ return TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
+}
+
+func (c *TLSEcdheEcdsaWithAes256CbcSha) String() string {
+ return "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
+}
+
+// HashFunc returns the hashing func for this CipherSuite
+func (c *TLSEcdheEcdsaWithAes256CbcSha) HashFunc() func() hash.Hash {
+ return sha256.New
+}
+
+// AuthenticationType controls what authentication method is used during the handshake
+func (c *TLSEcdheEcdsaWithAes256CbcSha) AuthenticationType() AuthenticationType {
+ return AuthenticationTypeCertificate
+}
+
+// IsInitialized returns if the CipherSuite has keying material and can
+// encrypt/decrypt packets
+func (c *TLSEcdheEcdsaWithAes256CbcSha) IsInitialized() bool {
+ return c.cbc.Load() != nil
+}
+
+// Init initializes the internal Cipher with keying material
+func (c *TLSEcdheEcdsaWithAes256CbcSha) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error {
+ const (
+ prfMacLen = 20
+ prfKeyLen = 32
+ prfIvLen = 16
+ )
+
+ keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc())
+ if err != nil {
+ return err
+ }
+
+ var cbc *ciphersuite.CBC
+ if isClient {
+ cbc, err = ciphersuite.NewCBC(
+ keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey,
+ keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey,
+ sha1.New,
+ )
+ } else {
+ cbc, err = ciphersuite.NewCBC(
+ keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey,
+ keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey,
+ sha1.New,
+ )
+ }
+ c.cbc.Store(cbc)
+
+ return err
+}
+
+// Encrypt encrypts a single TLS RecordLayer
+func (c *TLSEcdheEcdsaWithAes256CbcSha) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC)
+ if !ok {
+ return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Encrypt(pkt, raw)
+}
+
+// Decrypt decrypts a single TLS RecordLayer
+func (c *TLSEcdheEcdsaWithAes256CbcSha) Decrypt(raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC)
+ if !ok {
+ return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Decrypt(raw)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_gcm_sha384.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_gcm_sha384.go
new file mode 100644
index 000000000..a2fe30244
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_gcm_sha384.go
@@ -0,0 +1,36 @@
+package ciphersuite
+
+import (
+ "crypto/sha512"
+ "hash"
+)
+
+// TLSEcdheEcdsaWithAes256GcmSha384 represents a TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 CipherSuite
+type TLSEcdheEcdsaWithAes256GcmSha384 struct {
+ TLSEcdheEcdsaWithAes128GcmSha256
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSEcdheEcdsaWithAes256GcmSha384) ID() ID {
+ return TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+}
+
+func (c *TLSEcdheEcdsaWithAes256GcmSha384) String() string {
+ return "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+}
+
+// HashFunc returns the hashing func for this CipherSuite
+func (c *TLSEcdheEcdsaWithAes256GcmSha384) HashFunc() func() hash.Hash {
+ return sha512.New384
+}
+
+// Init initializes the internal Cipher with keying material
+func (c *TLSEcdheEcdsaWithAes256GcmSha384) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error {
+ const (
+ prfMacLen = 0
+ prfKeyLen = 32
+ prfIvLen = 4
+ )
+
+ return c.init(masterSecret, clientRandom, serverRandom, isClient, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc())
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_psk_with_aes_128_cbc_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_psk_with_aes_128_cbc_sha256.go
new file mode 100644
index 000000000..28c049d89
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_psk_with_aes_128_cbc_sha256.go
@@ -0,0 +1,115 @@
+package ciphersuite
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "sync/atomic"
+
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// TLSEcdhePskWithAes128CbcSha256 implements the TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 CipherSuite
+type TLSEcdhePskWithAes128CbcSha256 struct {
+ cbc atomic.Value // *cryptoCBC
+}
+
+// NewTLSEcdhePskWithAes128CbcSha256 creates TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 cipher.
+func NewTLSEcdhePskWithAes128CbcSha256() *TLSEcdhePskWithAes128CbcSha256 {
+ return &TLSEcdhePskWithAes128CbcSha256{}
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *TLSEcdhePskWithAes128CbcSha256) CertificateType() clientcertificate.Type {
+ return clientcertificate.Type(0)
+}
+
+// KeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake
+func (c *TLSEcdhePskWithAes128CbcSha256) KeyExchangeAlgorithm() KeyExchangeAlgorithm {
+ return (KeyExchangeAlgorithmPsk | KeyExchangeAlgorithmEcdhe)
+}
+
+// ECC reports whether this CipherSuite uses Elliptic Curve Cryptography
+func (c *TLSEcdhePskWithAes128CbcSha256) ECC() bool {
+ return true
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSEcdhePskWithAes128CbcSha256) ID() ID {
+ return TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256
+}
+
+func (c *TLSEcdhePskWithAes128CbcSha256) String() string {
+ return "TLS-ECDHE-PSK-WITH-AES-128-CBC-SHA256"
+}
+
+// HashFunc returns the hashing func for this CipherSuite
+func (c *TLSEcdhePskWithAes128CbcSha256) HashFunc() func() hash.Hash {
+ return sha256.New
+}
+
+// AuthenticationType controls what authentication method is used during the handshake
+func (c *TLSEcdhePskWithAes128CbcSha256) AuthenticationType() AuthenticationType {
+ return AuthenticationTypePreSharedKey
+}
+
+// IsInitialized returns if the CipherSuite has keying material and can
+// encrypt/decrypt packets
+func (c *TLSEcdhePskWithAes128CbcSha256) IsInitialized() bool {
+ return c.cbc.Load() != nil
+}
+
+// Init initializes the internal Cipher with keying material
+func (c *TLSEcdhePskWithAes128CbcSha256) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error {
+ const (
+ prfMacLen = 32
+ prfKeyLen = 16
+ prfIvLen = 16
+ )
+
+ keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc())
+ if err != nil {
+ return err
+ }
+
+ var cbc *ciphersuite.CBC
+ if isClient {
+ cbc, err = ciphersuite.NewCBC(
+ keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey,
+ keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey,
+ c.HashFunc(),
+ )
+ } else {
+ cbc, err = ciphersuite.NewCBC(
+ keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey,
+ keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey,
+ c.HashFunc(),
+ )
+ }
+ c.cbc.Store(cbc)
+
+ return err
+}
+
+// Encrypt encrypts a single TLS RecordLayer
+func (c *TLSEcdhePskWithAes128CbcSha256) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC)
+ if !ok { // !c.isInitialized()
+ return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Encrypt(pkt, raw)
+}
+
+// Decrypt decrypts a single TLS RecordLayer
+func (c *TLSEcdhePskWithAes128CbcSha256) Decrypt(raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC)
+ if !ok { // !c.isInitialized()
+ return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Decrypt(raw)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_128_gcm_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_128_gcm_sha256.go
new file mode 100644
index 000000000..70400c37d
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_128_gcm_sha256.go
@@ -0,0 +1,22 @@
+package ciphersuite
+
+import "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+
+// TLSEcdheRsaWithAes128GcmSha256 implements the TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 CipherSuite
+type TLSEcdheRsaWithAes128GcmSha256 struct {
+ TLSEcdheEcdsaWithAes128GcmSha256
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *TLSEcdheRsaWithAes128GcmSha256) CertificateType() clientcertificate.Type {
+ return clientcertificate.RSASign
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSEcdheRsaWithAes128GcmSha256) ID() ID {
+ return TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+}
+
+func (c *TLSEcdheRsaWithAes128GcmSha256) String() string {
+ return "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_cbc_sha.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_cbc_sha.go
new file mode 100644
index 000000000..0d82dc3ad
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_cbc_sha.go
@@ -0,0 +1,22 @@
+package ciphersuite
+
+import "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+
+// TLSEcdheRsaWithAes256CbcSha implements the TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA CipherSuite
+type TLSEcdheRsaWithAes256CbcSha struct {
+ TLSEcdheEcdsaWithAes256CbcSha
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *TLSEcdheRsaWithAes256CbcSha) CertificateType() clientcertificate.Type {
+ return clientcertificate.RSASign
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSEcdheRsaWithAes256CbcSha) ID() ID {
+ return TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
+}
+
+func (c *TLSEcdheRsaWithAes256CbcSha) String() string {
+ return "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_gcm_sha384.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_gcm_sha384.go
new file mode 100644
index 000000000..3473527e7
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_gcm_sha384.go
@@ -0,0 +1,22 @@
+package ciphersuite
+
+import "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+
+// TLSEcdheRsaWithAes256GcmSha384 implements the TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 CipherSuite
+type TLSEcdheRsaWithAes256GcmSha384 struct {
+ TLSEcdheEcdsaWithAes256GcmSha384
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *TLSEcdheRsaWithAes256GcmSha384) CertificateType() clientcertificate.Type {
+ return clientcertificate.RSASign
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSEcdheRsaWithAes256GcmSha384) ID() ID {
+ return TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+}
+
+func (c *TLSEcdheRsaWithAes256GcmSha384) String() string {
+ return "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_cbc_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_cbc_sha256.go
new file mode 100644
index 000000000..5c63ad513
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_cbc_sha256.go
@@ -0,0 +1,110 @@
+package ciphersuite
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "sync/atomic"
+
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// TLSPskWithAes128CbcSha256 implements the TLS_PSK_WITH_AES_128_CBC_SHA256 CipherSuite
+type TLSPskWithAes128CbcSha256 struct {
+ cbc atomic.Value // *cryptoCBC
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *TLSPskWithAes128CbcSha256) CertificateType() clientcertificate.Type {
+ return clientcertificate.Type(0)
+}
+
+// KeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake
+func (c *TLSPskWithAes128CbcSha256) KeyExchangeAlgorithm() KeyExchangeAlgorithm {
+ return KeyExchangeAlgorithmPsk
+}
+
+// ECC reports whether this CipherSuite uses Elliptic Curve Cryptography
+func (c *TLSPskWithAes128CbcSha256) ECC() bool {
+ return false
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSPskWithAes128CbcSha256) ID() ID {
+ return TLS_PSK_WITH_AES_128_CBC_SHA256
+}
+
+func (c *TLSPskWithAes128CbcSha256) String() string {
+ return "TLS_PSK_WITH_AES_128_CBC_SHA256"
+}
+
+// HashFunc returns the hashing func for this CipherSuite
+func (c *TLSPskWithAes128CbcSha256) HashFunc() func() hash.Hash {
+ return sha256.New
+}
+
+// AuthenticationType controls what authentication method is used during the handshake
+func (c *TLSPskWithAes128CbcSha256) AuthenticationType() AuthenticationType {
+ return AuthenticationTypePreSharedKey
+}
+
+// IsInitialized returns if the CipherSuite has keying material and can
+// encrypt/decrypt packets
+func (c *TLSPskWithAes128CbcSha256) IsInitialized() bool {
+ return c.cbc.Load() != nil
+}
+
+// Init initializes the internal Cipher with keying material
+func (c *TLSPskWithAes128CbcSha256) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error {
+ const (
+ prfMacLen = 32
+ prfKeyLen = 16
+ prfIvLen = 16
+ )
+
+ keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc())
+ if err != nil {
+ return err
+ }
+
+ var cbc *ciphersuite.CBC
+ if isClient {
+ cbc, err = ciphersuite.NewCBC(
+ keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey,
+ keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey,
+ c.HashFunc(),
+ )
+ } else {
+ cbc, err = ciphersuite.NewCBC(
+ keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey,
+ keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey,
+ c.HashFunc(),
+ )
+ }
+ c.cbc.Store(cbc)
+
+ return err
+}
+
+// Encrypt encrypts a single TLS RecordLayer
+func (c *TLSPskWithAes128CbcSha256) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC)
+ if !ok {
+ return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Encrypt(pkt, raw)
+}
+
+// Decrypt decrypts a single TLS RecordLayer
+func (c *TLSPskWithAes128CbcSha256) Decrypt(raw []byte) ([]byte, error) {
+ cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC)
+ if !ok {
+ return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit)
+ }
+
+ return cipherSuite.Decrypt(raw)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm.go
new file mode 100644
index 000000000..02b96404d
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm.go
@@ -0,0 +1,11 @@
+package ciphersuite
+
+import (
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+)
+
+// NewTLSPskWithAes128Ccm returns the TLS_PSK_WITH_AES_128_CCM CipherSuite
+func NewTLSPskWithAes128Ccm() *Aes128Ccm {
+ return newAes128Ccm(clientcertificate.Type(0), TLS_PSK_WITH_AES_128_CCM, true, ciphersuite.CCMTagLength, KeyExchangeAlgorithmPsk, false)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm8.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm8.go
new file mode 100644
index 000000000..faf9cb41a
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm8.go
@@ -0,0 +1,11 @@
+package ciphersuite
+
+import (
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+)
+
+// NewTLSPskWithAes128Ccm8 returns the TLS_PSK_WITH_AES_128_CCM_8 CipherSuite
+func NewTLSPskWithAes128Ccm8() *Aes128Ccm {
+ return newAes128Ccm(clientcertificate.Type(0), TLS_PSK_WITH_AES_128_CCM_8, true, ciphersuite.CCMTagLength8, KeyExchangeAlgorithmPsk, false)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_gcm_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_gcm_sha256.go
new file mode 100644
index 000000000..98f7a4afe
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_gcm_sha256.go
@@ -0,0 +1,32 @@
+package ciphersuite
+
+import "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+
+// TLSPskWithAes128GcmSha256 implements the TLS_PSK_WITH_AES_128_GCM_SHA256 CipherSuite
+type TLSPskWithAes128GcmSha256 struct {
+ TLSEcdheEcdsaWithAes128GcmSha256
+}
+
+// CertificateType returns what type of certificate this CipherSuite exchanges
+func (c *TLSPskWithAes128GcmSha256) CertificateType() clientcertificate.Type {
+ return clientcertificate.Type(0)
+}
+
+// KeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake
+func (c *TLSPskWithAes128GcmSha256) KeyExchangeAlgorithm() KeyExchangeAlgorithm {
+ return KeyExchangeAlgorithmPsk
+}
+
+// ID returns the ID of the CipherSuite
+func (c *TLSPskWithAes128GcmSha256) ID() ID {
+ return TLS_PSK_WITH_AES_128_GCM_SHA256
+}
+
+func (c *TLSPskWithAes128GcmSha256) String() string {
+ return "TLS_PSK_WITH_AES_128_GCM_SHA256"
+}
+
+// AuthenticationType controls what authentication method is used during the handshake
+func (c *TLSPskWithAes128GcmSha256) AuthenticationType() AuthenticationType {
+ return AuthenticationTypePreSharedKey
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_256_ccm8.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_256_ccm8.go
new file mode 100644
index 000000000..9058ff25d
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_256_ccm8.go
@@ -0,0 +1,11 @@
+package ciphersuite
+
+import (
+ "github.com/pion/dtls/v2/pkg/crypto/ciphersuite"
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+)
+
+// NewTLSPskWithAes256Ccm8 returns the TLS_PSK_WITH_AES_256_CCM_8 CipherSuite
+func NewTLSPskWithAes256Ccm8() *Aes256Ccm {
+ return newAes256Ccm(clientcertificate.Type(0), TLS_PSK_WITH_AES_256_CCM_8, true, ciphersuite.CCMTagLength8, KeyExchangeAlgorithmPsk, false)
+}
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/authentication_type.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/authentication_type.go
new file mode 100644
index 000000000..75d599fe3
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/authentication_type.go
@@ -0,0 +1,11 @@
+package types
+
+// AuthenticationType controls what authentication method is used during the handshake
+type AuthenticationType int
+
+// AuthenticationType Enums
+const (
+ AuthenticationTypeCertificate AuthenticationType = iota + 1
+ AuthenticationTypePreSharedKey
+ AuthenticationTypeAnonymous
+)
diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/key_exchange_algorithm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/key_exchange_algorithm.go
new file mode 100644
index 000000000..fbf83471f
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/key_exchange_algorithm.go
@@ -0,0 +1,17 @@
+// Package types provides types for TLS Ciphers
+package types
+
+// KeyExchangeAlgorithm controls what exchange algorithm was chosen.
+type KeyExchangeAlgorithm int
+
+// KeyExchangeAlgorithm Bitmask
+const (
+ KeyExchangeAlgorithmNone KeyExchangeAlgorithm = 0
+ KeyExchangeAlgorithmPsk KeyExchangeAlgorithm = iota << 1
+ KeyExchangeAlgorithmEcdhe
+)
+
+// Has check if keyExchangeAlgorithm is supported.
+func (a KeyExchangeAlgorithm) Has(v KeyExchangeAlgorithm) bool {
+ return (a & v) == v
+}
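Because these constants form a bitmask, a suite that performs an ECDHE exchange authenticated by a PSK can report both algorithms at once, and callers test membership with Has. A self-contained illustration of the same bitmask follows, re-declared locally because the vendored package is internal and cannot be imported.

package main

import "fmt"

type keyExchange int

const (
	kxNone keyExchange = 0
	kxPsk  keyExchange = 1 << iota // 2
	kxEcdhe                        // 4
)

// Has reports whether all bits of v are set in a.
func (a keyExchange) Has(v keyExchange) bool { return a&v == v }

func main() {
	ecdhePsk := kxPsk | kxEcdhe
	fmt.Println(ecdhePsk.Has(kxPsk), ecdhePsk.Has(kxEcdhe)) // true true
	fmt.Println(kxPsk.Has(kxEcdhe))                         // false
}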
diff --git a/vendor/github.com/pion/dtls/v2/internal/closer/closer.go b/vendor/github.com/pion/dtls/v2/internal/closer/closer.go
new file mode 100644
index 000000000..b99e13e44
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/closer/closer.go
@@ -0,0 +1,45 @@
+// Package closer provides a signaling channel for shutdown
+package closer
+
+import (
+ "context"
+)
+
+// Closer wraps a channel used to signal shutdown
+type Closer struct {
+ ctx context.Context
+ closeFunc func()
+}
+
+// NewCloser creates a new instance of Closer
+func NewCloser() *Closer {
+ ctx, closeFunc := context.WithCancel(context.Background())
+ return &Closer{
+ ctx: ctx,
+ closeFunc: closeFunc,
+ }
+}
+
+// NewCloserWithParent creates a new instance of Closer with a parent context
+func NewCloserWithParent(ctx context.Context) *Closer {
+ ctx, closeFunc := context.WithCancel(ctx)
+ return &Closer{
+ ctx: ctx,
+ closeFunc: closeFunc,
+ }
+}
+
+// Done returns a channel signaling when it is done
+func (c *Closer) Done() <-chan struct{} {
+ return c.ctx.Done()
+}
+
+// Err returns an error of the context
+func (c *Closer) Err() error {
+ return c.ctx.Err()
+}
+
+// Close sends a signal to trigger the ctx done channel
+func (c *Closer) Close() {
+ c.closeFunc()
+}
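Closer is a thin wrapper over a cancellable context used to broadcast shutdown to goroutines. A sketch of how such a wrapper is typically driven, re-declared locally since the vendored package is internal.

package main

import (
	"context"
	"fmt"
	"time"
)

type closer struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func newCloser() *closer {
	ctx, cancel := context.WithCancel(context.Background())
	return &closer{ctx: ctx, cancel: cancel}
}

func (c *closer) Done() <-chan struct{} { return c.ctx.Done() }
func (c *closer) Close()                { c.cancel() }

func main() {
	c := newCloser()
	go func() {
		<-c.Done() // blocks until Close is called
		fmt.Println("worker: shutting down")
	}()
	time.Sleep(10 * time.Millisecond)
	c.Close()
	time.Sleep(10 * time.Millisecond)
}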
diff --git a/vendor/github.com/pion/dtls/v2/internal/util/util.go b/vendor/github.com/pion/dtls/v2/internal/util/util.go
new file mode 100644
index 000000000..746a670f4
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/internal/util/util.go
@@ -0,0 +1,39 @@
+// Package util contains small helpers used across the repo
+package util
+
+import (
+ "encoding/binary"
+)
+
+// BigEndianUint24 returns the value of a big endian uint24
+func BigEndianUint24(raw []byte) uint32 {
+ if len(raw) < 3 {
+ return 0
+ }
+
+ rawCopy := make([]byte, 4)
+ copy(rawCopy[1:], raw)
+ return binary.BigEndian.Uint32(rawCopy)
+}
+
+// PutBigEndianUint24 encodes a uint24 and places it into out
+func PutBigEndianUint24(out []byte, in uint32) {
+ tmp := make([]byte, 4)
+ binary.BigEndian.PutUint32(tmp, in)
+ copy(out, tmp[1:])
+}
+
+// PutBigEndianUint48 encodes the low 48 bits of a uint64 and places them into out
+func PutBigEndianUint48(out []byte, in uint64) {
+ tmp := make([]byte, 8)
+ binary.BigEndian.PutUint64(tmp, in)
+ copy(out, tmp[2:])
+}
+
+// Max returns the larger value
+func Max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
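These helpers pack and unpack the 24- and 48-bit big-endian integers DTLS uses for handshake lengths and record sequence numbers. A quick round-trip check of the 24-bit case, re-implemented locally because the vendored util package is internal.

package main

import (
	"encoding/binary"
	"fmt"
)

// putUint24 writes the low 24 bits of in to out in big-endian order.
func putUint24(out []byte, in uint32) {
	tmp := make([]byte, 4)
	binary.BigEndian.PutUint32(tmp, in)
	copy(out, tmp[1:]) // drop the most significant byte
}

// uint24 reads a 3-byte big-endian value into a uint32.
func uint24(raw []byte) uint32 {
	tmp := make([]byte, 4)
	copy(tmp[1:], raw)
	return binary.BigEndian.Uint32(tmp)
}

func main() {
	buf := make([]byte, 3)
	putUint24(buf, 0x0a0b0c)
	fmt.Printf("% x -> %#x\n", buf, uint24(buf)) // 0a 0b 0c -> 0xa0b0c
}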
diff --git a/vendor/github.com/pion/dtls/v2/listener.go b/vendor/github.com/pion/dtls/v2/listener.go
new file mode 100644
index 000000000..bf80345b1
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/listener.go
@@ -0,0 +1,80 @@
+package dtls
+
+import (
+ "net"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+ "github.com/pion/udp"
+)
+
+// Listen creates a DTLS listener
+func Listen(network string, laddr *net.UDPAddr, config *Config) (net.Listener, error) {
+ if err := validateConfig(config); err != nil {
+ return nil, err
+ }
+
+ lc := udp.ListenConfig{
+ AcceptFilter: func(packet []byte) bool {
+ pkts, err := recordlayer.UnpackDatagram(packet)
+ if err != nil || len(pkts) < 1 {
+ return false
+ }
+ h := &recordlayer.Header{}
+ if err := h.Unmarshal(pkts[0]); err != nil {
+ return false
+ }
+ return h.ContentType == protocol.ContentTypeHandshake
+ },
+ }
+ parent, err := lc.Listen(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ return &listener{
+ config: config,
+ parent: parent,
+ }, nil
+}
+
+// NewListener creates a DTLS listener which accepts connections from an inner Listener.
+func NewListener(inner net.Listener, config *Config) (net.Listener, error) {
+ if err := validateConfig(config); err != nil {
+ return nil, err
+ }
+
+ return &listener{
+ config: config,
+ parent: inner,
+ }, nil
+}
+
+// listener represents a DTLS listener
+type listener struct {
+ config *Config
+ parent net.Listener
+}
+
+// Accept waits for and returns the next connection to the listener.
+// You have to either close or read on all connections that are created.
+// Connection handshakes will time out using ConnectContextMaker in the Config.
+// If you want to specify the timeout duration, set ConnectContextMaker.
+func (l *listener) Accept() (net.Conn, error) {
+ c, err := l.parent.Accept()
+ if err != nil {
+ return nil, err
+ }
+ return Server(c, l.config)
+}
+
+// Close closes the listener.
+// Any blocked Accept operations will be unblocked and return errors.
+// Already Accepted connections are not closed.
+func (l *listener) Close() error {
+ return l.parent.Close()
+}
+
+// Addr returns the listener's network address.
+func (l *listener) Addr() net.Addr {
+ return l.parent.Addr()
+}
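
For context, a minimal server sketch using this listener; it assumes the public dtls.Config fields PSK, PSKIdentityHint, and CipherSuites from pion/dtls v2, and all values are illustrative:

package main

import (
	"fmt"
	"net"

	"github.com/pion/dtls/v2"
)

func main() {
	addr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4444}
	config := &dtls.Config{
		PSK: func(hint []byte) ([]byte, error) {
			// Return the key for the presented identity hint.
			return []byte{0xAB, 0xC1, 0x23}, nil
		},
		PSKIdentityHint: []byte("example"),
		CipherSuites:    []dtls.CipherSuiteID{dtls.TLS_PSK_WITH_AES_128_CCM_8},
	}

	ln, err := dtls.Listen("udp", addr, config)
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	for {
		// Accept runs the DTLS handshake via Server(c, config);
		// the AcceptFilter above has already dropped non-handshake datagrams.
		conn, err := ln.Accept()
		if err != nil {
			continue
		}
		go func(c net.Conn) {
			defer c.Close()
			buf := make([]byte, 8192)
			n, _ := c.Read(buf)
			fmt.Printf("got %d bytes\n", n)
		}(conn)
	}
}
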
diff --git a/vendor/github.com/pion/dtls/v2/packet.go b/vendor/github.com/pion/dtls/v2/packet.go
new file mode 100644
index 000000000..8366a3c3d
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/packet.go
@@ -0,0 +1,9 @@
+package dtls
+
+import "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+
+type packet struct {
+ record *recordlayer.RecordLayer
+ shouldEncrypt bool
+ resetLocalSequenceNumber bool
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ccm/ccm.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ccm/ccm.go
new file mode 100644
index 000000000..20e3436e2
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ccm/ccm.go
@@ -0,0 +1,251 @@
+// Package ccm implements a CCM, Counter with CBC-MAC
+// as per RFC 3610.
+//
+// See https://tools.ietf.org/html/rfc3610
+//
+// This code was lifted from https://github.com/bocajim/dtls/blob/a3300364a283fcb490d28a93d7fcfa7ba437fbbe/ccm/ccm.go
+// and as such was not written by the Pions authors. Like Pions this
+// code is licensed under MIT.
+//
+// A request for including CCM into the Go standard library
+// can be found as issue #27484 on the https://github.com/golang/go/
+// repository.
+package ccm
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "math"
+)
+
+// ccm represents a Counter with CBC-MAC with a specific key.
+type ccm struct {
+ b cipher.Block
+ M uint8
+ L uint8
+}
+
+const ccmBlockSize = 16
+
+// CCM is a block cipher in Counter with CBC-MAC mode.
+// Providing authenticated encryption with associated data via the cipher.AEAD interface.
+type CCM interface {
+ cipher.AEAD
+	// MaxLength returns the maximum length of plaintext in calls to Seal.
+	// The maximum length of ciphertext in calls to Open is MaxLength()+Overhead().
+	// The maximum length is related to CCM's `L` parameter (15-noncesize) and
+	// is 1<<(8*L) - 1 (but also limited by the maximum size of an int).
+ MaxLength() int
+}
+
+var (
+ errInvalidBlockSize = errors.New("ccm: NewCCM requires 128-bit block cipher")
+ errInvalidTagSize = errors.New("ccm: tagsize must be 4, 6, 8, 10, 12, 14, or 16")
+ errInvalidNonceSize = errors.New("ccm: invalid nonce size")
+)
+
+// NewCCM returns the given 128-bit block cipher wrapped in CCM.
+// The tagsize must be an even integer between 4 and 16 inclusive
+// and is used as CCM's `M` parameter.
+// The noncesize must be an integer between 7 and 13 inclusive,
+// 15-noncesize is used as CCM's `L` parameter.
+func NewCCM(b cipher.Block, tagsize, noncesize int) (CCM, error) {
+ if b.BlockSize() != ccmBlockSize {
+ return nil, errInvalidBlockSize
+ }
+ if tagsize < 4 || tagsize > 16 || tagsize&1 != 0 {
+ return nil, errInvalidTagSize
+ }
+ lensize := 15 - noncesize
+ if lensize < 2 || lensize > 8 {
+ return nil, errInvalidNonceSize
+ }
+ c := &ccm{b: b, M: uint8(tagsize), L: uint8(lensize)}
+ return c, nil
+}
+
+func (c *ccm) NonceSize() int { return 15 - int(c.L) }
+func (c *ccm) Overhead() int { return int(c.M) }
+func (c *ccm) MaxLength() int { return maxlen(c.L, c.Overhead()) }
+
+func maxlen(l uint8, tagsize int) int {
+ max := (uint64(1) << (8 * l)) - 1
+ if m64 := uint64(math.MaxInt64) - uint64(tagsize); l > 8 || max > m64 {
+		max = m64 // The maximum length on a 64-bit arch
+ }
+ if max != uint64(int(max)) {
+		return math.MaxInt32 - tagsize // We only have 32-bit ints
+ }
+ return int(max)
+}
+
+// MaxNonceLength returns the maximum nonce length for a given plaintext length.
+// A return value <= 0 indicates that plaintext length is too large for
+// any nonce length.
+func MaxNonceLength(pdatalen int) int {
+ const tagsize = 16
+ for L := 2; L <= 8; L++ {
+ if maxlen(uint8(L), tagsize) >= pdatalen {
+ return 15 - L
+ }
+ }
+ return 0
+}
+
+func (c *ccm) cbcRound(mac, data []byte) {
+ for i := 0; i < ccmBlockSize; i++ {
+ mac[i] ^= data[i]
+ }
+ c.b.Encrypt(mac, mac)
+}
+
+func (c *ccm) cbcData(mac, data []byte) {
+ for len(data) >= ccmBlockSize {
+ c.cbcRound(mac, data[:ccmBlockSize])
+ data = data[ccmBlockSize:]
+ }
+ if len(data) > 0 {
+ var block [ccmBlockSize]byte
+ copy(block[:], data)
+ c.cbcRound(mac, block[:])
+ }
+}
+
+var errPlaintextTooLong = errors.New("ccm: plaintext too large")
+
+func (c *ccm) tag(nonce, plaintext, adata []byte) ([]byte, error) {
+ var mac [ccmBlockSize]byte
+
+ if len(adata) > 0 {
+ mac[0] |= 1 << 6
+ }
+ mac[0] |= (c.M - 2) << 2
+ mac[0] |= c.L - 1
+ if len(nonce) != c.NonceSize() {
+ return nil, errInvalidNonceSize
+ }
+ if len(plaintext) > c.MaxLength() {
+ return nil, errPlaintextTooLong
+ }
+ binary.BigEndian.PutUint64(mac[ccmBlockSize-8:], uint64(len(plaintext)))
+ copy(mac[1:ccmBlockSize-c.L], nonce)
+ c.b.Encrypt(mac[:], mac[:])
+
+ var block [ccmBlockSize]byte
+ if n := uint64(len(adata)); n > 0 {
+ // First adata block includes adata length
+ i := 2
+ if n <= 0xfeff {
+ binary.BigEndian.PutUint16(block[:i], uint16(n))
+ } else {
+ block[0] = 0xfe
+ block[1] = 0xff
+ if n < uint64(1<<32) {
+ i = 2 + 4
+ binary.BigEndian.PutUint32(block[2:i], uint32(n))
+ } else {
+ i = 2 + 8
+ binary.BigEndian.PutUint64(block[2:i], n)
+ }
+ }
+ i = copy(block[i:], adata)
+ c.cbcRound(mac[:], block[:])
+ c.cbcData(mac[:], adata[i:])
+ }
+
+ if len(plaintext) > 0 {
+ c.cbcData(mac[:], plaintext)
+ }
+
+ return mac[:c.M], nil
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+// From crypto/cipher/gcm.go
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
+
+// Seal encrypts and authenticates plaintext, authenticates the
+// additional data and appends the result to dst, returning the updated
+// slice. The nonce must be NonceSize() bytes long and unique for all
+// time, for a given key.
+// The plaintext must be no longer than MaxLength() bytes long.
+//
+// The plaintext and dst may alias exactly or not at all.
+func (c *ccm) Seal(dst, nonce, plaintext, adata []byte) []byte {
+ tag, err := c.tag(nonce, plaintext, adata)
+ if err != nil {
+ // The cipher.AEAD interface doesn't allow for an error return.
+ panic(err) // nolint
+ }
+
+ var iv, s0 [ccmBlockSize]byte
+ iv[0] = c.L - 1
+ copy(iv[1:ccmBlockSize-c.L], nonce)
+ c.b.Encrypt(s0[:], iv[:])
+ for i := 0; i < int(c.M); i++ {
+ tag[i] ^= s0[i]
+ }
+ iv[len(iv)-1] |= 1
+ stream := cipher.NewCTR(c.b, iv[:])
+ ret, out := sliceForAppend(dst, len(plaintext)+int(c.M))
+ stream.XORKeyStream(out, plaintext)
+ copy(out[len(plaintext):], tag)
+ return ret
+}
+
+var (
+ errOpen = errors.New("ccm: message authentication failed")
+ errCiphertextTooShort = errors.New("ccm: ciphertext too short")
+ errCiphertextTooLong = errors.New("ccm: ciphertext too long")
+)
+
+func (c *ccm) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
+ if len(ciphertext) < int(c.M) {
+ return nil, errCiphertextTooShort
+ }
+ if len(ciphertext) > c.MaxLength()+c.Overhead() {
+ return nil, errCiphertextTooLong
+ }
+
+ tag := make([]byte, int(c.M))
+ copy(tag, ciphertext[len(ciphertext)-int(c.M):])
+ ciphertextWithoutTag := ciphertext[:len(ciphertext)-int(c.M)]
+
+ var iv, s0 [ccmBlockSize]byte
+ iv[0] = c.L - 1
+ copy(iv[1:ccmBlockSize-c.L], nonce)
+ c.b.Encrypt(s0[:], iv[:])
+ for i := 0; i < int(c.M); i++ {
+ tag[i] ^= s0[i]
+ }
+ iv[len(iv)-1] |= 1
+ stream := cipher.NewCTR(c.b, iv[:])
+
+ // Cannot decrypt directly to dst since we're not supposed to
+ // reveal the plaintext to the caller if authentication fails.
+ plaintext := make([]byte, len(ciphertextWithoutTag))
+ stream.XORKeyStream(plaintext, ciphertextWithoutTag)
+ expectedTag, err := c.tag(nonce, plaintext, adata)
+ if err != nil {
+ return nil, err
+ }
+
+ if subtle.ConstantTimeCompare(tag, expectedTag) != 1 {
+ return nil, errOpen
+ }
+ return append(dst, plaintext...), nil
+}
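
A short round-trip sketch for this CCM wrapper; the key and nonce are illustrative, and tag length 16 with a 12-byte nonce matches what the DTLS CCM cipher suites request:

package main

import (
	"crypto/aes"
	"fmt"

	"github.com/pion/dtls/v2/pkg/crypto/ccm"
)

func main() {
	key := make([]byte, 16)   // AES-128 key (all zero, illustrative only)
	nonce := make([]byte, 12) // 12-byte nonce => L = 3

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := ccm.NewCCM(block, 16, len(nonce))
	if err != nil {
		panic(err)
	}

	plaintext := []byte("hello dtls")
	aad := []byte("header")

	sealed := aead.Seal(nil, nonce, plaintext, aad) // ciphertext || 16-byte tag
	opened, err := aead.Open(nil, nonce, sealed, aad)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(opened)) // hello dtls
}
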
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/cbc.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/cbc.go
new file mode 100644
index 000000000..9cb123b98
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/cbc.go
@@ -0,0 +1,174 @@
+package ciphersuite
+
+import ( //nolint:gci
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/rand"
+ "encoding/binary"
+ "hash"
+
+ "github.com/pion/dtls/v2/internal/util"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// cbcMode is the interface for block ciphers using cipher block chaining.
+type cbcMode interface {
+ cipher.BlockMode
+ SetIV([]byte)
+}
+
+// CBC Provides an API to Encrypt/Decrypt DTLS 1.2 Packets
+type CBC struct {
+ writeCBC, readCBC cbcMode
+ writeMac, readMac []byte
+ h prf.HashFunc
+}
+
+// NewCBC creates a DTLS CBC Cipher
+func NewCBC(localKey, localWriteIV, localMac, remoteKey, remoteWriteIV, remoteMac []byte, h prf.HashFunc) (*CBC, error) {
+ writeBlock, err := aes.NewCipher(localKey)
+ if err != nil {
+ return nil, err
+ }
+
+ readBlock, err := aes.NewCipher(remoteKey)
+ if err != nil {
+ return nil, err
+ }
+
+ writeCBC, ok := cipher.NewCBCEncrypter(writeBlock, localWriteIV).(cbcMode)
+ if !ok {
+ return nil, errFailedToCast
+ }
+
+ readCBC, ok := cipher.NewCBCDecrypter(readBlock, remoteWriteIV).(cbcMode)
+ if !ok {
+ return nil, errFailedToCast
+ }
+
+ return &CBC{
+ writeCBC: writeCBC,
+ writeMac: localMac,
+
+ readCBC: readCBC,
+ readMac: remoteMac,
+ h: h,
+ }, nil
+}
+
+// Encrypt encrypts a DTLS RecordLayer message
+func (c *CBC) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {
+ payload := raw[recordlayer.HeaderSize:]
+ raw = raw[:recordlayer.HeaderSize]
+ blockSize := c.writeCBC.BlockSize()
+
+ // Generate + Append MAC
+ h := pkt.Header
+
+ MAC, err := c.hmac(h.Epoch, h.SequenceNumber, h.ContentType, h.Version, payload, c.writeMac, c.h)
+ if err != nil {
+ return nil, err
+ }
+ payload = append(payload, MAC...)
+
+ // Generate + Append padding
+ padding := make([]byte, blockSize-len(payload)%blockSize)
+ paddingLen := len(padding)
+ for i := 0; i < paddingLen; i++ {
+ padding[i] = byte(paddingLen - 1)
+ }
+ payload = append(payload, padding...)
+
+ // Generate IV
+ iv := make([]byte, blockSize)
+ if _, err := rand.Read(iv); err != nil {
+ return nil, err
+ }
+
+ // Set IV + Encrypt + Prepend IV
+ c.writeCBC.SetIV(iv)
+ c.writeCBC.CryptBlocks(payload, payload)
+ payload = append(iv, payload...)
+
+	// Prepend the unencrypted header to the encrypted payload
+ raw = append(raw, payload...)
+
+ // Update recordLayer size to include IV+MAC+Padding
+ binary.BigEndian.PutUint16(raw[recordlayer.HeaderSize-2:], uint16(len(raw)-recordlayer.HeaderSize))
+
+ return raw, nil
+}
+
+// Decrypt decrypts a DTLS RecordLayer message
+func (c *CBC) Decrypt(in []byte) ([]byte, error) {
+ body := in[recordlayer.HeaderSize:]
+ blockSize := c.readCBC.BlockSize()
+ mac := c.h()
+
+ var h recordlayer.Header
+ err := h.Unmarshal(in)
+ switch {
+ case err != nil:
+ return nil, err
+ case h.ContentType == protocol.ContentTypeChangeCipherSpec:
+		// Nothing to decrypt with ChangeCipherSpec
+ return in, nil
+ case len(body)%blockSize != 0 || len(body) < blockSize+util.Max(mac.Size()+1, blockSize):
+ return nil, errNotEnoughRoomForNonce
+ }
+
+ // Set + remove per record IV
+ c.readCBC.SetIV(body[:blockSize])
+ body = body[blockSize:]
+
+ // Decrypt
+ c.readCBC.CryptBlocks(body, body)
+
+ // Padding+MAC needs to be checked in constant time
+ // Otherwise we reveal information about the level of correctness
+ paddingLen, paddingGood := examinePadding(body)
+ if paddingGood != 255 {
+ return nil, errInvalidMAC
+ }
+
+ macSize := mac.Size()
+ if len(body) < macSize {
+ return nil, errInvalidMAC
+ }
+
+ dataEnd := len(body) - macSize - paddingLen
+
+ expectedMAC := body[dataEnd : dataEnd+macSize]
+ actualMAC, err := c.hmac(h.Epoch, h.SequenceNumber, h.ContentType, h.Version, body[:dataEnd], c.readMac, c.h)
+
+ // Compute Local MAC and compare
+ if err != nil || !hmac.Equal(actualMAC, expectedMAC) {
+ return nil, errInvalidMAC
+ }
+
+ return append(in[:recordlayer.HeaderSize], body[:dataEnd]...), nil
+}
+
+func (c *CBC) hmac(epoch uint16, sequenceNumber uint64, contentType protocol.ContentType, protocolVersion protocol.Version, payload []byte, key []byte, hf func() hash.Hash) ([]byte, error) {
+ h := hmac.New(hf, key)
+
+ msg := make([]byte, 13)
+
+ binary.BigEndian.PutUint16(msg, epoch)
+ util.PutBigEndianUint48(msg[2:], sequenceNumber)
+ msg[8] = byte(contentType)
+ msg[9] = protocolVersion.Major
+ msg[10] = protocolVersion.Minor
+ binary.BigEndian.PutUint16(msg[11:], uint16(len(payload)))
+
+ if _, err := h.Write(msg); err != nil {
+ return nil, err
+ } else if _, err := h.Write(payload); err != nil {
+ return nil, err
+ }
+
+ return h.Sum(nil), nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ccm.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ccm.go
new file mode 100644
index 000000000..354b1cc50
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ccm.go
@@ -0,0 +1,104 @@
+package ciphersuite
+
+import (
+ "crypto/aes"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/pion/dtls/v2/pkg/crypto/ccm"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// CCMTagLen is the length of Authentication Tag
+type CCMTagLen int
+
+// CCM Enums
+const (
+ CCMTagLength8 CCMTagLen = 8
+ CCMTagLength CCMTagLen = 16
+ ccmNonceLength = 12
+)
+
+// CCM Provides an API to Encrypt/Decrypt DTLS 1.2 Packets
+type CCM struct {
+ localCCM, remoteCCM ccm.CCM
+ localWriteIV, remoteWriteIV []byte
+ tagLen CCMTagLen
+}
+
+// NewCCM creates a DTLS CCM Cipher
+func NewCCM(tagLen CCMTagLen, localKey, localWriteIV, remoteKey, remoteWriteIV []byte) (*CCM, error) {
+ localBlock, err := aes.NewCipher(localKey)
+ if err != nil {
+ return nil, err
+ }
+ localCCM, err := ccm.NewCCM(localBlock, int(tagLen), ccmNonceLength)
+ if err != nil {
+ return nil, err
+ }
+
+ remoteBlock, err := aes.NewCipher(remoteKey)
+ if err != nil {
+ return nil, err
+ }
+ remoteCCM, err := ccm.NewCCM(remoteBlock, int(tagLen), ccmNonceLength)
+ if err != nil {
+ return nil, err
+ }
+
+ return &CCM{
+ localCCM: localCCM,
+ localWriteIV: localWriteIV,
+ remoteCCM: remoteCCM,
+ remoteWriteIV: remoteWriteIV,
+ tagLen: tagLen,
+ }, nil
+}
+
+// Encrypt encrypts a DTLS RecordLayer message
+func (c *CCM) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {
+ payload := raw[recordlayer.HeaderSize:]
+ raw = raw[:recordlayer.HeaderSize]
+
+ nonce := append(append([]byte{}, c.localWriteIV[:4]...), make([]byte, 8)...)
+ if _, err := rand.Read(nonce[4:]); err != nil {
+ return nil, err
+ }
+
+ additionalData := generateAEADAdditionalData(&pkt.Header, len(payload))
+ encryptedPayload := c.localCCM.Seal(nil, nonce, payload, additionalData)
+
+ encryptedPayload = append(nonce[4:], encryptedPayload...)
+ raw = append(raw, encryptedPayload...)
+
+ // Update recordLayer size to include explicit nonce
+ binary.BigEndian.PutUint16(raw[recordlayer.HeaderSize-2:], uint16(len(raw)-recordlayer.HeaderSize))
+ return raw, nil
+}
+
+// Decrypt decrypts a DTLS RecordLayer message
+func (c *CCM) Decrypt(in []byte) ([]byte, error) {
+ var h recordlayer.Header
+ err := h.Unmarshal(in)
+ switch {
+ case err != nil:
+ return nil, err
+ case h.ContentType == protocol.ContentTypeChangeCipherSpec:
+		// Nothing to decrypt with ChangeCipherSpec
+ return in, nil
+ case len(in) <= (8 + recordlayer.HeaderSize):
+ return nil, errNotEnoughRoomForNonce
+ }
+
+ nonce := append(append([]byte{}, c.remoteWriteIV[:4]...), in[recordlayer.HeaderSize:recordlayer.HeaderSize+8]...)
+ out := in[recordlayer.HeaderSize+8:]
+
+ additionalData := generateAEADAdditionalData(&h, len(out)-int(c.tagLen))
+ out, err = c.remoteCCM.Open(out[:0], nonce, out, additionalData)
+ if err != nil {
+ return nil, fmt.Errorf("%w: %v", errDecryptPacket, err)
+ }
+ return append(in[:recordlayer.HeaderSize], out...), nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ciphersuite.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ciphersuite.go
new file mode 100644
index 000000000..afe63d8ae
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ciphersuite.go
@@ -0,0 +1,73 @@
+// Package ciphersuite provides the crypto operations needed for a DTLS CipherSuite
+package ciphersuite
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+var (
+ errNotEnoughRoomForNonce = &protocol.InternalError{Err: errors.New("buffer not long enough to contain nonce")} //nolint:goerr113
+ errDecryptPacket = &protocol.TemporaryError{Err: errors.New("failed to decrypt packet")} //nolint:goerr113
+ errInvalidMAC = &protocol.TemporaryError{Err: errors.New("invalid mac")} //nolint:goerr113
+ errFailedToCast = &protocol.FatalError{Err: errors.New("failed to cast")} //nolint:goerr113
+)
+
+func generateAEADAdditionalData(h *recordlayer.Header, payloadLen int) []byte {
+ var additionalData [13]byte
+ // SequenceNumber MUST be set first
+ // we only want uint48, clobbering an extra 2 (using uint64, Golang doesn't have uint48)
+ binary.BigEndian.PutUint64(additionalData[:], h.SequenceNumber)
+ binary.BigEndian.PutUint16(additionalData[:], h.Epoch)
+ additionalData[8] = byte(h.ContentType)
+ additionalData[9] = h.Version.Major
+ additionalData[10] = h.Version.Minor
+ binary.BigEndian.PutUint16(additionalData[len(additionalData)-2:], uint16(payloadLen))
+
+ return additionalData[:]
+}
+
+// examinePadding returns, in constant time, the length of the padding to remove
+// from the end of payload. It also returns a byte which is equal to 255 if the
+// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2.
+//
+// https://github.com/golang/go/blob/039c2081d1178f90a8fa2f4e6958693129f8de33/src/crypto/tls/conn.go#L245
+func examinePadding(payload []byte) (toRemove int, good byte) {
+ if len(payload) < 1 {
+ return 0, 0
+ }
+
+ paddingLen := payload[len(payload)-1]
+ t := uint(len(payload)-1) - uint(paddingLen)
+ // if len(payload) >= (paddingLen - 1) then the MSB of t is zero
+ good = byte(int32(^t) >> 31)
+
+ // The maximum possible padding length plus the actual length field
+ toCheck := 256
+ // The length of the padded data is public, so we can use an if here
+ if toCheck > len(payload) {
+ toCheck = len(payload)
+ }
+
+ for i := 0; i < toCheck; i++ {
+ t := uint(paddingLen) - uint(i)
+ // if i <= paddingLen then the MSB of t is zero
+ mask := byte(int32(^t) >> 31)
+ b := payload[len(payload)-1-i]
+ good &^= mask&paddingLen ^ mask&b
+ }
+
+ // We AND together the bits of good and replicate the result across
+ // all the bits.
+ good &= good << 4
+ good &= good << 2
+ good &= good << 1
+ good = uint8(int8(good) >> 7)
+
+ toRemove = int(paddingLen) + 1
+
+ return toRemove, good
+}
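
As a standalone illustration of the padding rule used by the CBC Encrypt path above (every pad byte stores the pad length minus one, which is what examinePadding later checks in constant time); the numbers are made up:

package main

import "fmt"

func main() {
	const blockSize = 16
	payloadLen := 23 // payload + MAC length, illustrative

	padLen := blockSize - payloadLen%blockSize // 9 bytes of padding
	padding := make([]byte, padLen)
	for i := range padding {
		padding[i] = byte(padLen - 1) // each pad byte holds 8
	}
	fmt.Println(padLen, padding[0]) // 9 8

	// examinePadding reads the final byte (8), verifies the preceding
	// 8 bytes match it in constant time, and reports toRemove = 9.
}
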
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/gcm.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/gcm.go
new file mode 100644
index 000000000..af986d46e
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/gcm.go
@@ -0,0 +1,100 @@
+package ciphersuite
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+const (
+ gcmTagLength = 16
+ gcmNonceLength = 12
+)
+
+// GCM Provides an API to Encrypt/Decrypt DTLS 1.2 Packets
+type GCM struct {
+ localGCM, remoteGCM cipher.AEAD
+ localWriteIV, remoteWriteIV []byte
+}
+
+// NewGCM creates a DTLS GCM Cipher
+func NewGCM(localKey, localWriteIV, remoteKey, remoteWriteIV []byte) (*GCM, error) {
+ localBlock, err := aes.NewCipher(localKey)
+ if err != nil {
+ return nil, err
+ }
+ localGCM, err := cipher.NewGCM(localBlock)
+ if err != nil {
+ return nil, err
+ }
+
+ remoteBlock, err := aes.NewCipher(remoteKey)
+ if err != nil {
+ return nil, err
+ }
+ remoteGCM, err := cipher.NewGCM(remoteBlock)
+ if err != nil {
+ return nil, err
+ }
+
+ return &GCM{
+ localGCM: localGCM,
+ localWriteIV: localWriteIV,
+ remoteGCM: remoteGCM,
+ remoteWriteIV: remoteWriteIV,
+ }, nil
+}
+
+// Encrypt encrypts a DTLS RecordLayer message
+func (g *GCM) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {
+ payload := raw[recordlayer.HeaderSize:]
+ raw = raw[:recordlayer.HeaderSize]
+
+ nonce := make([]byte, gcmNonceLength)
+ copy(nonce, g.localWriteIV[:4])
+ if _, err := rand.Read(nonce[4:]); err != nil {
+ return nil, err
+ }
+
+ additionalData := generateAEADAdditionalData(&pkt.Header, len(payload))
+ encryptedPayload := g.localGCM.Seal(nil, nonce, payload, additionalData)
+ r := make([]byte, len(raw)+len(nonce[4:])+len(encryptedPayload))
+ copy(r, raw)
+ copy(r[len(raw):], nonce[4:])
+ copy(r[len(raw)+len(nonce[4:]):], encryptedPayload)
+
+ // Update recordLayer size to include explicit nonce
+ binary.BigEndian.PutUint16(r[recordlayer.HeaderSize-2:], uint16(len(r)-recordlayer.HeaderSize))
+ return r, nil
+}
+
+// Decrypt decrypts a DTLS RecordLayer message
+func (g *GCM) Decrypt(in []byte) ([]byte, error) {
+ var h recordlayer.Header
+ err := h.Unmarshal(in)
+ switch {
+ case err != nil:
+ return nil, err
+ case h.ContentType == protocol.ContentTypeChangeCipherSpec:
+		// Nothing to decrypt with ChangeCipherSpec
+ return in, nil
+ case len(in) <= (8 + recordlayer.HeaderSize):
+ return nil, errNotEnoughRoomForNonce
+ }
+
+ nonce := make([]byte, 0, gcmNonceLength)
+ nonce = append(append(nonce, g.remoteWriteIV[:4]...), in[recordlayer.HeaderSize:recordlayer.HeaderSize+8]...)
+ out := in[recordlayer.HeaderSize+8:]
+
+ additionalData := generateAEADAdditionalData(&h, len(out)-gcmTagLength)
+ out, err = g.remoteGCM.Open(out[:0], nonce, out, additionalData)
+ if err != nil {
+ return nil, fmt.Errorf("%w: %v", errDecryptPacket, err)
+ }
+ return append(in[:recordlayer.HeaderSize], out...), nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/clientcertificate/client_certificate.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/clientcertificate/client_certificate.go
new file mode 100644
index 000000000..c222c01c7
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/clientcertificate/client_certificate.go
@@ -0,0 +1,22 @@
+// Package clientcertificate provides all the supported Client Certificate types
+package clientcertificate
+
+// Type is used to communicate what
+// type of certificate is being transported
+//
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-2
+type Type byte
+
+// ClientCertificateType enums
+const (
+ RSASign Type = 1
+ ECDSASign Type = 64
+)
+
+// Types returns all valid ClientCertificate Types
+func Types() map[Type]bool {
+ return map[Type]bool{
+ RSASign: true,
+ ECDSASign: true,
+ }
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/elliptic/elliptic.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/elliptic/elliptic.go
new file mode 100644
index 000000000..023bf9035
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/elliptic/elliptic.go
@@ -0,0 +1,112 @@
+// Package elliptic provides elliptic curve cryptography for DTLS
+package elliptic
+
+import (
+ "crypto/elliptic"
+ "crypto/rand"
+ "errors"
+ "fmt"
+
+ "golang.org/x/crypto/curve25519"
+)
+
+var errInvalidNamedCurve = errors.New("invalid named curve")
+
+// CurvePointFormat is used to represent the IANA registered curve points
+//
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9
+type CurvePointFormat byte
+
+// CurvePointFormat enums
+const (
+ CurvePointFormatUncompressed CurvePointFormat = 0
+)
+
+// Keypair is a Curve with a Private/Public Keypair
+type Keypair struct {
+ Curve Curve
+ PublicKey []byte
+ PrivateKey []byte
+}
+
+// CurveType is used to represent the IANA registered curve types for TLS
+//
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-10
+type CurveType byte
+
+// CurveType enums
+const (
+ CurveTypeNamedCurve CurveType = 0x03
+)
+
+// CurveTypes returns all known curves
+func CurveTypes() map[CurveType]struct{} {
+ return map[CurveType]struct{}{
+ CurveTypeNamedCurve: {},
+ }
+}
+
+// Curve is used to represent the IANA registered curves for TLS
+//
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8
+type Curve uint16
+
+// Curve enums
+const (
+ P256 Curve = 0x0017
+ P384 Curve = 0x0018
+ X25519 Curve = 0x001d
+)
+
+func (c Curve) String() string {
+ switch c {
+ case P256:
+ return "P-256"
+ case P384:
+ return "P-384"
+ case X25519:
+ return "X25519"
+ }
+ return fmt.Sprintf("%#x", uint16(c))
+}
+
+// Curves returns all curves we implement
+func Curves() map[Curve]bool {
+ return map[Curve]bool{
+ X25519: true,
+ P256: true,
+ P384: true,
+ }
+}
+
+// GenerateKeypair generates a keypair for the given Curve
+func GenerateKeypair(c Curve) (*Keypair, error) {
+ switch c { //nolint:revive
+ case X25519:
+ tmp := make([]byte, 32)
+ if _, err := rand.Read(tmp); err != nil {
+ return nil, err
+ }
+
+ var public, private [32]byte
+ copy(private[:], tmp)
+
+ curve25519.ScalarBaseMult(&public, &private)
+ return &Keypair{X25519, public[:], private[:]}, nil
+ case P256:
+ return ellipticCurveKeypair(P256, elliptic.P256(), elliptic.P256())
+ case P384:
+ return ellipticCurveKeypair(P384, elliptic.P384(), elliptic.P384())
+ default:
+ return nil, errInvalidNamedCurve
+ }
+}
+
+func ellipticCurveKeypair(nc Curve, c1, c2 elliptic.Curve) (*Keypair, error) {
+ privateKey, x, y, err := elliptic.GenerateKey(c1, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Keypair{nc, elliptic.Marshal(c2, x, y), privateKey}, nil
+}
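
A small sketch pairing GenerateKeypair with the PreMasterSecret helper from the prf package added later in this patch, showing both sides deriving the same X25519 shared secret (illustrative only):

package main

import (
	"bytes"
	"fmt"

	"github.com/pion/dtls/v2/pkg/crypto/elliptic"
	"github.com/pion/dtls/v2/pkg/crypto/prf"
)

func main() {
	alice, err := elliptic.GenerateKeypair(elliptic.X25519)
	if err != nil {
		panic(err)
	}
	bob, err := elliptic.GenerateKeypair(elliptic.X25519)
	if err != nil {
		panic(err)
	}

	// Each side combines its own private key with the peer's public key.
	s1, err := prf.PreMasterSecret(bob.PublicKey, alice.PrivateKey, elliptic.X25519)
	if err != nil {
		panic(err)
	}
	s2, err := prf.PreMasterSecret(alice.PublicKey, bob.PrivateKey, elliptic.X25519)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(s1, s2)) // true
}
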
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/hash/hash.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/hash/hash.go
new file mode 100644
index 000000000..660326f78
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/hash/hash.go
@@ -0,0 +1,126 @@
+// Package hash provides TLS HashAlgorithm as defined in TLS 1.2
+package hash
+
+import ( //nolint:gci
+ "crypto"
+ "crypto/md5" //nolint:gosec
+ "crypto/sha1" //nolint:gosec
+ "crypto/sha256"
+ "crypto/sha512"
+)
+
+// Algorithm is used to indicate the hash algorithm used
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-18
+type Algorithm uint16
+
+// Supported hash algorithms
+const (
+ None Algorithm = 0 // Blacklisted
+ MD5 Algorithm = 1 // Blacklisted
+ SHA1 Algorithm = 2 // Blacklisted
+ SHA224 Algorithm = 3
+ SHA256 Algorithm = 4
+ SHA384 Algorithm = 5
+ SHA512 Algorithm = 6
+ Ed25519 Algorithm = 8
+)
+
+// String makes hashAlgorithm printable
+func (a Algorithm) String() string {
+ switch a {
+ case None:
+ return "none"
+ case MD5:
+ return "md5" // [RFC3279]
+ case SHA1:
+ return "sha-1" // [RFC3279]
+ case SHA224:
+ return "sha-224" // [RFC4055]
+ case SHA256:
+ return "sha-256" // [RFC4055]
+ case SHA384:
+ return "sha-384" // [RFC4055]
+ case SHA512:
+ return "sha-512" // [RFC4055]
+ case Ed25519:
+ return "null"
+ default:
+ return "unknown or unsupported hash algorithm"
+ }
+}
+
+// Digest performs a digest on the passed value
+func (a Algorithm) Digest(b []byte) []byte {
+ switch a {
+ case None:
+ return nil
+ case MD5:
+ hash := md5.Sum(b) // #nosec
+ return hash[:]
+ case SHA1:
+ hash := sha1.Sum(b) // #nosec
+ return hash[:]
+ case SHA224:
+ hash := sha256.Sum224(b)
+ return hash[:]
+ case SHA256:
+ hash := sha256.Sum256(b)
+ return hash[:]
+ case SHA384:
+ hash := sha512.Sum384(b)
+ return hash[:]
+ case SHA512:
+ hash := sha512.Sum512(b)
+ return hash[:]
+ default:
+ return nil
+ }
+}
+
+// Insecure reports whether the given HashAlgorithm is considered insecure in DTLS 1.2
+func (a Algorithm) Insecure() bool {
+ switch a {
+ case None, MD5, SHA1:
+ return true
+ default:
+ return false
+ }
+}
+
+// CryptoHash returns the crypto.Hash implementation for the given HashAlgorithm
+func (a Algorithm) CryptoHash() crypto.Hash {
+ switch a {
+ case None:
+ return crypto.Hash(0)
+ case MD5:
+ return crypto.MD5
+ case SHA1:
+ return crypto.SHA1
+ case SHA224:
+ return crypto.SHA224
+ case SHA256:
+ return crypto.SHA256
+ case SHA384:
+ return crypto.SHA384
+ case SHA512:
+ return crypto.SHA512
+ case Ed25519:
+ return crypto.Hash(0)
+ default:
+ return crypto.Hash(0)
+ }
+}
+
+// Algorithms returns all the supported Hash Algorithms
+func Algorithms() map[Algorithm]struct{} {
+ return map[Algorithm]struct{}{
+ None: {},
+ MD5: {},
+ SHA1: {},
+ SHA224: {},
+ SHA256: {},
+ SHA384: {},
+ SHA512: {},
+ Ed25519: {},
+ }
+}
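
For illustration, Digest and CryptoHash agree with the standard library for a supported algorithm; a minimal sketch:

package main

import (
	"crypto"
	"crypto/sha256"
	"fmt"

	"github.com/pion/dtls/v2/pkg/crypto/hash"
)

func main() {
	msg := []byte("abc")
	a := hash.SHA256

	fmt.Println(a.String(), a.Insecure())        // sha-256 false
	fmt.Println(a.CryptoHash() == crypto.SHA256) // true

	want := sha256.Sum256(msg)
	fmt.Println(string(a.Digest(msg)) == string(want[:])) // true
}
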
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/prf/prf.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/prf/prf.go
new file mode 100644
index 000000000..11f53a190
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/prf/prf.go
@@ -0,0 +1,252 @@
+// Package prf implements TLS 1.2 Pseudorandom functions
+package prf
+
+import ( //nolint:gci
+ ellipticStdlib "crypto/elliptic"
+ "crypto/hmac"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash"
+ "math"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "golang.org/x/crypto/curve25519"
+)
+
+const (
+ masterSecretLabel = "master secret"
+ extendedMasterSecretLabel = "extended master secret"
+ keyExpansionLabel = "key expansion"
+ verifyDataClientLabel = "client finished"
+ verifyDataServerLabel = "server finished"
+)
+
+// HashFunc allows callers to decide what hash is used in PRF
+type HashFunc func() hash.Hash
+
+// EncryptionKeys is all the state needed for a TLS CipherSuite
+type EncryptionKeys struct {
+ MasterSecret []byte
+ ClientMACKey []byte
+ ServerMACKey []byte
+ ClientWriteKey []byte
+ ServerWriteKey []byte
+ ClientWriteIV []byte
+ ServerWriteIV []byte
+}
+
+var errInvalidNamedCurve = &protocol.FatalError{Err: errors.New("invalid named curve")} //nolint:goerr113
+
+func (e *EncryptionKeys) String() string {
+ return fmt.Sprintf(`encryptionKeys:
+- masterSecret: %#v
+- clientMACKey: %#v
+- serverMACKey: %#v
+- clientWriteKey: %#v
+- serverWriteKey: %#v
+- clientWriteIV: %#v
+- serverWriteIV: %#v
+`,
+ e.MasterSecret,
+ e.ClientMACKey,
+ e.ServerMACKey,
+ e.ClientWriteKey,
+ e.ServerWriteKey,
+ e.ClientWriteIV,
+ e.ServerWriteIV)
+}
+
+// PSKPreMasterSecret generates the PSK Premaster Secret
+// The premaster secret is formed as follows: if the PSK is N octets
+// long, concatenate a uint16 with the value N, N zero octets, a second
+// uint16 with the value N, and the PSK itself.
+//
+// https://tools.ietf.org/html/rfc4279#section-2
+func PSKPreMasterSecret(psk []byte) []byte {
+ pskLen := uint16(len(psk))
+
+ out := append(make([]byte, 2+pskLen+2), psk...)
+ binary.BigEndian.PutUint16(out, pskLen)
+ binary.BigEndian.PutUint16(out[2+pskLen:], pskLen)
+
+ return out
+}
+
+// EcdhePSKPreMasterSecret implements TLS 1.2 Premaster Secret generation given a psk, a keypair and a curve
+//
+// https://datatracker.ietf.org/doc/html/rfc5489#section-2
+func EcdhePSKPreMasterSecret(psk, publicKey, privateKey []byte, curve elliptic.Curve) ([]byte, error) {
+ preMasterSecret, err := PreMasterSecret(publicKey, privateKey, curve)
+ if err != nil {
+ return nil, err
+ }
+ out := make([]byte, 2+len(preMasterSecret)+2+len(psk))
+
+ // write preMasterSecret length
+ offset := 0
+ binary.BigEndian.PutUint16(out[offset:], uint16(len(preMasterSecret)))
+ offset += 2
+
+ // write preMasterSecret
+ copy(out[offset:], preMasterSecret)
+ offset += len(preMasterSecret)
+
+ // write psk length
+ binary.BigEndian.PutUint16(out[offset:], uint16(len(psk)))
+ offset += 2
+
+ // write psk
+ copy(out[offset:], psk)
+ return out, nil
+}
+
+// PreMasterSecret implements TLS 1.2 Premaster Secret generation given a keypair and a curve
+func PreMasterSecret(publicKey, privateKey []byte, curve elliptic.Curve) ([]byte, error) {
+ switch curve {
+ case elliptic.X25519:
+ return curve25519.X25519(privateKey, publicKey)
+ case elliptic.P256:
+ return ellipticCurvePreMasterSecret(publicKey, privateKey, ellipticStdlib.P256(), ellipticStdlib.P256())
+ case elliptic.P384:
+ return ellipticCurvePreMasterSecret(publicKey, privateKey, ellipticStdlib.P384(), ellipticStdlib.P384())
+ default:
+ return nil, errInvalidNamedCurve
+ }
+}
+
+func ellipticCurvePreMasterSecret(publicKey, privateKey []byte, c1, c2 ellipticStdlib.Curve) ([]byte, error) {
+ x, y := ellipticStdlib.Unmarshal(c1, publicKey)
+ if x == nil || y == nil {
+ return nil, errInvalidNamedCurve
+ }
+
+ result, _ := c2.ScalarMult(x, y, privateKey)
+ preMasterSecret := make([]byte, (c2.Params().BitSize+7)>>3)
+ resultBytes := result.Bytes()
+ copy(preMasterSecret[len(preMasterSecret)-len(resultBytes):], resultBytes)
+ return preMasterSecret, nil
+}
+
+// PHash implements the TLS 1.2 PRF P_hash. The SHA-256 hash function is used
+// for all cipher suites defined in the TLS 1.2 document and in TLS documents
+// published prior to it when TLS 1.2 is negotiated. New cipher suites MUST
+// explicitly specify a PRF and, in general, SHOULD use the TLS PRF with
+// SHA-256 or a stronger standard hash function.
+//
+// P_hash(secret, seed) = HMAC_hash(secret, A(1) + seed) +
+// HMAC_hash(secret, A(2) + seed) +
+// HMAC_hash(secret, A(3) + seed) + ...
+//
+// A() is defined as:
+//
+// A(0) = seed
+// A(i) = HMAC_hash(secret, A(i-1))
+//
+// P_hash can be iterated as many times as necessary to produce the
+// required quantity of data. For example, if P_SHA256 is being used to
+// create 80 bytes of data, it will have to be iterated three times
+// (through A(3)), creating 96 bytes of output data; the last 16 bytes
+// of the final iteration will then be discarded, leaving 80 bytes of
+// output data.
+//
+// https://tools.ietf.org/html/rfc4346
+func PHash(secret, seed []byte, requestedLength int, h HashFunc) ([]byte, error) {
+ hmacSHA256 := func(key, data []byte) ([]byte, error) {
+ mac := hmac.New(h, key)
+ if _, err := mac.Write(data); err != nil {
+ return nil, err
+ }
+ return mac.Sum(nil), nil
+ }
+
+ var err error
+ lastRound := seed
+ out := []byte{}
+
+ iterations := int(math.Ceil(float64(requestedLength) / float64(h().Size())))
+ for i := 0; i < iterations; i++ {
+ lastRound, err = hmacSHA256(secret, lastRound)
+ if err != nil {
+ return nil, err
+ }
+ withSecret, err := hmacSHA256(secret, append(lastRound, seed...))
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, withSecret...)
+ }
+
+ return out[:requestedLength], nil
+}
+
+// ExtendedMasterSecret generates an Extended MasterSecret as defined in
+// https://tools.ietf.org/html/rfc7627
+func ExtendedMasterSecret(preMasterSecret, sessionHash []byte, h HashFunc) ([]byte, error) {
+ seed := append([]byte(extendedMasterSecretLabel), sessionHash...)
+ return PHash(preMasterSecret, seed, 48, h)
+}
+
+// MasterSecret generates a TLS 1.2 MasterSecret
+func MasterSecret(preMasterSecret, clientRandom, serverRandom []byte, h HashFunc) ([]byte, error) {
+ seed := append(append([]byte(masterSecretLabel), clientRandom...), serverRandom...)
+ return PHash(preMasterSecret, seed, 48, h)
+}
+
+// GenerateEncryptionKeys is the final step of the TLS 1.2 PRF. Given all state generated so far, it generates
+// the final keys needed for encryption
+func GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int, h HashFunc) (*EncryptionKeys, error) {
+ seed := append(append([]byte(keyExpansionLabel), serverRandom...), clientRandom...)
+ keyMaterial, err := PHash(masterSecret, seed, (2*macLen)+(2*keyLen)+(2*ivLen), h)
+ if err != nil {
+ return nil, err
+ }
+
+ clientMACKey := keyMaterial[:macLen]
+ keyMaterial = keyMaterial[macLen:]
+
+ serverMACKey := keyMaterial[:macLen]
+ keyMaterial = keyMaterial[macLen:]
+
+ clientWriteKey := keyMaterial[:keyLen]
+ keyMaterial = keyMaterial[keyLen:]
+
+ serverWriteKey := keyMaterial[:keyLen]
+ keyMaterial = keyMaterial[keyLen:]
+
+ clientWriteIV := keyMaterial[:ivLen]
+ keyMaterial = keyMaterial[ivLen:]
+
+ serverWriteIV := keyMaterial[:ivLen]
+
+ return &EncryptionKeys{
+ MasterSecret: masterSecret,
+ ClientMACKey: clientMACKey,
+ ServerMACKey: serverMACKey,
+ ClientWriteKey: clientWriteKey,
+ ServerWriteKey: serverWriteKey,
+ ClientWriteIV: clientWriteIV,
+ ServerWriteIV: serverWriteIV,
+ }, nil
+}
+
+func prfVerifyData(masterSecret, handshakeBodies []byte, label string, hashFunc HashFunc) ([]byte, error) {
+ h := hashFunc()
+ if _, err := h.Write(handshakeBodies); err != nil {
+ return nil, err
+ }
+
+ seed := append([]byte(label), h.Sum(nil)...)
+ return PHash(masterSecret, seed, 12, hashFunc)
+}
+
+// VerifyDataClient is called on the Client Side to either verify or generate the VerifyData message
+func VerifyDataClient(masterSecret, handshakeBodies []byte, h HashFunc) ([]byte, error) {
+ return prfVerifyData(masterSecret, handshakeBodies, verifyDataClientLabel, h)
+}
+
+// VerifyDataServer is called on the Server Side to either verify or generate the VerifyData message
+func VerifyDataServer(masterSecret, handshakeBodies []byte, h HashFunc) ([]byte, error) {
+ return prfVerifyData(masterSecret, handshakeBodies, verifyDataServerLabel, h)
+}
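
A worked example of the RFC 4279 layout produced by PSKPreMasterSecret: for a 3-byte PSK the output is uint16(3), three zero octets, uint16(3), then the PSK itself (sketch, illustrative key):

package main

import (
	"fmt"

	"github.com/pion/dtls/v2/pkg/crypto/prf"
)

func main() {
	psk := []byte{0xAA, 0xBB, 0xCC} // N = 3, illustrative

	out := prf.PSKPreMasterSecret(psk)
	// 00 03 | 00 00 00 | 00 03 | aa bb cc
	fmt.Printf("% x\n", out)
}
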
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/signature/signature.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/signature/signature.go
new file mode 100644
index 000000000..d9150eb8c
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/signature/signature.go
@@ -0,0 +1,24 @@
+// Package signature provides our implemented Signature Algorithms
+package signature
+
+// Algorithm as defined in TLS 1.2
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-16
+type Algorithm uint16
+
+// SignatureAlgorithm enums
+const (
+ Anonymous Algorithm = 0
+ RSA Algorithm = 1
+ ECDSA Algorithm = 3
+ Ed25519 Algorithm = 7
+)
+
+// Algorithms returns all implemented Signature Algorithms
+func Algorithms() map[Algorithm]struct{} {
+ return map[Algorithm]struct{}{
+ Anonymous: {},
+ RSA: {},
+ ECDSA: {},
+ Ed25519: {},
+ }
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/errors.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/errors.go
new file mode 100644
index 000000000..9d9d3b309
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/errors.go
@@ -0,0 +1,9 @@
+package signaturehash
+
+import "errors"
+
+var (
+ errNoAvailableSignatureSchemes = errors.New("connection can not be created, no SignatureScheme satisfy this Config")
+ errInvalidSignatureAlgorithm = errors.New("invalid signature algorithm")
+ errInvalidHashAlgorithm = errors.New("invalid hash algorithm")
+)
diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/signaturehash.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/signaturehash.go
new file mode 100644
index 000000000..7959e1f04
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/signaturehash.go
@@ -0,0 +1,93 @@
+// Package signaturehash provides the SignatureHashAlgorithm as defined in TLS 1.2
+package signaturehash
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/tls"
+ "fmt"
+
+ "github.com/pion/dtls/v2/pkg/crypto/hash"
+ "github.com/pion/dtls/v2/pkg/crypto/signature"
+)
+
+// Algorithm is a signature/hash algorithm pair which may be used in
+// digital signatures.
+//
+// https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
+type Algorithm struct {
+ Hash hash.Algorithm
+ Signature signature.Algorithm
+}
+
+// Algorithms returns all the known SignatureHash Algorithms
+func Algorithms() []Algorithm {
+ return []Algorithm{
+ {hash.SHA256, signature.ECDSA},
+ {hash.SHA384, signature.ECDSA},
+ {hash.SHA512, signature.ECDSA},
+ {hash.SHA256, signature.RSA},
+ {hash.SHA384, signature.RSA},
+ {hash.SHA512, signature.RSA},
+ {hash.Ed25519, signature.Ed25519},
+ }
+}
+
+// SelectSignatureScheme returns the most preferred and compatible scheme.
+func SelectSignatureScheme(sigs []Algorithm, privateKey crypto.PrivateKey) (Algorithm, error) {
+ for _, ss := range sigs {
+ if ss.isCompatible(privateKey) {
+ return ss, nil
+ }
+ }
+ return Algorithm{}, errNoAvailableSignatureSchemes
+}
+
+// isCompatible checks that given private key is compatible with the signature scheme.
+func (a *Algorithm) isCompatible(privateKey crypto.PrivateKey) bool {
+ switch privateKey.(type) {
+ case ed25519.PrivateKey:
+ return a.Signature == signature.Ed25519
+ case *ecdsa.PrivateKey:
+ return a.Signature == signature.ECDSA
+ case *rsa.PrivateKey:
+ return a.Signature == signature.RSA
+ default:
+ return false
+ }
+}
+
+// ParseSignatureSchemes translates []tls.SignatureScheme to []signatureHashAlgorithm.
+// It returns the default signature scheme list if no SignatureScheme is passed.
+func ParseSignatureSchemes(sigs []tls.SignatureScheme, insecureHashes bool) ([]Algorithm, error) {
+ if len(sigs) == 0 {
+ return Algorithms(), nil
+ }
+ out := []Algorithm{}
+ for _, ss := range sigs {
+ sig := signature.Algorithm(ss & 0xFF)
+ if _, ok := signature.Algorithms()[sig]; !ok {
+ return nil,
+ fmt.Errorf("SignatureScheme %04x: %w", ss, errInvalidSignatureAlgorithm)
+ }
+ h := hash.Algorithm(ss >> 8)
+ if _, ok := hash.Algorithms()[h]; !ok || (ok && h == hash.None) {
+ return nil, fmt.Errorf("SignatureScheme %04x: %w", ss, errInvalidHashAlgorithm)
+ }
+ if h.Insecure() && !insecureHashes {
+ continue
+ }
+ out = append(out, Algorithm{
+ Hash: h,
+ Signature: sig,
+ })
+ }
+
+ if len(out) == 0 {
+ return nil, errNoAvailableSignatureSchemes
+ }
+
+ return out, nil
+}
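
For illustration, parsing crypto/tls signature schemes into the hash/signature pairs above; ECDSAWithP256AndSHA256 is 0x0403, so the hash byte 0x04 maps to SHA-256 and the signature byte 0x03 to ECDSA (sketch):

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/pion/dtls/v2/pkg/crypto/signaturehash"
)

func main() {
	schemes := []tls.SignatureScheme{tls.ECDSAWithP256AndSHA256, tls.PKCS1WithSHA256}

	algs, err := signaturehash.ParseSignatureSchemes(schemes, false)
	if err != nil {
		panic(err)
	}
	for _, a := range algs {
		fmt.Printf("hash=%s signature=%d\n", a.Hash, a.Signature)
	}
	// hash=sha-256 signature=3  (ECDSA)
	// hash=sha-256 signature=1  (RSA)
}
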
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/alert/alert.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/alert/alert.go
new file mode 100644
index 000000000..663c6b379
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/alert/alert.go
@@ -0,0 +1,163 @@
+// Package alert implements TLS alert protocol https://tools.ietf.org/html/rfc5246#section-7.2
+package alert
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+)
+
+var errBufferTooSmall = &protocol.TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113
+
+// Level is the level of the TLS Alert
+type Level byte
+
+// Level enums
+const (
+ Warning Level = 1
+ Fatal Level = 2
+)
+
+func (l Level) String() string {
+ switch l {
+ case Warning:
+ return "Warning"
+ case Fatal:
+ return "Fatal"
+ default:
+ return "Invalid alert level"
+ }
+}
+
+// Description is the extended info of the TLS Alert
+type Description byte
+
+// Description enums
+const (
+ CloseNotify Description = 0
+ UnexpectedMessage Description = 10
+ BadRecordMac Description = 20
+ DecryptionFailed Description = 21
+ RecordOverflow Description = 22
+ DecompressionFailure Description = 30
+ HandshakeFailure Description = 40
+ NoCertificate Description = 41
+ BadCertificate Description = 42
+ UnsupportedCertificate Description = 43
+ CertificateRevoked Description = 44
+ CertificateExpired Description = 45
+ CertificateUnknown Description = 46
+ IllegalParameter Description = 47
+ UnknownCA Description = 48
+ AccessDenied Description = 49
+ DecodeError Description = 50
+ DecryptError Description = 51
+ ExportRestriction Description = 60
+ ProtocolVersion Description = 70
+ InsufficientSecurity Description = 71
+ InternalError Description = 80
+ UserCanceled Description = 90
+ NoRenegotiation Description = 100
+ UnsupportedExtension Description = 110
+ NoApplicationProtocol Description = 120
+)
+
+func (d Description) String() string {
+ switch d {
+ case CloseNotify:
+ return "CloseNotify"
+ case UnexpectedMessage:
+ return "UnexpectedMessage"
+ case BadRecordMac:
+ return "BadRecordMac"
+ case DecryptionFailed:
+ return "DecryptionFailed"
+ case RecordOverflow:
+ return "RecordOverflow"
+ case DecompressionFailure:
+ return "DecompressionFailure"
+ case HandshakeFailure:
+ return "HandshakeFailure"
+ case NoCertificate:
+ return "NoCertificate"
+ case BadCertificate:
+ return "BadCertificate"
+ case UnsupportedCertificate:
+ return "UnsupportedCertificate"
+ case CertificateRevoked:
+ return "CertificateRevoked"
+ case CertificateExpired:
+ return "CertificateExpired"
+ case CertificateUnknown:
+ return "CertificateUnknown"
+ case IllegalParameter:
+ return "IllegalParameter"
+ case UnknownCA:
+ return "UnknownCA"
+ case AccessDenied:
+ return "AccessDenied"
+ case DecodeError:
+ return "DecodeError"
+ case DecryptError:
+ return "DecryptError"
+ case ExportRestriction:
+ return "ExportRestriction"
+ case ProtocolVersion:
+ return "ProtocolVersion"
+ case InsufficientSecurity:
+ return "InsufficientSecurity"
+ case InternalError:
+ return "InternalError"
+ case UserCanceled:
+ return "UserCanceled"
+ case NoRenegotiation:
+ return "NoRenegotiation"
+ case UnsupportedExtension:
+ return "UnsupportedExtension"
+ case NoApplicationProtocol:
+ return "NoApplicationProtocol"
+ default:
+ return "Invalid alert description"
+ }
+}
+
+// Alert is one of the content types supported by the TLS record layer.
+// Alert messages convey the severity of the message
+// (warning or fatal) and a description of the alert. Alert messages
+// with a level of fatal result in the immediate termination of the
+// connection. In this case, other connections corresponding to the
+// session may continue, but the session identifier MUST be invalidated,
+// preventing the failed session from being used to establish new
+// connections. Like other messages, alert messages are encrypted and
+// compressed, as specified by the current connection state.
+// https://tools.ietf.org/html/rfc5246#section-7.2
+type Alert struct {
+ Level Level
+ Description Description
+}
+
+// ContentType returns the ContentType of this Content
+func (a Alert) ContentType() protocol.ContentType {
+ return protocol.ContentTypeAlert
+}
+
+// Marshal returns the encoded alert
+func (a *Alert) Marshal() ([]byte, error) {
+ return []byte{byte(a.Level), byte(a.Description)}, nil
+}
+
+// Unmarshal populates the alert from binary data
+func (a *Alert) Unmarshal(data []byte) error {
+ if len(data) != 2 {
+ return errBufferTooSmall
+ }
+
+ a.Level = Level(data[0])
+ a.Description = Description(data[1])
+ return nil
+}
+
+func (a *Alert) String() string {
+ return fmt.Sprintf("Alert %s: %s", a.Level, a.Description)
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/application_data.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/application_data.go
new file mode 100644
index 000000000..e5fd6f549
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/application_data.go
@@ -0,0 +1,26 @@
+package protocol
+
+// ApplicationData messages are carried by the record layer and are
+// fragmented, compressed, and encrypted based on the current connection
+// state. The messages are treated as transparent data to the record
+// layer.
+// https://tools.ietf.org/html/rfc5246#section-10
+type ApplicationData struct {
+ Data []byte
+}
+
+// ContentType returns the ContentType of this content
+func (a ApplicationData) ContentType() ContentType {
+ return ContentTypeApplicationData
+}
+
+// Marshal encodes the ApplicationData to binary
+func (a *ApplicationData) Marshal() ([]byte, error) {
+ return append([]byte{}, a.Data...), nil
+}
+
+// Unmarshal populates the ApplicationData from binary
+func (a *ApplicationData) Unmarshal(data []byte) error {
+ a.Data = append([]byte{}, data...)
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/change_cipher_spec.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/change_cipher_spec.go
new file mode 100644
index 000000000..b42647a05
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/change_cipher_spec.go
@@ -0,0 +1,27 @@
+package protocol
+
+// ChangeCipherSpec protocol exists to signal transitions in
+// ciphering strategies. The protocol consists of a single message,
+// which is encrypted and compressed under the current (not the pending)
+// connection state. The message consists of a single byte of value 1.
+// https://tools.ietf.org/html/rfc5246#section-7.1
+type ChangeCipherSpec struct{}
+
+// ContentType returns the ContentType of this content
+func (c ChangeCipherSpec) ContentType() ContentType {
+ return ContentTypeChangeCipherSpec
+}
+
+// Marshal encodes the ChangeCipherSpec to binary
+func (c *ChangeCipherSpec) Marshal() ([]byte, error) {
+ return []byte{0x01}, nil
+}
+
+// Unmarshal populates the ChangeCipherSpec from binary
+func (c *ChangeCipherSpec) Unmarshal(data []byte) error {
+ if len(data) == 1 && data[0] == 0x01 {
+ return nil
+ }
+
+ return errInvalidCipherSpec
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/compression_method.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/compression_method.go
new file mode 100644
index 000000000..678e816cb
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/compression_method.go
@@ -0,0 +1,48 @@
+package protocol
+
+// CompressionMethodID is the ID for a CompressionMethod
+type CompressionMethodID byte
+
+const (
+ compressionMethodNull CompressionMethodID = 0
+)
+
+// CompressionMethod represents a TLS Compression Method
+type CompressionMethod struct {
+ ID CompressionMethodID
+}
+
+// CompressionMethods returns all supported CompressionMethods
+func CompressionMethods() map[CompressionMethodID]*CompressionMethod {
+ return map[CompressionMethodID]*CompressionMethod{
+ compressionMethodNull: {ID: compressionMethodNull},
+ }
+}
+
+// DecodeCompressionMethods decodes the given compression methods
+func DecodeCompressionMethods(buf []byte) ([]*CompressionMethod, error) {
+ if len(buf) < 1 {
+ return nil, errBufferTooSmall
+ }
+ compressionMethodsCount := int(buf[0])
+ c := []*CompressionMethod{}
+ for i := 0; i < compressionMethodsCount; i++ {
+ if len(buf) <= i+1 {
+ return nil, errBufferTooSmall
+ }
+ id := CompressionMethodID(buf[i+1])
+ if compressionMethod, ok := CompressionMethods()[id]; ok {
+ c = append(c, compressionMethod)
+ }
+ }
+ return c, nil
+}
+
+// EncodeCompressionMethods encodes the given compression methods
+func EncodeCompressionMethods(c []*CompressionMethod) []byte {
+ out := []byte{byte(len(c))}
+ for i := len(c); i > 0; i-- {
+ out = append(out, byte(c[i-1].ID))
+ }
+ return out
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/content.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/content.go
new file mode 100644
index 000000000..47e5c96bb
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/content.go
@@ -0,0 +1,21 @@
+package protocol
+
+// ContentType represents the IANA Registered ContentTypes
+//
+// https://tools.ietf.org/html/rfc4346#section-6.2.1
+type ContentType uint8
+
+// ContentType enums
+const (
+ ContentTypeChangeCipherSpec ContentType = 20
+ ContentTypeAlert ContentType = 21
+ ContentTypeHandshake ContentType = 22
+ ContentTypeApplicationData ContentType = 23
+)
+
+// Content is the top level distinguisher for a DTLS Datagram
+type Content interface {
+ ContentType() ContentType
+ Marshal() ([]byte, error)
+ Unmarshal(data []byte) error
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/errors.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/errors.go
new file mode 100644
index 000000000..fbeb0774c
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/errors.go
@@ -0,0 +1,106 @@
+package protocol
+
+import (
+ "errors"
+ "fmt"
+ "net"
+)
+
+var (
+ errBufferTooSmall = &TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113
+ errInvalidCipherSpec = &FatalError{Err: errors.New("cipher spec invalid")} //nolint:goerr113
+)
+
+// FatalError indicates that the DTLS connection is no longer available.
+// It is mainly caused by wrong configuration of server or client.
+type FatalError struct {
+ Err error
+}
+
+// InternalError indicates an internal error caused by the implementation; the DTLS connection is no longer available.
+// It is mainly caused by bugs or attempts to use unimplemented features.
+type InternalError struct {
+ Err error
+}
+
+// TemporaryError indicates that the DTLS connection is still available, but the request failed temporarily.
+type TemporaryError struct {
+ Err error
+}
+
+// TimeoutError indicates that the request was timed out.
+type TimeoutError struct {
+ Err error
+}
+
+// HandshakeError indicates that the handshake failed.
+type HandshakeError struct {
+ Err error
+}
+
+// Timeout implements net.Error.Timeout()
+func (*FatalError) Timeout() bool { return false }
+
+// Temporary implements net.Error.Temporary()
+func (*FatalError) Temporary() bool { return false }
+
+// Unwrap implements Go1.13 error unwrapper.
+func (e *FatalError) Unwrap() error { return e.Err }
+
+func (e *FatalError) Error() string { return fmt.Sprintf("dtls fatal: %v", e.Err) }
+
+// Timeout implements net.Error.Timeout()
+func (*InternalError) Timeout() bool { return false }
+
+// Temporary implements net.Error.Temporary()
+func (*InternalError) Temporary() bool { return false }
+
+// Unwrap implements Go1.13 error unwrapper.
+func (e *InternalError) Unwrap() error { return e.Err }
+
+func (e *InternalError) Error() string { return fmt.Sprintf("dtls internal: %v", e.Err) }
+
+// Timeout implements net.Error.Timeout()
+func (*TemporaryError) Timeout() bool { return false }
+
+// Temporary implements net.Error.Temporary()
+func (*TemporaryError) Temporary() bool { return true }
+
+// Unwrap implements Go1.13 error unwrapper.
+func (e *TemporaryError) Unwrap() error { return e.Err }
+
+func (e *TemporaryError) Error() string { return fmt.Sprintf("dtls temporary: %v", e.Err) }
+
+// Timeout implements net.Error.Timeout()
+func (*TimeoutError) Timeout() bool { return true }
+
+// Temporary implements net.Error.Temporary()
+func (*TimeoutError) Temporary() bool { return true }
+
+// Unwrap implements Go1.13 error unwrapper.
+func (e *TimeoutError) Unwrap() error { return e.Err }
+
+func (e *TimeoutError) Error() string { return fmt.Sprintf("dtls timeout: %v", e.Err) }
+
+// Timeout implements net.Error.Timeout()
+func (e *HandshakeError) Timeout() bool {
+ var netErr net.Error
+ if errors.As(e.Err, &netErr) {
+ return netErr.Timeout()
+ }
+ return false
+}
+
+// Temporary implements net.Error.Temporary()
+func (e *HandshakeError) Temporary() bool {
+ var netErr net.Error
+ if errors.As(e.Err, &netErr) {
+ return netErr.Temporary() //nolint
+ }
+ return false
+}
+
+// Unwrap implements Go1.13 error unwrapper.
+func (e *HandshakeError) Unwrap() error { return e.Err }
+
+func (e *HandshakeError) Error() string { return fmt.Sprintf("handshake error: %v", e.Err) }
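
A quick sketch of how these wrappers interact with net.Error: a HandshakeError that wraps a TimeoutError reports Timeout() == true through errors.As (illustrative only):

package main

import (
	"errors"
	"fmt"
	"net"

	"github.com/pion/dtls/v2/pkg/protocol"
)

func main() {
	// A handshake error wrapping a timeout reports Timeout() == true.
	err := &protocol.HandshakeError{Err: &protocol.TimeoutError{Err: errors.New("read deadline reached")}}

	var netErr net.Error
	if errors.As(err, &netErr) {
		fmt.Println(netErr.Timeout()) // true
	}
}
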
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/alpn.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/alpn.go
new file mode 100644
index 000000000..8d7e1123e
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/alpn.go
@@ -0,0 +1,77 @@
+package extension
+
+import (
+ "golang.org/x/crypto/cryptobyte"
+)
+
+// ALPN is a TLS extension for application-layer protocol negotiation within
+// the TLS handshake.
+//
+// https://tools.ietf.org/html/rfc7301
+type ALPN struct {
+ ProtocolNameList []string
+}
+
+// TypeValue returns the extension TypeValue
+func (a ALPN) TypeValue() TypeValue {
+ return ALPNTypeValue
+}
+
+// Marshal encodes the extension
+func (a *ALPN) Marshal() ([]byte, error) {
+ var b cryptobyte.Builder
+ b.AddUint16(uint16(a.TypeValue()))
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, proto := range a.ProtocolNameList {
+ p := proto // Satisfy range scope lint
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(p))
+ })
+ }
+ })
+ })
+ return b.Bytes()
+}
+
+// Unmarshal populates the extension from encoded data
+func (a *ALPN) Unmarshal(data []byte) error {
+ val := cryptobyte.String(data)
+
+ var extension uint16
+ val.ReadUint16(&extension)
+ if TypeValue(extension) != a.TypeValue() {
+ return errInvalidExtensionType
+ }
+
+ var extData cryptobyte.String
+ val.ReadUint16LengthPrefixed(&extData)
+
+ var protoList cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
+ return ErrALPNInvalidFormat
+ }
+ for !protoList.Empty() {
+ var proto cryptobyte.String
+ if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
+ return ErrALPNInvalidFormat
+ }
+ a.ProtocolNameList = append(a.ProtocolNameList, string(proto))
+ }
+ return nil
+}
+
+// ALPNProtocolSelection negotiates a shared protocol according to section 3.2 of RFC 7301
+func ALPNProtocolSelection(supportedProtocols, peerSupportedProtocols []string) (string, error) {
+ if len(supportedProtocols) == 0 || len(peerSupportedProtocols) == 0 {
+ return "", nil
+ }
+ for _, s := range supportedProtocols {
+ for _, c := range peerSupportedProtocols {
+ if s == c {
+ return s, nil
+ }
+ }
+ }
+ return "", errALPNNoAppProto
+}
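
ALPNProtocolSelection walks the first argument in order and returns the first protocol the peer also offers, so the caller's own preference list wins; an empty list on either side disables negotiation rather than failing. A small sketch of that behavior (illustrative only):

    package main

    import (
        "fmt"

        "github.com/pion/dtls/v2/pkg/protocol/extension"
    )

    func main() {
        // Local preference order first, peer's offer second: "h2" wins.
        proto, err := extension.ALPNProtocolSelection(
            []string{"h2", "http/1.1"},
            []string{"http/1.1", "h2"},
        )
        fmt.Println(proto, err) // h2 <nil>

        // No overlap yields a non-nil fatal error.
        _, err = extension.ALPNProtocolSelection([]string{"h2"}, []string{"webrtc"})
        fmt.Println(err != nil) // true
    }
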
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/errors.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/errors.go
new file mode 100644
index 000000000..82d8b3408
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/errors.go
@@ -0,0 +1,17 @@
+package extension
+
+import (
+ "errors"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+)
+
+var (
+ // ErrALPNInvalidFormat is raised when the ALPN format is invalid
+ ErrALPNInvalidFormat = &protocol.FatalError{Err: errors.New("invalid alpn format")} //nolint:goerr113
+ errALPNNoAppProto = &protocol.FatalError{Err: errors.New("no application protocol")} //nolint:goerr113
+ errBufferTooSmall = &protocol.TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113
+ errInvalidExtensionType = &protocol.FatalError{Err: errors.New("invalid extension type")} //nolint:goerr113
+ errInvalidSNIFormat = &protocol.FatalError{Err: errors.New("invalid server name format")} //nolint:goerr113
+ errLengthMismatch = &protocol.InternalError{Err: errors.New("data length and declared length do not match")} //nolint:goerr113
+)
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/extension.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/extension.go
new file mode 100644
index 000000000..ec4c1ff5c
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/extension.go
@@ -0,0 +1,99 @@
+// Package extension implements the extension values in the ClientHello/ServerHello
+package extension
+
+import "encoding/binary"
+
+// TypeValue is the 2 byte value for a TLS Extension as registered in the IANA
+//
+// https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml
+type TypeValue uint16
+
+// TypeValue constants
+const (
+ ServerNameTypeValue TypeValue = 0
+ SupportedEllipticCurvesTypeValue TypeValue = 10
+ SupportedPointFormatsTypeValue TypeValue = 11
+ SupportedSignatureAlgorithmsTypeValue TypeValue = 13
+ UseSRTPTypeValue TypeValue = 14
+ ALPNTypeValue TypeValue = 16
+ UseExtendedMasterSecretTypeValue TypeValue = 23
+ RenegotiationInfoTypeValue TypeValue = 65281
+)
+
+// Extension represents a single TLS extension
+type Extension interface {
+ Marshal() ([]byte, error)
+ Unmarshal(data []byte) error
+ TypeValue() TypeValue
+}
+
+// Unmarshal many extensions at once
+func Unmarshal(buf []byte) ([]Extension, error) {
+ switch {
+ case len(buf) == 0:
+ return []Extension{}, nil
+ case len(buf) < 2:
+ return nil, errBufferTooSmall
+ }
+
+ declaredLen := binary.BigEndian.Uint16(buf)
+ if len(buf)-2 != int(declaredLen) {
+ return nil, errLengthMismatch
+ }
+
+ extensions := []Extension{}
+ unmarshalAndAppend := func(data []byte, e Extension) error {
+ err := e.Unmarshal(data)
+ if err != nil {
+ return err
+ }
+ extensions = append(extensions, e)
+ return nil
+ }
+
+ for offset := 2; offset < len(buf); {
+ if len(buf) < (offset + 2) {
+ return nil, errBufferTooSmall
+ }
+ var err error
+ switch TypeValue(binary.BigEndian.Uint16(buf[offset:])) {
+ case ServerNameTypeValue:
+ err = unmarshalAndAppend(buf[offset:], &ServerName{})
+ case SupportedEllipticCurvesTypeValue:
+ err = unmarshalAndAppend(buf[offset:], &SupportedEllipticCurves{})
+ case UseSRTPTypeValue:
+ err = unmarshalAndAppend(buf[offset:], &UseSRTP{})
+ case ALPNTypeValue:
+ err = unmarshalAndAppend(buf[offset:], &ALPN{})
+ case UseExtendedMasterSecretTypeValue:
+ err = unmarshalAndAppend(buf[offset:], &UseExtendedMasterSecret{})
+ case RenegotiationInfoTypeValue:
+ err = unmarshalAndAppend(buf[offset:], &RenegotiationInfo{})
+ default:
+ }
+ if err != nil {
+ return nil, err
+ }
+ if len(buf) < (offset + 4) {
+ return nil, errBufferTooSmall
+ }
+ extensionLength := binary.BigEndian.Uint16(buf[offset+2:])
+ offset += (4 + int(extensionLength))
+ }
+ return extensions, nil
+}
+
+// Marshal many extensions at once
+func Marshal(e []Extension) ([]byte, error) {
+ extensions := []byte{}
+ for _, e := range e {
+ raw, err := e.Marshal()
+ if err != nil {
+ return nil, err
+ }
+ extensions = append(extensions, raw...)
+ }
+ out := []byte{0x00, 0x00}
+ binary.BigEndian.PutUint16(out, uint16(len(extensions)))
+ return append(out, extensions...), nil
+}
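
The package-level Marshal and Unmarshal frame the whole extension block with a two-byte length and then dispatch on each extension's TypeValue. A round-trip sketch using two of the extensions defined in this diff (illustrative only):

    package main

    import (
        "fmt"

        "github.com/pion/dtls/v2/pkg/protocol/extension"
    )

    func main() {
        in := []extension.Extension{
            &extension.ServerName{ServerName: "example.com"},
            &extension.UseExtendedMasterSecret{Supported: true},
        }

        // Marshal prepends the total length of all encoded extensions.
        buf, err := extension.Marshal(in)
        if err != nil {
            panic(err)
        }

        // Unmarshal re-creates each known extension from its TypeValue.
        out, err := extension.Unmarshal(buf)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(out))                                  // 2
        fmt.Println(out[0].(*extension.ServerName).ServerName) // example.com
    }
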
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/renegotiation_info.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/renegotiation_info.go
new file mode 100644
index 000000000..8378c3d94
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/renegotiation_info.go
@@ -0,0 +1,43 @@
+package extension
+
+import "encoding/binary"
+
+const (
+ renegotiationInfoHeaderSize = 5
+)
+
+// RenegotiationInfo allows a Client/Server to
+// communicate their renegotiation support
+//
+// https://tools.ietf.org/html/rfc5746
+type RenegotiationInfo struct {
+ RenegotiatedConnection uint8
+}
+
+// TypeValue returns the extension TypeValue
+func (r RenegotiationInfo) TypeValue() TypeValue {
+ return RenegotiationInfoTypeValue
+}
+
+// Marshal encodes the extension
+func (r *RenegotiationInfo) Marshal() ([]byte, error) {
+ out := make([]byte, renegotiationInfoHeaderSize)
+
+ binary.BigEndian.PutUint16(out, uint16(r.TypeValue()))
+ binary.BigEndian.PutUint16(out[2:], uint16(1)) // length
+ out[4] = r.RenegotiatedConnection
+ return out, nil
+}
+
+// Unmarshal populates the extension from encoded data
+func (r *RenegotiationInfo) Unmarshal(data []byte) error {
+ if len(data) < renegotiationInfoHeaderSize {
+ return errBufferTooSmall
+ } else if TypeValue(binary.BigEndian.Uint16(data)) != r.TypeValue() {
+ return errInvalidExtensionType
+ }
+
+ r.RenegotiatedConnection = data[4]
+
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/server_name.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/server_name.go
new file mode 100644
index 000000000..9a1cc2926
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/server_name.go
@@ -0,0 +1,78 @@
+package extension
+
+import (
+ "strings"
+
+ "golang.org/x/crypto/cryptobyte"
+)
+
+const serverNameTypeDNSHostName = 0
+
+// ServerName allows the client to inform the server of the specific
+// name it wishes to contact. This is useful when multiple DNS names
+// resolve to one IP.
+//
+// https://tools.ietf.org/html/rfc6066#section-3
+type ServerName struct {
+ ServerName string
+}
+
+// TypeValue returns the extension TypeValue
+func (s ServerName) TypeValue() TypeValue {
+ return ServerNameTypeValue
+}
+
+// Marshal encodes the extension
+func (s *ServerName) Marshal() ([]byte, error) {
+ var b cryptobyte.Builder
+ b.AddUint16(uint16(s.TypeValue()))
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8(serverNameTypeDNSHostName)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(s.ServerName))
+ })
+ })
+ })
+ return b.Bytes()
+}
+
+// Unmarshal populates the extension from encoded data
+func (s *ServerName) Unmarshal(data []byte) error {
+ val := cryptobyte.String(data)
+ var extension uint16
+ val.ReadUint16(&extension)
+ if TypeValue(extension) != s.TypeValue() {
+ return errInvalidExtensionType
+ }
+
+ var extData cryptobyte.String
+ val.ReadUint16LengthPrefixed(&extData)
+
+ var nameList cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
+ return errInvalidSNIFormat
+ }
+ for !nameList.Empty() {
+ var nameType uint8
+ var serverName cryptobyte.String
+ if !nameList.ReadUint8(&nameType) ||
+ !nameList.ReadUint16LengthPrefixed(&serverName) ||
+ serverName.Empty() {
+ return errInvalidSNIFormat
+ }
+ if nameType != serverNameTypeDNSHostName {
+ continue
+ }
+ if len(s.ServerName) != 0 {
+ // Multiple names of the same name_type are prohibited.
+ return errInvalidSNIFormat
+ }
+ s.ServerName = string(serverName)
+ // An SNI value may not include a trailing dot.
+ if strings.HasSuffix(s.ServerName, ".") {
+ return errInvalidSNIFormat
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go
new file mode 100644
index 000000000..2c4d1d4a6
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go
@@ -0,0 +1,21 @@
+package extension
+
+// SRTPProtectionProfile defines the parameters and options that are in effect for the SRTP processing
+// https://tools.ietf.org/html/rfc5764#section-4.1.2
+type SRTPProtectionProfile uint16
+
+const (
+ SRTP_AES128_CM_HMAC_SHA1_80 SRTPProtectionProfile = 0x0001 // nolint
+ SRTP_AES128_CM_HMAC_SHA1_32 SRTPProtectionProfile = 0x0002 // nolint
+ SRTP_AEAD_AES_128_GCM SRTPProtectionProfile = 0x0007 // nolint
+ SRTP_AEAD_AES_256_GCM SRTPProtectionProfile = 0x0008 // nolint
+)
+
+func srtpProtectionProfiles() map[SRTPProtectionProfile]bool {
+ return map[SRTPProtectionProfile]bool{
+ SRTP_AES128_CM_HMAC_SHA1_80: true,
+ SRTP_AES128_CM_HMAC_SHA1_32: true,
+ SRTP_AEAD_AES_128_GCM: true,
+ SRTP_AEAD_AES_256_GCM: true,
+ }
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_elliptic_curves.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_elliptic_curves.go
new file mode 100644
index 000000000..8f077fcc7
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_elliptic_curves.go
@@ -0,0 +1,62 @@
+package extension
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+)
+
+const (
+ supportedGroupsHeaderSize = 6
+)
+
+// SupportedEllipticCurves allows a Client/Server to communicate
+// what curves they both support
+//
+// https://tools.ietf.org/html/rfc8422#section-5.1.1
+type SupportedEllipticCurves struct {
+ EllipticCurves []elliptic.Curve
+}
+
+// TypeValue returns the extension TypeValue
+func (s SupportedEllipticCurves) TypeValue() TypeValue {
+ return SupportedEllipticCurvesTypeValue
+}
+
+// Marshal encodes the extension
+func (s *SupportedEllipticCurves) Marshal() ([]byte, error) {
+ out := make([]byte, supportedGroupsHeaderSize)
+
+ binary.BigEndian.PutUint16(out, uint16(s.TypeValue()))
+ binary.BigEndian.PutUint16(out[2:], uint16(2+(len(s.EllipticCurves)*2)))
+ binary.BigEndian.PutUint16(out[4:], uint16(len(s.EllipticCurves)*2))
+
+ for _, v := range s.EllipticCurves {
+ out = append(out, []byte{0x00, 0x00}...)
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(v))
+ }
+
+ return out, nil
+}
+
+// Unmarshal populates the extension from encoded data
+func (s *SupportedEllipticCurves) Unmarshal(data []byte) error {
+ if len(data) <= supportedGroupsHeaderSize {
+ return errBufferTooSmall
+ } else if TypeValue(binary.BigEndian.Uint16(data)) != s.TypeValue() {
+ return errInvalidExtensionType
+ }
+
+ groupCount := int(binary.BigEndian.Uint16(data[4:]) / 2)
+ if supportedGroupsHeaderSize+(groupCount*2) > len(data) {
+ return errLengthMismatch
+ }
+
+ for i := 0; i < groupCount; i++ {
+ supportedGroupID := elliptic.Curve(binary.BigEndian.Uint16(data[(supportedGroupsHeaderSize + (i * 2)):]))
+ if _, ok := elliptic.Curves()[supportedGroupID]; ok {
+ s.EllipticCurves = append(s.EllipticCurves, supportedGroupID)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_point_formats.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_point_formats.go
new file mode 100644
index 000000000..873d07827
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_point_formats.go
@@ -0,0 +1,62 @@
+package extension
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+)
+
+const (
+ supportedPointFormatsSize = 5
+)
+
+// SupportedPointFormats allows a Client/Server to negotiate
+// the EllipticCurvePointFormats
+//
+// https://tools.ietf.org/html/rfc4492#section-5.1.2
+type SupportedPointFormats struct {
+ PointFormats []elliptic.CurvePointFormat
+}
+
+// TypeValue returns the extension TypeValue
+func (s SupportedPointFormats) TypeValue() TypeValue {
+ return SupportedPointFormatsTypeValue
+}
+
+// Marshal encodes the extension
+func (s *SupportedPointFormats) Marshal() ([]byte, error) {
+ out := make([]byte, supportedPointFormatsSize)
+
+ binary.BigEndian.PutUint16(out, uint16(s.TypeValue()))
+ binary.BigEndian.PutUint16(out[2:], uint16(1+(len(s.PointFormats))))
+ out[4] = byte(len(s.PointFormats))
+
+ for _, v := range s.PointFormats {
+ out = append(out, byte(v))
+ }
+ return out, nil
+}
+
+// Unmarshal populates the extension from encoded data
+func (s *SupportedPointFormats) Unmarshal(data []byte) error {
+ if len(data) <= supportedPointFormatsSize {
+ return errBufferTooSmall
+ } else if TypeValue(binary.BigEndian.Uint16(data)) != s.TypeValue() {
+ return errInvalidExtensionType
+ }
+
+ pointFormatCount := int(binary.BigEndian.Uint16(data[4:]))
+ if supportedGroupsHeaderSize+(pointFormatCount) > len(data) {
+ return errLengthMismatch
+ }
+
+ for i := 0; i < pointFormatCount; i++ {
+ p := elliptic.CurvePointFormat(data[supportedPointFormatsSize+i])
+ switch p {
+ case elliptic.CurvePointFormatUncompressed:
+ s.PointFormats = append(s.PointFormats, p)
+ default:
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_signature_algorithms.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_signature_algorithms.go
new file mode 100644
index 000000000..ee284f6e1
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_signature_algorithms.go
@@ -0,0 +1,70 @@
+package extension
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/pkg/crypto/hash"
+ "github.com/pion/dtls/v2/pkg/crypto/signature"
+ "github.com/pion/dtls/v2/pkg/crypto/signaturehash"
+)
+
+const (
+ supportedSignatureAlgorithmsHeaderSize = 6
+)
+
+// SupportedSignatureAlgorithms allows a Client/Server to
+// negotiate what SignatureHash Algorithms they both support
+//
+// https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
+type SupportedSignatureAlgorithms struct {
+ SignatureHashAlgorithms []signaturehash.Algorithm
+}
+
+// TypeValue returns the extension TypeValue
+func (s SupportedSignatureAlgorithms) TypeValue() TypeValue {
+ return SupportedSignatureAlgorithmsTypeValue
+}
+
+// Marshal encodes the extension
+func (s *SupportedSignatureAlgorithms) Marshal() ([]byte, error) {
+ out := make([]byte, supportedSignatureAlgorithmsHeaderSize)
+
+ binary.BigEndian.PutUint16(out, uint16(s.TypeValue()))
+ binary.BigEndian.PutUint16(out[2:], uint16(2+(len(s.SignatureHashAlgorithms)*2)))
+ binary.BigEndian.PutUint16(out[4:], uint16(len(s.SignatureHashAlgorithms)*2))
+ for _, v := range s.SignatureHashAlgorithms {
+ out = append(out, []byte{0x00, 0x00}...)
+ out[len(out)-2] = byte(v.Hash)
+ out[len(out)-1] = byte(v.Signature)
+ }
+
+ return out, nil
+}
+
+// Unmarshal populates the extension from encoded data
+func (s *SupportedSignatureAlgorithms) Unmarshal(data []byte) error {
+ if len(data) <= supportedSignatureAlgorithmsHeaderSize {
+ return errBufferTooSmall
+ } else if TypeValue(binary.BigEndian.Uint16(data)) != s.TypeValue() {
+ return errInvalidExtensionType
+ }
+
+ algorithmCount := int(binary.BigEndian.Uint16(data[4:]) / 2)
+ if supportedSignatureAlgorithmsHeaderSize+(algorithmCount*2) > len(data) {
+ return errLengthMismatch
+ }
+ for i := 0; i < algorithmCount; i++ {
+ supportedHashAlgorithm := hash.Algorithm(data[supportedSignatureAlgorithmsHeaderSize+(i*2)])
+ supportedSignatureAlgorithm := signature.Algorithm(data[supportedSignatureAlgorithmsHeaderSize+(i*2)+1])
+ if _, ok := hash.Algorithms()[supportedHashAlgorithm]; ok {
+ if _, ok := signature.Algorithms()[supportedSignatureAlgorithm]; ok {
+ s.SignatureHashAlgorithms = append(s.SignatureHashAlgorithms, signaturehash.Algorithm{
+ Hash: supportedHashAlgorithm,
+ Signature: supportedSignatureAlgorithm,
+ })
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_master_secret.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_master_secret.go
new file mode 100644
index 000000000..04ddc956a
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_master_secret.go
@@ -0,0 +1,45 @@
+package extension
+
+import "encoding/binary"
+
+const (
+ useExtendedMasterSecretHeaderSize = 4
+)
+
+// UseExtendedMasterSecret defines a TLS extension that contextually binds the
+// master secret to a log of the full handshake that computes it, thus
+// preventing MITM attacks.
+type UseExtendedMasterSecret struct {
+ Supported bool
+}
+
+// TypeValue returns the extension TypeValue
+func (u UseExtendedMasterSecret) TypeValue() TypeValue {
+ return UseExtendedMasterSecretTypeValue
+}
+
+// Marshal encodes the extension
+func (u *UseExtendedMasterSecret) Marshal() ([]byte, error) {
+ if !u.Supported {
+ return []byte{}, nil
+ }
+
+ out := make([]byte, useExtendedMasterSecretHeaderSize)
+
+ binary.BigEndian.PutUint16(out, uint16(u.TypeValue()))
+ binary.BigEndian.PutUint16(out[2:], uint16(0)) // length
+ return out, nil
+}
+
+// Unmarshal populates the extension from encoded data
+func (u *UseExtendedMasterSecret) Unmarshal(data []byte) error {
+ if len(data) < useExtendedMasterSecretHeaderSize {
+ return errBufferTooSmall
+ } else if TypeValue(binary.BigEndian.Uint16(data)) != u.TypeValue() {
+ return errInvalidExtensionType
+ }
+
+ u.Supported = true
+
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_srtp.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_srtp.go
new file mode 100644
index 000000000..729fa3a98
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_srtp.go
@@ -0,0 +1,59 @@
+package extension
+
+import "encoding/binary"
+
+const (
+ useSRTPHeaderSize = 6
+)
+
+// UseSRTP allows a Client/Server to negotiate what SRTPProtectionProfiles
+// they both support
+//
+// https://tools.ietf.org/html/rfc5764#section-4.1.1
+type UseSRTP struct {
+ ProtectionProfiles []SRTPProtectionProfile
+}
+
+// TypeValue returns the extension TypeValue
+func (u UseSRTP) TypeValue() TypeValue {
+ return UseSRTPTypeValue
+}
+
+// Marshal encodes the extension
+func (u *UseSRTP) Marshal() ([]byte, error) {
+ out := make([]byte, useSRTPHeaderSize)
+
+ binary.BigEndian.PutUint16(out, uint16(u.TypeValue()))
+ binary.BigEndian.PutUint16(out[2:], uint16(2+(len(u.ProtectionProfiles)*2)+ /* MKI Length */ 1))
+ binary.BigEndian.PutUint16(out[4:], uint16(len(u.ProtectionProfiles)*2))
+
+ for _, v := range u.ProtectionProfiles {
+ out = append(out, []byte{0x00, 0x00}...)
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(v))
+ }
+
+ out = append(out, 0x00) /* MKI Length */
+ return out, nil
+}
+
+// Unmarshal populates the extension from encoded data
+func (u *UseSRTP) Unmarshal(data []byte) error {
+ if len(data) <= useSRTPHeaderSize {
+ return errBufferTooSmall
+ } else if TypeValue(binary.BigEndian.Uint16(data)) != u.TypeValue() {
+ return errInvalidExtensionType
+ }
+
+ profileCount := int(binary.BigEndian.Uint16(data[4:]) / 2)
+ if supportedGroupsHeaderSize+(profileCount*2) > len(data) {
+ return errLengthMismatch
+ }
+
+ for i := 0; i < profileCount; i++ {
+ supportedProfile := SRTPProtectionProfile(binary.BigEndian.Uint16(data[(useSRTPHeaderSize + (i * 2)):]))
+ if _, ok := srtpProtectionProfiles()[supportedProfile]; ok {
+ u.ProtectionProfiles = append(u.ProtectionProfiles, supportedProfile)
+ }
+ }
+ return nil
+}
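
Marshal writes the profile list followed by an empty MKI, and Unmarshal keeps only profiles present in srtpProtectionProfiles(), silently dropping anything unknown. A round-trip sketch (illustrative only):

    package main

    import (
        "fmt"

        "github.com/pion/dtls/v2/pkg/protocol/extension"
    )

    func main() {
        in := &extension.UseSRTP{
            ProtectionProfiles: []extension.SRTPProtectionProfile{
                extension.SRTP_AEAD_AES_128_GCM,
                extension.SRTP_AES128_CM_HMAC_SHA1_80,
            },
        }

        buf, err := in.Marshal()
        if err != nil {
            panic(err)
        }

        out := &extension.UseSRTP{}
        if err := out.Unmarshal(buf); err != nil {
            panic(err)
        }
        fmt.Println(out.ProtectionProfiles) // [7 1]
    }
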
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/cipher_suite.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/cipher_suite.go
new file mode 100644
index 000000000..e8fbdeae7
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/cipher_suite.go
@@ -0,0 +1,29 @@
+package handshake
+
+import "encoding/binary"
+
+func decodeCipherSuiteIDs(buf []byte) ([]uint16, error) {
+ if len(buf) < 2 {
+ return nil, errBufferTooSmall
+ }
+ cipherSuitesCount := int(binary.BigEndian.Uint16(buf[0:])) / 2
+ rtrn := make([]uint16, cipherSuitesCount)
+ for i := 0; i < cipherSuitesCount; i++ {
+ if len(buf) < (i*2 + 4) {
+ return nil, errBufferTooSmall
+ }
+
+ rtrn[i] = binary.BigEndian.Uint16(buf[(i*2)+2:])
+ }
+ return rtrn, nil
+}
+
+func encodeCipherSuiteIDs(cipherSuiteIDs []uint16) []byte {
+ out := []byte{0x00, 0x00}
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(len(cipherSuiteIDs)*2))
+ for _, id := range cipherSuiteIDs {
+ out = append(out, []byte{0x00, 0x00}...)
+ binary.BigEndian.PutUint16(out[len(out)-2:], id)
+ }
+ return out
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/errors.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/errors.go
new file mode 100644
index 000000000..ac77c0434
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/errors.go
@@ -0,0 +1,25 @@
+package handshake
+
+import (
+ "errors"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+)
+
+// Typed errors
+var (
+ errUnableToMarshalFragmented = &protocol.InternalError{Err: errors.New("unable to marshal fragmented handshakes")} //nolint:goerr113
+ errHandshakeMessageUnset = &protocol.InternalError{Err: errors.New("handshake message unset, unable to marshal")} //nolint:goerr113
+ errBufferTooSmall = &protocol.TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113
+ errLengthMismatch = &protocol.InternalError{Err: errors.New("data length and declared length do not match")} //nolint:goerr113
+ errInvalidClientKeyExchange = &protocol.FatalError{Err: errors.New("unable to determine if ClientKeyExchange is a public key or PSK Identity")} //nolint:goerr113
+ errInvalidHashAlgorithm = &protocol.FatalError{Err: errors.New("invalid hash algorithm")} //nolint:goerr113
+ errInvalidSignatureAlgorithm = &protocol.FatalError{Err: errors.New("invalid signature algorithm")} //nolint:goerr113
+ errCookieTooLong = &protocol.FatalError{Err: errors.New("cookie must not be longer than 255 bytes")} //nolint:goerr113
+ errInvalidEllipticCurveType = &protocol.FatalError{Err: errors.New("invalid or unknown elliptic curve type")} //nolint:goerr113
+ errInvalidNamedCurve = &protocol.FatalError{Err: errors.New("invalid named curve")} //nolint:goerr113
+ errCipherSuiteUnset = &protocol.FatalError{Err: errors.New("server hello can not be created without a cipher suite")} //nolint:goerr113
+ errCompressionMethodUnset = &protocol.FatalError{Err: errors.New("server hello can not be created without a compression method")} //nolint:goerr113
+ errInvalidCompressionMethod = &protocol.FatalError{Err: errors.New("invalid or unknown compression method")} //nolint:goerr113
+ errNotImplemented = &protocol.InternalError{Err: errors.New("feature has not been implemented yet")} //nolint:goerr113
+)
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/handshake.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/handshake.go
new file mode 100644
index 000000000..55f1a7c1c
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/handshake.go
@@ -0,0 +1,147 @@
+// Package handshake provides the DTLS wire protocol for handshakes
+package handshake
+
+import (
+ "github.com/pion/dtls/v2/internal/ciphersuite/types"
+ "github.com/pion/dtls/v2/internal/util"
+ "github.com/pion/dtls/v2/pkg/protocol"
+)
+
+// Type is the unique identifier for each handshake message
+// https://tools.ietf.org/html/rfc5246#section-7.4
+type Type uint8
+
+// Types of DTLS Handshake messages we know about
+const (
+ TypeHelloRequest Type = 0
+ TypeClientHello Type = 1
+ TypeServerHello Type = 2
+ TypeHelloVerifyRequest Type = 3
+ TypeCertificate Type = 11
+ TypeServerKeyExchange Type = 12
+ TypeCertificateRequest Type = 13
+ TypeServerHelloDone Type = 14
+ TypeCertificateVerify Type = 15
+ TypeClientKeyExchange Type = 16
+ TypeFinished Type = 20
+)
+
+// String returns the string representation of this type
+func (t Type) String() string {
+ switch t {
+ case TypeHelloRequest:
+ return "HelloRequest"
+ case TypeClientHello:
+ return "ClientHello"
+ case TypeServerHello:
+ return "ServerHello"
+ case TypeHelloVerifyRequest:
+ return "HelloVerifyRequest"
+ case TypeCertificate:
+ return "TypeCertificate"
+ case TypeServerKeyExchange:
+ return "ServerKeyExchange"
+ case TypeCertificateRequest:
+ return "CertificateRequest"
+ case TypeServerHelloDone:
+ return "ServerHelloDone"
+ case TypeCertificateVerify:
+ return "CertificateVerify"
+ case TypeClientKeyExchange:
+ return "ClientKeyExchange"
+ case TypeFinished:
+ return "Finished"
+ }
+ return ""
+}
+
+// Message is the body of a Handshake datagram
+type Message interface {
+ Marshal() ([]byte, error)
+ Unmarshal(data []byte) error
+ Type() Type
+}
+
+// Handshake protocol is responsible for selecting a cipher spec and
+// generating a master secret, which together comprise the primary
+// cryptographic parameters associated with a secure session. The
+// handshake protocol can also optionally authenticate parties who have
+// certificates signed by a trusted certificate authority.
+// https://tools.ietf.org/html/rfc5246#section-7.3
+type Handshake struct {
+ Header Header
+ Message Message
+
+ KeyExchangeAlgorithm types.KeyExchangeAlgorithm
+}
+
+// ContentType returns what kind of content this message is carrying
+func (h Handshake) ContentType() protocol.ContentType {
+ return protocol.ContentTypeHandshake
+}
+
+// Marshal encodes a handshake into a binary message
+func (h *Handshake) Marshal() ([]byte, error) {
+ if h.Message == nil {
+ return nil, errHandshakeMessageUnset
+ } else if h.Header.FragmentOffset != 0 {
+ return nil, errUnableToMarshalFragmented
+ }
+
+ msg, err := h.Message.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ h.Header.Length = uint32(len(msg))
+ h.Header.FragmentLength = h.Header.Length
+ h.Header.Type = h.Message.Type()
+ header, err := h.Header.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ return append(header, msg...), nil
+}
+
+// Unmarshal decodes a handshake from a binary message
+func (h *Handshake) Unmarshal(data []byte) error {
+ if err := h.Header.Unmarshal(data); err != nil {
+ return err
+ }
+
+ reportedLen := util.BigEndianUint24(data[1:])
+ if uint32(len(data)-HeaderLength) != reportedLen {
+ return errLengthMismatch
+ } else if reportedLen != h.Header.FragmentLength {
+ return errLengthMismatch
+ }
+
+ switch Type(data[0]) {
+ case TypeHelloRequest:
+ return errNotImplemented
+ case TypeClientHello:
+ h.Message = &MessageClientHello{}
+ case TypeHelloVerifyRequest:
+ h.Message = &MessageHelloVerifyRequest{}
+ case TypeServerHello:
+ h.Message = &MessageServerHello{}
+ case TypeCertificate:
+ h.Message = &MessageCertificate{}
+ case TypeServerKeyExchange:
+ h.Message = &MessageServerKeyExchange{KeyExchangeAlgorithm: h.KeyExchangeAlgorithm}
+ case TypeCertificateRequest:
+ h.Message = &MessageCertificateRequest{}
+ case TypeServerHelloDone:
+ h.Message = &MessageServerHelloDone{}
+ case TypeClientKeyExchange:
+ h.Message = &MessageClientKeyExchange{KeyExchangeAlgorithm: h.KeyExchangeAlgorithm}
+ case TypeFinished:
+ h.Message = &MessageFinished{}
+ case TypeCertificateVerify:
+ h.Message = &MessageCertificateVerify{}
+ default:
+ return errNotImplemented
+ }
+ return h.Message.Unmarshal(data[HeaderLength:])
+}
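
Handshake.Marshal fills in the header's Type, Length, and FragmentLength from the encoded body and prepends the 12-byte header; Unmarshal dispatches on the type byte. A minimal round-trip sketch with the body-less ServerHelloDone message (illustrative only):

    package main

    import (
        "fmt"

        "github.com/pion/dtls/v2/pkg/protocol/handshake"
    )

    func main() {
        h := &handshake.Handshake{
            Header:  handshake.Header{MessageSequence: 3},
            Message: &handshake.MessageServerHelloDone{},
        }

        buf, err := h.Marshal()
        if err != nil {
            panic(err)
        }
        fmt.Println(len(buf)) // 12: header only, the ServerHelloDone body is empty

        parsed := &handshake.Handshake{}
        if err := parsed.Unmarshal(buf); err != nil {
            panic(err)
        }
        fmt.Println(parsed.Header.Type, parsed.Header.MessageSequence) // ServerHelloDone 3
    }
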
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/header.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/header.go
new file mode 100644
index 000000000..cb6a22489
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/header.go
@@ -0,0 +1,50 @@
+package handshake
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/internal/util"
+)
+
+// HeaderLength is the length of the DTLS Handshake header; unlike TLS,
+// it also carries message sequence and fragment information.
+const HeaderLength = 12
+
+// Header is the static first 12 bytes of each RecordLayer
+// of type Handshake. These fields allow us to support message loss, reordering, and
+// message fragmentation.
+//
+// https://tools.ietf.org/html/rfc6347#section-4.2.2
+type Header struct {
+ Type Type
+ Length uint32 // uint24 in spec
+ MessageSequence uint16
+ FragmentOffset uint32 // uint24 in spec
+ FragmentLength uint32 // uint24 in spec
+}
+
+// Marshal encodes the Header
+func (h *Header) Marshal() ([]byte, error) {
+ out := make([]byte, HeaderLength)
+
+ out[0] = byte(h.Type)
+ util.PutBigEndianUint24(out[1:], h.Length)
+ binary.BigEndian.PutUint16(out[4:], h.MessageSequence)
+ util.PutBigEndianUint24(out[6:], h.FragmentOffset)
+ util.PutBigEndianUint24(out[9:], h.FragmentLength)
+ return out, nil
+}
+
+// Unmarshal populates the header from encoded data
+func (h *Header) Unmarshal(data []byte) error {
+ if len(data) < HeaderLength {
+ return errBufferTooSmall
+ }
+
+ h.Type = Type(data[0])
+ h.Length = util.BigEndianUint24(data[1:])
+ h.MessageSequence = binary.BigEndian.Uint16(data[4:])
+ h.FragmentOffset = util.BigEndianUint24(data[6:])
+ h.FragmentLength = util.BigEndianUint24(data[9:])
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate.go
new file mode 100644
index 000000000..05fb74656
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate.go
@@ -0,0 +1,66 @@
+package handshake
+
+import (
+ "github.com/pion/dtls/v2/internal/util"
+)
+
+// MessageCertificate is a DTLS Handshake Message.
+// It can contain either a Client or Server Certificate.
+//
+// https://tools.ietf.org/html/rfc5246#section-7.4.2
+type MessageCertificate struct {
+ Certificate [][]byte
+}
+
+// Type returns the Handshake Type
+func (m MessageCertificate) Type() Type {
+ return TypeCertificate
+}
+
+const (
+ handshakeMessageCertificateLengthFieldSize = 3
+)
+
+// Marshal encodes the Handshake
+func (m *MessageCertificate) Marshal() ([]byte, error) {
+ out := make([]byte, handshakeMessageCertificateLengthFieldSize)
+
+ for _, r := range m.Certificate {
+ // Certificate Length
+ out = append(out, make([]byte, handshakeMessageCertificateLengthFieldSize)...)
+ util.PutBigEndianUint24(out[len(out)-handshakeMessageCertificateLengthFieldSize:], uint32(len(r)))
+
+ // Certificate body
+ out = append(out, append([]byte{}, r...)...)
+ }
+
+ // Total Payload Size
+ util.PutBigEndianUint24(out[0:], uint32(len(out[handshakeMessageCertificateLengthFieldSize:])))
+ return out, nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageCertificate) Unmarshal(data []byte) error {
+ if len(data) < handshakeMessageCertificateLengthFieldSize {
+ return errBufferTooSmall
+ }
+
+ if certificateBodyLen := int(util.BigEndianUint24(data)); certificateBodyLen+handshakeMessageCertificateLengthFieldSize != len(data) {
+ return errLengthMismatch
+ }
+
+ offset := handshakeMessageCertificateLengthFieldSize
+ for offset < len(data) {
+ certificateLen := int(util.BigEndianUint24(data[offset:]))
+ offset += handshakeMessageCertificateLengthFieldSize
+
+ if offset+certificateLen > len(data) {
+ return errLengthMismatch
+ }
+
+ m.Certificate = append(m.Certificate, append([]byte{}, data[offset:offset+certificateLen]...))
+ offset += certificateLen
+ }
+
+ return nil
+}
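
The certificate list is encoded as a uint24 total length followed by uint24-length-prefixed certificate blobs. A round-trip sketch with placeholder bytes standing in for real DER certificates (hypothetical data, illustrative only):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/pion/dtls/v2/pkg/protocol/handshake"
    )

    func main() {
        in := &handshake.MessageCertificate{
            Certificate: [][]byte{
                {0x30, 0x82, 0x01, 0x0a}, // placeholder, not a real DER certificate
                {0x30, 0x82, 0x02, 0x0b},
            },
        }

        buf, err := in.Marshal()
        if err != nil {
            panic(err)
        }

        out := &handshake.MessageCertificate{}
        if err := out.Unmarshal(buf); err != nil {
            panic(err)
        }
        fmt.Println(len(out.Certificate), bytes.Equal(out.Certificate[0], in.Certificate[0])) // 2 true
    }
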
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_request.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_request.go
new file mode 100644
index 000000000..86d687f09
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_request.go
@@ -0,0 +1,141 @@
+package handshake
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+ "github.com/pion/dtls/v2/pkg/crypto/hash"
+ "github.com/pion/dtls/v2/pkg/crypto/signature"
+ "github.com/pion/dtls/v2/pkg/crypto/signaturehash"
+)
+
+/*
+MessageCertificateRequest is sent so that a non-anonymous server can optionally
+request a certificate from the client, if appropriate for the selected cipher
+suite. This message, if sent, will immediately follow the ServerKeyExchange
+message (if it is sent; otherwise, this message follows the
+server's Certificate message).
+
+https://tools.ietf.org/html/rfc5246#section-7.4.4
+*/
+type MessageCertificateRequest struct {
+ CertificateTypes []clientcertificate.Type
+ SignatureHashAlgorithms []signaturehash.Algorithm
+ CertificateAuthoritiesNames [][]byte
+}
+
+const (
+ messageCertificateRequestMinLength = 5
+)
+
+// Type returns the Handshake Type
+func (m MessageCertificateRequest) Type() Type {
+ return TypeCertificateRequest
+}
+
+// Marshal encodes the Handshake
+func (m *MessageCertificateRequest) Marshal() ([]byte, error) {
+ out := []byte{byte(len(m.CertificateTypes))}
+ for _, v := range m.CertificateTypes {
+ out = append(out, byte(v))
+ }
+
+ out = append(out, []byte{0x00, 0x00}...)
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(len(m.SignatureHashAlgorithms)*2))
+ for _, v := range m.SignatureHashAlgorithms {
+ out = append(out, byte(v.Hash))
+ out = append(out, byte(v.Signature))
+ }
+
+ // Distinguished Names
+ casLength := 0
+ for _, ca := range m.CertificateAuthoritiesNames {
+ casLength += len(ca) + 2
+ }
+ out = append(out, []byte{0x00, 0x00}...)
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(casLength))
+ if casLength > 0 {
+ for _, ca := range m.CertificateAuthoritiesNames {
+ out = append(out, []byte{0x00, 0x00}...)
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(len(ca)))
+ out = append(out, ca...)
+ }
+ }
+ return out, nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageCertificateRequest) Unmarshal(data []byte) error {
+ if len(data) < messageCertificateRequestMinLength {
+ return errBufferTooSmall
+ }
+
+ offset := 0
+ certificateTypesLength := int(data[0])
+ offset++
+
+ if (offset + certificateTypesLength) > len(data) {
+ return errBufferTooSmall
+ }
+
+ for i := 0; i < certificateTypesLength; i++ {
+ certType := clientcertificate.Type(data[offset+i])
+ if _, ok := clientcertificate.Types()[certType]; ok {
+ m.CertificateTypes = append(m.CertificateTypes, certType)
+ }
+ }
+ offset += certificateTypesLength
+ if len(data) < offset+2 {
+ return errBufferTooSmall
+ }
+ signatureHashAlgorithmsLength := int(binary.BigEndian.Uint16(data[offset:]))
+ offset += 2
+
+ if (offset + signatureHashAlgorithmsLength) > len(data) {
+ return errBufferTooSmall
+ }
+
+ for i := 0; i < signatureHashAlgorithmsLength; i += 2 {
+ if len(data) < (offset + i + 2) {
+ return errBufferTooSmall
+ }
+ h := hash.Algorithm(data[offset+i])
+ s := signature.Algorithm(data[offset+i+1])
+
+ if _, ok := hash.Algorithms()[h]; !ok {
+ continue
+ } else if _, ok := signature.Algorithms()[s]; !ok {
+ continue
+ }
+ m.SignatureHashAlgorithms = append(m.SignatureHashAlgorithms, signaturehash.Algorithm{Signature: s, Hash: h})
+ }
+
+ offset += signatureHashAlgorithmsLength
+ if len(data) < offset+2 {
+ return errBufferTooSmall
+ }
+ casLength := int(binary.BigEndian.Uint16(data[offset:]))
+ offset += 2
+ if (offset + casLength) > len(data) {
+ return errBufferTooSmall
+ }
+ cas := make([]byte, casLength)
+ copy(cas, data[offset:offset+casLength])
+ m.CertificateAuthoritiesNames = nil
+ for len(cas) > 0 {
+ if len(cas) < 2 {
+ return errBufferTooSmall
+ }
+ caLen := binary.BigEndian.Uint16(cas)
+ cas = cas[2:]
+
+ if len(cas) < int(caLen) {
+ return errBufferTooSmall
+ }
+
+ m.CertificateAuthoritiesNames = append(m.CertificateAuthoritiesNames, cas[:caLen])
+ cas = cas[caLen:]
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_verify.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_verify.go
new file mode 100644
index 000000000..fb5e4639d
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_verify.go
@@ -0,0 +1,61 @@
+package handshake
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/pkg/crypto/hash"
+ "github.com/pion/dtls/v2/pkg/crypto/signature"
+)
+
+// MessageCertificateVerify provides explicit verification of a
+// client certificate.
+//
+// https://tools.ietf.org/html/rfc5246#section-7.4.8
+type MessageCertificateVerify struct {
+ HashAlgorithm hash.Algorithm
+ SignatureAlgorithm signature.Algorithm
+ Signature []byte
+}
+
+const handshakeMessageCertificateVerifyMinLength = 4
+
+// Type returns the Handshake Type
+func (m MessageCertificateVerify) Type() Type {
+ return TypeCertificateVerify
+}
+
+// Marshal encodes the Handshake
+func (m *MessageCertificateVerify) Marshal() ([]byte, error) {
+ out := make([]byte, 1+1+2+len(m.Signature))
+
+ out[0] = byte(m.HashAlgorithm)
+ out[1] = byte(m.SignatureAlgorithm)
+ binary.BigEndian.PutUint16(out[2:], uint16(len(m.Signature)))
+ copy(out[4:], m.Signature)
+ return out, nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageCertificateVerify) Unmarshal(data []byte) error {
+ if len(data) < handshakeMessageCertificateVerifyMinLength {
+ return errBufferTooSmall
+ }
+
+ m.HashAlgorithm = hash.Algorithm(data[0])
+ if _, ok := hash.Algorithms()[m.HashAlgorithm]; !ok {
+ return errInvalidHashAlgorithm
+ }
+
+ m.SignatureAlgorithm = signature.Algorithm(data[1])
+ if _, ok := signature.Algorithms()[m.SignatureAlgorithm]; !ok {
+ return errInvalidSignatureAlgorithm
+ }
+
+ signatureLength := int(binary.BigEndian.Uint16(data[2:]))
+ if (signatureLength + 4) != len(data) {
+ return errBufferTooSmall
+ }
+
+ m.Signature = append([]byte{}, data[4:]...)
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_hello.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_hello.go
new file mode 100644
index 000000000..1deca38aa
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_hello.go
@@ -0,0 +1,138 @@
+package handshake
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/extension"
+)
+
+/*
+MessageClientHello is the message a client is required to send first when it
+connects to a server. The client can also send a
+client hello in response to a hello request or on its own
+initiative in order to renegotiate the security parameters in an
+existing connection.
+*/
+type MessageClientHello struct {
+ Version protocol.Version
+ Random Random
+ Cookie []byte
+
+ SessionID []byte
+
+ CipherSuiteIDs []uint16
+ CompressionMethods []*protocol.CompressionMethod
+ Extensions []extension.Extension
+}
+
+const handshakeMessageClientHelloVariableWidthStart = 34
+
+// Type returns the Handshake Type
+func (m MessageClientHello) Type() Type {
+ return TypeClientHello
+}
+
+// Marshal encodes the Handshake
+func (m *MessageClientHello) Marshal() ([]byte, error) {
+ if len(m.Cookie) > 255 {
+ return nil, errCookieTooLong
+ }
+
+ out := make([]byte, handshakeMessageClientHelloVariableWidthStart)
+ out[0] = m.Version.Major
+ out[1] = m.Version.Minor
+
+ rand := m.Random.MarshalFixed()
+ copy(out[2:], rand[:])
+
+ out = append(out, byte(len(m.SessionID)))
+ out = append(out, m.SessionID...)
+
+ out = append(out, byte(len(m.Cookie)))
+ out = append(out, m.Cookie...)
+ out = append(out, encodeCipherSuiteIDs(m.CipherSuiteIDs)...)
+ out = append(out, protocol.EncodeCompressionMethods(m.CompressionMethods)...)
+
+ extensions, err := extension.Marshal(m.Extensions)
+ if err != nil {
+ return nil, err
+ }
+
+ return append(out, extensions...), nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageClientHello) Unmarshal(data []byte) error {
+ if len(data) < 2+RandomLength {
+ return errBufferTooSmall
+ }
+
+ m.Version.Major = data[0]
+ m.Version.Minor = data[1]
+
+ var random [RandomLength]byte
+ copy(random[:], data[2:])
+ m.Random.UnmarshalFixed(random)
+
+ // rest of packet has variable width sections
+ currOffset := handshakeMessageClientHelloVariableWidthStart
+
+ currOffset++
+ if len(data) <= currOffset {
+ return errBufferTooSmall
+ }
+ n := int(data[currOffset-1])
+ if len(data) <= currOffset+n {
+ return errBufferTooSmall
+ }
+ m.SessionID = append([]byte{}, data[currOffset:currOffset+n]...)
+ currOffset += len(m.SessionID)
+
+ currOffset++
+ if len(data) <= currOffset {
+ return errBufferTooSmall
+ }
+ n = int(data[currOffset-1])
+ if len(data) <= currOffset+n {
+ return errBufferTooSmall
+ }
+ m.Cookie = append([]byte{}, data[currOffset:currOffset+n]...)
+ currOffset += len(m.Cookie)
+
+ // Cipher Suites
+ if len(data) < currOffset {
+ return errBufferTooSmall
+ }
+ cipherSuiteIDs, err := decodeCipherSuiteIDs(data[currOffset:])
+ if err != nil {
+ return err
+ }
+ m.CipherSuiteIDs = cipherSuiteIDs
+ if len(data) < currOffset+2 {
+ return errBufferTooSmall
+ }
+ currOffset += int(binary.BigEndian.Uint16(data[currOffset:])) + 2
+
+ // Compression Methods
+ if len(data) < currOffset {
+ return errBufferTooSmall
+ }
+ compressionMethods, err := protocol.DecodeCompressionMethods(data[currOffset:])
+ if err != nil {
+ return err
+ }
+ m.CompressionMethods = compressionMethods
+ if len(data) < currOffset {
+ return errBufferTooSmall
+ }
+ currOffset += int(data[currOffset]) + 1
+
+ // Extensions
+ extensions, err := extension.Unmarshal(data[currOffset:])
+ if err != nil {
+ return err
+ }
+ m.Extensions = extensions
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_key_exchange.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_key_exchange.go
new file mode 100644
index 000000000..34c5c48ef
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_key_exchange.go
@@ -0,0 +1,78 @@
+package handshake
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/internal/ciphersuite/types"
+)
+
+// MessageClientKeyExchange is a DTLS Handshake Message.
+// With this message, the premaster secret is set, either by direct
+// transmission of the RSA-encrypted secret or by the transmission of
+// Diffie-Hellman parameters that will allow each side to agree upon
+// the same premaster secret.
+//
+// https://tools.ietf.org/html/rfc5246#section-7.4.7
+type MessageClientKeyExchange struct {
+ IdentityHint []byte
+ PublicKey []byte
+
+ // for unmarshaling
+ KeyExchangeAlgorithm types.KeyExchangeAlgorithm
+}
+
+// Type returns the Handshake Type
+func (m MessageClientKeyExchange) Type() Type {
+ return TypeClientKeyExchange
+}
+
+// Marshal encodes the Handshake
+func (m *MessageClientKeyExchange) Marshal() (out []byte, err error) {
+ if m.IdentityHint == nil && m.PublicKey == nil {
+ return nil, errInvalidClientKeyExchange
+ }
+
+ if m.IdentityHint != nil {
+ out = append([]byte{0x00, 0x00}, m.IdentityHint...)
+ binary.BigEndian.PutUint16(out, uint16(len(out)-2))
+ }
+
+ if m.PublicKey != nil {
+ out = append(out, byte(len(m.PublicKey)))
+ out = append(out, m.PublicKey...)
+ }
+
+ return out, nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageClientKeyExchange) Unmarshal(data []byte) error {
+ switch {
+ case len(data) < 2:
+ return errBufferTooSmall
+ case m.KeyExchangeAlgorithm == types.KeyExchangeAlgorithmNone:
+ return errCipherSuiteUnset
+ }
+
+ offset := 0
+ if m.KeyExchangeAlgorithm.Has(types.KeyExchangeAlgorithmPsk) {
+ pskLength := int(binary.BigEndian.Uint16(data))
+ if pskLength > len(data)-2 {
+ return errBufferTooSmall
+ }
+
+ m.IdentityHint = append([]byte{}, data[2:pskLength+2]...)
+ offset += pskLength + 2
+ }
+
+ if m.KeyExchangeAlgorithm.Has(types.KeyExchangeAlgorithmEcdhe) {
+ publicKeyLength := int(data[offset])
+ if publicKeyLength > len(data)-1-offset {
+ return errBufferTooSmall
+ }
+
+ m.PublicKey = append([]byte{}, data[offset+1:]...)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_finished.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_finished.go
new file mode 100644
index 000000000..c65d42abb
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_finished.go
@@ -0,0 +1,27 @@
+package handshake
+
+// MessageFinished is a DTLS Handshake Message.
+// This message is the first one protected with the just
+// negotiated algorithms, keys, and secrets. Recipients of Finished
+// messages MUST verify that the contents are correct.
+//
+// https://tools.ietf.org/html/rfc5246#section-7.4.9
+type MessageFinished struct {
+ VerifyData []byte
+}
+
+// Type returns the Handshake Type
+func (m MessageFinished) Type() Type {
+ return TypeFinished
+}
+
+// Marshal encodes the Handshake
+func (m *MessageFinished) Marshal() ([]byte, error) {
+ return append([]byte{}, m.VerifyData...), nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageFinished) Unmarshal(data []byte) error {
+ m.VerifyData = append([]byte{}, data...)
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_hello_verify_request.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_hello_verify_request.go
new file mode 100644
index 000000000..20c63773d
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_hello_verify_request.go
@@ -0,0 +1,62 @@
+package handshake
+
+import (
+ "github.com/pion/dtls/v2/pkg/protocol"
+)
+
+// MessageHelloVerifyRequest is as follows:
+//
+// struct {
+// ProtocolVersion server_version;
+// opaque cookie<0..2^8-1>;
+// } HelloVerifyRequest;
+//
+// The HelloVerifyRequest message type is hello_verify_request(3).
+//
+// When the client sends its ClientHello message to the server, the server
+// MAY respond with a HelloVerifyRequest message. This message contains
+// a stateless cookie generated using the technique of [PHOTURIS]. The
+// client MUST retransmit the ClientHello with the cookie added.
+//
+// https://tools.ietf.org/html/rfc6347#section-4.2.1
+type MessageHelloVerifyRequest struct {
+ Version protocol.Version
+ Cookie []byte
+}
+
+// Type returns the Handshake Type
+func (m MessageHelloVerifyRequest) Type() Type {
+ return TypeHelloVerifyRequest
+}
+
+// Marshal encodes the Handshake
+func (m *MessageHelloVerifyRequest) Marshal() ([]byte, error) {
+ if len(m.Cookie) > 255 {
+ return nil, errCookieTooLong
+ }
+
+ out := make([]byte, 3+len(m.Cookie))
+ out[0] = m.Version.Major
+ out[1] = m.Version.Minor
+ out[2] = byte(len(m.Cookie))
+ copy(out[3:], m.Cookie)
+
+ return out, nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageHelloVerifyRequest) Unmarshal(data []byte) error {
+ if len(data) < 3 {
+ return errBufferTooSmall
+ }
+ m.Version.Major = data[0]
+ m.Version.Minor = data[1]
+ cookieLength := int(data[2])
+ if len(data) < cookieLength+3 {
+ return errBufferTooSmall
+ }
+ m.Cookie = make([]byte, cookieLength)
+
+ copy(m.Cookie, data[3:3+cookieLength])
+ return nil
+}
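
The HelloVerifyRequest body is just the version plus a length-prefixed cookie of at most 255 bytes. A round-trip sketch with an arbitrary cookie value (illustrative only; the DTLS 1.2 wire version bytes 0xfe/0xfd are assumed here):

    package main

    import (
        "fmt"

        "github.com/pion/dtls/v2/pkg/protocol"
        "github.com/pion/dtls/v2/pkg/protocol/handshake"
    )

    func main() {
        in := &handshake.MessageHelloVerifyRequest{
            Version: protocol.Version{Major: 0xfe, Minor: 0xfd}, // DTLS 1.2 on the wire (assumed)
            Cookie:  []byte{0xde, 0xad, 0xbe, 0xef},             // arbitrary example cookie
        }

        buf, err := in.Marshal()
        if err != nil {
            panic(err)
        }

        out := &handshake.MessageHelloVerifyRequest{}
        if err := out.Unmarshal(buf); err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", out.Cookie) // deadbeef
    }
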
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello.go
new file mode 100644
index 000000000..b4157e25d
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello.go
@@ -0,0 +1,119 @@
+package handshake
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/extension"
+)
+
+// MessageServerHello is sent in response to a ClientHello
+// message when it was able to find an acceptable set of algorithms.
+// If it cannot find such a match, it will respond with a handshake
+// failure alert.
+//
+// https://tools.ietf.org/html/rfc5246#section-7.4.1.3
+type MessageServerHello struct {
+ Version protocol.Version
+ Random Random
+
+ SessionID []byte
+
+ CipherSuiteID *uint16
+ CompressionMethod *protocol.CompressionMethod
+ Extensions []extension.Extension
+}
+
+const messageServerHelloVariableWidthStart = 2 + RandomLength
+
+// Type returns the Handshake Type
+func (m MessageServerHello) Type() Type {
+ return TypeServerHello
+}
+
+// Marshal encodes the Handshake
+func (m *MessageServerHello) Marshal() ([]byte, error) {
+ if m.CipherSuiteID == nil {
+ return nil, errCipherSuiteUnset
+ } else if m.CompressionMethod == nil {
+ return nil, errCompressionMethodUnset
+ }
+
+ out := make([]byte, messageServerHelloVariableWidthStart)
+ out[0] = m.Version.Major
+ out[1] = m.Version.Minor
+
+ rand := m.Random.MarshalFixed()
+ copy(out[2:], rand[:])
+
+ out = append(out, byte(len(m.SessionID)))
+ out = append(out, m.SessionID...)
+
+ out = append(out, []byte{0x00, 0x00}...)
+ binary.BigEndian.PutUint16(out[len(out)-2:], *m.CipherSuiteID)
+
+ out = append(out, byte(m.CompressionMethod.ID))
+
+ extensions, err := extension.Marshal(m.Extensions)
+ if err != nil {
+ return nil, err
+ }
+
+ return append(out, extensions...), nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageServerHello) Unmarshal(data []byte) error {
+ if len(data) < 2+RandomLength {
+ return errBufferTooSmall
+ }
+
+ m.Version.Major = data[0]
+ m.Version.Minor = data[1]
+
+ var random [RandomLength]byte
+ copy(random[:], data[2:])
+ m.Random.UnmarshalFixed(random)
+
+ currOffset := messageServerHelloVariableWidthStart
+ currOffset++
+ if len(data) <= currOffset {
+ return errBufferTooSmall
+ }
+
+ n := int(data[currOffset-1])
+ if len(data) <= currOffset+n {
+ return errBufferTooSmall
+ }
+ m.SessionID = append([]byte{}, data[currOffset:currOffset+n]...)
+ currOffset += len(m.SessionID)
+
+ if len(data) < currOffset+2 {
+ return errBufferTooSmall
+ }
+ m.CipherSuiteID = new(uint16)
+ *m.CipherSuiteID = binary.BigEndian.Uint16(data[currOffset:])
+ currOffset += 2
+
+ if len(data) <= currOffset {
+ return errBufferTooSmall
+ }
+ if compressionMethod, ok := protocol.CompressionMethods()[protocol.CompressionMethodID(data[currOffset])]; ok {
+ m.CompressionMethod = compressionMethod
+ currOffset++
+ } else {
+ return errInvalidCompressionMethod
+ }
+
+ if len(data) <= currOffset {
+ m.Extensions = []extension.Extension{}
+ return nil
+ }
+
+ extensions, err := extension.Unmarshal(data[currOffset:])
+ if err != nil {
+ return err
+ }
+ m.Extensions = extensions
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello_done.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello_done.go
new file mode 100644
index 000000000..0f65b198e
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello_done.go
@@ -0,0 +1,21 @@
+package handshake
+
+// MessageServerHelloDone is the final non-encrypted message from the server.
+// It communicates that the server has sent all of its handshake messages and
+// that the next message should be MessageFinished.
+type MessageServerHelloDone struct{}
+
+// Type returns the Handshake Type
+func (m MessageServerHelloDone) Type() Type {
+ return TypeServerHelloDone
+}
+
+// Marshal encodes the Handshake
+func (m *MessageServerHelloDone) Marshal() ([]byte, error) {
+ return []byte{}, nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageServerHelloDone) Unmarshal(data []byte) error {
+ return nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_key_exchange.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_key_exchange.go
new file mode 100644
index 000000000..e84734a73
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_key_exchange.go
@@ -0,0 +1,145 @@
+package handshake
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/internal/ciphersuite/types"
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/crypto/hash"
+ "github.com/pion/dtls/v2/pkg/crypto/signature"
+)
+
+// MessageServerKeyExchange supports ECDH and PSK
+type MessageServerKeyExchange struct {
+ IdentityHint []byte
+
+ EllipticCurveType elliptic.CurveType
+ NamedCurve elliptic.Curve
+ PublicKey []byte
+ HashAlgorithm hash.Algorithm
+ SignatureAlgorithm signature.Algorithm
+ Signature []byte
+
+ // for unmarshaling
+ KeyExchangeAlgorithm types.KeyExchangeAlgorithm
+}
+
+// Type returns the Handshake Type
+func (m MessageServerKeyExchange) Type() Type {
+ return TypeServerKeyExchange
+}
+
+// Marshal encodes the Handshake
+func (m *MessageServerKeyExchange) Marshal() ([]byte, error) {
+ var out []byte
+ if m.IdentityHint != nil {
+ out = append([]byte{0x00, 0x00}, m.IdentityHint...)
+ binary.BigEndian.PutUint16(out, uint16(len(out)-2))
+ }
+
+ if m.EllipticCurveType == 0 || len(m.PublicKey) == 0 {
+ return out, nil
+ }
+ out = append(out, byte(m.EllipticCurveType), 0x00, 0x00)
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(m.NamedCurve))
+
+ out = append(out, byte(len(m.PublicKey)))
+ out = append(out, m.PublicKey...)
+ switch {
+ case m.HashAlgorithm != hash.None && len(m.Signature) == 0:
+ return nil, errInvalidHashAlgorithm
+ case m.HashAlgorithm == hash.None && len(m.Signature) > 0:
+ return nil, errInvalidHashAlgorithm
+ case m.SignatureAlgorithm == signature.Anonymous && (m.HashAlgorithm != hash.None || len(m.Signature) > 0):
+ return nil, errInvalidSignatureAlgorithm
+ case m.SignatureAlgorithm == signature.Anonymous:
+ return out, nil
+ }
+
+ out = append(out, []byte{byte(m.HashAlgorithm), byte(m.SignatureAlgorithm), 0x00, 0x00}...)
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(len(m.Signature)))
+ out = append(out, m.Signature...)
+
+ return out, nil
+}
+
+// Unmarshal populates the message from encoded data
+func (m *MessageServerKeyExchange) Unmarshal(data []byte) error {
+ switch {
+ case len(data) < 2:
+ return errBufferTooSmall
+ case m.KeyExchangeAlgorithm == types.KeyExchangeAlgorithmNone:
+ return errCipherSuiteUnset
+ }
+
+ hintLength := binary.BigEndian.Uint16(data)
+ if int(hintLength) <= len(data)-2 && m.KeyExchangeAlgorithm.Has(types.KeyExchangeAlgorithmPsk) {
+ m.IdentityHint = append([]byte{}, data[2:2+hintLength]...)
+ data = data[2+hintLength:]
+ }
+ if m.KeyExchangeAlgorithm == types.KeyExchangeAlgorithmPsk {
+ if len(data) == 0 {
+ return nil
+ }
+ return errLengthMismatch
+ }
+
+ if !m.KeyExchangeAlgorithm.Has(types.KeyExchangeAlgorithmEcdhe) {
+ return errLengthMismatch
+ }
+
+ if _, ok := elliptic.CurveTypes()[elliptic.CurveType(data[0])]; ok {
+ m.EllipticCurveType = elliptic.CurveType(data[0])
+ } else {
+ return errInvalidEllipticCurveType
+ }
+
+ if len(data[1:]) < 2 {
+ return errBufferTooSmall
+ }
+ m.NamedCurve = elliptic.Curve(binary.BigEndian.Uint16(data[1:3]))
+ if _, ok := elliptic.Curves()[m.NamedCurve]; !ok {
+ return errInvalidNamedCurve
+ }
+ if len(data) < 4 {
+ return errBufferTooSmall
+ }
+
+ publicKeyLength := int(data[3])
+ offset := 4 + publicKeyLength
+ if len(data) < offset {
+ return errBufferTooSmall
+ }
+ m.PublicKey = append([]byte{}, data[4:offset]...)
+
+ // An anonymous connection doesn't contain hashAlgorithm, signatureAlgorithm, or signature
+ if len(data) == offset {
+ return nil
+ } else if len(data) <= offset {
+ return errBufferTooSmall
+ }
+
+ m.HashAlgorithm = hash.Algorithm(data[offset])
+ if _, ok := hash.Algorithms()[m.HashAlgorithm]; !ok {
+ return errInvalidHashAlgorithm
+ }
+ offset++
+ if len(data) <= offset {
+ return errBufferTooSmall
+ }
+ m.SignatureAlgorithm = signature.Algorithm(data[offset])
+ if _, ok := signature.Algorithms()[m.SignatureAlgorithm]; !ok {
+ return errInvalidSignatureAlgorithm
+ }
+ offset++
+ if len(data) < offset+2 {
+ return errBufferTooSmall
+ }
+ signatureLength := int(binary.BigEndian.Uint16(data[offset:]))
+ offset += 2
+ if len(data) < offset+signatureLength {
+ return errBufferTooSmall
+ }
+ m.Signature = append([]byte{}, data[offset:offset+signatureLength]...)
+ return nil
+}
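
As a usage sketch (not part of the vendored file): the exported `MessageServerKeyExchange` API above can be exercised on its own for the PSK case, where only the identity hint is encoded. The hint value is a placeholder, and note that `Unmarshal` additionally needs the negotiated key-exchange algorithm, which the handshake layer sets internally.

```go
package main

import (
	"fmt"

	"github.com/pion/dtls/v2/pkg/protocol/handshake"
)

func main() {
	// A PSK-style ServerKeyExchange carries only an identity hint; the ECDHE
	// fields stay zero, so Marshal stops after writing the hint.
	msg := &handshake.MessageServerKeyExchange{
		IdentityHint: []byte("example-psk-hint"),
	}

	raw, err := msg.Marshal()
	if err != nil {
		panic(err)
	}

	// Wire format: 2-byte big-endian hint length followed by the hint bytes.
	fmt.Printf("%x\n", raw)
}
```
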
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/random.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/random.go
new file mode 100644
index 000000000..0ade936eb
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/random.go
@@ -0,0 +1,49 @@
+package handshake
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "time"
+)
+
+// Consts for Random in Handshake
+const (
+ RandomBytesLength = 28
+ RandomLength = RandomBytesLength + 4
+)
+
+// Random value that is used in ClientHello and ServerHello
+//
+// https://tools.ietf.org/html/rfc4346#section-7.4.1.2
+type Random struct {
+ GMTUnixTime time.Time
+ RandomBytes [RandomBytesLength]byte
+}
+
+// MarshalFixed encodes the Handshake
+func (r *Random) MarshalFixed() [RandomLength]byte {
+ var out [RandomLength]byte
+
+ binary.BigEndian.PutUint32(out[0:], uint32(r.GMTUnixTime.Unix()))
+ copy(out[4:], r.RandomBytes[:])
+
+ return out
+}
+
+// UnmarshalFixed populates the message from encoded data
+func (r *Random) UnmarshalFixed(data [RandomLength]byte) {
+ r.GMTUnixTime = time.Unix(int64(binary.BigEndian.Uint32(data[0:])), 0)
+ copy(r.RandomBytes[:], data[4:])
+}
+
+// Populate fills the handshakeRandom with random values
+// may be called multiple times
+func (r *Random) Populate() error {
+ r.GMTUnixTime = time.Now()
+
+ tmp := make([]byte, RandomBytesLength)
+ _, err := rand.Read(tmp)
+ copy(r.RandomBytes[:], tmp)
+
+ return err
+}
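
A small round-trip sketch of the `Random` type above, assuming nothing beyond the exported API shown in this file: `Populate` fills the struct, `MarshalFixed` produces the fixed 32-byte encoding, and `UnmarshalFixed` restores it.

```go
package main

import (
	"fmt"

	"github.com/pion/dtls/v2/pkg/protocol/handshake"
)

func main() {
	var r handshake.Random
	if err := r.Populate(); err != nil { // time.Now() plus 28 crypto/rand bytes
		panic(err)
	}

	fixed := r.MarshalFixed() // [32]byte: 4-byte GMT unix time + 28 random bytes

	var decoded handshake.Random
	decoded.UnmarshalFixed(fixed)

	// The random bytes survive the round trip; the timestamp is truncated
	// to whole seconds by the encoding.
	fmt.Println(decoded.RandomBytes == r.RandomBytes)
	fmt.Println(decoded.GMTUnixTime.Unix() == r.GMTUnixTime.Unix())
}
```
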
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/errors.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/errors.go
new file mode 100644
index 000000000..7033d4058
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/errors.go
@@ -0,0 +1,16 @@
+// Package recordlayer implements the TLS Record Layer https://tools.ietf.org/html/rfc5246#section-6
+package recordlayer
+
+import (
+ "errors"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+)
+
+var (
+ errBufferTooSmall = &protocol.TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113
+ errInvalidPacketLength = &protocol.TemporaryError{Err: errors.New("packet length and declared length do not match")} //nolint:goerr113
+ errSequenceNumberOverflow = &protocol.InternalError{Err: errors.New("sequence number overflow")} //nolint:goerr113
+ errUnsupportedProtocolVersion = &protocol.FatalError{Err: errors.New("unsupported protocol version")} //nolint:goerr113
+ errInvalidContentType = &protocol.TemporaryError{Err: errors.New("invalid content type")} //nolint:goerr113
+)
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/header.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/header.go
new file mode 100644
index 000000000..65047d767
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/header.go
@@ -0,0 +1,61 @@
+package recordlayer
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/internal/util"
+ "github.com/pion/dtls/v2/pkg/protocol"
+)
+
+// Header implements a TLS RecordLayer header
+type Header struct {
+ ContentType protocol.ContentType
+ ContentLen uint16
+ Version protocol.Version
+ Epoch uint16
+ SequenceNumber uint64 // uint48 in spec
+}
+
+// RecordLayer enums
+const (
+ HeaderSize = 13
+ MaxSequenceNumber = 0x0000FFFFFFFFFFFF
+)
+
+// Marshal encodes a TLS RecordLayer Header to binary
+func (h *Header) Marshal() ([]byte, error) {
+ if h.SequenceNumber > MaxSequenceNumber {
+ return nil, errSequenceNumberOverflow
+ }
+
+ out := make([]byte, HeaderSize)
+ out[0] = byte(h.ContentType)
+ out[1] = h.Version.Major
+ out[2] = h.Version.Minor
+ binary.BigEndian.PutUint16(out[3:], h.Epoch)
+ util.PutBigEndianUint48(out[5:], h.SequenceNumber)
+ binary.BigEndian.PutUint16(out[HeaderSize-2:], h.ContentLen)
+ return out, nil
+}
+
+// Unmarshal populates a TLS RecordLayer Header from binary
+func (h *Header) Unmarshal(data []byte) error {
+ if len(data) < HeaderSize {
+ return errBufferTooSmall
+ }
+ h.ContentType = protocol.ContentType(data[0])
+ h.Version.Major = data[1]
+ h.Version.Minor = data[2]
+ h.Epoch = binary.BigEndian.Uint16(data[3:])
+
+ // SequenceNumber is stored as uint48, make into uint64
+ seqCopy := make([]byte, 8)
+ copy(seqCopy[2:], data[5:11])
+ h.SequenceNumber = binary.BigEndian.Uint64(seqCopy)
+
+ if !h.Version.Equal(protocol.Version1_0) && !h.Version.Equal(protocol.Version1_2) {
+ return errUnsupportedProtocolVersion
+ }
+
+ return nil
+}
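
For illustration, a brief round trip through the 13-byte record header, using only the `Header` type above and the `protocol` constants visible elsewhere in this diff; the field values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/pion/dtls/v2/pkg/protocol"
	"github.com/pion/dtls/v2/pkg/protocol/recordlayer"
)

func main() {
	h := recordlayer.Header{
		ContentType:    protocol.ContentTypeHandshake,
		Version:        protocol.Version1_2,
		Epoch:          0,
		SequenceNumber: 1, // 48-bit on the wire
		ContentLen:     12,
	}

	raw, err := h.Marshal() // always HeaderSize (13) bytes
	if err != nil {
		panic(err)
	}

	var decoded recordlayer.Header
	if err := decoded.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", decoded)
}
```
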
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/recordlayer.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/recordlayer.go
new file mode 100644
index 000000000..67e5a727b
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/recordlayer.go
@@ -0,0 +1,99 @@
+package recordlayer
+
+import (
+ "encoding/binary"
+
+ "github.com/pion/dtls/v2/pkg/protocol"
+ "github.com/pion/dtls/v2/pkg/protocol/alert"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+)
+
+// RecordLayer which handles all data transport.
+// The record layer is assumed to sit directly on top of some
+// reliable transport such as TCP. The record layer can carry four types of content:
+//
+// 1. Handshake messages—used for algorithm negotiation and key establishment.
+// 2. ChangeCipherSpec messages—really part of the handshake but technically a separate kind of message.
+// 3. Alert messages—used to signal that errors have occurred
+// 4. Application layer data
+//
+// The DTLS record layer is extremely similar to that of TLS 1.1. The
+// only change is the inclusion of an explicit sequence number in the
+// record. This sequence number allows the recipient to correctly
+// verify the TLS MAC.
+//
+// https://tools.ietf.org/html/rfc4347#section-4.1
+type RecordLayer struct {
+ Header Header
+ Content protocol.Content
+}
+
+// Marshal encodes the RecordLayer to binary
+func (r *RecordLayer) Marshal() ([]byte, error) {
+ contentRaw, err := r.Content.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ r.Header.ContentLen = uint16(len(contentRaw))
+ r.Header.ContentType = r.Content.ContentType()
+
+ headerRaw, err := r.Header.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ return append(headerRaw, contentRaw...), nil
+}
+
+// Unmarshal populates the RecordLayer from binary
+func (r *RecordLayer) Unmarshal(data []byte) error {
+ if len(data) < HeaderSize {
+ return errBufferTooSmall
+ }
+ if err := r.Header.Unmarshal(data); err != nil {
+ return err
+ }
+
+ switch protocol.ContentType(data[0]) {
+ case protocol.ContentTypeChangeCipherSpec:
+ r.Content = &protocol.ChangeCipherSpec{}
+ case protocol.ContentTypeAlert:
+ r.Content = &alert.Alert{}
+ case protocol.ContentTypeHandshake:
+ r.Content = &handshake.Handshake{}
+ case protocol.ContentTypeApplicationData:
+ r.Content = &protocol.ApplicationData{}
+ default:
+ return errInvalidContentType
+ }
+
+ return r.Content.Unmarshal(data[HeaderSize:])
+}
+
+// UnpackDatagram extracts all RecordLayer messages from a single datagram.
+// Note that as with TLS, multiple handshake messages may be placed in
+// the same DTLS record, provided that there is room and that they are
+// part of the same flight. Thus, there are two acceptable ways to pack
+// two DTLS messages into the same datagram: in the same record or in
+// separate records.
+// https://tools.ietf.org/html/rfc6347#section-4.2.3
+func UnpackDatagram(buf []byte) ([][]byte, error) {
+ out := [][]byte{}
+
+ for offset := 0; len(buf) != offset; {
+ if len(buf)-offset <= HeaderSize {
+ return nil, errInvalidPacketLength
+ }
+
+ pktLen := (HeaderSize + int(binary.BigEndian.Uint16(buf[offset+11:])))
+ if offset+pktLen > len(buf) {
+ return nil, errInvalidPacketLength
+ }
+
+ out = append(out, buf[offset:offset+pktLen])
+ offset += pktLen
+ }
+
+ return out, nil
+}
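
To illustrate `UnpackDatagram`, here is a sketch (not part of the vendored file) that hand-assembles one record from a `Header` plus a placeholder payload, packs it twice into a single datagram, and splits it back into per-record slices.

```go
package main

import (
	"fmt"

	"github.com/pion/dtls/v2/pkg/protocol"
	"github.com/pion/dtls/v2/pkg/protocol/recordlayer"
)

func main() {
	payload := []byte("hello")

	h := recordlayer.Header{
		ContentType: protocol.ContentTypeApplicationData,
		Version:     protocol.Version1_2,
		ContentLen:  uint16(len(payload)),
	}
	hdr, err := h.Marshal()
	if err != nil {
		panic(err)
	}
	record := append(hdr, payload...)

	// One UDP datagram may carry several records back to back.
	datagram := append(append([]byte{}, record...), record...)

	records, err := recordlayer.UnpackDatagram(datagram)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(records)) // 2
}
```
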
diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/version.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/version.go
new file mode 100644
index 000000000..d5ddb1d00
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/version.go
@@ -0,0 +1,21 @@
+// Package protocol provides the DTLS wire format
+package protocol
+
+// Version enums
+var (
+ Version1_0 = Version{Major: 0xfe, Minor: 0xff} //nolint:gochecknoglobals
+ Version1_2 = Version{Major: 0xfe, Minor: 0xfd} //nolint:gochecknoglobals
+)
+
+// Version is the minor/major value in the RecordLayer
+// and ClientHello/ServerHello
+//
+// https://tools.ietf.org/html/rfc4346#section-6.2.1
+type Version struct {
+ Major, Minor uint8
+}
+
+// Equal determines if two protocol versions are equal
+func (v Version) Equal(x Version) bool {
+ return v.Major == x.Major && v.Minor == x.Minor
+}
diff --git a/vendor/github.com/pion/dtls/v2/renovate.json b/vendor/github.com/pion/dtls/v2/renovate.json
new file mode 100644
index 000000000..f1bb98c6a
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/renovate.json
@@ -0,0 +1,6 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "github>pion/renovate-config"
+ ]
+}
diff --git a/vendor/github.com/pion/dtls/v2/resume.go b/vendor/github.com/pion/dtls/v2/resume.go
new file mode 100644
index 000000000..40e55e449
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/resume.go
@@ -0,0 +1,19 @@
+package dtls
+
+import (
+ "context"
+ "net"
+)
+
+// Resume imports an already established dtls connection using a specific dtls state
+func Resume(state *State, conn net.Conn, config *Config) (*Conn, error) {
+ if err := state.initCipherSuite(); err != nil {
+ return nil, err
+ }
+ c, err := createConn(context.Background(), conn, config, state.isClient, state)
+ if err != nil {
+ return nil, err
+ }
+
+ return c, nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/session.go b/vendor/github.com/pion/dtls/v2/session.go
new file mode 100644
index 000000000..f52120cd8
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/session.go
@@ -0,0 +1,21 @@
+package dtls
+
+// Session stores the data needed for resumption
+type Session struct {
+ // ID stores the session id
+ ID []byte
+ // Secret stores the session master secret
+ Secret []byte
+}
+
+// SessionStore defines methods needed for session resumption.
+type SessionStore interface {
+ // Set saves a session.
+ // For a client, use the server name as the key.
+ // For a server, use the session id.
+ Set(key []byte, s Session) error
+ // Get fetches a session.
+ Get(key []byte) (Session, error)
+ // Del deletes a saved session.
+ Del(key []byte) error
+}
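
Since `SessionStore` is just an interface, a minimal in-memory implementation is sketched below. It is illustrative only: sessions are lost on restart and never expire, and the package and type names are hypothetical.

```go
// Package sessioncache shows one possible SessionStore backing.
package sessioncache

import (
	"sync"

	"github.com/pion/dtls/v2"
)

// MemoryStore is a minimal in-memory SessionStore.
type MemoryStore struct {
	mu       sync.Mutex
	sessions map[string]dtls.Session
}

// NewMemoryStore returns an empty MemoryStore.
func NewMemoryStore() *MemoryStore {
	return &MemoryStore{sessions: map[string]dtls.Session{}}
}

// Set saves a session under the given key (server name or session id).
func (m *MemoryStore) Set(key []byte, s dtls.Session) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.sessions[string(key)] = s
	return nil
}

// Get returns the stored session, or the zero Session if none exists.
func (m *MemoryStore) Get(key []byte) (dtls.Session, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.sessions[string(key)], nil
}

// Del removes a stored session.
func (m *MemoryStore) Del(key []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.sessions, string(key))
	return nil
}

var _ dtls.SessionStore = (*MemoryStore)(nil)
```
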
diff --git a/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go b/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go
new file mode 100644
index 000000000..92cc7c191
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go
@@ -0,0 +1,14 @@
+package dtls
+
+import "github.com/pion/dtls/v2/pkg/protocol/extension"
+
+// SRTPProtectionProfile defines the parameters and options that are in effect for the SRTP processing
+// https://tools.ietf.org/html/rfc5764#section-4.1.2
+type SRTPProtectionProfile = extension.SRTPProtectionProfile
+
+const (
+ SRTP_AES128_CM_HMAC_SHA1_80 SRTPProtectionProfile = extension.SRTP_AES128_CM_HMAC_SHA1_80 // nolint:revive,stylecheck
+ SRTP_AES128_CM_HMAC_SHA1_32 SRTPProtectionProfile = extension.SRTP_AES128_CM_HMAC_SHA1_32 // nolint:revive,stylecheck
+ SRTP_AEAD_AES_128_GCM SRTPProtectionProfile = extension.SRTP_AEAD_AES_128_GCM // nolint:revive,stylecheck
+ SRTP_AEAD_AES_256_GCM SRTPProtectionProfile = extension.SRTP_AEAD_AES_256_GCM // nolint:revive,stylecheck
+)
diff --git a/vendor/github.com/pion/dtls/v2/state.go b/vendor/github.com/pion/dtls/v2/state.go
new file mode 100644
index 000000000..22c65c744
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/state.go
@@ -0,0 +1,215 @@
+package dtls
+
+import (
+ "bytes"
+ "encoding/gob"
+ "sync/atomic"
+
+ "github.com/pion/dtls/v2/pkg/crypto/elliptic"
+ "github.com/pion/dtls/v2/pkg/crypto/prf"
+ "github.com/pion/dtls/v2/pkg/protocol/handshake"
+ "github.com/pion/transport/v2/replaydetector"
+)
+
+// State holds the dtls connection state and implements both encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
+type State struct {
+ localEpoch, remoteEpoch atomic.Value
+ localSequenceNumber []uint64 // uint48
+ localRandom, remoteRandom handshake.Random
+ masterSecret []byte
+ cipherSuite CipherSuite // nil if a cipherSuite hasn't been chosen
+
+ srtpProtectionProfile SRTPProtectionProfile // Negotiated SRTPProtectionProfile
+ PeerCertificates [][]byte
+ IdentityHint []byte
+ SessionID []byte
+
+ isClient bool
+
+ preMasterSecret []byte
+ extendedMasterSecret bool
+
+ namedCurve elliptic.Curve
+ localKeypair *elliptic.Keypair
+ cookie []byte
+ handshakeSendSequence int
+ handshakeRecvSequence int
+ serverName string
+ remoteRequestedCertificate bool // Did we get a CertificateRequest
+ localCertificatesVerify []byte // cache CertificateVerify
+ localVerifyData []byte // cached VerifyData
+ localKeySignature []byte // cached keySignature
+ peerCertificatesVerified bool
+
+ replayDetector []replaydetector.ReplayDetector
+
+ peerSupportedProtocols []string
+ NegotiatedProtocol string
+}
+
+type serializedState struct {
+ LocalEpoch uint16
+ RemoteEpoch uint16
+ LocalRandom [handshake.RandomLength]byte
+ RemoteRandom [handshake.RandomLength]byte
+ CipherSuiteID uint16
+ MasterSecret []byte
+ SequenceNumber uint64
+ SRTPProtectionProfile uint16
+ PeerCertificates [][]byte
+ IdentityHint []byte
+ SessionID []byte
+ IsClient bool
+}
+
+func (s *State) clone() *State {
+ serialized := s.serialize()
+ state := &State{}
+ state.deserialize(*serialized)
+
+ return state
+}
+
+func (s *State) serialize() *serializedState {
+ // Marshal random values
+ localRnd := s.localRandom.MarshalFixed()
+ remoteRnd := s.remoteRandom.MarshalFixed()
+
+ epoch := s.getLocalEpoch()
+ return &serializedState{
+ LocalEpoch: s.getLocalEpoch(),
+ RemoteEpoch: s.getRemoteEpoch(),
+ CipherSuiteID: uint16(s.cipherSuite.ID()),
+ MasterSecret: s.masterSecret,
+ SequenceNumber: atomic.LoadUint64(&s.localSequenceNumber[epoch]),
+ LocalRandom: localRnd,
+ RemoteRandom: remoteRnd,
+ SRTPProtectionProfile: uint16(s.srtpProtectionProfile),
+ PeerCertificates: s.PeerCertificates,
+ IdentityHint: s.IdentityHint,
+ SessionID: s.SessionID,
+ IsClient: s.isClient,
+ }
+}
+
+func (s *State) deserialize(serialized serializedState) {
+ // Set epoch values
+ epoch := serialized.LocalEpoch
+ s.localEpoch.Store(serialized.LocalEpoch)
+ s.remoteEpoch.Store(serialized.RemoteEpoch)
+
+ for len(s.localSequenceNumber) <= int(epoch) {
+ s.localSequenceNumber = append(s.localSequenceNumber, uint64(0))
+ }
+
+ // Set random values
+ localRandom := &handshake.Random{}
+ localRandom.UnmarshalFixed(serialized.LocalRandom)
+ s.localRandom = *localRandom
+
+ remoteRandom := &handshake.Random{}
+ remoteRandom.UnmarshalFixed(serialized.RemoteRandom)
+ s.remoteRandom = *remoteRandom
+
+ s.isClient = serialized.IsClient
+
+ // Set master secret
+ s.masterSecret = serialized.MasterSecret
+
+ // Set cipher suite
+ s.cipherSuite = cipherSuiteForID(CipherSuiteID(serialized.CipherSuiteID), nil)
+
+ atomic.StoreUint64(&s.localSequenceNumber[epoch], serialized.SequenceNumber)
+ s.srtpProtectionProfile = SRTPProtectionProfile(serialized.SRTPProtectionProfile)
+
+ // Set remote certificate
+ s.PeerCertificates = serialized.PeerCertificates
+ s.IdentityHint = serialized.IdentityHint
+ s.SessionID = serialized.SessionID
+}
+
+func (s *State) initCipherSuite() error {
+ if s.cipherSuite.IsInitialized() {
+ return nil
+ }
+
+ localRandom := s.localRandom.MarshalFixed()
+ remoteRandom := s.remoteRandom.MarshalFixed()
+
+ var err error
+ if s.isClient {
+ err = s.cipherSuite.Init(s.masterSecret, localRandom[:], remoteRandom[:], true)
+ } else {
+ err = s.cipherSuite.Init(s.masterSecret, remoteRandom[:], localRandom[:], false)
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalBinary is a binary.BinaryMarshaler.MarshalBinary implementation
+func (s *State) MarshalBinary() ([]byte, error) {
+ serialized := s.serialize()
+
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ if err := enc.Encode(*serialized); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// UnmarshalBinary is a binary.BinaryUnmarshaler.UnmarshalBinary implementation
+func (s *State) UnmarshalBinary(data []byte) error {
+ enc := gob.NewDecoder(bytes.NewBuffer(data))
+ var serialized serializedState
+ if err := enc.Decode(&serialized); err != nil {
+ return err
+ }
+
+ s.deserialize(serialized)
+ if err := s.initCipherSuite(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ExportKeyingMaterial returns length bytes of exported key material in a new
+// slice as defined in RFC 5705.
+// This allows protocols to use DTLS for key establishment, but
+// then use some of the keying material for their own purposes
+func (s *State) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
+ if s.getLocalEpoch() == 0 {
+ return nil, errHandshakeInProgress
+ } else if len(context) != 0 {
+ return nil, errContextUnsupported
+ } else if _, ok := invalidKeyingLabels()[label]; ok {
+ return nil, errReservedExportKeyingMaterial
+ }
+
+ localRandom := s.localRandom.MarshalFixed()
+ remoteRandom := s.remoteRandom.MarshalFixed()
+
+ seed := []byte(label)
+ if s.isClient {
+ seed = append(append(seed, localRandom[:]...), remoteRandom[:]...)
+ } else {
+ seed = append(append(seed, remoteRandom[:]...), localRandom[:]...)
+ }
+ return prf.PHash(s.masterSecret, seed, length, s.cipherSuite.HashFunc())
+}
+
+func (s *State) getRemoteEpoch() uint16 {
+ if remoteEpoch, ok := s.remoteEpoch.Load().(uint16); ok {
+ return remoteEpoch
+ }
+ return 0
+}
+
+func (s *State) getLocalEpoch() uint16 {
+ if localEpoch, ok := s.localEpoch.Load().(uint16); ok {
+ return localEpoch
+ }
+ return 0
+}
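
A minimal sketch of how `State.UnmarshalBinary` combines with `Resume` from the file above. How the serialized blob and the transport are obtained (for example, saving `State.MarshalBinary` output from an established connection) is assumed to happen elsewhere; the function name is hypothetical.

```go
// Package resume sketches restoring a DTLS connection from a serialized state.
package resume

import (
	"net"

	"github.com/pion/dtls/v2"
)

// Restore rebuilds a *dtls.Conn from a previously serialized state blob and
// the underlying transport.
func Restore(blob []byte, transport net.Conn, cfg *dtls.Config) (*dtls.Conn, error) {
	state := &dtls.State{}
	// UnmarshalBinary gob-decodes the serialized state and re-initializes
	// the cipher suite from the stored master secret and random values.
	if err := state.UnmarshalBinary(blob); err != nil {
		return nil, err
	}
	return dtls.Resume(state, transport, cfg)
}
```
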
diff --git a/vendor/github.com/pion/dtls/v2/util.go b/vendor/github.com/pion/dtls/v2/util.go
new file mode 100644
index 000000000..dda055ab3
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/util.go
@@ -0,0 +1,38 @@
+package dtls
+
+func findMatchingSRTPProfile(a, b []SRTPProtectionProfile) (SRTPProtectionProfile, bool) {
+ for _, aProfile := range a {
+ for _, bProfile := range b {
+ if aProfile == bProfile {
+ return aProfile, true
+ }
+ }
+ }
+ return 0, false
+}
+
+func findMatchingCipherSuite(a, b []CipherSuite) (CipherSuite, bool) {
+ for _, aSuite := range a {
+ for _, bSuite := range b {
+ if aSuite.ID() == bSuite.ID() {
+ return aSuite, true
+ }
+ }
+ }
+ return nil, false
+}
+
+func splitBytes(bytes []byte, splitLen int) [][]byte {
+ splitBytes := make([][]byte, 0)
+ numBytes := len(bytes)
+ for i := 0; i < numBytes; i += splitLen {
+ j := i + splitLen
+ if j > numBytes {
+ j = numBytes
+ }
+
+ splitBytes = append(splitBytes, bytes[i:j])
+ }
+
+ return splitBytes
+}
diff --git a/vendor/github.com/pion/logging/.golangci.yml b/vendor/github.com/pion/logging/.golangci.yml
new file mode 100644
index 000000000..ffb0058e6
--- /dev/null
+++ b/vendor/github.com/pion/logging/.golangci.yml
@@ -0,0 +1,13 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ misspell:
+ locale: US
+
+linters:
+ enable-all: true
+
+issues:
+ exclude-use-default: false
+ max-per-linter: 0
+ max-same-issues: 50
diff --git a/vendor/github.com/pion/logging/.travis.yml b/vendor/github.com/pion/logging/.travis.yml
new file mode 100644
index 000000000..b96a1edb9
--- /dev/null
+++ b/vendor/github.com/pion/logging/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+go:
+ - "1.x" # use the latest Go release
+
+env:
+ - GO111MODULE=on
+
+before_script:
+ - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.15.0
+
+script:
+ - golangci-lint run ./...
+# - rm -rf examples # Remove examples, no test coverage for them
+ - go test -coverpkg=$(go list ./... | tr '\n' ',') -coverprofile=cover.out -v -race -covermode=atomic ./...
+ - bash <(curl -s https://codecov.io/bash)
+ - bash .github/assert-contributors.sh
+ - bash .github/lint-disallowed-functions-in-library.sh
+ - bash .github/lint-commit-message.sh
diff --git a/vendor/github.com/pion/logging/LICENSE b/vendor/github.com/pion/logging/LICENSE
new file mode 100644
index 000000000..ab602974d
--- /dev/null
+++ b/vendor/github.com/pion/logging/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pion/logging/README.md b/vendor/github.com/pion/logging/README.md
new file mode 100644
index 000000000..c15471d61
--- /dev/null
+++ b/vendor/github.com/pion/logging/README.md
@@ -0,0 +1,41 @@
+# Pion Logging
+
+The Pion logging library
+
+### Roadmap
+The library is used as a part of our WebRTC implementation. Please refer to that [roadmap](https://github.com/pion/webrtc/issues/9) to track our major milestones.
+
+### Community
+Pion has an active community on the [Golang Slack](https://invite.slack.golangbridge.org/). Sign up and join the **#pion** channel for discussions and support. You can also use [Pion mailing list](https://groups.google.com/forum/#!forum/pion).
+
+We are always looking to support **your projects**. Please reach out if you have something to build!
+
+If you need commercial support or don't want to use public methods you can contact us at [team@pion.ly](mailto:team@pion.ly)
+
+### Contributing
+Check out the **[contributing wiki](https://github.com/pion/webrtc/wiki/Contributing)** to join the group of amazing people making this project possible:
+
+* [John Bradley](https://github.com/kc5nra) - *Original Author*
+* [Sean DuBois](https://github.com/Sean-Der) - *Original Author*
+* [Michael MacDonald](https://github.com/mjmac) - *Original Author*
+* [Woodrow Douglass](https://github.com/wdouglass) - *Test coverage*
+* [Michiel De Backker](https://github.com/backkem) - *Docs*
+* [Hugo Arregui](https://github.com/hugoArregui) - *Custom Logs*
+* [Justin Okamoto](https://github.com/justinokamoto) - *Disabled Logs Update*
+
+### License
+MIT License - see [LICENSE](LICENSE) for full text
diff --git a/vendor/github.com/pion/logging/logger.go b/vendor/github.com/pion/logging/logger.go
new file mode 100644
index 000000000..35f650581
--- /dev/null
+++ b/vendor/github.com/pion/logging/logger.go
@@ -0,0 +1,228 @@
+package logging
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "strings"
+ "sync"
+)
+
+// Use this abstraction to ensure thread-safe access to the logger's io.Writer
+// (which could change at runtime)
+type loggerWriter struct {
+ sync.RWMutex
+ output io.Writer
+}
+
+func (lw *loggerWriter) SetOutput(output io.Writer) {
+ lw.Lock()
+ defer lw.Unlock()
+ lw.output = output
+}
+
+func (lw *loggerWriter) Write(data []byte) (int, error) {
+ lw.RLock()
+ defer lw.RUnlock()
+ return lw.output.Write(data)
+}
+
+// DefaultLeveledLogger encapsulates functionality for providing logging at
+// user-defined levels
+type DefaultLeveledLogger struct {
+ level LogLevel
+ writer *loggerWriter
+ trace *log.Logger
+ debug *log.Logger
+ info *log.Logger
+ warn *log.Logger
+ err *log.Logger
+}
+
+// WithTraceLogger is a chainable configuration function which sets the
+// Trace-level logger
+func (ll *DefaultLeveledLogger) WithTraceLogger(log *log.Logger) *DefaultLeveledLogger {
+ ll.trace = log
+ return ll
+}
+
+// WithDebugLogger is a chainable configuration function which sets the
+// Debug-level logger
+func (ll *DefaultLeveledLogger) WithDebugLogger(log *log.Logger) *DefaultLeveledLogger {
+ ll.debug = log
+ return ll
+}
+
+// WithInfoLogger is a chainable configuration function which sets the
+// Info-level logger
+func (ll *DefaultLeveledLogger) WithInfoLogger(log *log.Logger) *DefaultLeveledLogger {
+ ll.info = log
+ return ll
+}
+
+// WithWarnLogger is a chainable configuration function which sets the
+// Warn-level logger
+func (ll *DefaultLeveledLogger) WithWarnLogger(log *log.Logger) *DefaultLeveledLogger {
+ ll.warn = log
+ return ll
+}
+
+// WithErrorLogger is a chainable configuration function which sets the
+// Error-level logger
+func (ll *DefaultLeveledLogger) WithErrorLogger(log *log.Logger) *DefaultLeveledLogger {
+ ll.err = log
+ return ll
+}
+
+// WithOutput is a chainable configuration function which sets the logger's
+// logging output to the supplied io.Writer
+func (ll *DefaultLeveledLogger) WithOutput(output io.Writer) *DefaultLeveledLogger {
+ ll.writer.SetOutput(output)
+ return ll
+}
+
+func (ll *DefaultLeveledLogger) logf(logger *log.Logger, level LogLevel, format string, args ...interface{}) {
+ if ll.level.Get() < level {
+ return
+ }
+
+ callDepth := 3 // this frame + wrapper func + caller
+ msg := fmt.Sprintf(format, args...)
+ if err := logger.Output(callDepth, msg); err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to log: %s", err)
+ }
+}
+
+// SetLevel sets the logger's logging level
+func (ll *DefaultLeveledLogger) SetLevel(newLevel LogLevel) {
+ ll.level.Set(newLevel)
+}
+
+// Trace emits the preformatted message if the logger is at or below LogLevelTrace
+func (ll *DefaultLeveledLogger) Trace(msg string) {
+ ll.logf(ll.trace, LogLevelTrace, msg)
+}
+
+// Tracef formats and emits a message if the logger is at or below LogLevelTrace
+func (ll *DefaultLeveledLogger) Tracef(format string, args ...interface{}) {
+ ll.logf(ll.trace, LogLevelTrace, format, args...)
+}
+
+// Debug emits the preformatted message if the logger is at or below LogLevelDebug
+func (ll *DefaultLeveledLogger) Debug(msg string) {
+ ll.logf(ll.debug, LogLevelDebug, msg)
+}
+
+// Debugf formats and emits a message if the logger is at or below LogLevelDebug
+func (ll *DefaultLeveledLogger) Debugf(format string, args ...interface{}) {
+ ll.logf(ll.debug, LogLevelDebug, format, args...)
+}
+
+// Info emits the preformatted message if the logger is at or below LogLevelInfo
+func (ll *DefaultLeveledLogger) Info(msg string) {
+ ll.logf(ll.info, LogLevelInfo, msg)
+}
+
+// Infof formats and emits a message if the logger is at or below LogLevelInfo
+func (ll *DefaultLeveledLogger) Infof(format string, args ...interface{}) {
+ ll.logf(ll.info, LogLevelInfo, format, args...)
+}
+
+// Warn emits the preformatted message if the logger is at or below LogLevelWarn
+func (ll *DefaultLeveledLogger) Warn(msg string) {
+ ll.logf(ll.warn, LogLevelWarn, msg)
+}
+
+// Warnf formats and emits a message if the logger is at or below LogLevelWarn
+func (ll *DefaultLeveledLogger) Warnf(format string, args ...interface{}) {
+ ll.logf(ll.warn, LogLevelWarn, format, args...)
+}
+
+// Error emits the preformatted message if the logger is at or below LogLevelError
+func (ll *DefaultLeveledLogger) Error(msg string) {
+ ll.logf(ll.err, LogLevelError, msg)
+}
+
+// Errorf formats and emits a message if the logger is at or below LogLevelError
+func (ll *DefaultLeveledLogger) Errorf(format string, args ...interface{}) {
+ ll.logf(ll.err, LogLevelError, format, args...)
+}
+
+// NewDefaultLeveledLoggerForScope returns a configured LeveledLogger
+func NewDefaultLeveledLoggerForScope(scope string, level LogLevel, writer io.Writer) *DefaultLeveledLogger {
+ if writer == nil {
+ writer = os.Stdout
+ }
+ logger := &DefaultLeveledLogger{
+ writer: &loggerWriter{output: writer},
+ level: level,
+ }
+ return logger.
+ WithTraceLogger(log.New(logger.writer, fmt.Sprintf("%s TRACE: ", scope), log.Lmicroseconds|log.Lshortfile)).
+ WithDebugLogger(log.New(logger.writer, fmt.Sprintf("%s DEBUG: ", scope), log.Lmicroseconds|log.Lshortfile)).
+ WithInfoLogger(log.New(logger.writer, fmt.Sprintf("%s INFO: ", scope), log.LstdFlags)).
+ WithWarnLogger(log.New(logger.writer, fmt.Sprintf("%s WARNING: ", scope), log.LstdFlags)).
+ WithErrorLogger(log.New(logger.writer, fmt.Sprintf("%s ERROR: ", scope), log.LstdFlags))
+}
+
+// DefaultLoggerFactory defines log levels per scope and creates new DefaultLeveledLoggers
+type DefaultLoggerFactory struct {
+ Writer io.Writer
+ DefaultLogLevel LogLevel
+ ScopeLevels map[string]LogLevel
+}
+
+// NewDefaultLoggerFactory creates a new DefaultLoggerFactory
+func NewDefaultLoggerFactory() *DefaultLoggerFactory {
+ factory := DefaultLoggerFactory{}
+ factory.DefaultLogLevel = LogLevelError
+ factory.ScopeLevels = make(map[string]LogLevel)
+ factory.Writer = os.Stdout
+
+ logLevels := map[string]LogLevel{
+ "DISABLE": LogLevelDisabled,
+ "ERROR": LogLevelError,
+ "WARN": LogLevelWarn,
+ "INFO": LogLevelInfo,
+ "DEBUG": LogLevelDebug,
+ "TRACE": LogLevelTrace,
+ }
+
+ for name, level := range logLevels {
+ env := os.Getenv(fmt.Sprintf("PION_LOG_%s", name))
+
+ if env == "" {
+ env = os.Getenv(fmt.Sprintf("PIONS_LOG_%s", name))
+ }
+
+ if env == "" {
+ continue
+ }
+
+ if strings.ToLower(env) == "all" {
+ factory.DefaultLogLevel = level
+ continue
+ }
+
+ scopes := strings.Split(strings.ToLower(env), ",")
+ for _, scope := range scopes {
+ factory.ScopeLevels[scope] = level
+ }
+ }
+
+ return &factory
+}
+
+// NewLogger returns a configured LeveledLogger for the given scope
+func (f *DefaultLoggerFactory) NewLogger(scope string) LeveledLogger {
+ logLevel := f.DefaultLogLevel
+ if f.ScopeLevels != nil {
+ scopeLevel, found := f.ScopeLevels[scope]
+
+ if found {
+ logLevel = scopeLevel
+ }
+ }
+ return NewDefaultLeveledLoggerForScope(scope, logLevel, f.Writer)
+}
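
As a usage sketch (not part of the vendored file), the factory above can be configured either programmatically or through the `PION_LOG_*` environment variables it parses; only the exported API shown in this diff is assumed.

```go
package main

import (
	"os"

	"github.com/pion/logging"
)

func main() {
	factory := logging.NewDefaultLoggerFactory()
	factory.DefaultLogLevel = logging.LogLevelInfo
	factory.Writer = os.Stderr
	// The same per-scope override could come from the environment,
	// for example PION_LOG_DEBUG=dtls.
	factory.ScopeLevels["dtls"] = logging.LogLevelDebug

	log := factory.NewLogger("dtls")
	log.Infof("handshake started with %s", "10.0.0.1:4444")
	log.Debug("printed because the dtls scope is at Debug level")
}
```
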
diff --git a/vendor/github.com/pion/logging/scoped.go b/vendor/github.com/pion/logging/scoped.go
new file mode 100644
index 000000000..678bab426
--- /dev/null
+++ b/vendor/github.com/pion/logging/scoped.go
@@ -0,0 +1,72 @@
+package logging
+
+import (
+ "sync/atomic"
+)
+
+// LogLevel represents the level at which the logger will emit log messages
+type LogLevel int32
+
+// Set updates the LogLevel to the supplied value
+func (ll *LogLevel) Set(newLevel LogLevel) {
+ atomic.StoreInt32((*int32)(ll), int32(newLevel))
+}
+
+// Get retrieves the current LogLevel value
+func (ll *LogLevel) Get() LogLevel {
+ return LogLevel(atomic.LoadInt32((*int32)(ll)))
+}
+
+func (ll LogLevel) String() string {
+ switch ll {
+ case LogLevelDisabled:
+ return "Disabled"
+ case LogLevelError:
+ return "Error"
+ case LogLevelWarn:
+ return "Warn"
+ case LogLevelInfo:
+ return "Info"
+ case LogLevelDebug:
+ return "Debug"
+ case LogLevelTrace:
+ return "Trace"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+const (
+ // LogLevelDisabled completely disables logging of any events
+ LogLevelDisabled LogLevel = iota
+ // LogLevelError is for fatal errors which should be handled by user code,
+ // but are logged to ensure that they are seen
+ LogLevelError
+ // LogLevelWarn is for logging abnormal, but non-fatal library operation
+ LogLevelWarn
+ // LogLevelInfo is for logging normal library operation (e.g. state transitions, etc.)
+ LogLevelInfo
+ // LogLevelDebug is for logging low-level library information (e.g. internal operations)
+ LogLevelDebug
+ // LogLevelTrace is for logging very low-level library information (e.g. network traces)
+ LogLevelTrace
+)
+
+// LeveledLogger is the basic pion Logger interface
+type LeveledLogger interface {
+ Trace(msg string)
+ Tracef(format string, args ...interface{})
+ Debug(msg string)
+ Debugf(format string, args ...interface{})
+ Info(msg string)
+ Infof(format string, args ...interface{})
+ Warn(msg string)
+ Warnf(format string, args ...interface{})
+ Error(msg string)
+ Errorf(format string, args ...interface{})
+}
+
+// LoggerFactory is the basic pion LoggerFactory interface
+type LoggerFactory interface {
+ NewLogger(scope string) LeveledLogger
+}
diff --git a/vendor/github.com/pion/transport/v2/AUTHORS.txt b/vendor/github.com/pion/transport/v2/AUTHORS.txt
new file mode 100644
index 000000000..e8bfdeda0
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/AUTHORS.txt
@@ -0,0 +1,23 @@
+# Thank you to everyone that made Pion possible. If you are interested in contributing
+# we would love to have you https://github.com/pion/webrtc/wiki/Contributing
+#
+# This file is auto generated, using git to list all individual contributors.
+# see `.github/generate-authors.sh` for the scripting
+Adrian Cable
+Atsushi Watanabe
+backkem
+Hugo Arregui
+Jeremiah Millay
+Jozef Kralik
+Juliusz Chroboczek
+Luke Curley
+Mathis Engelbart
+OrlandoCo
+Sean DuBois
+Sean DuBois
+Sean DuBois
+Steffen Vogel
+Winlin
+Woodrow Douglass
+Yutaka Takeda
+ZHENK
diff --git a/vendor/github.com/pion/transport/v2/LICENSE b/vendor/github.com/pion/transport/v2/LICENSE
new file mode 100644
index 000000000..ab602974d
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pion/transport/v2/connctx/connctx.go b/vendor/github.com/pion/transport/v2/connctx/connctx.go
new file mode 100644
index 000000000..fc25ee9f0
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/connctx/connctx.go
@@ -0,0 +1,172 @@
+// Package connctx wraps net.Conn using context.Context.
+package connctx
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ErrClosing is returned on Write to closed connection.
+var ErrClosing = errors.New("use of closed network connection")
+
+// Reader is an interface for context controlled reader.
+type Reader interface {
+ ReadContext(context.Context, []byte) (int, error)
+}
+
+// Writer is an interface for context controlled writer.
+type Writer interface {
+ WriteContext(context.Context, []byte) (int, error)
+}
+
+// ReadWriter is a composite of ReadWriter.
+type ReadWriter interface {
+ Reader
+ Writer
+}
+
+// ConnCtx is a wrapper of net.Conn using context.Context.
+type ConnCtx interface {
+ Reader
+ Writer
+ io.Closer
+ LocalAddr() net.Addr
+ RemoteAddr() net.Addr
+ Conn() net.Conn
+}
+
+type connCtx struct {
+ nextConn net.Conn
+ closed chan struct{}
+ closeOnce sync.Once
+ readMu sync.Mutex
+ writeMu sync.Mutex
+}
+
+var veryOld = time.Unix(0, 1) //nolint:gochecknoglobals
+
+// New creates a new ConnCtx wrapping given net.Conn.
+func New(conn net.Conn) ConnCtx {
+ c := &connCtx{
+ nextConn: conn,
+ closed: make(chan struct{}),
+ }
+ return c
+}
+
+func (c *connCtx) ReadContext(ctx context.Context, b []byte) (int, error) {
+ c.readMu.Lock()
+ defer c.readMu.Unlock()
+
+ select {
+ case <-c.closed:
+ return 0, io.EOF
+ default:
+ }
+
+ done := make(chan struct{})
+ var wg sync.WaitGroup
+ var errSetDeadline atomic.Value
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ select {
+ case <-ctx.Done():
+ // context canceled
+ if err := c.nextConn.SetReadDeadline(veryOld); err != nil {
+ errSetDeadline.Store(err)
+ return
+ }
+ <-done
+ if err := c.nextConn.SetReadDeadline(time.Time{}); err != nil {
+ errSetDeadline.Store(err)
+ }
+ case <-done:
+ }
+ }()
+
+ n, err := c.nextConn.Read(b)
+
+ close(done)
+ wg.Wait()
+ if e := ctx.Err(); e != nil && n == 0 {
+ err = e
+ }
+ if err2, ok := errSetDeadline.Load().(error); ok && err == nil && err2 != nil {
+ err = err2
+ }
+ return n, err
+}
+
+func (c *connCtx) WriteContext(ctx context.Context, b []byte) (int, error) {
+ c.writeMu.Lock()
+ defer c.writeMu.Unlock()
+
+ select {
+ case <-c.closed:
+ return 0, ErrClosing
+ default:
+ }
+
+ done := make(chan struct{})
+ var wg sync.WaitGroup
+ var errSetDeadline atomic.Value
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ select {
+ case <-ctx.Done():
+ // context canceled
+ if err := c.nextConn.SetWriteDeadline(veryOld); err != nil {
+ errSetDeadline.Store(err)
+ return
+ }
+ <-done
+ if err := c.nextConn.SetWriteDeadline(time.Time{}); err != nil {
+ errSetDeadline.Store(err)
+ }
+ case <-done:
+ }
+ }()
+
+ n, err := c.nextConn.Write(b)
+
+ close(done)
+ wg.Wait()
+ if e := ctx.Err(); e != nil && n == 0 {
+ err = e
+ }
+ if err2, ok := errSetDeadline.Load().(error); ok && err == nil && err2 != nil {
+ err = err2
+ }
+ return n, err
+}
+
+func (c *connCtx) Close() error {
+ err := c.nextConn.Close()
+ c.closeOnce.Do(func() {
+ c.writeMu.Lock()
+ c.readMu.Lock()
+ close(c.closed)
+ c.readMu.Unlock()
+ c.writeMu.Unlock()
+ })
+ return err
+}
+
+func (c *connCtx) LocalAddr() net.Addr {
+ return c.nextConn.LocalAddr()
+}
+
+func (c *connCtx) RemoteAddr() net.Addr {
+ return c.nextConn.RemoteAddr()
+}
+
+func (c *connCtx) Conn() net.Conn {
+ return c.nextConn
+}
diff --git a/vendor/github.com/pion/transport/v2/connctx/pipe.go b/vendor/github.com/pion/transport/v2/connctx/pipe.go
new file mode 100644
index 000000000..e2f040928
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/connctx/pipe.go
@@ -0,0 +1,11 @@
+package connctx
+
+import (
+ "net"
+)
+
+// Pipe creates piped pair of ConnCtx.
+func Pipe() (ConnCtx, ConnCtx) {
+ ca, cb := net.Pipe()
+ return New(ca), New(cb)
+}
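
A brief sketch of the context-aware read path above, using `Pipe` from this file to create an in-memory connection pair; only the exported `connctx` API shown in this diff is assumed.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/pion/transport/v2/connctx"
)

func main() {
	ca, cb := connctx.Pipe()
	defer ca.Close()
	defer cb.Close()

	go func() {
		// Writes are context-aware as well; Background never cancels here.
		_, _ = cb.WriteContext(context.Background(), []byte("ping"))
	}()

	// The read is aborted via SetReadDeadline if the context expires first.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	buf := make([]byte, 16)
	n, err := ca.ReadContext(ctx, buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %q\n", buf[:n])
}
```
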
diff --git a/vendor/github.com/pion/transport/v2/deadline/deadline.go b/vendor/github.com/pion/transport/v2/deadline/deadline.go
new file mode 100644
index 000000000..918b18b68
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/deadline/deadline.go
@@ -0,0 +1,114 @@
+// Package deadline provides deadline timer used to implement
+// net.Conn compatible connection
+package deadline
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// Deadline signals updatable deadline timer.
+// Also, it implements context.Context.
+type Deadline struct {
+ exceeded chan struct{}
+ stop chan struct{}
+ stopped chan bool
+ deadline time.Time
+ mu sync.RWMutex
+}
+
+// New creates new deadline timer.
+func New() *Deadline {
+ d := &Deadline{
+ exceeded: make(chan struct{}),
+ stop: make(chan struct{}),
+ stopped: make(chan bool, 1),
+ }
+ d.stopped <- true
+ return d
+}
+
+// Set new deadline. Zero value means no deadline.
+func (d *Deadline) Set(t time.Time) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.deadline = t
+
+ close(d.stop)
+
+ select {
+ case <-d.exceeded:
+ d.exceeded = make(chan struct{})
+ default:
+ stopped := <-d.stopped
+ if !stopped {
+ d.exceeded = make(chan struct{})
+ }
+ }
+ d.stop = make(chan struct{})
+ d.stopped = make(chan bool, 1)
+
+ if t.IsZero() {
+ d.stopped <- true
+ return
+ }
+
+ if dur := time.Until(t); dur > 0 {
+ exceeded := d.exceeded
+ stopped := d.stopped
+ go func() {
+ timer := time.NewTimer(dur)
+ select {
+ case <-timer.C:
+ close(exceeded)
+ stopped <- false
+ case <-d.stop:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ stopped <- true
+ }
+ }()
+ return
+ }
+
+ close(d.exceeded)
+ d.stopped <- false
+}
+
+// Done receives deadline signal.
+func (d *Deadline) Done() <-chan struct{} {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+ return d.exceeded
+}
+
+// Err returns context.DeadlineExceeded if the deadline is exceeded.
+// Otherwise, it returns nil.
+func (d *Deadline) Err() error {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+ select {
+ case <-d.exceeded:
+ return context.DeadlineExceeded
+ default:
+ return nil
+ }
+}
+
+// Deadline returns current deadline.
+func (d *Deadline) Deadline() (time.Time, bool) {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+ if d.deadline.IsZero() {
+ return d.deadline, false
+ }
+ return d.deadline, true
+}
+
+// Value returns nil.
+func (d *Deadline) Value(interface{}) interface{} {
+ return nil
+}
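
For reference, a minimal sketch of the updatable deadline timer above: set a deadline, wait on `Done()`, then clear it with the zero time. Only the exported `deadline` API from this file is assumed.

```go
package main

import (
	"fmt"
	"time"

	"github.com/pion/transport/v2/deadline"
)

func main() {
	d := deadline.New()
	d.Set(time.Now().Add(50 * time.Millisecond))

	select {
	case <-d.Done():
		fmt.Println("deadline exceeded:", d.Err()) // context.DeadlineExceeded
	case <-time.After(time.Second):
		fmt.Println("not reached")
	}

	// Setting the zero time clears the deadline again.
	d.Set(time.Time{})
	fmt.Println(d.Err()) // <nil>
}
```
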
diff --git a/vendor/github.com/pion/transport/v2/packetio/buffer.go b/vendor/github.com/pion/transport/v2/packetio/buffer.go
new file mode 100644
index 000000000..5da0901c6
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/packetio/buffer.go
@@ -0,0 +1,347 @@
+// Package packetio provides packet buffer
+package packetio
+
+import (
+ "errors"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/pion/transport/v2/deadline"
+)
+
+var errPacketTooBig = errors.New("packet too big")
+
+// BufferPacketType allow the Buffer to know which packet protocol is writing.
+type BufferPacketType int
+
+const (
+ // RTPBufferPacket indicates the Buffer that is handling RTP packets
+ RTPBufferPacket BufferPacketType = 1
+ // RTCPBufferPacket indicates the Buffer that is handling RTCP packets
+ RTCPBufferPacket BufferPacketType = 2
+)
+
+// Buffer allows writing packets to an intermediate buffer, which can then be read from.
+// This is very similar to bytes.Buffer but avoids combining multiple writes into a single read.
+type Buffer struct {
+ mutex sync.Mutex
+
+ // this is a circular buffer. If head <= tail, then the useful
+ // data is in the interval [head, tail[. If tail < head, then
+ // the useful data is the union of [head, len[ and [0, tail[.
+ // In order to avoid ambiguity when head = tail, we always leave
+ // an unused byte in the buffer.
+ data []byte
+ head, tail int
+
+ notify chan struct{}
+ subs bool
+ closed bool
+
+ count int
+ limitCount, limitSize int
+
+ readDeadline *deadline.Deadline
+}
+
+const (
+ minSize = 2048
+ cutoffSize = 128 * 1024
+ maxSize = 4 * 1024 * 1024
+)
+
+// NewBuffer creates a new Buffer.
+func NewBuffer() *Buffer {
+ return &Buffer{
+ notify: make(chan struct{}),
+ readDeadline: deadline.New(),
+ }
+}
+
+// available returns true if the buffer is large enough to fit a packet
+// of the given size, taking overhead into account.
+func (b *Buffer) available(size int) bool {
+ available := b.head - b.tail
+ if available <= 0 {
+ available += len(b.data)
+ }
+ // we interpret head=tail as empty, so always keep a byte free
+ if size+2+1 > available {
+ return false
+ }
+
+ return true
+}
+
+// grow increases the size of the buffer. If it returns nil, then the
+// buffer has been grown. It returns ErrFull if it hits a limit.
+func (b *Buffer) grow() error {
+ var newSize int
+ if len(b.data) < cutoffSize {
+ newSize = 2 * len(b.data)
+ } else {
+ newSize = 5 * len(b.data) / 4
+ }
+ if newSize < minSize {
+ newSize = minSize
+ }
+ if (b.limitSize <= 0 || sizeHardLimit) && newSize > maxSize {
+ newSize = maxSize
+ }
+
+ // one byte slack
+ if b.limitSize > 0 && newSize > b.limitSize+1 {
+ newSize = b.limitSize + 1
+ }
+
+ if newSize <= len(b.data) {
+ return ErrFull
+ }
+
+ newData := make([]byte, newSize)
+
+ var n int
+ if b.head <= b.tail {
+ // data was contiguous
+ n = copy(newData, b.data[b.head:b.tail])
+ } else {
+ // data was discontinuous
+ n = copy(newData, b.data[b.head:])
+ n += copy(newData[n:], b.data[:b.tail])
+ }
+ b.head = 0
+ b.tail = n
+ b.data = newData
+
+ return nil
+}
+
+// Write appends a copy of the packet data to the buffer.
+// Returns ErrFull if the packet doesn't fit.
+//
+// Note that the packet size is limited to 65536 bytes since v0.11.0 due to the internal data structure.
+func (b *Buffer) Write(packet []byte) (int, error) {
+ if len(packet) >= 0x10000 {
+ return 0, errPacketTooBig
+ }
+
+ b.mutex.Lock()
+
+ if b.closed {
+ b.mutex.Unlock()
+ return 0, io.ErrClosedPipe
+ }
+
+ if (b.limitCount > 0 && b.count >= b.limitCount) ||
+ (b.limitSize > 0 && b.size()+2+len(packet) > b.limitSize) {
+ b.mutex.Unlock()
+ return 0, ErrFull
+ }
+
+ // grow the buffer until the packet fits
+ for !b.available(len(packet)) {
+ err := b.grow()
+ if err != nil {
+ b.mutex.Unlock()
+ return 0, err
+ }
+ }
+
+ var notify chan struct{}
+
+ if b.subs {
+ // readers are waiting. Prepare to notify, but only
+ // actually do it after we release the lock.
+ notify = b.notify
+ b.notify = make(chan struct{})
+ b.subs = false
+ }
+
+ // store the length of the packet
+ b.data[b.tail] = uint8(len(packet) >> 8)
+ b.tail++
+ if b.tail >= len(b.data) {
+ b.tail = 0
+ }
+ b.data[b.tail] = uint8(len(packet))
+ b.tail++
+ if b.tail >= len(b.data) {
+ b.tail = 0
+ }
+
+ // store the packet
+ n := copy(b.data[b.tail:], packet)
+ b.tail += n
+ if b.tail >= len(b.data) {
+ // we reached the end, wrap around
+ m := copy(b.data, packet[n:])
+ b.tail = m
+ }
+ b.count++
+ b.mutex.Unlock()
+
+ if notify != nil {
+ close(notify)
+ }
+
+ return len(packet), nil
+}
+
+// Read populates the given byte slice, returning the number of bytes read.
+// Blocks until data is available or the buffer is closed.
+// Returns io.ErrShortBuffer if the packet is too small to copy the Write.
+// Returns io.EOF if the buffer is closed.
+func (b *Buffer) Read(packet []byte) (n int, err error) {
+ // Return immediately if the deadline is already exceeded.
+ select {
+ case <-b.readDeadline.Done():
+ return 0, &netError{ErrTimeout, true, true}
+ default:
+ }
+
+ for {
+ b.mutex.Lock()
+
+ if b.head != b.tail {
+ // decode the packet size
+ n1 := b.data[b.head]
+ b.head++
+ if b.head >= len(b.data) {
+ b.head = 0
+ }
+ n2 := b.data[b.head]
+ b.head++
+ if b.head >= len(b.data) {
+ b.head = 0
+ }
+ count := int((uint16(n1) << 8) | uint16(n2))
+
+ // determine the number of bytes we'll actually copy
+ copied := count
+ if copied > len(packet) {
+ copied = len(packet)
+ }
+
+ // copy the data
+ if b.head+copied < len(b.data) {
+ copy(packet, b.data[b.head:b.head+copied])
+ } else {
+ k := copy(packet, b.data[b.head:])
+ copy(packet[k:], b.data[:copied-k])
+ }
+
+ // advance head, discarding any data that wasn't copied
+ b.head += count
+ if b.head >= len(b.data) {
+ b.head -= len(b.data)
+ }
+
+ if b.head == b.tail {
+ // the buffer is empty, reset to beginning
+ // in order to improve cache locality.
+ b.head = 0
+ b.tail = 0
+ }
+
+ b.count--
+
+ b.mutex.Unlock()
+
+ if copied < count {
+ return copied, io.ErrShortBuffer
+ }
+ return copied, nil
+ }
+
+ if b.closed {
+ b.mutex.Unlock()
+ return 0, io.EOF
+ }
+
+ notify := b.notify
+ b.subs = true
+ b.mutex.Unlock()
+
+ select {
+ case <-b.readDeadline.Done():
+ return 0, &netError{ErrTimeout, true, true}
+ case <-notify:
+ }
+ }
+}
+
+// Close the buffer, unblocking any pending reads.
+// Data already in the buffer can still be read; Read returns io.EOF only once the buffer is empty.
+func (b *Buffer) Close() (err error) {
+ b.mutex.Lock()
+
+ if b.closed {
+ b.mutex.Unlock()
+ return nil
+ }
+
+ notify := b.notify
+ b.closed = true
+
+ b.mutex.Unlock()
+
+ close(notify)
+
+ return nil
+}
+
+// Count returns the number of packets in the buffer.
+func (b *Buffer) Count() int {
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
+ return b.count
+}
+
+// SetLimitCount controls the maximum number of packets that can be buffered.
+// Causes Write to return ErrFull when this limit is reached.
+// A zero value will disable this limit.
+func (b *Buffer) SetLimitCount(limit int) {
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
+
+ b.limitCount = limit
+}
+
+// Size returns the total byte size of packets in the buffer, including
+// a small amount of administrative overhead.
+func (b *Buffer) Size() int {
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
+
+ return b.size()
+}
+
+func (b *Buffer) size() int {
+ size := b.tail - b.head
+ if size < 0 {
+ size += len(b.data)
+ }
+ return size
+}
+
+// SetLimitSize controls the maximum number of bytes that can be buffered.
+// Causes Write to return ErrFull when this limit is reached.
+// A zero value means 4MB since v0.11.0.
+//
+// User can set packetioSizeHardLimit build tag to enable 4MB hard limit.
+// When packetioSizeHardLimit build tag is set, SetLimitSize exceeding
+// the hard limit will be silently discarded.
+func (b *Buffer) SetLimitSize(limit int) {
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
+
+ b.limitSize = limit
+}
+
+// SetReadDeadline sets the deadline for the Read operation.
+// Setting to zero means no deadline.
+func (b *Buffer) SetReadDeadline(t time.Time) error {
+ b.readDeadline.Set(t)
+ return nil
+}
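
A short usage sketch of the packet buffer above, exercising only `Write`, `Read`, `Count`, and `SetReadDeadline` as defined in this file: each write stays an individual packet, and a deadline turns a blocking read into a timeout error.

```go
package main

import (
	"fmt"
	"time"

	"github.com/pion/transport/v2/packetio"
)

func main() {
	b := packetio.NewBuffer()

	// Each Write is preserved as an individual packet; reads never coalesce
	// two writes into one.
	if _, err := b.Write([]byte("packet-1")); err != nil {
		panic(err)
	}
	if _, err := b.Write([]byte("packet-2")); err != nil {
		panic(err)
	}

	buf := make([]byte, 1500)
	for b.Count() > 0 {
		n, err := b.Read(buf)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(buf[:n]))
	}

	// With a read deadline, a Read on an empty buffer fails instead of blocking.
	_ = b.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
	if _, err := b.Read(buf); err != nil {
		fmt.Println("read failed:", err) // i/o timeout
	}
}
```
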
diff --git a/vendor/github.com/pion/transport/v2/packetio/errors.go b/vendor/github.com/pion/transport/v2/packetio/errors.go
new file mode 100644
index 000000000..06f1b9d98
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/packetio/errors.go
@@ -0,0 +1,27 @@
+package packetio
+
+import (
+ "errors"
+)
+
+// netError implements net.Error
+type netError struct {
+ error
+ timeout, temporary bool
+}
+
+func (e *netError) Timeout() bool {
+ return e.timeout
+}
+
+func (e *netError) Temporary() bool {
+ return e.temporary
+}
+
+var (
+ // ErrFull is returned when the buffer has hit the configured limits.
+ ErrFull = errors.New("packetio.Buffer is full, discarding write")
+
+ // ErrTimeout is returned when a deadline has expired
+ ErrTimeout = errors.New("i/o timeout")
+)
diff --git a/vendor/github.com/pion/transport/v2/packetio/hardlimit.go b/vendor/github.com/pion/transport/v2/packetio/hardlimit.go
new file mode 100644
index 000000000..9f4fc5e74
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/packetio/hardlimit.go
@@ -0,0 +1,6 @@
+//go:build packetioSizeHardlimit
+// +build packetioSizeHardlimit
+
+package packetio
+
+const sizeHardLimit = true
diff --git a/vendor/github.com/pion/transport/v2/packetio/no_hardlimit.go b/vendor/github.com/pion/transport/v2/packetio/no_hardlimit.go
new file mode 100644
index 000000000..869e0b667
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/packetio/no_hardlimit.go
@@ -0,0 +1,6 @@
+//go:build !packetioSizeHardlimit
+// +build !packetioSizeHardlimit
+
+package packetio
+
+const sizeHardLimit = false
diff --git a/vendor/github.com/pion/transport/v2/replaydetector/fixedbig.go b/vendor/github.com/pion/transport/v2/replaydetector/fixedbig.go
new file mode 100644
index 000000000..a571a1aad
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/replaydetector/fixedbig.go
@@ -0,0 +1,78 @@
+package replaydetector
+
+import (
+ "fmt"
+)
+
+// fixedBigInt is the fix-sized multi-word integer.
+type fixedBigInt struct {
+ bits []uint64
+ n uint
+ msbMask uint64
+}
+
+// newFixedBigInt creates a new fix-sized multi-word int.
+func newFixedBigInt(n uint) *fixedBigInt {
+ chunkSize := (n + 63) / 64
+ if chunkSize == 0 {
+ chunkSize = 1
+ }
+ return &fixedBigInt{
+ bits: make([]uint64, chunkSize),
+ n: n,
+ msbMask: (1 << (64 - n%64)) - 1,
+ }
+}
+
+// Lsh is the left shift operation.
+func (s *fixedBigInt) Lsh(n uint) {
+ if n == 0 {
+ return
+ }
+ nChunk := int(n / 64)
+ nN := n % 64
+
+ for i := len(s.bits) - 1; i >= 0; i-- {
+ var carry uint64
+ if i-nChunk >= 0 {
+ carry = s.bits[i-nChunk] << nN
+ if i-nChunk-1 >= 0 {
+ carry |= s.bits[i-nChunk-1] >> (64 - nN)
+ }
+ }
+ s.bits[i] = (s.bits[i] << n) | carry
+ }
+ s.bits[len(s.bits)-1] &= s.msbMask
+}
+
+// Bit returns i-th bit of the fixedBigInt.
+func (s *fixedBigInt) Bit(i uint) uint {
+ if i >= s.n {
+ return 0
+ }
+ chunk := i / 64
+ pos := i % 64
+ if s.bits[chunk]&(1<<pos) != 0 {
+ return 1
+ }
+ return 0
+}
+
+// SetBit sets the i-th bit of the fixedBigInt to 1.
+func (s *fixedBigInt) SetBit(i uint) {
+ if i >= s.n {
+ return
+ }
+ chunk := i / 64
+ pos := i % 64
+ s.bits[chunk] |= 1 << pos
+}
+
+// String returns string representation of fixedBigInt.
+func (s *fixedBigInt) String() string {
+ var out string
+ for i := len(s.bits) - 1; i >= 0; i-- {
+ out += fmt.Sprintf("%016X", s.bits[i])
+ }
+ return out
+}
diff --git a/vendor/github.com/pion/transport/v2/replaydetector/replaydetector.go b/vendor/github.com/pion/transport/v2/replaydetector/replaydetector.go
new file mode 100644
index 000000000..297f9f3ae
--- /dev/null
+++ b/vendor/github.com/pion/transport/v2/replaydetector/replaydetector.go
@@ -0,0 +1,116 @@
+// Package replaydetector provides a packet replay detection algorithm.
+package replaydetector
+
+// ReplayDetector is the interface of sequence replay detector.
+type ReplayDetector interface {
+ // Check returns true if the given sequence number is not replayed.
+ // Call accept() to mark the packet as properly received.
+ Check(seq uint64) (accept func(), ok bool)
+}
+
+type slidingWindowDetector struct {
+ latestSeq uint64
+ maxSeq uint64
+ windowSize uint
+ mask *fixedBigInt
+}
+
+// New creates ReplayDetector.
+// The created ReplayDetector doesn't allow wrapping.
+// It can handle a monotonically increasing sequence number up to
+// a full 64-bit number. It is suitable for DTLS replay protection.
+func New(windowSize uint, maxSeq uint64) ReplayDetector {
+ return &slidingWindowDetector{
+ maxSeq: maxSeq,
+ windowSize: windowSize,
+ mask: newFixedBigInt(windowSize),
+ }
+}
+
+func (d *slidingWindowDetector) Check(seq uint64) (accept func(), ok bool) {
+ if seq > d.maxSeq {
+ // Exceeded upper limit.
+ return func() {}, false
+ }
+
+ if seq <= d.latestSeq {
+ if d.latestSeq >= uint64(d.windowSize)+seq {
+ return func() {}, false
+ }
+ if d.mask.Bit(uint(d.latestSeq-seq)) != 0 {
+ // The sequence number is duplicated.
+ return func() {}, false
+ }
+ }
+
+ return func() {
+ if seq > d.latestSeq {
+ // Update the head of the window.
+ d.mask.Lsh(uint(seq - d.latestSeq))
+ d.latestSeq = seq
+ }
+ diff := (d.latestSeq - seq) % d.maxSeq
+ d.mask.SetBit(uint(diff))
+ }, true
+}
+
+// WithWrap creates ReplayDetector allowing sequence wrapping.
+// This is suitable for short bit width counter like SRTP and SRTCP.
+func WithWrap(windowSize uint, maxSeq uint64) ReplayDetector {
+ return &wrappedSlidingWindowDetector{
+ maxSeq: maxSeq,
+ windowSize: windowSize,
+ mask: newFixedBigInt(windowSize),
+ }
+}
+
+type wrappedSlidingWindowDetector struct {
+ latestSeq uint64
+ maxSeq uint64
+ windowSize uint
+ mask *fixedBigInt
+ init bool
+}
+
+func (d *wrappedSlidingWindowDetector) Check(seq uint64) (accept func(), ok bool) {
+ if seq > d.maxSeq {
+ // Exceeded upper limit.
+ return func() {}, false
+ }
+ if !d.init {
+ if seq != 0 {
+ d.latestSeq = seq - 1
+ } else {
+ d.latestSeq = d.maxSeq
+ }
+ d.init = true
+ }
+
+ diff := int64(d.latestSeq) - int64(seq)
+ // Wrap the number.
+ if diff > int64(d.maxSeq)/2 {
+ diff -= int64(d.maxSeq + 1)
+ } else if diff <= -int64(d.maxSeq)/2 {
+ diff += int64(d.maxSeq + 1)
+ }
+
+ if diff >= int64(d.windowSize) {
+ // Too old.
+ return func() {}, false
+ }
+ if diff >= 0 {
+ if d.mask.Bit(uint(diff)) != 0 {
+ // The sequence number is duplicated.
+ return func() {}, false
+ }
+ }
+
+ return func() {
+ if diff < 0 {
+ // Update the head of the window.
+ d.mask.Lsh(uint(-diff))
+ d.latestSeq = seq
+ }
+ d.mask.SetBit(uint(d.latestSeq - seq))
+ }, true
+}
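
To make the accept-after-authentication pattern above concrete, here is a small sketch using the non-wrapping detector with a 64-packet window and the 48-bit DTLS sequence-number limit; the sequence values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/pion/transport/v2/replaydetector"
)

func main() {
	// 64-packet window, 48-bit maximum sequence number (DTLS record numbers).
	det := replaydetector.New(64, (1<<48)-1)

	for _, seq := range []uint64{1, 3, 2, 3, 100, 2} {
		accept, ok := det.Check(seq)
		if !ok {
			fmt.Printf("seq %d rejected (replayed or too old)\n", seq)
			continue
		}
		// Only mark the sequence number as seen once the packet has been
		// authenticated; otherwise an attacker could poison the window.
		accept()
		fmt.Printf("seq %d accepted\n", seq)
	}
}
```
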
diff --git a/vendor/github.com/pion/udp/.gitignore b/vendor/github.com/pion/udp/.gitignore
new file mode 100644
index 000000000..f977e7485
--- /dev/null
+++ b/vendor/github.com/pion/udp/.gitignore
@@ -0,0 +1,25 @@
+### JetBrains IDE ###
+#####################
+.idea/
+
+### Emacs Temporary Files ###
+#############################
+*~
+
+### Folders ###
+###############
+bin/
+vendor/
+node_modules/
+
+### Files ###
+#############
+*.ivf
+*.ogg
+tags
+cover.out
+*.sw[poe]
+*.wasm
+examples/sfu-ws/cert.pem
+examples/sfu-ws/key.pem
+wasm_exec.js
diff --git a/vendor/github.com/pion/udp/.golangci.yml b/vendor/github.com/pion/udp/.golangci.yml
new file mode 100644
index 000000000..48696f16b
--- /dev/null
+++ b/vendor/github.com/pion/udp/.golangci.yml
@@ -0,0 +1,116 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ misspell:
+ locale: US
+ exhaustive:
+ default-signifies-exhaustive: true
+ gomodguard:
+ blocked:
+ modules:
+ - github.com/pkg/errors:
+ recommendations:
+ - errors
+
+linters:
+ enable:
+ - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
+ - bidichk # Checks for dangerous unicode character sequences
+ - bodyclose # checks whether HTTP response body is closed successfully
+    - contextcheck # checks whether a function uses a non-inherited context
+ - decorder # check declaration order and count of types, constants, variables and functions
+ - depguard # Go linter that checks if package imports are in a list of acceptable packages
+ - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
+ - dupl # Tool for code clone detection
+ - durationcheck # check for two durations multiplied together
+ - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
+    - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions where the check for the returned error can be omitted.
+ - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.
+    - errorlint # errorlint is a linter that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
+ - exhaustive # check exhaustiveness of enum switch statements
+ - exportloopref # checks for pointers to enclosing loop variables
+ - forcetypeassert # finds forced type assertions
+ - gci # Gci control golang package import order and make it always deterministic.
+ - gochecknoglobals # Checks that no globals are present in Go code
+ - gochecknoinits # Checks that no init functions are present in Go code
+ - gocognit # Computes and checks the cognitive complexity of functions
+ - goconst # Finds repeated strings that could be replaced by a constant
+ - gocritic # The most opinionated Go source code linter
+ - godox # Tool for detection of FIXME, TODO and other comment keywords
+ - goerr113 # Golang linter to check the errors handling expressions
+ - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
+ - gofumpt # Gofumpt checks whether code was gofumpt-ed.
+    - goheader # Checks if the file header matches a pattern
+ - goimports # Goimports does everything that gofmt does. Additionally it checks unused imports
+ - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
+ - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations.
+ - goprintffuncname # Checks that printf-like functions are named with `f` at the end
+ - gosec # Inspects source code for security problems
+ - gosimple # Linter for Go source code that specializes in simplifying a code
+ - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
+ - grouper # An analyzer to analyze expression groups.
+ - importas # Enforces consistent import aliases
+ - ineffassign # Detects when assignments to existing variables are not used
+ - misspell # Finds commonly misspelled English words in comments
+ - nakedret # Finds naked returns in functions greater than a specified function length
+ - nilerr # Finds the code that returns nil even if it checks that the error is not nil.
+ - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value.
+ - noctx # noctx finds sending http request without context.Context
+ - predeclared # find code that shadows one of Go's predeclared identifiers
+ - revive # golint replacement, finds style mistakes
+ - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks
+ - stylecheck # Stylecheck is a replacement for golint
+ - tagliatelle # Checks the struct tags.
+ - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17
+ - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
+ - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
+ - unconvert # Remove unnecessary type conversions
+ - unparam # Reports unused function parameters
+ - unused # Checks Go code for unused constants, variables, functions and types
+ - wastedassign # wastedassign finds wasted assignment statements
+ - whitespace # Tool for detection of leading and trailing whitespace
+ disable:
+ - containedctx # containedctx is a linter that detects struct contained context.Context field
+ - cyclop # checks function and package cyclomatic complexity
+ - exhaustivestruct # Checks if all struct's fields are initialized
+ - forbidigo # Forbids identifiers
+ - funlen # Tool for detection of long functions
+ - gocyclo # Computes and checks the cyclomatic complexity of functions
+ - godot # Check if comments end in a period
+ - gomnd # An analyzer to detect magic numbers.
+ - ifshort # Checks that your code uses short syntax for if-statements whenever possible
+ - ireturn # Accept Interfaces, Return Concrete Types
+ - lll # Reports long lines
+ - maintidx # maintidx measures the maintainability index of each function.
+ - makezero # Finds slice declarations with non-zero initial length
+ - maligned # Tool to detect Go structs that would take less memory if their fields were sorted
+ - nestif # Reports deeply nested if statements
+ - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
+ - nolintlint # Reports ill-formed or insufficient nolint directives
+ - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
+ - prealloc # Finds slice declarations that could potentially be preallocated
+ - promlinter # Check Prometheus metrics naming via promlint
+ - rowserrcheck # checks whether Err of rows is checked successfully
+ - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed.
+ - testpackage # linter that makes you use a separate _test package
+ - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
+ - varnamelen # checks that the length of a variable's name matches its scope
+ - wrapcheck # Checks that errors returned from external packages are wrapped
+ - wsl # Whitespace Linter - Forces you to use empty lines!
+
+issues:
+ exclude-use-default: false
+ exclude-rules:
+ # Allow complex tests, better to be self contained
+ - path: _test\.go
+ linters:
+ - gocognit
+
+ # Allow complex main function in examples
+ - path: examples
+ text: "of func `main` is high"
+ linters:
+ - gocognit
+
+run:
+ skip-dirs-use-default: false
diff --git a/vendor/github.com/pion/udp/.goreleaser.yml b/vendor/github.com/pion/udp/.goreleaser.yml
new file mode 100644
index 000000000..2caa5fbd3
--- /dev/null
+++ b/vendor/github.com/pion/udp/.goreleaser.yml
@@ -0,0 +1,2 @@
+builds:
+- skip: true
diff --git a/vendor/github.com/pion/udp/AUTHORS.txt b/vendor/github.com/pion/udp/AUTHORS.txt
new file mode 100644
index 000000000..25969ecc5
--- /dev/null
+++ b/vendor/github.com/pion/udp/AUTHORS.txt
@@ -0,0 +1,15 @@
+# Thank you to everyone that made Pion possible. If you are interested in contributing
+# we would love to have you https://github.com/pion/webrtc/wiki/Contributing
+#
+# This file is auto-generated, using git to list all individual contributors.
+# see https://github.com/pion/.goassets/blob/master/scripts/generate-authors.sh for the scripting
+Atsushi Watanabe
+Daniel Beseda
+Jozef Kralik
+Mathias Fredriksson
+Michiel De Backker
+sterling.deng
+ZHENK
+
+# List of contributors not appearing in Git history
+
diff --git a/vendor/github.com/pion/udp/LICENSE b/vendor/github.com/pion/udp/LICENSE
new file mode 100644
index 000000000..81f990d60
--- /dev/null
+++ b/vendor/github.com/pion/udp/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pion/udp/README.md b/vendor/github.com/pion/udp/README.md
new file mode 100644
index 000000000..af0b9d5fb
--- /dev/null
+++ b/vendor/github.com/pion/udp/README.md
@@ -0,0 +1,35 @@
+# Pion UDP
+
+A connection-oriented listener over a UDP PacketConn
+
+### Roadmap
+This package is used in the [DTLS](https://github.com/pion/dtls) and [SCTP](https://github.com/pion/sctp) transports to provide a connection-oriented listener over a UDP PacketConn.
+
+### Community
+Pion has an active community on the [Golang Slack](https://pion.ly/slack/). You can also use the [Pion mailing list](https://groups.google.com/forum/#!forum/pion).
+
+We are always looking to support **your projects**. Please reach out if you have something to build!
+
+If you need commercial support or don't want to use public methods, you can contact us at [team@pion.ly](mailto:team@pion.ly)
+
+### Contributing
+Check out the **[contributing wiki](https://github.com/pion/webrtc/wiki/Contributing)** to join the group of amazing people making this project possible:
+
+### License
+MIT License - see [LICENSE](LICENSE) for full text
diff --git a/vendor/github.com/pion/udp/codecov.yml b/vendor/github.com/pion/udp/codecov.yml
new file mode 100644
index 000000000..085200a48
--- /dev/null
+++ b/vendor/github.com/pion/udp/codecov.yml
@@ -0,0 +1,20 @@
+#
+# DO NOT EDIT THIS FILE
+#
+# It is automatically copied from https://github.com/pion/.goassets repository.
+#
+
+coverage:
+ status:
+ project:
+ default:
+ # Allow decreasing 2% of total coverage to avoid noise.
+ threshold: 2%
+ patch:
+ default:
+ target: 70%
+ only_pulls: true
+
+ignore:
+ - "examples/*"
+ - "examples/**/*"
diff --git a/vendor/github.com/pion/udp/conn.go b/vendor/github.com/pion/udp/conn.go
new file mode 100644
index 000000000..926a81268
--- /dev/null
+++ b/vendor/github.com/pion/udp/conn.go
@@ -0,0 +1,312 @@
+// Package udp provides a connection-oriented listener over a UDP PacketConn
+package udp
+
+import (
+ "context"
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/pion/transport/v2/deadline"
+ "github.com/pion/transport/v2/packetio"
+ pkgSync "github.com/pion/udp/pkg/sync"
+)
+
+const (
+ receiveMTU = 8192
+ defaultListenBacklog = 128 // same as Linux default
+)
+
+// Typed errors
+var (
+ ErrClosedListener = errors.New("udp: listener closed")
+ ErrListenQueueExceeded = errors.New("udp: listen queue exceeded")
+)
+
+// listener augments a connection-oriented Listener over a UDP PacketConn
+type listener struct {
+ pConn *net.UDPConn
+
+ accepting atomic.Value // bool
+ acceptCh chan *Conn
+ doneCh chan struct{}
+ doneOnce sync.Once
+ acceptFilter func([]byte) bool
+ readBufferPool *sync.Pool
+
+ connLock sync.Mutex
+ conns map[string]*Conn
+ connWG *pkgSync.WaitGroup
+
+ readWG sync.WaitGroup
+ errClose atomic.Value // error
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (l *listener) Accept() (net.Conn, error) {
+ select {
+ case c := <-l.acceptCh:
+ l.connWG.Add(1)
+ return c, nil
+
+ case <-l.doneCh:
+ return nil, ErrClosedListener
+ }
+}
+
+// Close closes the listener.
+// Any blocked Accept operations will be unblocked and return errors.
+func (l *listener) Close() error {
+ var err error
+ l.doneOnce.Do(func() {
+ l.accepting.Store(false)
+ close(l.doneCh)
+
+ l.connLock.Lock()
+ // Close unaccepted connections
+ L_CLOSE:
+ for {
+ select {
+ case c := <-l.acceptCh:
+ close(c.doneCh)
+ delete(l.conns, c.rAddr.String())
+
+ default:
+ break L_CLOSE
+ }
+ }
+ nConns := len(l.conns)
+ l.connLock.Unlock()
+
+ l.connWG.Done()
+
+ if nConns == 0 {
+ // Wait if this is the final connection
+ l.readWG.Wait()
+ if errClose, ok := l.errClose.Load().(error); ok {
+ err = errClose
+ }
+ } else {
+ err = nil
+ }
+ })
+
+ return err
+}
+
+// Addr returns the listener's network address.
+func (l *listener) Addr() net.Addr {
+ return l.pConn.LocalAddr()
+}
+
+// ListenConfig stores options for listening to an address.
+type ListenConfig struct {
+ // Backlog defines the maximum length of the queue of pending
+	// connections. It is equivalent to the backlog argument of the
+	// POSIX listen function.
+ // If a connection request arrives when the queue is full,
+ // the request will be silently discarded, unlike TCP.
+	// Set zero to use the default value of 128, which matches the Linux default.
+ Backlog int
+
+	// AcceptFilter determines whether a new conn should be made for
+	// the incoming packet. If not set, any packet creates a new conn.
+ AcceptFilter func([]byte) bool
+}
+
+// Listen creates a new listener based on the ListenConfig.
+func (lc *ListenConfig) Listen(network string, laddr *net.UDPAddr) (net.Listener, error) {
+ if lc.Backlog == 0 {
+ lc.Backlog = defaultListenBacklog
+ }
+
+ conn, err := net.ListenUDP(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+
+ l := &listener{
+ pConn: conn,
+ acceptCh: make(chan *Conn, lc.Backlog),
+ conns: make(map[string]*Conn),
+ doneCh: make(chan struct{}),
+ acceptFilter: lc.AcceptFilter,
+ readBufferPool: &sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, receiveMTU)
+ return &buf
+ },
+ },
+ connWG: pkgSync.NewWaitGroup(),
+ }
+
+ l.accepting.Store(true)
+ l.connWG.Add(1)
+	l.readWG.Add(2) // wait for the readLoop and the Close execution goroutine
+
+ go l.readLoop()
+ go func() {
+ l.connWG.Wait()
+ if err := l.pConn.Close(); err != nil {
+ l.errClose.Store(err)
+ }
+ l.readWG.Done()
+ }()
+
+ return l, nil
+}
+
+// Listen creates a new listener using the default ListenConfig.
+func Listen(network string, laddr *net.UDPAddr) (net.Listener, error) {
+ return (&ListenConfig{}).Listen(network, laddr)
+}
+
+// readLoop has two tasks:
+// 1. Dispatching incoming packets to the correct Conn.
+// It can therefore not be ended until all Conns are closed.
+// 2. Creating a new Conn when receiving from a new remote.
+func (l *listener) readLoop() {
+ defer l.readWG.Done()
+
+ for {
+ buf, ok := l.readBufferPool.Get().(*[]byte)
+ if !ok {
+ return
+ }
+
+ n, raddr, err := l.pConn.ReadFrom(*buf)
+ if err != nil {
+ return
+ }
+ conn, ok, err := l.getConn(raddr, (*buf)[:n])
+ if err != nil {
+ continue
+ }
+ if ok {
+ _, _ = conn.buffer.Write((*buf)[:n])
+ }
+ }
+}
+
+func (l *listener) getConn(raddr net.Addr, buf []byte) (*Conn, bool, error) {
+ l.connLock.Lock()
+ defer l.connLock.Unlock()
+ conn, ok := l.conns[raddr.String()]
+ if !ok {
+ if isAccepting, ok := l.accepting.Load().(bool); !isAccepting || !ok {
+ return nil, false, ErrClosedListener
+ }
+ if l.acceptFilter != nil {
+ if !l.acceptFilter(buf) {
+ return nil, false, nil
+ }
+ }
+ conn = l.newConn(raddr)
+ select {
+ case l.acceptCh <- conn:
+ l.conns[raddr.String()] = conn
+ default:
+ return nil, false, ErrListenQueueExceeded
+ }
+ }
+ return conn, true, nil
+}
+
+// Conn augments a connection-oriented connection over a UDP PacketConn
+type Conn struct {
+ listener *listener
+
+ rAddr net.Addr
+
+ buffer *packetio.Buffer
+
+ doneCh chan struct{}
+ doneOnce sync.Once
+
+ writeDeadline *deadline.Deadline
+}
+
+func (l *listener) newConn(rAddr net.Addr) *Conn {
+ return &Conn{
+ listener: l,
+ rAddr: rAddr,
+ buffer: packetio.NewBuffer(),
+ doneCh: make(chan struct{}),
+ writeDeadline: deadline.New(),
+ }
+}
+
+// Read reads from c into p
+func (c *Conn) Read(p []byte) (int, error) {
+ return c.buffer.Read(p)
+}
+
+// Write writes len(p) bytes from p to the remote address over the underlying UDP connection
+func (c *Conn) Write(p []byte) (n int, err error) {
+ select {
+ case <-c.writeDeadline.Done():
+ return 0, context.DeadlineExceeded
+ default:
+ }
+ return c.listener.pConn.WriteTo(p, c.rAddr)
+}
+
+// Close closes the conn and releases any Read calls
+func (c *Conn) Close() error {
+ var err error
+ c.doneOnce.Do(func() {
+ c.listener.connWG.Done()
+ close(c.doneCh)
+ c.listener.connLock.Lock()
+ delete(c.listener.conns, c.rAddr.String())
+ nConns := len(c.listener.conns)
+ c.listener.connLock.Unlock()
+
+ if isAccepting, ok := c.listener.accepting.Load().(bool); nConns == 0 && !isAccepting && ok {
+ // Wait if this is the final connection
+ c.listener.readWG.Wait()
+ if errClose, ok := c.listener.errClose.Load().(error); ok {
+ err = errClose
+ }
+ } else {
+ err = nil
+ }
+
+ if errBuf := c.buffer.Close(); errBuf != nil && err == nil {
+ err = errBuf
+ }
+ })
+
+ return err
+}
+
+// LocalAddr implements net.Conn.LocalAddr
+func (c *Conn) LocalAddr() net.Addr {
+ return c.listener.pConn.LocalAddr()
+}
+
+// RemoteAddr implements net.Conn.RemoteAddr
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.rAddr
+}
+
+// SetDeadline implements net.Conn.SetDeadline
+func (c *Conn) SetDeadline(t time.Time) error {
+ c.writeDeadline.Set(t)
+ return c.SetReadDeadline(t)
+}
+
+// SetReadDeadline implements net.Conn.SetReadDeadline
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.buffer.SetReadDeadline(t)
+}
+
+// SetWriteDeadline implements net.Conn.SetWriteDeadline
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ c.writeDeadline.Set(t)
+ // Write deadline of underlying connection should not be changed
+ // since the connection can be shared.
+ return nil
+}
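
A minimal sketch of how this listener is typically consumed (illustrative only; the loopback address and port are placeholders): `Listen` returns a standard `net.Listener`, and each new remote address surfaces as its own `net.Conn` through `Accept`.

```go
package main

import (
	"log"
	"net"

	"github.com/pion/udp"
)

func main() {
	laddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 4444}

	lis, err := udp.Listen("udp", laddr)
	if err != nil {
		log.Fatal(err)
	}
	defer lis.Close()

	for {
		conn, err := lis.Accept() // one Conn per remote address
		if err != nil {
			log.Printf("accept: %v", err)
			return
		}
		go func(c net.Conn) {
			defer c.Close()
			buf := make([]byte, 8192) // matches the listener's receive MTU
			for {
				n, err := c.Read(buf)
				if err != nil {
					return
				}
				// Echo the datagram back to the same remote.
				if _, err := c.Write(buf[:n]); err != nil {
					return
				}
			}
		}(conn)
	}
}
```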
diff --git a/vendor/github.com/pion/udp/pkg/sync/waitgroup.go b/vendor/github.com/pion/udp/pkg/sync/waitgroup.go
new file mode 100644
index 000000000..dcd1c89bb
--- /dev/null
+++ b/vendor/github.com/pion/udp/pkg/sync/waitgroup.go
@@ -0,0 +1,68 @@
+// Package sync extends basic synchronization primitives.
+package sync
+
+import (
+ "sync"
+)
+
+// A WaitGroup waits for a collection of goroutines to finish.
+// The main goroutine calls Add to set the number of
+// goroutines to wait for. Then each of the goroutines
+// runs and calls Done when finished. At the same time,
+// Wait can be used to block until all goroutines have finished.
+//
+// Unlike the standard library's sync.WaitGroup, this WaitGroup allows
+// adding to or subtracting from the counter while another goroutine
+// is waiting.
+//
+// A WaitGroup must not be copied after first use.
+//
+// In the terminology of the Go memory model, a call to Done
+// "synchronizes before" the return of any Wait call that it unblocks.
+type WaitGroup struct {
+ c int64
+ mutex sync.Mutex
+ cond *sync.Cond
+}
+
+// NewWaitGroup creates a new WaitGroup.
+func NewWaitGroup() *WaitGroup {
+ wg := &WaitGroup{}
+ wg.cond = sync.NewCond(&wg.mutex)
+ return wg
+}
+
+// Add adds delta, which may be negative, to the WaitGroup counter.
+// If the counter becomes zero, all goroutines blocked on Wait are released.
+// If the counter goes negative, Add panics.
+func (wg *WaitGroup) Add(delta int) {
+ wg.mutex.Lock()
+ defer wg.mutex.Unlock()
+ wg.c += int64(delta)
+ if wg.c < 0 {
+ panic("udp: negative WaitGroup counter") // nolint
+ }
+ wg.cond.Signal()
+}
+
+// Done decrements the WaitGroup counter by one.
+func (wg *WaitGroup) Done() {
+ wg.Add(-1)
+}
+
+// Wait blocks until the WaitGroup counter is zero.
+func (wg *WaitGroup) Wait() {
+ wg.mutex.Lock()
+ defer wg.mutex.Unlock()
+ for {
+ c := wg.c
+ switch {
+ case c == 0:
+ // wake another goroutine if there is one
+ wg.cond.Signal()
+ return
+ case c < 0:
+ panic("udp: negative WaitGroup counter") // nolint
+ }
+ wg.cond.Wait()
+ }
+}
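
A short sketch, assuming only the API above, of what this WaitGroup permits that `sync.WaitGroup` does not: calling `Add` while another goroutine is already blocked in `Wait`, which is how the listener keeps accounting for connections during shutdown.

```go
package main

import (
	"fmt"
	"time"

	pkgSync "github.com/pion/udp/pkg/sync"
)

func main() {
	wg := pkgSync.NewWaitGroup()
	wg.Add(1) // initial "listener" reference

	done := make(chan struct{})
	go func() {
		wg.Wait() // blocks until the counter reaches zero
		close(done)
	}()

	// Add more work while Wait is already in progress.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			time.Sleep(10 * time.Millisecond)
			fmt.Printf("conn %d closed\n", id)
			wg.Done()
		}(i)
	}

	wg.Done() // release the initial reference
	<-done
	fmt.Println("all connections drained")
}
```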
diff --git a/vendor/github.com/pion/udp/renovate.json b/vendor/github.com/pion/udp/renovate.json
new file mode 100644
index 000000000..f1bb98c6a
--- /dev/null
+++ b/vendor/github.com/pion/udp/renovate.json
@@ -0,0 +1,6 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "github>pion/renovate-config"
+ ]
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 8a4f49a4c..519db348a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -844,9 +844,7 @@ func (h *histogram) Write(out *dto.Metric) error {
}}
}
- // If exemplars are not configured, the cap will be 0.
- // So append is not needed in this case.
- if cap(h.nativeExemplars.exemplars) > 0 {
+ if h.nativeExemplars.isEnabled() {
h.nativeExemplars.Lock()
his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
h.nativeExemplars.Unlock()
@@ -1665,6 +1663,10 @@ type nativeExemplars struct {
exemplars []*dto.Exemplar
}
+func (n *nativeExemplars) isEnabled() bool {
+ return n.ttl != -1
+}
+
func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
if ttl == 0 {
ttl = 5 * time.Minute
@@ -1686,7 +1688,7 @@ func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
}
func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
- if n.ttl == -1 {
+ if !n.isEnabled() {
return
}
diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go
new file mode 100644
index 000000000..197d95e5c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/version/info.go
@@ -0,0 +1,125 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "text/template"
+)
+
+// Build information. Populated at build-time.
+var (
+ Version string
+ Revision string
+ Branch string
+ BuildUser string
+ BuildDate string
+ GoVersion = runtime.Version()
+ GoOS = runtime.GOOS
+ GoArch = runtime.GOARCH
+
+ computedRevision string
+ computedTags string
+)
+
+// versionInfoTmpl contains the template used by Print.
+var versionInfoTmpl = `
+{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
+ build user: {{.buildUser}}
+ build date: {{.buildDate}}
+ go version: {{.goVersion}}
+ platform: {{.platform}}
+ tags: {{.tags}}
+`
+
+// Print returns version information.
+func Print(program string) string {
+ m := map[string]string{
+ "program": program,
+ "version": Version,
+ "revision": GetRevision(),
+ "branch": Branch,
+ "buildUser": BuildUser,
+ "buildDate": BuildDate,
+ "goVersion": GoVersion,
+ "platform": GoOS + "/" + GoArch,
+ "tags": GetTags(),
+ }
+ t := template.Must(template.New("version").Parse(versionInfoTmpl))
+
+ var buf bytes.Buffer
+ if err := t.ExecuteTemplate(&buf, "version", m); err != nil {
+ panic(err)
+ }
+ return strings.TrimSpace(buf.String())
+}
+
+// Info returns version, branch and revision information.
+func Info() string {
+ return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, GetRevision())
+}
+
+// BuildContext returns goVersion, platform, buildUser and buildDate information.
+func BuildContext() string {
+ return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, GetTags())
+}
+
+func GetRevision() string {
+ if Revision != "" {
+ return Revision
+ }
+ return computedRevision
+}
+
+func GetTags() string {
+ return computedTags
+}
+
+func init() {
+ computedRevision, computedTags = computeRevision()
+}
+
+func computeRevision() (string, string) {
+ var (
+ rev = "unknown"
+ tags = "unknown"
+ modified bool
+ )
+
+ buildInfo, ok := debug.ReadBuildInfo()
+ if !ok {
+ return rev, tags
+ }
+ for _, v := range buildInfo.Settings {
+ if v.Key == "vcs.revision" {
+ rev = v.Value
+ }
+ if v.Key == "vcs.modified" {
+ if v.Value == "true" {
+ modified = true
+ }
+ }
+ if v.Key == "-tags" {
+ tags = v.Value
+ }
+ }
+ if modified {
+ return rev + "-modified", tags
+ }
+ return rev, tags
+}
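
A hedged usage sketch of these helpers. The program name and the version values set below are placeholders; in a real build `Version`, `Revision`, `Branch`, and friends are injected with `-ldflags "-X ..."`, and the VCS fallback in `computeRevision` applies otherwise.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	// In a real build these are set via, for example:
	//   go build -ldflags "-X github.com/prometheus/common/version.Version=1.2.3"
	version.Version = "1.2.3"
	version.Branch = "main"

	fmt.Println(version.Print("my-program")) // full multi-line report
	fmt.Println(version.Info())              // "(version=..., branch=..., revision=...)"
	fmt.Println(version.BuildContext())      // go version, platform, user, date, tags
}
```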
diff --git a/vendor/github.com/prometheus/prometheus/LICENSE b/vendor/github.com/prometheus/prometheus/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/prometheus/NOTICE b/vendor/github.com/prometheus/prometheus/NOTICE
new file mode 100644
index 000000000..5e4f50989
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/NOTICE
@@ -0,0 +1,98 @@
+The Prometheus systems and service monitoring server
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (https://soundcloud.com/).
+
+
+The following components are included in this product:
+
+Bootstrap
+https://getbootstrap.com
+Copyright 2011-2014 Twitter, Inc.
+Licensed under the MIT License
+
+bootstrap3-typeahead.js
+https://github.com/bassjobsen/Bootstrap-3-Typeahead
+Original written by @mdo and @fat
+Copyright 2014 Bass Jobsen @bassjobsen
+Licensed under the Apache License, Version 2.0
+
+fuzzy
+https://github.com/mattyork/fuzzy
+Original written by @mattyork
+Copyright 2012 Matt York
+Licensed under the MIT License
+
+bootstrap-datetimepicker.js
+https://github.com/Eonasdan/bootstrap-datetimepicker
+Copyright 2015 Jonathan Peterson (@Eonasdan)
+Licensed under the MIT License
+
+moment.js
+https://github.com/moment/moment/
+Copyright JS Foundation and other contributors
+Licensed under the MIT License
+
+Rickshaw
+https://github.com/shutterstock/rickshaw
+Copyright 2011-2014 by Shutterstock Images, LLC
+See https://github.com/shutterstock/rickshaw/blob/master/LICENSE for license details
+
+mustache.js
+https://github.com/janl/mustache.js
+Copyright 2009 Chris Wanstrath (Ruby)
+Copyright 2010-2014 Jan Lehnardt (JavaScript)
+Copyright 2010-2015 The mustache.js community
+Licensed under the MIT License
+
+jQuery
+https://jquery.org
+Copyright jQuery Foundation and other contributors
+Licensed under the MIT License
+
+Protocol Buffers for Go with Gadgets
+https://github.com/gogo/protobuf/
+Copyright (c) 2013, The GoGo Authors.
+See source code for license details.
+
+Go support for leveled logs, analogous to
+https://code.google.com/p/google-glog/
+Copyright 2013 Google Inc.
+Licensed under the Apache License, Version 2.0
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
+
+DNS library in Go
+https://miek.nl/2014/august/16/go-dns-package/
+Copyright 2009 The Go Authors, 2011 Miek Gieben
+See https://github.com/miekg/dns/blob/master/LICENSE for license details.
+
+LevelDB key/value database in Go
+https://github.com/syndtr/goleveldb
+Copyright 2012 Suryandaru Triandana
+See https://github.com/syndtr/goleveldb/blob/master/LICENSE for license details.
+
+gosnappy - a fork of code.google.com/p/snappy-go
+https://github.com/syndtr/gosnappy
+Copyright 2011 The Snappy-Go Authors
+See https://github.com/syndtr/gosnappy/blob/master/LICENSE for license details.
+
+go-zookeeper - Native ZooKeeper client for Go
+https://github.com/samuel/go-zookeeper
+Copyright (c) 2013, Samuel Stauffer
+See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license details.
+
+Time series compression algorithm from Facebook's Gorilla paper
+https://github.com/dgryski/go-tsz
+Copyright (c) 2015,2016 Damian Gryski
+See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details.
+
+We also use code from a large number of npm packages. For details, see:
+- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json
+- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json
+- The individual package licenses as copied from the node_modules directory can be found in
+ the npm_licenses.tar.bz2 archive in release tarballs and Docker images.
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go b/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go
new file mode 100644
index 000000000..5c11cc2ee
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go
@@ -0,0 +1,474 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+ "bytes"
+ "encoding/json"
+ "sort"
+ "strconv"
+
+ "github.com/cespare/xxhash/v2"
+)
+
+// Well-known label names used by Prometheus components.
+const (
+ MetricName = "__name__"
+ AlertName = "alertname"
+ BucketLabel = "le"
+ InstanceName = "instance"
+
+ labelSep = '\xfe'
+)
+
+var seps = []byte{'\xff'}
+
+// Label is a key/value pair of strings.
+type Label struct {
+ Name, Value string
+}
+
+// Labels is a sorted set of labels. Order has to be guaranteed upon
+// instantiation.
+type Labels []Label
+
+func (ls Labels) Len() int { return len(ls) }
+func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
+func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
+
+func (ls Labels) String() string {
+ var b bytes.Buffer
+
+ b.WriteByte('{')
+ for i, l := range ls {
+ if i > 0 {
+ b.WriteByte(',')
+ b.WriteByte(' ')
+ }
+ b.WriteString(l.Name)
+ b.WriteByte('=')
+ b.WriteString(strconv.Quote(l.Value))
+ }
+ b.WriteByte('}')
+ return b.String()
+}
+
+// Bytes returns ls as a byte slice.
+// It uses an invalid byte character as a separator and so should not be used for printing.
+func (ls Labels) Bytes(buf []byte) []byte {
+ b := bytes.NewBuffer(buf[:0])
+ b.WriteByte(labelSep)
+ for i, l := range ls {
+ if i > 0 {
+ b.WriteByte(seps[0])
+ }
+ b.WriteString(l.Name)
+ b.WriteByte(seps[0])
+ b.WriteString(l.Value)
+ }
+ return b.Bytes()
+}
+
+// MarshalJSON implements json.Marshaler.
+func (ls Labels) MarshalJSON() ([]byte, error) {
+ return json.Marshal(ls.Map())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (ls *Labels) UnmarshalJSON(b []byte) error {
+ var m map[string]string
+
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+
+ *ls = FromMap(m)
+ return nil
+}
+
+// MarshalYAML implements yaml.Marshaler.
+func (ls Labels) MarshalYAML() (interface{}, error) {
+ return ls.Map(), nil
+}
+
+// UnmarshalYAML implements yaml.Unmarshaler.
+func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var m map[string]string
+
+ if err := unmarshal(&m); err != nil {
+ return err
+ }
+
+ *ls = FromMap(m)
+ return nil
+}
+
+// MatchLabels returns a subset of Labels, selected by the provided label names and the 'on' boolean.
+// If 'on' is true, it returns the labels whose names are in the provided list;
+// if 'on' is false, it returns the labels whose names are not in the list, always excluding the metric name.
+func (ls Labels) MatchLabels(on bool, names ...string) Labels {
+ matchedLabels := Labels{}
+
+ nameSet := map[string]struct{}{}
+ for _, n := range names {
+ nameSet[n] = struct{}{}
+ }
+
+ for _, v := range ls {
+ if _, ok := nameSet[v.Name]; on == ok && (on || v.Name != MetricName) {
+ matchedLabels = append(matchedLabels, v)
+ }
+ }
+
+ return matchedLabels
+}
+
+// Hash returns a hash value for the label set.
+func (ls Labels) Hash() uint64 {
+ // Use xxhash.Sum64(b) for fast path as it's faster.
+ b := make([]byte, 0, 1024)
+ for i, v := range ls {
+ if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
+			// If the labels entry is 1KB+, do not allocate the whole entry.
+ h := xxhash.New()
+ _, _ = h.Write(b)
+ for _, v := range ls[i:] {
+ _, _ = h.WriteString(v.Name)
+ _, _ = h.Write(seps)
+ _, _ = h.WriteString(v.Value)
+ _, _ = h.Write(seps)
+ }
+ return h.Sum64()
+ }
+
+ b = append(b, v.Name...)
+ b = append(b, seps[0])
+ b = append(b, v.Value...)
+ b = append(b, seps[0])
+ }
+ return xxhash.Sum64(b)
+}
+
+// HashForLabels returns a hash value for the labels matching the provided names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
+ b = b[:0]
+ i, j := 0, 0
+ for i < len(ls) && j < len(names) {
+ if names[j] < ls[i].Name {
+ j++
+ } else if ls[i].Name < names[j] {
+ i++
+ } else {
+ b = append(b, ls[i].Name...)
+ b = append(b, seps[0])
+ b = append(b, ls[i].Value...)
+ b = append(b, seps[0])
+ i++
+ j++
+ }
+ }
+ return xxhash.Sum64(b), b
+}
+
+// HashWithoutLabels returns a hash value for all labels except those matching
+// the provided names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
+ b = b[:0]
+ j := 0
+ for i := range ls {
+ for j < len(names) && names[j] < ls[i].Name {
+ j++
+ }
+ if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) {
+ continue
+ }
+ b = append(b, ls[i].Name...)
+ b = append(b, seps[0])
+ b = append(b, ls[i].Value...)
+ b = append(b, seps[0])
+ }
+ return xxhash.Sum64(b), b
+}
+
+// WithLabels returns a new labels.Labels from ls that only contains labels matching names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) WithLabels(names ...string) Labels {
+ ret := make([]Label, 0, len(ls))
+
+ i, j := 0, 0
+ for i < len(ls) && j < len(names) {
+ if names[j] < ls[i].Name {
+ j++
+ } else if ls[i].Name < names[j] {
+ i++
+ } else {
+ ret = append(ret, ls[i])
+ i++
+ j++
+ }
+ }
+ return ret
+}
+
+// WithoutLabels returns a new labels.Labels from ls that contains labels not matching names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) WithoutLabels(names ...string) Labels {
+ ret := make([]Label, 0, len(ls))
+
+ j := 0
+ for i := range ls {
+ for j < len(names) && names[j] < ls[i].Name {
+ j++
+ }
+ if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) {
+ continue
+ }
+ ret = append(ret, ls[i])
+ }
+ return ret
+}
+
+// Copy returns a copy of the labels.
+func (ls Labels) Copy() Labels {
+ res := make(Labels, len(ls))
+ copy(res, ls)
+ return res
+}
+
+// Get returns the value for the label with the given name.
+// Returns an empty string if the label doesn't exist.
+func (ls Labels) Get(name string) string {
+ for _, l := range ls {
+ if l.Name == name {
+ return l.Value
+ }
+ }
+ return ""
+}
+
+// Has returns true if the label with the given name is present.
+func (ls Labels) Has(name string) bool {
+ for _, l := range ls {
+ if l.Name == name {
+ return true
+ }
+ }
+ return false
+}
+
+// HasDuplicateLabelNames returns whether ls has duplicate label names.
+// It assumes that the labelset is sorted.
+func (ls Labels) HasDuplicateLabelNames() (string, bool) {
+ for i, l := range ls {
+ if i == 0 {
+ continue
+ }
+ if l.Name == ls[i-1].Name {
+ return l.Name, true
+ }
+ }
+ return "", false
+}
+
+// WithoutEmpty returns the labelset without empty labels.
+// May return the same labelset.
+func (ls Labels) WithoutEmpty() Labels {
+ for _, v := range ls {
+ if v.Value != "" {
+ continue
+ }
+ // Do not copy the slice until it's necessary.
+ els := make(Labels, 0, len(ls)-1)
+ for _, v := range ls {
+ if v.Value != "" {
+ els = append(els, v)
+ }
+ }
+ return els
+ }
+ return ls
+}
+
+// Equal returns whether the two label sets are equal.
+func Equal(ls, o Labels) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for i, l := range ls {
+ if l.Name != o[i].Name || l.Value != o[i].Value {
+ return false
+ }
+ }
+ return true
+}
+
+// Map returns a string map of the labels.
+func (ls Labels) Map() map[string]string {
+ m := make(map[string]string, len(ls))
+ for _, l := range ls {
+ m[l.Name] = l.Value
+ }
+ return m
+}
+
+// New returns a sorted Labels from the given labels.
+// The caller has to guarantee that all label names are unique.
+func New(ls ...Label) Labels {
+ set := make(Labels, 0, len(ls))
+ for _, l := range ls {
+ set = append(set, l)
+ }
+ sort.Sort(set)
+
+ return set
+}
+
+// FromMap returns new sorted Labels from the given map.
+func FromMap(m map[string]string) Labels {
+ l := make([]Label, 0, len(m))
+ for k, v := range m {
+ l = append(l, Label{Name: k, Value: v})
+ }
+ return New(l...)
+}
+
+// FromStrings creates new labels from pairs of strings.
+func FromStrings(ss ...string) Labels {
+ if len(ss)%2 != 0 {
+ panic("invalid number of strings")
+ }
+ var res Labels
+ for i := 0; i < len(ss); i += 2 {
+ res = append(res, Label{Name: ss[i], Value: ss[i+1]})
+ }
+
+ sort.Sort(res)
+ return res
+}
+
+// Compare compares the two label sets.
+// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
+func Compare(a, b Labels) int {
+ l := len(a)
+ if len(b) < l {
+ l = len(b)
+ }
+
+ for i := 0; i < l; i++ {
+ if a[i].Name != b[i].Name {
+ if a[i].Name < b[i].Name {
+ return -1
+ }
+ return 1
+ }
+ if a[i].Value != b[i].Value {
+ if a[i].Value < b[i].Value {
+ return -1
+ }
+ return 1
+ }
+ }
+ // If all labels so far were in common, the set with fewer labels comes first.
+ return len(a) - len(b)
+}
+
+// Builder allows modifying Labels.
+type Builder struct {
+ base Labels
+ del []string
+ add []Label
+}
+
+// NewBuilder returns a new Builder for the given base labels.
+func NewBuilder(base Labels) *Builder {
+ b := &Builder{
+ del: make([]string, 0, 5),
+ add: make([]Label, 0, 5),
+ }
+ b.Reset(base)
+ return b
+}
+
+// Reset clears all current state for the builder.
+func (b *Builder) Reset(base Labels) {
+ b.base = base
+ b.del = b.del[:0]
+ b.add = b.add[:0]
+ for _, l := range b.base {
+ if l.Value == "" {
+ b.del = append(b.del, l.Name)
+ }
+ }
+}
+
+// Del deletes the label of the given name.
+func (b *Builder) Del(ns ...string) *Builder {
+ for _, n := range ns {
+ for i, a := range b.add {
+ if a.Name == n {
+ b.add = append(b.add[:i], b.add[i+1:]...)
+ }
+ }
+ b.del = append(b.del, n)
+ }
+ return b
+}
+
+// Set the name/value pair as a label.
+func (b *Builder) Set(n, v string) *Builder {
+ if v == "" {
+ // Empty labels are the same as missing labels.
+ return b.Del(n)
+ }
+ for i, a := range b.add {
+ if a.Name == n {
+ b.add[i].Value = v
+ return b
+ }
+ }
+ b.add = append(b.add, Label{Name: n, Value: v})
+
+ return b
+}
+
+// Labels returns the labels from the builder. If no modifications
+// were made, the original labels are returned.
+func (b *Builder) Labels() Labels {
+ if len(b.del) == 0 && len(b.add) == 0 {
+ return b.base
+ }
+
+ // In the general case, labels are removed, modified or moved
+ // rather than added.
+ res := make(Labels, 0, len(b.base))
+Outer:
+ for _, l := range b.base {
+ for _, n := range b.del {
+ if l.Name == n {
+ continue Outer
+ }
+ }
+ for _, la := range b.add {
+ if l.Name == la.Name {
+ continue Outer
+ }
+ }
+ res = append(res, l)
+ }
+ res = append(res, b.add...)
+ sort.Sort(res)
+
+ return res
+}
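
A brief sketch of the labels API above (the metric name and label values are illustrative): construct a sorted set, query it, and derive a modified copy through `Builder` without mutating the original.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

func main() {
	ls := labels.FromStrings(
		"__name__", "http_requests_total",
		"job", "api",
		"instance", "10.0.0.1:8080",
	)

	fmt.Println(ls)                // sorted: {__name__=..., instance=..., job=...}
	fmt.Println(ls.Get("job"))     // "api"
	fmt.Println(ls.Has("missing")) // false

	// Builder: drop the metric name and add a label without mutating ls.
	rewritten := labels.NewBuilder(ls).
		Del(labels.MetricName).
		Set("namespace", "netobserv").
		Labels()

	fmt.Println(labels.Compare(ls, rewritten) != 0) // true: the sets differ
}
```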
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go b/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go
new file mode 100644
index 000000000..88d463233
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go
@@ -0,0 +1,119 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+ "fmt"
+)
+
+// MatchType is an enum for label matching types.
+type MatchType int
+
+// Possible MatchTypes.
+const (
+ MatchEqual MatchType = iota
+ MatchNotEqual
+ MatchRegexp
+ MatchNotRegexp
+)
+
+func (m MatchType) String() string {
+ typeToStr := map[MatchType]string{
+ MatchEqual: "=",
+ MatchNotEqual: "!=",
+ MatchRegexp: "=~",
+ MatchNotRegexp: "!~",
+ }
+ if str, ok := typeToStr[m]; ok {
+ return str
+ }
+ panic("unknown match type")
+}
+
+// Matcher models the matching of a label.
+type Matcher struct {
+ Type MatchType
+ Name string
+ Value string
+
+ re *FastRegexMatcher
+}
+
+// NewMatcher returns a matcher object.
+func NewMatcher(t MatchType, n, v string) (*Matcher, error) {
+ m := &Matcher{
+ Type: t,
+ Name: n,
+ Value: v,
+ }
+ if t == MatchRegexp || t == MatchNotRegexp {
+ re, err := NewFastRegexMatcher(v)
+ if err != nil {
+ return nil, err
+ }
+ m.re = re
+ }
+ return m, nil
+}
+
+// MustNewMatcher panics on error - only for use in tests!
+func MustNewMatcher(mt MatchType, name, val string) *Matcher {
+ m, err := NewMatcher(mt, name, val)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+func (m *Matcher) String() string {
+ return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value)
+}
+
+// Matches returns whether the matcher matches the given string value.
+func (m *Matcher) Matches(s string) bool {
+ switch m.Type {
+ case MatchEqual:
+ return s == m.Value
+ case MatchNotEqual:
+ return s != m.Value
+ case MatchRegexp:
+ return m.re.MatchString(s)
+ case MatchNotRegexp:
+ return !m.re.MatchString(s)
+ }
+ panic("labels.Matcher.Matches: invalid match type")
+}
+
+// Inverse returns a matcher that matches the opposite.
+func (m *Matcher) Inverse() (*Matcher, error) {
+ switch m.Type {
+ case MatchEqual:
+ return NewMatcher(MatchNotEqual, m.Name, m.Value)
+ case MatchNotEqual:
+ return NewMatcher(MatchEqual, m.Name, m.Value)
+ case MatchRegexp:
+ return NewMatcher(MatchNotRegexp, m.Name, m.Value)
+ case MatchNotRegexp:
+ return NewMatcher(MatchRegexp, m.Name, m.Value)
+ }
+ panic("labels.Matcher.Matches: invalid match type")
+}
+
+// GetRegexString returns the regex string.
+func (m *Matcher) GetRegexString() string {
+ if m.re == nil {
+ return ""
+ }
+ return m.re.GetRegexString()
+}
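
A small sketch of `Matcher` in use, assuming nothing beyond the file above: equality and regexp matchers against label values, plus `Inverse` to flip a matcher.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

func main() {
	eq, err := labels.NewMatcher(labels.MatchEqual, "job", "api")
	if err != nil {
		panic(err)
	}
	fmt.Println(eq.Matches("api"), eq.Matches("web")) // true false

	re, err := labels.NewMatcher(labels.MatchRegexp, "instance", "10\\.0\\..*")
	if err != nil {
		panic(err)
	}
	fmt.Println(re.Matches("10.0.0.1:8080")) // true (regexp matchers are anchored)

	// Inverse flips the match type: =~ becomes !~.
	inv, err := re.Inverse()
	if err != nil {
		panic(err)
	}
	fmt.Println(inv.Matches("10.0.0.1:8080")) // false
}
```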
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go b/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go
new file mode 100644
index 000000000..eb2b07995
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go
@@ -0,0 +1,107 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+ "regexp"
+ "regexp/syntax"
+ "strings"
+)
+
+type FastRegexMatcher struct {
+ re *regexp.Regexp
+ prefix string
+ suffix string
+ contains string
+}
+
+func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
+ re, err := regexp.Compile("^(?:" + v + ")$")
+ if err != nil {
+ return nil, err
+ }
+
+ parsed, err := syntax.Parse(v, syntax.Perl)
+ if err != nil {
+ return nil, err
+ }
+
+ m := &FastRegexMatcher{
+ re: re,
+ }
+
+ if parsed.Op == syntax.OpConcat {
+ m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
+ }
+
+ return m, nil
+}
+
+func (m *FastRegexMatcher) MatchString(s string) bool {
+ if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
+ return false
+ }
+ if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
+ return false
+ }
+ if m.contains != "" && !strings.Contains(s, m.contains) {
+ return false
+ }
+ return m.re.MatchString(s)
+}
+
+func (m *FastRegexMatcher) GetRegexString() string {
+ return m.re.String()
+}
+
+// optimizeConcatRegex returns literal prefix, suffix, and contained text that can be
+// safely checked against the label value before running the regexp matcher.
+func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
+ sub := r.Sub
+
+ // We can safely remove begin and end text matchers respectively
+ // at the beginning and end of the regexp.
+ if len(sub) > 0 && sub[0].Op == syntax.OpBeginText {
+ sub = sub[1:]
+ }
+ if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText {
+ sub = sub[:len(sub)-1]
+ }
+
+ if len(sub) == 0 {
+ return
+ }
+
+ // Given Prometheus regex matchers are always anchored to the begin/end
+ // of the text, if the first/last operations are literals, we can safely
+ // treat them as prefix/suffix.
+ if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 {
+ prefix = string(sub[0].Rune)
+ }
+ if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 {
+ suffix = string(sub[last].Rune)
+ }
+
+ // If the regexp contains any literal which is not a prefix/suffix, we keep the
+ // first one. We do not keep the whole list of literals to simplify the
+ // fast path.
+ for i := 1; i < len(sub)-1; i++ {
+ if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
+ contains = string(sub[i].Rune)
+ break
+ }
+ }
+
+ return
+}
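
A short sketch of the fast path implemented above: for an anchored concatenation with literal parts, candidate values are rejected with plain string checks before the compiled regexp runs. This is not part of the patch and only uses the two exported functions from this file.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

func main() {
	// "foo-.*-bar" yields prefix "foo-" and suffix "-bar", so values that
	// fail either cheap string check never reach the regexp engine.
	m, err := labels.NewFastRegexMatcher("foo-.*-bar")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.MatchString("foo-anything-bar")) // true
	fmt.Println(m.MatchString("baz"))              // false, fails the prefix check
	fmt.Println(m.GetRegexString())                // ^(?:foo-.*-bar)$
}
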
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go
new file mode 100644
index 000000000..319ee6184
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go
@@ -0,0 +1,87 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+ "bufio"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// Slice is a sortable slice of label sets.
+type Slice []Labels
+
+func (s Slice) Len() int { return len(s) }
+func (s Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
+
+// Selector holds constraints for matching against a label set.
+type Selector []*Matcher
+
+// Matches returns whether the labels satisfy all matchers.
+func (s Selector) Matches(labels Labels) bool {
+ for _, m := range s {
+ if v := labels.Get(m.Name); !m.Matches(v) {
+ return false
+ }
+ }
+ return true
+}
+
+// ReadLabels reads up to n label sets in a JSON formatted file fn. It is mostly useful
+// to load testing data.
+func ReadLabels(fn string, n int) ([]Labels, error) {
+ f, err := os.Open(fn)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+
+ var mets []Labels
+ hashes := map[uint64]struct{}{}
+ i := 0
+
+ for scanner.Scan() && i < n {
+ m := make(Labels, 0, 10)
+
+ r := strings.NewReplacer("\"", "", "{", "", "}", "")
+ s := r.Replace(scanner.Text())
+
+ labelChunks := strings.Split(s, ",")
+ for _, labelChunk := range labelChunks {
+ split := strings.Split(labelChunk, ":")
+ m = append(m, Label{Name: split[0], Value: split[1]})
+ }
+ // Order of the k/v labels matters, don't assume we'll always receive them already sorted.
+ sort.Sort(m)
+
+ h := m.Hash()
+ if _, ok := hashes[h]; ok {
+ continue
+ }
+ mets = append(mets, m)
+ hashes[h] = struct{}{}
+ i++
+ }
+
+ if i != n {
+ return mets, errors.Errorf("requested %d metrics but found %d", n, i)
+ }
+ return mets, nil
+}
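
A hedged sketch of the Selector helper: it matches only if every matcher accepts its label's value. The labels.FromStrings constructor is assumed from upstream and is not part of this hunk.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

func main() {
	lset := labels.FromStrings("instance", "flp-0", "job", "netobserv")

	eq, _ := labels.NewMatcher(labels.MatchEqual, "job", "netobserv")
	re, _ := labels.NewMatcher(labels.MatchRegexp, "instance", "flp-.*")

	// Selector.Matches is a logical AND over all matchers.
	sel := labels.Selector{eq, re}
	fmt.Println(sel.Matches(lset)) // true
}
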
diff --git a/vendor/github.com/prometheus/prometheus/pkg/value/value.go b/vendor/github.com/prometheus/prometheus/pkg/value/value.go
new file mode 100644
index 000000000..655ce852d
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/pkg/value/value.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package value
+
+import (
+ "math"
+)
+
+const (
+ // NormalNaN is a quiet NaN. This is also math.NaN().
+ NormalNaN uint64 = 0x7ff8000000000001
+
+ // StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0.
+ // This value is chosen with many leading 0s, so we have scope to store more
+ // complicated values in the future. It is 2 rather than 1 to make
+ // it easier to distinguish from the NormalNaN by a human when debugging.
+ StaleNaN uint64 = 0x7ff0000000000002
+)
+
+// IsStaleNaN returns true when the provided NaN value is a stale marker.
+func IsStaleNaN(v float64) bool {
+ return math.Float64bits(v) == StaleNaN
+}
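
A minimal sketch of how the stale marker is told apart from an ordinary NaN; only the exported identifiers from this file are used.

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/pkg/value"
)

func main() {
	stale := math.Float64frombits(value.StaleNaN)
	fmt.Println(value.IsStaleNaN(stale))      // true: exact stale-marker bit pattern
	fmt.Println(value.IsStaleNaN(math.NaN())) // false: a regular quiet NaN
}
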
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
new file mode 100644
index 000000000..de82d6725
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
@@ -0,0 +1,434 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "context"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/storage"
+)
+
+// Node is a generic interface for all nodes in an AST.
+//
+// Whenever numerous nodes are listed such as in a switch-case statement
+// or a chain of function definitions (e.g. String(), PromQLExpr(), etc.), the convention is
+// to list them as follows:
+//
+// - Statements
+// - statement types (alphabetical)
+// - ...
+// - Expressions
+// - expression types (alphabetical)
+// - ...
+//
+type Node interface {
+ // String representation of the node that returns the given node when parsed
+ // as part of a valid query.
+ String() string
+
+ // PositionRange returns the position of the AST Node in the query string.
+ PositionRange() PositionRange
+}
+
+// Statement is a generic interface for all statements.
+type Statement interface {
+ Node
+
+ // PromQLStmt ensures that no other type accidentally implements the interface
+ // nolint:unused
+ PromQLStmt()
+}
+
+// EvalStmt holds an expression and information on the range it should
+// be evaluated on.
+type EvalStmt struct {
+ Expr Expr // Expression to be evaluated.
+
+ // The time boundaries for the evaluation. If Start equals End an instant
+ // is evaluated.
+ Start, End time.Time
+ // Time between two evaluated instants for the range [Start:End].
+ Interval time.Duration
+}
+
+func (*EvalStmt) PromQLStmt() {}
+
+// Expr is a generic interface for all expression types.
+type Expr interface {
+ Node
+
+ // Type returns the type the expression evaluates to. It does not perform
+ // in-depth checks as this is done at parsing-time.
+ Type() ValueType
+ // PromQLExpr ensures that no other types accidentally implement the interface.
+ PromQLExpr()
+}
+
+// Expressions is a list of expression nodes that implements Node.
+type Expressions []Expr
+
+// AggregateExpr represents an aggregation operation on a Vector.
+type AggregateExpr struct {
+ Op ItemType // The used aggregation operation.
+ Expr Expr // The Vector expression over which is aggregated.
+ Param Expr // Parameter used by some aggregators.
+ Grouping []string // The labels by which to group the Vector.
+ Without bool // Whether to drop the given labels rather than keep them.
+ PosRange PositionRange
+}
+
+// BinaryExpr represents a binary expression between two child expressions.
+type BinaryExpr struct {
+ Op ItemType // The operation of the expression.
+ LHS, RHS Expr // The operands on the respective sides of the operator.
+
+ // The matching behavior for the operation if both operands are Vectors.
+ // If they are not this field is nil.
+ VectorMatching *VectorMatching
+
+ // If a comparison operator, return 0/1 rather than filtering.
+ ReturnBool bool
+}
+
+// Call represents a function call.
+type Call struct {
+ Func *Function // The function that was called.
+ Args Expressions // Arguments used in the call.
+
+ PosRange PositionRange
+}
+
+// MatrixSelector represents a Matrix selection.
+type MatrixSelector struct {
+ // It is safe to assume that this is a VectorSelector
+ // if the parser hasn't returned an error.
+ VectorSelector Expr
+ Range time.Duration
+
+ EndPos Pos
+}
+
+// SubqueryExpr represents a subquery.
+type SubqueryExpr struct {
+ Expr Expr
+ Range time.Duration
+ Offset time.Duration
+ Step time.Duration
+
+ EndPos Pos
+}
+
+// NumberLiteral represents a number.
+type NumberLiteral struct {
+ Val float64
+
+ PosRange PositionRange
+}
+
+// ParenExpr wraps an expression so it cannot be disassembled as a consequence
+// of operator precedence.
+type ParenExpr struct {
+ Expr Expr
+ PosRange PositionRange
+}
+
+// StringLiteral represents a string.
+type StringLiteral struct {
+ Val string
+ PosRange PositionRange
+}
+
+// UnaryExpr represents a unary operation on another expression.
+// Currently unary operations are only supported for Scalars.
+type UnaryExpr struct {
+ Op ItemType
+ Expr Expr
+
+ StartPos Pos
+}
+
+// VectorSelector represents a Vector selection.
+type VectorSelector struct {
+ Name string
+ Offset time.Duration
+ LabelMatchers []*labels.Matcher
+
+ // The unexpanded seriesSet populated at query preparation time.
+ UnexpandedSeriesSet storage.SeriesSet
+ Series []storage.Series
+
+ PosRange PositionRange
+}
+
+// TestStmt is an internal helper statement that allows execution
+// of an arbitrary function during handling. It is used to test the Engine.
+type TestStmt func(context.Context) error
+
+func (TestStmt) String() string { return "test statement" }
+func (TestStmt) PromQLStmt() {}
+
+func (TestStmt) PositionRange() PositionRange {
+ return PositionRange{
+ Start: -1,
+ End: -1,
+ }
+}
+func (e *AggregateExpr) Type() ValueType { return ValueTypeVector }
+func (e *Call) Type() ValueType { return e.Func.ReturnType }
+func (e *MatrixSelector) Type() ValueType { return ValueTypeMatrix }
+func (e *SubqueryExpr) Type() ValueType { return ValueTypeMatrix }
+func (e *NumberLiteral) Type() ValueType { return ValueTypeScalar }
+func (e *ParenExpr) Type() ValueType { return e.Expr.Type() }
+func (e *StringLiteral) Type() ValueType { return ValueTypeString }
+func (e *UnaryExpr) Type() ValueType { return e.Expr.Type() }
+func (e *VectorSelector) Type() ValueType { return ValueTypeVector }
+func (e *BinaryExpr) Type() ValueType {
+ if e.LHS.Type() == ValueTypeScalar && e.RHS.Type() == ValueTypeScalar {
+ return ValueTypeScalar
+ }
+ return ValueTypeVector
+}
+
+func (*AggregateExpr) PromQLExpr() {}
+func (*BinaryExpr) PromQLExpr() {}
+func (*Call) PromQLExpr() {}
+func (*MatrixSelector) PromQLExpr() {}
+func (*SubqueryExpr) PromQLExpr() {}
+func (*NumberLiteral) PromQLExpr() {}
+func (*ParenExpr) PromQLExpr() {}
+func (*StringLiteral) PromQLExpr() {}
+func (*UnaryExpr) PromQLExpr() {}
+func (*VectorSelector) PromQLExpr() {}
+
+// VectorMatchCardinality describes the cardinality relationship
+// of two Vectors in a binary operation.
+type VectorMatchCardinality int
+
+const (
+ CardOneToOne VectorMatchCardinality = iota
+ CardManyToOne
+ CardOneToMany
+ CardManyToMany
+)
+
+func (vmc VectorMatchCardinality) String() string {
+ switch vmc {
+ case CardOneToOne:
+ return "one-to-one"
+ case CardManyToOne:
+ return "many-to-one"
+ case CardOneToMany:
+ return "one-to-many"
+ case CardManyToMany:
+ return "many-to-many"
+ }
+ panic("promql.VectorMatchCardinality.String: unknown match cardinality")
+}
+
+// VectorMatching describes how elements from two Vectors in a binary
+// operation are supposed to be matched.
+type VectorMatching struct {
+ // The cardinality of the two Vectors.
+ Card VectorMatchCardinality
+ // MatchingLabels contains the labels which define equality of a pair of
+ // elements from the Vectors.
+ MatchingLabels []string
+ // On includes the given label names from matching,
+ // rather than excluding them.
+ On bool
+ // Include contains additional labels that should be included in
+ // the result from the side with the lower cardinality.
+ Include []string
+}
+
+// Visitor allows visiting a Node and its child nodes. The Visit method is
+// invoked for each node with the path leading to the node provided additionally.
+// If the returned visitor w is not nil and no error is returned, Walk visits each of
+// the children of node with the visitor w, followed by a call of w.Visit(nil, nil).
+type Visitor interface {
+ Visit(node Node, path []Node) (w Visitor, err error)
+}
+
+// Walk traverses an AST in depth-first order: It starts by calling
+// v.Visit(node, path); node must not be nil. If the visitor w returned by
+// v.Visit(node, path) is not nil and the visitor returns no error, Walk is
+// invoked recursively with visitor w for each of the non-nil children of node,
+// followed by a call of w.Visit(nil, nil).
+// As the tree is descended, the path of previous nodes is provided.
+func Walk(v Visitor, node Node, path []Node) error {
+ var err error
+ if v, err = v.Visit(node, path); v == nil || err != nil {
+ return err
+ }
+ path = append(path, node)
+
+ for _, e := range Children(node) {
+ if err := Walk(v, e, path); err != nil {
+ return err
+ }
+ }
+
+ _, err = v.Visit(nil, nil)
+ return err
+}
+
+type inspector func(Node, []Node) error
+
+func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
+ if err := f(node, path); err != nil {
+ return nil, err
+ }
+
+ return f, nil
+}
+
+// Inspect traverses an AST in depth-first order: It starts by calling
+// f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
+// for all the non-nil children of node, recursively.
+func Inspect(node Node, f inspector) {
+ //nolint: errcheck
+ Walk(inspector(f), node, nil)
+}
+
+// Children returns a list of all child nodes of a syntax tree node.
+func Children(node Node) []Node {
+ // For some reason these switches have significantly better performance than interfaces
+ switch n := node.(type) {
+ case *EvalStmt:
+ return []Node{n.Expr}
+ case Expressions:
+ // golang cannot convert slices of interfaces
+ ret := make([]Node, len(n))
+ for i, e := range n {
+ ret[i] = e
+ }
+ return ret
+ case *AggregateExpr:
+ // While this does not look nice, it should avoid unnecessary allocations
+ // caused by slice resizing
+ if n.Expr == nil && n.Param == nil {
+ return nil
+ } else if n.Expr == nil {
+ return []Node{n.Param}
+ } else if n.Param == nil {
+ return []Node{n.Expr}
+ } else {
+ return []Node{n.Expr, n.Param}
+ }
+ case *BinaryExpr:
+ return []Node{n.LHS, n.RHS}
+ case *Call:
+ // golang cannot convert slices of interfaces
+ ret := make([]Node, len(n.Args))
+ for i, e := range n.Args {
+ ret[i] = e
+ }
+ return ret
+ case *SubqueryExpr:
+ return []Node{n.Expr}
+ case *ParenExpr:
+ return []Node{n.Expr}
+ case *UnaryExpr:
+ return []Node{n.Expr}
+ case *MatrixSelector:
+ return []Node{n.VectorSelector}
+ case *NumberLiteral, *StringLiteral, *VectorSelector:
+ // nothing to do
+ return []Node{}
+ default:
+ panic(errors.Errorf("promql.Children: unhandled node type %T", node))
+ }
+}
+
+// PositionRange describes a position in the input string of the parser.
+type PositionRange struct {
+ Start Pos
+ End Pos
+}
+
+// mergeRanges is a helper function to merge the PositionRanges of two Nodes.
+// Note that the arguments must be in the same order as they
+// occur in the input string.
+func mergeRanges(first Node, last Node) PositionRange {
+ return PositionRange{
+ Start: first.PositionRange().Start,
+ End: last.PositionRange().End,
+ }
+}
+
+// Item implements the Node interface.
+// This makes it possible to call mergeRanges on them.
+func (i *Item) PositionRange() PositionRange {
+ return PositionRange{
+ Start: i.Pos,
+ End: i.Pos + Pos(len(i.Val)),
+ }
+}
+
+func (e *AggregateExpr) PositionRange() PositionRange {
+ return e.PosRange
+}
+func (e *BinaryExpr) PositionRange() PositionRange {
+ return mergeRanges(e.LHS, e.RHS)
+}
+func (e *Call) PositionRange() PositionRange {
+ return e.PosRange
+}
+func (e *EvalStmt) PositionRange() PositionRange {
+ return e.Expr.PositionRange()
+}
+func (e Expressions) PositionRange() PositionRange {
+ if len(e) == 0 {
+ // Position undefined.
+ return PositionRange{
+ Start: -1,
+ End: -1,
+ }
+ }
+ return mergeRanges(e[0], e[len(e)-1])
+}
+func (e *MatrixSelector) PositionRange() PositionRange {
+ return PositionRange{
+ Start: e.VectorSelector.PositionRange().Start,
+ End: e.EndPos,
+ }
+}
+func (e *SubqueryExpr) PositionRange() PositionRange {
+ return PositionRange{
+ Start: e.Expr.PositionRange().Start,
+ End: e.EndPos,
+ }
+}
+func (e *NumberLiteral) PositionRange() PositionRange {
+ return e.PosRange
+}
+func (e *ParenExpr) PositionRange() PositionRange {
+ return e.PosRange
+}
+func (e *StringLiteral) PositionRange() PositionRange {
+ return e.PosRange
+}
+func (e *UnaryExpr) PositionRange() PositionRange {
+ return PositionRange{
+ Start: e.StartPos,
+ End: e.Expr.PositionRange().End,
+ }
+}
+func (e *VectorSelector) PositionRange() PositionRange {
+ return e.PosRange
+}
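
A short sketch of walking a parsed query with the Inspect helper defined above. It assumes the package's ParseExpr entry point, which lives in another file of promql/parser and is not part of this hunk.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`sum(rate(http_requests_total{job="api"}[5m])) by (code)`)
	if err != nil {
		panic(err)
	}

	// Inspect visits every node depth-first; the final Visit(nil, nil) call
	// made by Walk shows up here as a nil node, hence the guard.
	parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
		if node != nil {
			fmt.Printf("%T at depth %d\n", node, len(path))
		}
		return nil
	})
}
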
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
new file mode 100644
index 000000000..4516829e5
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
@@ -0,0 +1,277 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+// Function represents a function of the expression language and is
+// used by function nodes.
+type Function struct {
+ Name string
+ ArgTypes []ValueType
+ Variadic int
+ ReturnType ValueType
+}
+
+// Functions is a list of all functions supported by PromQL, including their types.
+var Functions = map[string]*Function{
+ "abs": {
+ Name: "abs",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "absent": {
+ Name: "absent",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "absent_over_time": {
+ Name: "absent_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "avg_over_time": {
+ Name: "avg_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "ceil": {
+ Name: "ceil",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "changes": {
+ Name: "changes",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "clamp_max": {
+ Name: "clamp_max",
+ ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
+ ReturnType: ValueTypeVector,
+ },
+ "clamp_min": {
+ Name: "clamp_min",
+ ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
+ ReturnType: ValueTypeVector,
+ },
+ "count_over_time": {
+ Name: "count_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "days_in_month": {
+ Name: "days_in_month",
+ ArgTypes: []ValueType{ValueTypeVector},
+ Variadic: 1,
+ ReturnType: ValueTypeVector,
+ },
+ "day_of_month": {
+ Name: "day_of_month",
+ ArgTypes: []ValueType{ValueTypeVector},
+ Variadic: 1,
+ ReturnType: ValueTypeVector,
+ },
+ "day_of_week": {
+ Name: "day_of_week",
+ ArgTypes: []ValueType{ValueTypeVector},
+ Variadic: 1,
+ ReturnType: ValueTypeVector,
+ },
+ "delta": {
+ Name: "delta",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "deriv": {
+ Name: "deriv",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "exp": {
+ Name: "exp",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "floor": {
+ Name: "floor",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "histogram_quantile": {
+ Name: "histogram_quantile",
+ ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "holt_winters": {
+ Name: "holt_winters",
+ ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
+ ReturnType: ValueTypeVector,
+ },
+ "hour": {
+ Name: "hour",
+ ArgTypes: []ValueType{ValueTypeVector},
+ Variadic: 1,
+ ReturnType: ValueTypeVector,
+ },
+ "idelta": {
+ Name: "idelta",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "increase": {
+ Name: "increase",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "irate": {
+ Name: "irate",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "label_replace": {
+ Name: "label_replace",
+ ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString, ValueTypeString},
+ ReturnType: ValueTypeVector,
+ },
+ "label_join": {
+ Name: "label_join",
+ ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString},
+ Variadic: -1,
+ ReturnType: ValueTypeVector,
+ },
+ "ln": {
+ Name: "ln",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "log10": {
+ Name: "log10",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "log2": {
+ Name: "log2",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "max_over_time": {
+ Name: "max_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "min_over_time": {
+ Name: "min_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "minute": {
+ Name: "minute",
+ ArgTypes: []ValueType{ValueTypeVector},
+ Variadic: 1,
+ ReturnType: ValueTypeVector,
+ },
+ "month": {
+ Name: "month",
+ ArgTypes: []ValueType{ValueTypeVector},
+ Variadic: 1,
+ ReturnType: ValueTypeVector,
+ },
+ "predict_linear": {
+ Name: "predict_linear",
+ ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar},
+ ReturnType: ValueTypeVector,
+ },
+ "quantile_over_time": {
+ Name: "quantile_over_time",
+ ArgTypes: []ValueType{ValueTypeScalar, ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "rate": {
+ Name: "rate",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "resets": {
+ Name: "resets",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "round": {
+ Name: "round",
+ ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
+ Variadic: 1,
+ ReturnType: ValueTypeVector,
+ },
+ "scalar": {
+ Name: "scalar",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeScalar,
+ },
+ "sort": {
+ Name: "sort",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "sort_desc": {
+ Name: "sort_desc",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "sqrt": {
+ Name: "sqrt",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "stddev_over_time": {
+ Name: "stddev_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "stdvar_over_time": {
+ Name: "stdvar_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "sum_over_time": {
+ Name: "sum_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
+ "time": {
+ Name: "time",
+ ArgTypes: []ValueType{},
+ ReturnType: ValueTypeScalar,
+ },
+ "timestamp": {
+ Name: "timestamp",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "vector": {
+ Name: "vector",
+ ArgTypes: []ValueType{ValueTypeScalar},
+ ReturnType: ValueTypeVector,
+ },
+ "year": {
+ Name: "year",
+ ArgTypes: []ValueType{ValueTypeVector},
+ Variadic: 1,
+ ReturnType: ValueTypeVector,
+ },
+}
+
+// getFunction returns a predefined Function object for the given name.
+func getFunction(name string) (*Function, bool) {
+ function, ok := Functions[name]
+ return function, ok
+}
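
A hedged sketch of consulting the exported Functions table above, for instance to check a call's argument types before evaluation. Since getFunction is unexported, callers outside the package use the map directly.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	f, ok := parser.Functions["rate"]
	if !ok {
		panic("rate should be a known PromQL function")
	}
	// rate takes exactly one range-vector (matrix) argument and returns an instant vector.
	fmt.Println(f.Name, f.ArgTypes, f.Variadic, f.ReturnType)
}
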
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
new file mode 100644
index 000000000..f0bdc320f
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
@@ -0,0 +1,710 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+%{
+package parser
+
+import (
+ "math"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/pkg/value"
+)
+%}
+
+%union {
+ node Node
+ item Item
+ matchers []*labels.Matcher
+ matcher *labels.Matcher
+ label labels.Label
+ labels labels.Labels
+ strings []string
+ series []SequenceValue
+ uint uint64
+ float float64
+ duration time.Duration
+}
+
+
+%token <item>
+EQL
+BLANK
+COLON
+COMMA
+COMMENT
+DURATION
+EOF
+ERROR
+IDENTIFIER
+LEFT_BRACE
+LEFT_BRACKET
+LEFT_PAREN
+METRIC_IDENTIFIER
+NUMBER
+RIGHT_BRACE
+RIGHT_BRACKET
+RIGHT_PAREN
+SEMICOLON
+SPACE
+STRING
+TIMES
+
+// Operators.
+%token operatorsStart
+%token <item>
+ADD
+DIV
+EQLC
+EQL_REGEX
+GTE
+GTR
+LAND
+LOR
+LSS
+LTE
+LUNLESS
+MOD
+MUL
+NEQ
+NEQ_REGEX
+POW
+SUB
+%token operatorsEnd
+
+// Aggregators.
+%token aggregatorsStart
+%token <item>
+AVG
+BOTTOMK
+COUNT
+COUNT_VALUES
+GROUP
+MAX
+MIN
+QUANTILE
+STDDEV
+STDVAR
+SUM
+TOPK
+%token aggregatorsEnd
+
+// Keywords.
+%token keywordsStart
+%token <item>
+BOOL
+BY
+GROUP_LEFT
+GROUP_RIGHT
+IGNORING
+OFFSET
+ON
+WITHOUT
+%token keywordsEnd
+
+
+// Start symbols for the generated parser.
+%token startSymbolsStart
+%token <item>
+START_METRIC
+START_SERIES_DESCRIPTION
+START_EXPRESSION
+START_METRIC_SELECTOR
+%token startSymbolsEnd
+
+
+// Type definitions for grammar rules.
+%type <matchers> label_match_list
+%type <matcher> label_matcher
+
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op
+
+%type <labels> label_set label_set_list metric
+%type