diff --git a/go.mod b/go.mod
index b5577d3b74..a74fd1ddb2 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/k8snetworkplumbingwg/sriov-network-operator
-go 1.22
+go 1.22.4
require (
github.com/Masterminds/sprig/v3 v3.2.2
@@ -9,8 +9,10 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0
github.com/fsnotify/fsnotify v1.7.0
github.com/go-logr/logr v1.2.4
+ github.com/go-logr/stdr v1.2.2
github.com/golang/mock v1.4.4
github.com/google/go-cmp v0.6.0
+ github.com/google/renameio/v2 v2.0.0
github.com/google/uuid v1.3.1
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/jaypipes/ghw v0.9.0
@@ -24,6 +26,7 @@ require (
github.com/openshift/api v0.0.0-20230807132801-600991d550ac
github.com/openshift/client-go v0.0.0-20230607134213-3cd0021bbee3
github.com/openshift/machine-config-operator v0.0.1-0.20231024085435-7e1fb719c1ba
+ github.com/ovn-org/libovsdb v0.6.1-0.20240125124854-03f787b1a892
github.com/pkg/errors v0.9.1
github.com/safchain/ethtool v0.3.0
github.com/spf13/cobra v1.7.0
@@ -55,6 +58,9 @@ require (
github.com/aws/aws-sdk-go v1.44.204 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/hub v1.0.1 // indirect
+ github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/clarketm/json v1.17.1 // indirect
diff --git a/go.sum b/go.sum
index 32cba669d8..4fafc1d91b 100644
--- a/go.sum
+++ b/go.sum
@@ -71,8 +71,16 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA=
+github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg=
+github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs=
+github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ=
+github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -149,8 +157,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@@ -243,6 +254,8 @@ github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg=
+github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -362,6 +375,8 @@ github.com/openshift/library-go v0.0.0-20231020125025-211b32f1a1f2 h1:TWG/YVRhSv
github.com/openshift/library-go v0.0.0-20231020125025-211b32f1a1f2/go.mod h1:ZFwNwC3opc/7aOvzUbU95zp33Lbxet48h80ryH3p6DY=
github.com/openshift/machine-config-operator v0.0.1-0.20231024085435-7e1fb719c1ba h1:WM6K+m2xMAwbQDetKGhV/Rd8yukF3AsU1z74cqoWrz0=
github.com/openshift/machine-config-operator v0.0.1-0.20231024085435-7e1fb719c1ba/go.mod h1:mSt3ACow31pa1hTRONn+yT5e+KFkgi7G2bFEx5Nj+n0=
+github.com/ovn-org/libovsdb v0.6.1-0.20240125124854-03f787b1a892 h1:/yg3/z+RH+iDLMxp6FTnmlk5bStK542/Rge5EBjnA9A=
+github.com/ovn-org/libovsdb v0.6.1-0.20240125124854-03f787b1a892/go.mod h1:LC5DOvcY58jOG3HTvDyCVidoMJDurPeu+xlxv5Krd9Q=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY=
diff --git a/pkg/consts/constants.go b/pkg/consts/constants.go
index 282f31adde..09e20cd892 100644
--- a/pkg/consts/constants.go
+++ b/pkg/consts/constants.go
@@ -61,6 +61,7 @@ const (
PfAppliedConfig = SriovConfBasePath + "/pci"
SriovSwitchDevConfPath = SriovConfBasePath + "/sriov_config.json"
SriovHostSwitchDevConfPath = Host + SriovSwitchDevConfPath
+ ManagedOVSBridgesPath = SriovConfBasePath + "/managed-ovs-bridges.json"
MachineConfigPoolPausedAnnotation = "sriovnetwork.openshift.io/state"
MachineConfigPoolPausedAnnotationIdle = "Idle"
diff --git a/pkg/host/internal/bridge/bridge.go b/pkg/host/internal/bridge/bridge.go
new file mode 100644
index 0000000000..7ceb8129e7
--- /dev/null
+++ b/pkg/host/internal/bridge/bridge.go
@@ -0,0 +1,81 @@
+package bridge
+
+import (
+ "context"
+
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/host/internal/bridge/ovs"
+ ovsStorePkg "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/host/internal/bridge/ovs/store"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/host/types"
+)
+
+type bridge struct {
+	ovs ovs.Interface
+}
+
+// New returns the default implementation of the BridgeInterface
+func New() types.BridgeInterface {
+	return &bridge{
+		ovs: ovs.New(ovsStorePkg.New()),
+	}
+}
+
+// DiscoverBridges returns information about managed OVS bridges on the host
+func (b *bridge) DiscoverBridges() (sriovnetworkv1.Bridges, error) {
+	log.Log.V(2).Info("DiscoverBridges(): discover managed bridges")
+	discoveredOVSBridges, err := b.ovs.GetOVSBridges(context.Background())
+	if err != nil {
+		log.Log.Error(err, "DiscoverBridges(): failed to discover managed OVS bridges")
+		return sriovnetworkv1.Bridges{}, err
+	}
+	return sriovnetworkv1.Bridges{OVS: discoveredOVSBridges}, nil
+}
+
+// ConfigureBridges configures managed bridges for the host
+func (b *bridge) ConfigureBridges(bridgesSpec sriovnetworkv1.Bridges, bridgesStatus sriovnetworkv1.Bridges) error {
+	log.Log.V(1).Info("ConfigureBridges(): configure bridges")
+	if len(bridgesSpec.OVS) == 0 && len(bridgesStatus.OVS) == 0 {
+		// there are no reported OVS bridges in the status and the spec doesn't contain bridges,
+		// no configuration is required
+		log.Log.V(2).Info("ConfigureBridges(): configuration is not required")
+		return nil
+	}
+	for _, curBr := range bridgesStatus.OVS {
+		found := false
+		for _, desiredBr := range bridgesSpec.OVS {
+			if curBr.Name == desiredBr.Name {
+				found = true
+				break
+			}
+		}
+		if !found {
+			if err := b.ovs.RemoveOVSBridge(context.Background(), curBr.Name); err != nil {
+				log.Log.Error(err, "ConfigureBridges(): failed to remove OVS bridge", "bridge", curBr.Name)
+				return err
+			}
+		}
+	}
+	// create bridges, existing bridges will be updated only if the new config doesn't match current config
+	for i := range bridgesSpec.OVS {
+		desiredBr := bridgesSpec.OVS[i]
+		if err := b.ovs.CreateOVSBridge(context.Background(), &desiredBr); err != nil {
+			log.Log.Error(err, "ConfigureBridges(): failed to create OVS bridge", "bridge", desiredBr.Name)
+			return err
+		}
+	}
+	return nil
+}
+
+// DetachInterfaceFromManagedBridge detaches the interface from a managed bridge,
+// this step is required before applying some configurations to the PF, e.g. changing of eSwitch mode.
+// The function detaches interfaces from managed bridges only.
+func (b *bridge) DetachInterfaceFromManagedBridge(pciAddr string) error {
+	log.Log.V(1).Info("DetachInterfaceFromManagedBridge(): detach interface", "pciAddr", pciAddr)
+	if err := b.ovs.RemoveInterfaceFromOVSBridge(context.Background(), pciAddr); err != nil {
+		log.Log.Error(err, "DetachInterfaceFromManagedBridge(): failed to detach interface from OVS bridge", "pciAddr", pciAddr)
+		return err
+	}
+	return nil
+}
diff --git a/pkg/host/internal/bridge/bridge_test.go b/pkg/host/internal/bridge/bridge_test.go
new file mode 100644
index 0000000000..072ca27a5b
--- /dev/null
+++ b/pkg/host/internal/bridge/bridge_test.go
@@ -0,0 +1,97 @@
+package bridge
+
+import (
+ "fmt"
+
+ "github.com/golang/mock/gomock"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
+ ovsMockPkg "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/host/internal/bridge/ovs/mock"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/host/types"
+)
+
+var _ = Describe("Bridge", func() {
+ var (
+ testCtrl *gomock.Controller
+ br types.BridgeInterface
+ ovsMock *ovsMockPkg.MockInterface
+ testErr = fmt.Errorf("test")
+ )
+ BeforeEach(func() {
+ testCtrl = gomock.NewController(GinkgoT())
+ ovsMock = ovsMockPkg.NewMockInterface(testCtrl)
+ br = &bridge{ovs: ovsMock}
+ })
+ AfterEach(func() {
+ testCtrl.Finish()
+ })
+ Context("DiscoverBridges", func() {
+ It("succeed", func() {
+ ovsMock.EXPECT().GetOVSBridges(gomock.Any()).Return([]sriovnetworkv1.OVSConfigExt{{Name: "test"}, {Name: "test2"}}, nil)
+ ret, err := br.DiscoverBridges()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ret.OVS).To(HaveLen(2))
+ })
+ It("error", func() {
+ ovsMock.EXPECT().GetOVSBridges(gomock.Any()).Return(nil, testErr)
+ _, err := br.DiscoverBridges()
+ Expect(err).To(MatchError(testErr))
+ })
+ })
+
+ Context("ConfigureBridges", func() {
+ It("succeed", func() {
+ brCreate1 := sriovnetworkv1.OVSConfigExt{Name: "br-to-create-1"}
+ brCreate2 := sriovnetworkv1.OVSConfigExt{Name: "br-to-create-2"}
+ brDelete1 := sriovnetworkv1.OVSConfigExt{Name: "br-to-delete-1"}
+ brDelete2 := sriovnetworkv1.OVSConfigExt{Name: "br-to-delete-2"}
+
+ ovsMock.EXPECT().RemoveOVSBridge(gomock.Any(), brDelete1.Name).Return(nil)
+ ovsMock.EXPECT().RemoveOVSBridge(gomock.Any(), brDelete2.Name).Return(nil)
+ ovsMock.EXPECT().CreateOVSBridge(gomock.Any(), &brCreate1).Return(nil)
+ ovsMock.EXPECT().CreateOVSBridge(gomock.Any(), &brCreate2).Return(nil)
+ err := br.ConfigureBridges(
+ sriovnetworkv1.Bridges{OVS: []sriovnetworkv1.OVSConfigExt{brCreate1, brCreate2}},
+ sriovnetworkv1.Bridges{OVS: []sriovnetworkv1.OVSConfigExt{brCreate1, brDelete1, brDelete2}})
+ Expect(err).NotTo(HaveOccurred())
+ })
+ It("empty spec and status", func() {
+ err := br.ConfigureBridges(
+ sriovnetworkv1.Bridges{OVS: []sriovnetworkv1.OVSConfigExt{}},
+ sriovnetworkv1.Bridges{OVS: []sriovnetworkv1.OVSConfigExt{}})
+ Expect(err).NotTo(HaveOccurred())
+ })
+ It("failed on creation", func() {
+ brCreate1 := sriovnetworkv1.OVSConfigExt{Name: "br-to-create-1"}
+ ovsMock.EXPECT().CreateOVSBridge(gomock.Any(), &brCreate1).Return(testErr)
+ err := br.ConfigureBridges(
+ sriovnetworkv1.Bridges{OVS: []sriovnetworkv1.OVSConfigExt{brCreate1}},
+ sriovnetworkv1.Bridges{OVS: []sriovnetworkv1.OVSConfigExt{}})
+ Expect(err).To(MatchError(testErr))
+ })
+ It("failed on removal", func() {
+ brDelete1 := sriovnetworkv1.OVSConfigExt{Name: "br-to-delete-1"}
+ ovsMock.EXPECT().RemoveOVSBridge(gomock.Any(), brDelete1.Name).Return(testErr)
+ err := br.ConfigureBridges(
+ sriovnetworkv1.Bridges{OVS: []sriovnetworkv1.OVSConfigExt{}},
+ sriovnetworkv1.Bridges{OVS: []sriovnetworkv1.OVSConfigExt{brDelete1}})
+ Expect(err).To(MatchError(testErr))
+ })
+ })
+
+ Context("DetachInterfaceFromManagedBridge", func() {
+ It("succeed", func() {
+ ovsMock.EXPECT().RemoveInterfaceFromOVSBridge(gomock.Any(), "0000:d8:00.0").Return(nil)
+ err := br.DetachInterfaceFromManagedBridge("0000:d8:00.0")
+ Expect(err).NotTo(HaveOccurred())
+ })
+ It("error", func() {
+ ovsMock.EXPECT().RemoveInterfaceFromOVSBridge(gomock.Any(), "0000:d8:00.0").Return(testErr)
+ err := br.DetachInterfaceFromManagedBridge("0000:d8:00.0")
+ Expect(err).To(MatchError(testErr))
+ })
+ })
+})
diff --git a/pkg/host/internal/bridge/ovs/mock/mock_ovs.go b/pkg/host/internal/bridge/ovs/mock/mock_ovs.go
new file mode 100644
index 0000000000..ecd618a0ff
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/mock/mock_ovs.go
@@ -0,0 +1,93 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: ovs.go
+
+// Package mock_ovs is a generated GoMock package.
+package mock_ovs
+
+import (
+ context "context"
+ reflect "reflect"
+
+ gomock "github.com/golang/mock/gomock"
+ v1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
+)
+
+// MockInterface is a mock of Interface interface.
+type MockInterface struct {
+ ctrl *gomock.Controller
+ recorder *MockInterfaceMockRecorder
+}
+
+// MockInterfaceMockRecorder is the mock recorder for MockInterface.
+type MockInterfaceMockRecorder struct {
+ mock *MockInterface
+}
+
+// NewMockInterface creates a new mock instance.
+func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
+ mock := &MockInterface{ctrl: ctrl}
+ mock.recorder = &MockInterfaceMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
+ return m.recorder
+}
+
+// CreateOVSBridge mocks base method.
+func (m *MockInterface) CreateOVSBridge(ctx context.Context, conf *v1.OVSConfigExt) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateOVSBridge", ctx, conf)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CreateOVSBridge indicates an expected call of CreateOVSBridge.
+func (mr *MockInterfaceMockRecorder) CreateOVSBridge(ctx, conf interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOVSBridge", reflect.TypeOf((*MockInterface)(nil).CreateOVSBridge), ctx, conf)
+}
+
+// GetOVSBridges mocks base method.
+func (m *MockInterface) GetOVSBridges(ctx context.Context) ([]v1.OVSConfigExt, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetOVSBridges", ctx)
+ ret0, _ := ret[0].([]v1.OVSConfigExt)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetOVSBridges indicates an expected call of GetOVSBridges.
+func (mr *MockInterfaceMockRecorder) GetOVSBridges(ctx interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOVSBridges", reflect.TypeOf((*MockInterface)(nil).GetOVSBridges), ctx)
+}
+
+// RemoveInterfaceFromOVSBridge mocks base method.
+func (m *MockInterface) RemoveInterfaceFromOVSBridge(ctx context.Context, ifaceAddr string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveInterfaceFromOVSBridge", ctx, ifaceAddr)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// RemoveInterfaceFromOVSBridge indicates an expected call of RemoveInterfaceFromOVSBridge.
+func (mr *MockInterfaceMockRecorder) RemoveInterfaceFromOVSBridge(ctx, ifaceAddr interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveInterfaceFromOVSBridge", reflect.TypeOf((*MockInterface)(nil).RemoveInterfaceFromOVSBridge), ctx, ifaceAddr)
+}
+
+// RemoveOVSBridge mocks base method.
+func (m *MockInterface) RemoveOVSBridge(ctx context.Context, bridgeName string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveOVSBridge", ctx, bridgeName)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// RemoveOVSBridge indicates an expected call of RemoveOVSBridge.
+func (mr *MockInterfaceMockRecorder) RemoveOVSBridge(ctx, bridgeName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveOVSBridge", reflect.TypeOf((*MockInterface)(nil).RemoveOVSBridge), ctx, bridgeName)
+}
diff --git a/pkg/host/internal/bridge/ovs/models.go b/pkg/host/internal/bridge/ovs/models.go
new file mode 100644
index 0000000000..4bd3563123
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/models.go
@@ -0,0 +1,56 @@
+package ovs
+
+import (
+ "slices"
+
+ "github.com/ovn-org/libovsdb/model"
+)
+
+// OpenvSwitchEntry represents some fields of the object in the Open_vSwitch table
+type OpenvSwitchEntry struct {
+ UUID string `ovsdb:"_uuid"`
+ Bridges []string `ovsdb:"bridges"`
+}
+
+// BridgeEntry represents some fields of the object in the Bridge table
+type BridgeEntry struct {
+ UUID string `ovsdb:"_uuid"`
+ Name string `ovsdb:"name"`
+ DatapathType string `ovsdb:"datapath_type"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+ Ports []string `ovsdb:"ports"`
+}
+
+// HasPort returns true if portUUID is found in Ports slice
+func (b *BridgeEntry) HasPort(portUUID string) bool {
+ return slices.Contains(b.Ports, portUUID)
+}
+
+// InterfaceEntry represents some fields of the object in the Interface table
+type InterfaceEntry struct {
+ UUID string `ovsdb:"_uuid"`
+ Name string `ovsdb:"name"`
+ Type string `ovsdb:"type"`
+ Error *string `ovsdb:"error"`
+ Options map[string]string `ovsdb:"options"`
+ ExternalIDs map[string]string `ovsdb:"external_ids"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+}
+
+// PortEntry represents some fields of the object in the Port table
+type PortEntry struct {
+ UUID string `ovsdb:"_uuid"`
+ Name string `ovsdb:"name"`
+ Interfaces []string `ovsdb:"interfaces"`
+}
+
+// DatabaseModel returns the DatabaseModel object to be used in libovsdb
+func DatabaseModel() (model.ClientDBModel, error) {
+ return model.NewClientDBModel("Open_vSwitch", map[string]model.Model{
+ "Bridge": &BridgeEntry{},
+ "Interface": &InterfaceEntry{},
+ "Open_vSwitch": &OpenvSwitchEntry{},
+ "Port": &PortEntry{},
+ })
+}
diff --git a/pkg/host/internal/bridge/ovs/ovs.go b/pkg/host/internal/bridge/ovs/ovs.go
new file mode 100644
index 0000000000..6d232b0fb4
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/ovs.go
@@ -0,0 +1,685 @@
+package ovs
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
+ ovsStorePkg "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/host/internal/bridge/ovs/store"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/vars"
+)
+
+const (
+ // default timeout for ovsdb calls
+ defaultTimeout = time.Second * 15
+
+ // the number of checks that are performed after the interface is created
+ // to confirm that the interface is not in an error state right after creation
+ interfaceErrorCheckCount = 2
+ // interval between checks
+ interfaceErrorCheckInterval = time.Second
+)
+
+// Interface provides functions to configure managed OVS bridges
+//
+//go:generate ../../../../../bin/mockgen -destination mock/mock_ovs.go -source ovs.go
+type Interface interface {
+	// CreateOVSBridge creates an OVS bridge from the provided config,
+	// does nothing if an OVS bridge with the right config already exists,
+	// if an OVS bridge exists with a different config it will be removed and re-created
+	CreateOVSBridge(ctx context.Context, conf *sriovnetworkv1.OVSConfigExt) error
+	// GetOVSBridges returns configuration for all managed bridges
+	GetOVSBridges(ctx context.Context) ([]sriovnetworkv1.OVSConfigExt, error)
+	// RemoveOVSBridge removes managed OVS bridge by name
+	RemoveOVSBridge(ctx context.Context, bridgeName string) error
+	// RemoveInterfaceFromOVSBridge removes an interface from the managed OVS bridge
+	RemoveInterfaceFromOVSBridge(ctx context.Context, ifaceAddr string) error
+}
+
+// New creates a new instance of the OVS Interface
+func New(store ovsStorePkg.Store) Interface {
+	return &ovs{store: store}
+}
+
+type ovs struct {
+ store ovsStorePkg.Store
+}
+
+// CreateOVSBridge creates an OVS bridge from the provided config,
+// does nothing if an OVS bridge with the right config already exists,
+// if an OVS bridge exists with a different config it will be removed and re-created
+func (o *ovs) CreateOVSBridge(ctx context.Context, conf *sriovnetworkv1.OVSConfigExt) error {
+ ctx, cancel := setDefaultTimeout(ctx)
+ defer cancel()
+ if len(conf.Uplinks) != 1 {
+ return fmt.Errorf("unsupported configuration, uplinks list must contain one element")
+ }
+ funcLog := log.Log.WithValues("bridge", conf.Name, "ifaceAddr", conf.Uplinks[0].PciAddress, "ifaceName", conf.Uplinks[0].Name)
+ funcLog.V(1).Info("CreateOVSBridge(): start configuration of the OVS bridge")
+
+ dbClient, err := getClient(ctx)
+ if err != nil {
+ funcLog.Error(err, "CreateOVSBridge(): failed to connect to OVSDB")
+ return fmt.Errorf("failed to connect to OVSDB: %v", err)
+ }
+ defer dbClient.Close()
+
+ knownConfig, err := o.store.GetManagedOVSBridge(conf.Name)
+ if err != nil {
+ funcLog.Error(err, "CreateOVSBridge(): failed to read data from store")
+ return fmt.Errorf("failed to read data from store: %v", err)
+ }
+ if knownConfig == nil || !reflect.DeepEqual(conf, knownConfig) {
+ funcLog.V(2).Info("CreateOVSBridge(): save current configuration to the store")
+ // config in store manager is not found or it is not the same config as passed with conf arg,
+ // update config in the store manager
+ if err := o.store.AddManagedOVSBridge(conf); err != nil {
+ funcLog.Error(err, "CreateOVSBridge(): failed to save current configuration to the store")
+ return err
+ }
+ }
+ keepBridge := false
+ if knownConfig != nil {
+ funcLog.V(2).Info("CreateOVSBridge(): configuration for the bridge found in the store")
+ // use knownConfig to query current state
+ currentState, err := o.getCurrentBridgeState(ctx, dbClient, knownConfig)
+ if err != nil {
+ funcLog.Error(err, "CreateOVSBridge(): failed to query current bridge state")
+ return err
+ }
+ if currentState != nil {
+ if reflect.DeepEqual(conf, currentState) {
+ // bridge already exist with the right config
+ funcLog.V(2).Info("CreateOVSBridge(): bridge state already match current configuration, no actions required")
+ return nil
+ }
+ funcLog.V(2).Info("CreateOVSBridge(): bridge state differs from the current configuration, reconfiguration required")
+ keepBridge = reflect.DeepEqual(conf.Bridge, currentState.Bridge)
+ }
+ } else {
+ funcLog.V(2).Info("CreateOVSBridge(): configuration for the bridge not found in the store, create the bridge")
+ }
+ funcLog.V(2).Info("CreateOVSBridge(): ensure uplink is not attached to any bridge")
+ // removal of the bridge should also remove all interfaces that are attached to it.
+ // we need to remove interface with additional call even if keepBridge is false to make
+ // sure that the interface is not attached to a different OVS bridge
+ if err := o.deleteInterfaceByName(ctx, dbClient, conf.Uplinks[0].Name); err != nil {
+ funcLog.Error(err, "CreateOVSBridge(): failed to remove uplink interface")
+ return err
+ }
+ if !keepBridge {
+ // make sure that bridge with provided name not exist
+ if err := o.deleteBridgeByName(ctx, dbClient, conf.Name); err != nil {
+ funcLog.Error(err, "CreateOVSBridge(): failed to remove existing bridge")
+ return err
+ }
+ funcLog.V(2).Info("CreateOVSBridge(): create OVS bridge", "config", conf)
+ if err := o.createBridge(ctx, dbClient, &BridgeEntry{
+ Name: conf.Name,
+ UUID: uuid.NewString(),
+ DatapathType: conf.Bridge.DatapathType,
+ ExternalIDs: conf.Bridge.ExternalIDs,
+ OtherConfig: conf.Bridge.OtherConfig,
+ }); err != nil {
+ return err
+ }
+ }
+ bridge, err := o.getBridgeByName(ctx, dbClient, conf.Name)
+ if err != nil {
+ funcLog.Error(err, "CreateOVSBridge(): failed to retrieve information about created bridge from OVSDB")
+ return err
+ }
+ if bridge == nil {
+ err = fmt.Errorf("can't retrieve bridge after creation")
+ funcLog.Error(err, "CreateOVSBridge(): failed to get bridge after creation")
+ return err
+ }
+ funcLog.V(2).Info("CreateOVSBridge(): add uplink interface to the bridge")
+ if err := o.addInterface(ctx, dbClient, bridge, &InterfaceEntry{
+ Name: conf.Uplinks[0].Name,
+ UUID: uuid.NewString(),
+ Type: conf.Uplinks[0].Interface.Type,
+ Options: conf.Uplinks[0].Interface.Options,
+ ExternalIDs: conf.Uplinks[0].Interface.ExternalIDs,
+ OtherConfig: conf.Uplinks[0].Interface.OtherConfig,
+ }); err != nil {
+ funcLog.Error(err, "CreateOVSBridge(): failed to add uplink interface to the bridge")
+ return err
+ }
+ return nil
+}
+
+// GetOVSBridges returns configuration for all managed bridges
+func (o *ovs) GetOVSBridges(ctx context.Context) ([]sriovnetworkv1.OVSConfigExt, error) {
+ ctx, cancel := setDefaultTimeout(ctx)
+ defer cancel()
+ funcLog := log.Log
+ funcLog.V(1).Info("GetOVSBridges(): get managed OVS bridges")
+ knownConfigs, err := o.store.GetManagedOVSBridges()
+ if err != nil {
+ funcLog.Error(err, "GetOVSBridges(): failed to read data from store")
+ return nil, fmt.Errorf("failed to read data from store: %v", err)
+ }
+ if len(knownConfigs) == 0 {
+ funcLog.V(2).Info("GetOVSBridges(): managed bridges not found")
+ return nil, nil
+ }
+ dbClient, err := getClient(ctx)
+ if err != nil {
+ funcLog.Error(err, "GetOVSBridges(): failed to connect to OVSDB")
+ return nil, fmt.Errorf("failed to connect to OVSDB: %v", err)
+ }
+ defer dbClient.Close()
+
+ result := make([]sriovnetworkv1.OVSConfigExt, 0, len(knownConfigs))
+ for _, knownConfig := range knownConfigs {
+ currentState, err := o.getCurrentBridgeState(ctx, dbClient, knownConfig)
+ if err != nil {
+ funcLog.Error(err, "GetOVSBridges(): failed to get state for the managed bridge", "bridge", knownConfig.Name)
+ return nil, err
+ }
+ if currentState != nil {
+ result = append(result, *currentState)
+ }
+ }
+ // always return bridges in the same order to make sure that the caller can easily compare
+ // two results returned by the GetOVSBridges function
+ sort.Slice(result, func(i, j int) bool {
+ return result[i].Name < result[j].Name
+ })
+ if funcLog.V(2).Enabled() {
+ data, _ := json.Marshal(&result)
+ funcLog.V(2).Info("GetOVSBridges()", "result", string(data))
+ }
+ return result, nil
+}
+
+// RemoveOVSBridge removes managed OVS bridge by name
+func (o *ovs) RemoveOVSBridge(ctx context.Context, bridgeName string) error {
+ ctx, cancel := setDefaultTimeout(ctx)
+ defer cancel()
+ funcLog := log.Log.WithValues("bridge", bridgeName)
+ funcLog.V(1).Info("RemoveOVSBridge(): remove managed bridge")
+ brConf, err := o.store.GetManagedOVSBridge(bridgeName)
+ if err != nil {
+ funcLog.Error(err, "RemoveOVSBridge(): failed to read data from store")
+ return fmt.Errorf("failed to read data from store: %v", err)
+ }
+ if brConf == nil {
+ funcLog.V(2).Info("RemoveOVSBridge(): managed bridge configuration not found in the store")
+ return nil
+ }
+ funcLog.V(2).Info("RemoveOVSBridge(): configuration for the managed bridge exist in the store")
+ dbClient, err := getClient(ctx)
+ if err != nil {
+ funcLog.Error(err, "RemoveOVSBridge(): failed to connect to OVSDB")
+ return fmt.Errorf("failed to connect to OVSDB: %v", err)
+ }
+ defer dbClient.Close()
+ currentState, err := o.getCurrentBridgeState(ctx, dbClient, brConf)
+ if err != nil {
+ funcLog.Error(err, "RemoveOVSBridge(): failed to get state of the managed bridge")
+ return err
+ }
+ if currentState != nil {
+ funcLog.V(2).Info("RemoveOVSBridge(): remove managed bridge")
+ if err := o.deleteBridgeByName(ctx, dbClient, brConf.Name); err != nil {
+ funcLog.Error(err, "RemoveOVSBridge(): failed to remove managed bridge")
+ return err
+ }
+ } else {
+ funcLog.V(2).Info("RemoveOVSBridge(): managed bridge not exist")
+ }
+
+ funcLog.V(2).Info("RemoveOVSBridge(): remove managed bridge configuration from the store")
+ if err := o.store.RemoveManagedOVSBridge(brConf.Name); err != nil {
+ funcLog.Error(err, "RemoveOVSBridge(): failed to remove managed bridge configuration from the store")
+ return err
+ }
+ return nil
+}
+
+// RemoveInterfaceFromOVSBridge removes an interface from the managed OVS bridge
+func (o *ovs) RemoveInterfaceFromOVSBridge(ctx context.Context, pciAddress string) error {
+ ctx, cancel := setDefaultTimeout(ctx)
+ defer cancel()
+ funcLog := log.Log.WithValues("pciAddress", pciAddress)
+ funcLog.V(1).Info("RemoveInterfaceFromOVSBridge(): remove interface from managed bridge")
+ knownConfigs, err := o.store.GetManagedOVSBridges()
+ if err != nil {
+ funcLog.Error(err, "RemoveInterfaceFromOVSBridge(): failed to read data from store")
+ return fmt.Errorf("failed to read data from store: %v", err)
+ }
+ var relatedBridges []*sriovnetworkv1.OVSConfigExt
+ for _, kc := range knownConfigs {
+ if len(kc.Uplinks) > 0 && kc.Uplinks[0].PciAddress == pciAddress && kc.Uplinks[0].Name != "" {
+ relatedBridges = append(relatedBridges, kc)
+ }
+ }
+ if len(relatedBridges) == 0 {
+ funcLog.V(2).Info("RemoveInterfaceFromOVSBridge(): can't find related managed OVS bridge in the store")
+ return nil
+ }
+ if len(relatedBridges) > 1 {
+ funcLog.Info("RemoveInterfaceFromOVSBridge(): WARNING: uplink match more then one managed OVS bridge in the store, use first match")
+ }
+ brConf := relatedBridges[0]
+
+ dbClient, err := getClient(ctx)
+ if err != nil {
+ funcLog.Error(err, "RemoveInterfaceFromOVSBridge(): failed to connect to OVSDB")
+ return fmt.Errorf("failed to connect to OVSDB: %v", err)
+ }
+ defer dbClient.Close()
+
+ funcLog.V(2).Info("RemoveInterfaceFromOVSBridge(): related managed bridge found for interface in the store", "bridge", brConf.Name)
+ currentState, err := o.getCurrentBridgeState(ctx, dbClient, brConf)
+ if err != nil {
+ funcLog.Error(err, "RemoveInterfaceFromOVSBridge(): failed to get state of the managed bridge", "bridge", brConf.Name)
+ return err
+ }
+ if currentState == nil {
+ funcLog.V(2).Info("RemoveInterfaceFromOVSBridge(): bridge not found, remove information about the bridge from the store", "bridge", brConf.Name)
+ if err := o.store.RemoveManagedOVSBridge(brConf.Name); err != nil {
+ funcLog.Error(err, "RemoveInterfaceFromOVSBridge(): failed to remove information from the store", "bridge", brConf.Name)
+ return err
+ }
+ return nil
+ }
+
+ funcLog.V(2).Info("RemoveInterfaceFromOVSBridge(): remove interface from the bridge")
+ if err := o.deleteInterfaceByName(ctx, dbClient, brConf.Uplinks[0].Name); err != nil {
+ funcLog.Error(err, "RemoveInterfaceFromOVSBridge(): failed to remove interface from the bridge", "bridge", brConf.Name)
+ return err
+ }
+
+ return nil
+}
+
+func (o *ovs) getBridgeByName(ctx context.Context, dbClient client.Client, name string) (*BridgeEntry, error) {
+ br := &BridgeEntry{Name: name}
+ if err := dbClient.Get(ctx, br); err != nil {
+ if errors.Is(err, client.ErrNotFound) {
+ return nil, nil
+ } else {
+ return nil, fmt.Errorf("get call for the bridge %s failed: %v", name, err)
+ }
+ }
+ return br, nil
+}
+
+func (o *ovs) getInterfaceByName(ctx context.Context, dbClient client.Client, name string) (*InterfaceEntry, error) {
+ iface := &InterfaceEntry{Name: name}
+ if err := dbClient.Get(ctx, iface); err != nil {
+ if errors.Is(err, client.ErrNotFound) {
+ return nil, nil
+ } else {
+ return nil, fmt.Errorf("get call for the interfaces %s failed: %v", name, err)
+ }
+ }
+ return iface, nil
+}
+
+func (o *ovs) getPortByInterface(ctx context.Context, dbClient client.Client, iface *InterfaceEntry) (*PortEntry, error) {
+ portEntry := &PortEntry{}
+ portEntryList := []*PortEntry{}
+ err := dbClient.WhereAll(portEntry, model.Condition{
+ Field: &portEntry.Interfaces,
+ Function: ovsdb.ConditionIncludes,
+ Value: []string{iface.UUID},
+ }).List(ctx, &portEntryList)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list ports related to interface %s: %v", iface.Name, err)
+ }
+ if len(portEntryList) == 0 {
+ return nil, nil
+ }
+ return portEntryList[0], nil
+}
+
+func (o *ovs) getBridgeByPort(ctx context.Context, dbClient client.Client, port *PortEntry) (*BridgeEntry, error) {
+ brEntry := &BridgeEntry{}
+ brEntryList := []*BridgeEntry{}
+ err := dbClient.WhereAll(brEntry, model.Condition{
+ Field: &brEntry.Ports,
+ Function: ovsdb.ConditionIncludes,
+ Value: []string{port.UUID},
+ }).List(ctx, &brEntryList)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list bridges related to port %s: %v", port.Name, err)
+ }
+ if len(brEntryList) == 0 {
+ return nil, nil
+ }
+ return brEntryList[0], nil
+}
+
// create bridge with provided configuration
// the Bridge row insert and the registration of the new bridge in the
// root Open_vSwitch row are executed as one transaction
func (o *ovs) createBridge(ctx context.Context, dbClient client.Client, br *BridgeEntry) error {
	brCreateOps, err := dbClient.Create(br)
	if err != nil {
		return fmt.Errorf("failed to prepare operation for bridge creation: %v", err)
	}
	rootObj, err := o.getRootObj(ctx, dbClient)
	if err != nil {
		return err
	}
	// add the new bridge UUID to the Bridges set of the root row
	ovsMutateOps, err := dbClient.Where(rootObj).Mutate(rootObj, model.Mutation{
		Field:   &rootObj.Bridges,
		Mutator: ovsdb.MutateOperationInsert,
		Value:   []string{br.UUID},
	})
	if err != nil {
		return fmt.Errorf("failed to create mutate operation for Open_vSwitch table: %v", err)
	}
	if err := o.execTransaction(ctx, dbClient, brCreateOps, ovsMutateOps); err != nil {
		return fmt.Errorf("bridge creation failed: %v", err)
	}
	return nil
}
+
+// add interface with provided configuration to the provided bridge
+// and check that interface has no error for the next 2 seconds
+func (o *ovs) addInterface(ctx context.Context, dbClient client.Client, br *BridgeEntry, iface *InterfaceEntry) error {
+ addInterfaceOPs, err := dbClient.Create(iface)
+ if err != nil {
+ return fmt.Errorf("failed to prepare operation for interface creation: %v", err)
+ }
+ port := &PortEntry{Name: iface.Name, UUID: uuid.NewString(), Interfaces: []string{iface.UUID}}
+ addPortOPs, err := dbClient.Create(port)
+ if err != nil {
+ return fmt.Errorf("failed to prepare operation for port creation: %v", err)
+ }
+ bridgeMutateOps, err := dbClient.Where(br).Mutate(br, model.Mutation{
+ Field: &br.Ports,
+ Mutator: ovsdb.MutateOperationInsert,
+ Value: []string{port.UUID},
+ })
+ if err != nil {
+ return fmt.Errorf("failed to prepare operation for bridge mutate: %v", err)
+ }
+ if err := o.execTransaction(ctx, dbClient, addInterfaceOPs, addPortOPs, bridgeMutateOps); err != nil {
+ return fmt.Errorf("bridge deletion failed: %v", err)
+ }
+ // check that interface has no error right after creation
+ for i := 0; i < interfaceErrorCheckCount; i++ {
+ select {
+ case <-time.After(interfaceErrorCheckInterval):
+ case <-ctx.Done():
+ }
+ if err := dbClient.Get(ctx, iface); err != nil {
+ return fmt.Errorf("failed to read interface after creation: %v", err)
+ }
+ if iface.Error != nil {
+ return fmt.Errorf("created interface is in error state: %s", *iface.Error)
+ }
+ }
+ return nil
+}
+
+// delete bridge by the name
+func (o *ovs) deleteBridgeByName(ctx context.Context, dbClient client.Client, brName string) error {
+ br, err := o.getBridgeByName(ctx, dbClient, brName)
+ if err != nil {
+ return err
+ }
+ if br == nil {
+ return nil
+ }
+ brDeleteOps, err := dbClient.Where(br).Delete()
+ if err != nil {
+ return fmt.Errorf("failed to prepare operation for bridge deletion: %v", err)
+ }
+ rootObj, err := o.getRootObj(ctx, dbClient)
+ if err != nil {
+ return err
+ }
+ ovsMutateOps, err := dbClient.Where(rootObj).Mutate(rootObj, model.Mutation{
+ Field: &rootObj.Bridges,
+ Mutator: ovsdb.MutateOperationDelete,
+ Value: []string{br.UUID},
+ })
+ if err != nil {
+ return fmt.Errorf("failed to create mutate operation for Open_vSwitch table: %v", err)
+ }
+ if err := o.execTransaction(ctx, dbClient, brDeleteOps, ovsMutateOps); err != nil {
+ return fmt.Errorf("bridge deletion failed: %v", err)
+ }
+ return nil
+}
+
// delete interface by the name
// removes, in a single transaction: the Interface row, the Port row that
// wraps it (if any), and the reference to that port from its bridge (if any).
// no-op when the interface does not exist.
func (o *ovs) deleteInterfaceByName(ctx context.Context, dbClient client.Client, ifaceName string) error {
	var operations [][]ovsdb.Operation
	iface, err := o.getInterfaceByName(ctx, dbClient, ifaceName)
	if err != nil {
		return err
	}
	if iface == nil {
		// already gone, nothing to do
		return nil
	}
	delIfaceOPs, err := dbClient.Where(iface).Delete()
	if err != nil {
		return fmt.Errorf("failed to prepare operation for interface deletion: %v", err)
	}
	operations = append(operations, delIfaceOPs)

	// the wrapping port (and the bridge reference to it) may be absent,
	// e.g. after a partially applied previous transaction
	port, err := o.getPortByInterface(ctx, dbClient, iface)
	if err != nil {
		return err
	}
	if port != nil {
		delPortOPs, err := dbClient.Where(port).Delete()
		if err != nil {
			return fmt.Errorf("failed to prepare operation for port deletion: %v", err)
		}
		operations = append(operations, delPortOPs)

		bridge, err := o.getBridgeByPort(ctx, dbClient, port)
		if err != nil {
			return err
		}
		if bridge != nil {
			// detach the port from the bridge's Ports set
			bridgeMutateOps, err := dbClient.Where(bridge).Mutate(bridge, model.Mutation{
				Field:   &bridge.Ports,
				Mutator: ovsdb.MutateOperationDelete,
				Value:   []string{port.UUID},
			})
			if err != nil {
				return fmt.Errorf("failed to prepare operation for bridge mutate: %v", err)
			}
			operations = append(operations, bridgeMutateOps)
		}
	}
	if err := o.execTransaction(ctx, dbClient, operations...); err != nil {
		return fmt.Errorf("failed to remove interface %s: %v", iface.Name, err)
	}
	return nil
}
+
+// execute multiple prepared OVSDB operations as a single transaction
+func (o *ovs) execTransaction(ctx context.Context, dbClient client.Client, ops ...[]ovsdb.Operation) error {
+ var operations []ovsdb.Operation
+ for _, o := range ops {
+ operations = append(operations, o...)
+ }
+ result, err := dbClient.Transact(ctx, operations...)
+ if err != nil {
+ return fmt.Errorf("transaction failed: %v", err)
+ }
+ operationsErr, err := ovsdb.CheckOperationResults(result, operations)
+ if err != nil || len(operationsErr) > 0 {
+ return fmt.Errorf("operation failed: %v, %v", err, operationsErr)
+ }
+ return nil
+}
+
// return current state of the bridge and of the uplink interface.
// uses knownConfig to check which fields are managed by the operator (other
// fields can be updated by OVS itself or by other programs, we should not
// take them into account).
// returns (nil, nil) when the bridge does not exist at all; returns a state
// without Uplinks when the uplink interface is missing, is in error state,
// or is attached to a different bridge.
func (o *ovs) getCurrentBridgeState(ctx context.Context, dbClient client.Client, knownConfig *sriovnetworkv1.OVSConfigExt) (*sriovnetworkv1.OVSConfigExt, error) {
	funcLog := log.Log.WithValues("bridge", knownConfig.Name)
	funcLog.V(2).Info("getCurrentBridgeState(): get current bridge state")
	bridge, err := o.getBridgeByName(ctx, dbClient, knownConfig.Name)
	if err != nil {
		return nil, err
	}
	if bridge == nil {
		// bridge does not exist
		return nil, nil
	}
	currentConfig := &sriovnetworkv1.OVSConfigExt{
		Name: bridge.Name,
		Bridge: sriovnetworkv1.OVSBridgeConfig{
			DatapathType: bridge.DatapathType,
			// for ExternalIDs and OtherConfig maps we take into account only field which
			// were set by the operator
			ExternalIDs: updateMap(knownConfig.Bridge.ExternalIDs, bridge.ExternalIDs),
			OtherConfig: updateMap(knownConfig.Bridge.OtherConfig, bridge.OtherConfig),
		},
	}
	if len(knownConfig.Uplinks) == 0 {
		return currentConfig, nil
	}
	// only the first uplink is considered (see callers)
	knownConfigUplink := knownConfig.Uplinks[0]
	iface, err := o.getInterfaceByName(ctx, dbClient, knownConfigUplink.Name)
	if err != nil {
		return nil, err
	}
	if iface == nil {
		return currentConfig, nil
	}

	if iface.Error != nil {
		funcLog.V(2).Info("getCurrentBridgeState(): interface has an error, remove it from the bridge state", "interface", iface.Name, "error", iface.Error)
		// interface has an error, do not report info about it to let the operator try to recreate it
		return currentConfig, nil
	}

	port, err := o.getPortByInterface(ctx, dbClient, iface)
	if err != nil {
		return nil, err
	}
	if port == nil {
		return currentConfig, nil
	}

	if !bridge.HasPort(port.UUID) {
		// interface belongs to a wrong bridge, do not include uplink config to
		// the current bridge state to let the operator try to fix this
		return currentConfig, nil
	}
	currentConfig.Uplinks = []sriovnetworkv1.OVSUplinkConfigExt{{
		PciAddress: knownConfigUplink.PciAddress,
		Name:       knownConfigUplink.Name,
		Interface: sriovnetworkv1.OVSInterfaceConfig{
			Type:        iface.Type,
			ExternalIDs: updateMap(knownConfigUplink.Interface.ExternalIDs, iface.ExternalIDs),
			Options:     updateMap(knownConfigUplink.Interface.Options, iface.Options),
			OtherConfig: updateMap(knownConfigUplink.Interface.OtherConfig, iface.OtherConfig),
		},
	}}
	return currentConfig, nil
}
+
+func (o *ovs) getRootObj(ctx context.Context, dbClient client.Client) (*OpenvSwitchEntry, error) {
+ ovsList := []*OpenvSwitchEntry{}
+ if err := dbClient.List(ctx, &ovsList); err != nil {
+ return nil, fmt.Errorf("can't retrieve root object uuid from Open_vSwitch table")
+ }
+ if len(ovsList) == 0 {
+ return nil, fmt.Errorf("Open_vSwitch table is empty")
+ }
+ return ovsList[0], nil
+}
+
+// if the provided context has no timeout, the default timeout will be set
+func setDefaultTimeout(ctx context.Context) (context.Context, context.CancelFunc) {
+ _, ok := ctx.Deadline()
+ if ok {
+ // context already contains deadline,
+ // return original context and dummy cancel function
+ return ctx, func() {}
+ }
+ return context.WithTimeout(ctx, defaultTimeout)
+}
+
// updateMap returns a map that contains only the keys present in the known
// (operator-managed) map, with values taken from the current map.
// Keys from the known map that are missing in the current map are dropped.
// FIX: parameters renamed from old/new — "new" shadowed the builtin.
func updateMap(known, current map[string]string) map[string]string {
	result := make(map[string]string, len(known))
	for k := range known {
		if val, found := current[k]; found {
			result[k] = val
		}
	}
	return result
}
+
// initialize and return OVSDB client
//
// connects to the socket configured in vars.OVSDBSocketPath and starts a
// monitor (local cache replication) for the subset of columns of the
// Open_vSwitch, Bridge, Interface and Port tables that the operator manages.
// the caller is responsible for calling Close() on the returned client.
func getClient(ctx context.Context) (client.Client, error) {
	// zero-value entries are used only to reference table/column metadata
	// when building the monitor request below
	openvSwitchEntry := &OpenvSwitchEntry{}
	bridgeEntry := &BridgeEntry{}
	interfaceEntry := &InterfaceEntry{}
	portEntry := &PortEntry{}
	clientDBModel, err := DatabaseModel()
	if err != nil {
		return nil, fmt.Errorf("can't create client DB model: %v", err)
	}

	dbClient, err := client.NewOVSDBClient(clientDBModel,
		client.WithEndpoint(vars.OVSDBSocketPath),
		client.WithLogger(&log.Log))
	if err != nil {
		return nil, fmt.Errorf("can't create DB client: %v", err)
	}

	err = dbClient.Connect(ctx)
	if err != nil {
		return nil, fmt.Errorf("can't connect to ovsdb server: %v", err)
	}
	// monitor only the columns the operator reads/writes
	_, err = dbClient.Monitor(ctx, dbClient.NewMonitor(
		client.WithTable(openvSwitchEntry,
			&openvSwitchEntry.UUID,
			&openvSwitchEntry.Bridges,
		),
		client.WithTable(bridgeEntry,
			&bridgeEntry.UUID,
			&bridgeEntry.Name,
			&bridgeEntry.DatapathType,
			&bridgeEntry.ExternalIDs,
			&bridgeEntry.OtherConfig,
			&bridgeEntry.Ports,
		),
		client.WithTable(interfaceEntry,
			&interfaceEntry.UUID,
			&interfaceEntry.Name,
			&interfaceEntry.Type,
			&interfaceEntry.Error,
			&interfaceEntry.Options,
			&interfaceEntry.ExternalIDs,
			&interfaceEntry.OtherConfig,
		),
		client.WithTable(portEntry,
			&portEntry.UUID,
			&portEntry.Name,
			&portEntry.Interfaces,
		),
	))
	if err != nil {
		// avoid leaking the established connection when the monitor fails
		dbClient.Close()
		return nil, fmt.Errorf("can't start monitor: %v", err)
	}
	return dbClient, nil
}
diff --git a/pkg/host/internal/bridge/ovs/ovs_test.go b/pkg/host/internal/bridge/ovs/ovs_test.go
new file mode 100644
index 0000000000..0396966720
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/ovs_test.go
@@ -0,0 +1,517 @@
+package ovs
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/golang/mock/gomock"
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/client"
+ "github.com/ovn-org/libovsdb/database/inmemory"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/libovsdb/server"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
+ ovsStoreMockPkg "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/host/internal/bridge/ovs/store/mock"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/vars"
+)
+
+func getManagedBridges() map[string]*sriovnetworkv1.OVSConfigExt {
+ return map[string]*sriovnetworkv1.OVSConfigExt{
+ "br-0000_d8_00.0": {
+ Name: "br-0000_d8_00.0",
+ Bridge: sriovnetworkv1.OVSBridgeConfig{
+ DatapathType: "netdev",
+ ExternalIDs: map[string]string{"br_externalID_key": "br_externalID_value"},
+ OtherConfig: map[string]string{"br_otherConfig_key": "br_otherConfig_value"},
+ },
+ Uplinks: []sriovnetworkv1.OVSUplinkConfigExt{{
+ PciAddress: "0000:d8:00.0",
+ Name: "enp216s0f0np0",
+ Interface: sriovnetworkv1.OVSInterfaceConfig{
+ Type: "dpdk",
+ ExternalIDs: map[string]string{"iface_externalID_key": "iface_externalID_value"},
+ OtherConfig: map[string]string{"iface_otherConfig_key": "iface_otherConfig_value"},
+ Options: map[string]string{"iface_options_key": "iface_options_value"},
+ },
+ }},
+ },
+ }
+}
+
// testDBEntries is a full snapshot of the test database content,
// grouping the rows of all four OVSDB tables used in the tests.
type testDBEntries struct {
	// rows of the root Open_vSwitch table
	OpenVSwitch []*OpenvSwitchEntry
	Bridge      []*BridgeEntry
	Port        []*PortEntry
	Interface   []*InterfaceEntry
}
+
+func (t *testDBEntries) GetCreateOperations(c client.Client) []ovsdb.Operation {
+ var operations []ovsdb.Operation
+
+ var mdls []model.Model
+ for _, o := range t.OpenVSwitch {
+ mdls = append(mdls, o)
+ }
+ for _, o := range t.Bridge {
+ mdls = append(mdls, o)
+ }
+ for _, o := range t.Port {
+ mdls = append(mdls, o)
+ }
+ for _, o := range t.Interface {
+ mdls = append(mdls, o)
+ }
+ for _, e := range mdls {
+ if e != nil {
+ o, err := c.Create(e)
+ Expect(err).NotTo(HaveOccurred())
+ operations = append(operations, o...)
+ }
+ }
+ return operations
+}
+
// getDefaultInitialDBContent returns a DB snapshot that matches the
// getManagedBridges configuration: a single bridge wired to a single
// interface through a port, all referenced via freshly generated UUIDs.
func getDefaultInitialDBContent() *testDBEntries {
	iface := &InterfaceEntry{
		Name:        "enp216s0f0np0",
		UUID:        uuid.NewString(),
		Type:        "dpdk",
		ExternalIDs: map[string]string{"iface_externalID_key": "iface_externalID_value"},
		OtherConfig: map[string]string{"iface_otherConfig_key": "iface_otherConfig_value"},
		Options:     map[string]string{"iface_options_key": "iface_options_value"},
	}
	// port wraps the interface
	port := &PortEntry{
		Name:       "enp216s0f0np0",
		UUID:       uuid.NewString(),
		Interfaces: []string{iface.UUID},
	}
	// bridge references the port
	br := &BridgeEntry{
		Name:         "br-0000_d8_00.0",
		UUID:         uuid.NewString(),
		Ports:        []string{port.UUID},
		DatapathType: "netdev",
		ExternalIDs:  map[string]string{"br_externalID_key": "br_externalID_value"},
		OtherConfig:  map[string]string{"br_otherConfig_key": "br_otherConfig_value"},
	}
	// root row references the bridge
	ovs := &OpenvSwitchEntry{
		UUID:    uuid.NewString(),
		Bridges: []string{br.UUID},
	}
	return &testDBEntries{
		OpenVSwitch: []*OpenvSwitchEntry{ovs},
		Bridge:      []*BridgeEntry{br},
		Port:        []*PortEntry{port},
		Interface:   []*InterfaceEntry{iface},
	}
}
+
+func getDBContent(ctx context.Context, c client.Client) *testDBEntries {
+ ret := &testDBEntries{}
+ Expect(c.List(ctx, &ret.OpenVSwitch)).NotTo(HaveOccurred())
+ Expect(c.List(ctx, &ret.Bridge)).NotTo(HaveOccurred())
+ Expect(c.List(ctx, &ret.Port)).NotTo(HaveOccurred())
+ Expect(c.List(ctx, &ret.Interface)).NotTo(HaveOccurred())
+ return ret
+}
+
// createInitialDBContent populates the test OVSDB server with the provided
// snapshot in a single transaction and asserts that every operation succeeded.
func createInitialDBContent(ctx context.Context, c client.Client, expectedState *testDBEntries) {
	operations := expectedState.GetCreateOperations(c)
	result, err := c.Transact(ctx, operations...)
	Expect(err).NotTo(HaveOccurred())
	// check per-operation results, not only the RPC error
	operationsErr, err := ovsdb.CheckOperationResults(result, operations)
	Expect(err).NotTo(HaveOccurred())
	Expect(operationsErr).To(BeEmpty())
}
+
// validateDBConfig asserts that the DB snapshot contains exactly one row per
// table and that the bridge/port/interface rows are wired together and match
// the provided expected configuration.
func validateDBConfig(dbContent *testDBEntries, conf *sriovnetworkv1.OVSConfigExt) {
	Expect(dbContent.OpenVSwitch).To(HaveLen(1))
	Expect(dbContent.Bridge).To(HaveLen(1))
	Expect(dbContent.Interface).To(HaveLen(1))
	Expect(dbContent.Port).To(HaveLen(1))
	ovs := dbContent.OpenVSwitch[0]
	br := dbContent.Bridge[0]
	port := dbContent.Port[0]
	iface := dbContent.Interface[0]
	// referential integrity: root -> bridge -> port -> interface
	Expect(ovs.Bridges).To(ContainElement(br.UUID))
	Expect(br.Name).To(Equal(conf.Name))
	Expect(br.DatapathType).To(Equal(conf.Bridge.DatapathType))
	Expect(br.OtherConfig).To(Equal(conf.Bridge.OtherConfig))
	Expect(br.ExternalIDs).To(Equal(conf.Bridge.ExternalIDs))
	Expect(br.Ports).To(ContainElement(port.UUID))
	Expect(port.Name).To(Equal(conf.Uplinks[0].Name))
	Expect(port.Interfaces).To(ContainElement(iface.UUID))
	Expect(iface.Name).To(Equal(conf.Uplinks[0].Name))
	Expect(iface.Options).To(Equal(conf.Uplinks[0].Interface.Options))
	Expect(iface.Type).To(Equal(conf.Uplinks[0].Interface.Type))
	Expect(iface.OtherConfig).To(Equal(conf.Uplinks[0].Interface.OtherConfig))
	Expect(iface.ExternalIDs).To(Equal(conf.Uplinks[0].Interface.ExternalIDs))
}
+
+var _ = Describe("OVS", func() {
+ var (
+ ctx context.Context
+ )
+ BeforeEach(func() {
+ ctx = context.Background()
+ })
+ Context("setDefaultTimeout", func() {
+ It("use default", func() {
+ newCtx, cFunc := setDefaultTimeout(ctx)
+ deadline, isSet := newCtx.Deadline()
+ Expect(isSet).To(BeTrue())
+ Expect(time.Now().Before(deadline))
+ Expect(cFunc).NotTo(BeNil())
+ // cFunc should cancel the context
+ cFunc()
+ Expect(newCtx.Err()).To(MatchError(context.Canceled))
+
+ })
+ It("use explicit timeout - use configured timeout", func() {
+ timeoutCtx, timeoutFunc := context.WithTimeout(ctx, time.Millisecond*100)
+ defer timeoutFunc()
+ newCtx, _ := setDefaultTimeout(timeoutCtx)
+ time.Sleep(time.Millisecond * 200)
+ Expect(newCtx.Err()).To(MatchError(context.DeadlineExceeded))
+ })
+ It("use explicit timeout - should return noop cancel function", func() {
+ timeoutCtx, timeoutFunc := context.WithTimeout(ctx, time.Minute)
+ defer timeoutFunc()
+ newCtx, cFunc := setDefaultTimeout(timeoutCtx)
+ Expect(cFunc).NotTo(BeNil())
+ cFunc()
+ Expect(newCtx.Err()).NotTo(HaveOccurred())
+ })
+ })
+
+ Context("updateMap", func() {
+ It("nil maps", func() {
+ Expect(updateMap(nil, nil)).To(BeEmpty())
+ })
+ It("empty new map", func() {
+ Expect(updateMap(map[string]string{"key": "val"}, nil)).To(BeEmpty())
+ })
+ It("empty old map", func() {
+ Expect(updateMap(nil, map[string]string{"key": "val"})).To(BeEmpty())
+ })
+ It("update known values", func() {
+ Expect(updateMap(
+ map[string]string{"key2": "val2", "key4": "val4"},
+ map[string]string{"key1": "val1new", "key2": "val2new", "key3": "val3new"})).To(
+ Equal(
+ map[string]string{"key2": "val2new"},
+ ))
+ })
+ })
+
+ Context("manage bridges", func() {
+ var (
+ store *ovsStoreMockPkg.MockStore
+ testCtrl *gomock.Controller
+ tempDir string
+ testServerSocket string
+ err error
+ stopServerFunc func()
+ ovsClient client.Client
+ ovs Interface
+ )
+ BeforeEach(func() {
+ tempDir, err = os.MkdirTemp("", "sriov-operator-ovs-test-dir*")
+ testServerSocket = filepath.Join(tempDir, "ovsdb.sock")
+ Expect(err).NotTo(HaveOccurred())
+ testCtrl = gomock.NewController(GinkgoT())
+ store = ovsStoreMockPkg.NewMockStore(testCtrl)
+ _ = store
+ stopServerFunc = startServer("unix", testServerSocket)
+
+ origSocketValue := vars.OVSDBSocketPath
+ vars.OVSDBSocketPath = "unix://" + testServerSocket
+ DeferCleanup(func() {
+ vars.OVSDBSocketPath = origSocketValue
+ })
+
+ ovsClient, err = getClient(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ ovs = New(store)
+ })
+
+ AfterEach(func() {
+ ovsClient.Close()
+ stopServerFunc()
+ Expect(os.RemoveAll(tempDir)).NotTo(HaveOccurred())
+ testCtrl.Finish()
+ })
+
+ Context("CreateOVSBridge", func() {
+ It("Bridge already exist with the right config, do nothing", func() {
+ expectedConf := getManagedBridges()["br-0000_d8_00.0"]
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(expectedConf, nil)
+ initialDBContent := getDefaultInitialDBContent()
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+ Expect(ovs.CreateOVSBridge(ctx, expectedConf)).NotTo(HaveOccurred())
+ dbContent := getDBContent(ctx, ovsClient)
+ // dbContent should be exactly same
+ Expect(dbContent).To(Equal(initialDBContent))
+ })
+ It("No Bridge, create bridge", func() {
+ expectedConf := getManagedBridges()["br-0000_d8_00.0"]
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(nil, nil)
+
+ rootUUID := uuid.NewString()
+ initialDBContent := &testDBEntries{OpenVSwitch: []*OpenvSwitchEntry{{UUID: rootUUID}}}
+
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+
+ store.EXPECT().AddManagedOVSBridge(expectedConf).Return(nil)
+ Expect(ovs.CreateOVSBridge(ctx, expectedConf)).NotTo(HaveOccurred())
+
+ validateDBConfig(getDBContent(ctx, ovsClient), expectedConf)
+ })
+ It("Bridge exist, no data in store, should recreate", func() {
+ expectedConf := getManagedBridges()["br-0000_d8_00.0"]
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(nil, nil)
+ store.EXPECT().AddManagedOVSBridge(expectedConf).Return(nil)
+ initialDBContent := getDefaultInitialDBContent()
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+
+ Expect(ovs.CreateOVSBridge(ctx, expectedConf)).NotTo(HaveOccurred())
+
+ dbContent := getDBContent(ctx, ovsClient)
+
+ validateDBConfig(dbContent, expectedConf)
+ // should recreate all objects
+ Expect(dbContent.Bridge[0].UUID).NotTo(Equal(initialDBContent.Bridge[0].UUID))
+ Expect(dbContent.Interface[0].UUID).NotTo(Equal(initialDBContent.Interface[0].UUID))
+ Expect(dbContent.Port[0].UUID).NotTo(Equal(initialDBContent.Port[0].UUID))
+ })
+ It("Bridge exist with wrong config, should recreate", func() {
+ expectedConf := getManagedBridges()["br-0000_d8_00.0"]
+ expectedConf.Bridge.DatapathType = "test"
+
+ oldConfig := getManagedBridges()["br-0000_d8_00.0"]
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(oldConfig, nil)
+ store.EXPECT().AddManagedOVSBridge(expectedConf).Return(nil)
+
+ initialDBContent := getDefaultInitialDBContent()
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+
+ Expect(ovs.CreateOVSBridge(ctx, expectedConf)).NotTo(HaveOccurred())
+
+ dbContent := getDBContent(ctx, ovsClient)
+ validateDBConfig(dbContent, expectedConf)
+
+ Expect(dbContent.Bridge[0].UUID).NotTo(Equal(initialDBContent.Bridge[0].UUID))
+ })
+ It("Bridge exist with right config, interface has wrong config, should recreate interface only", func() {
+ expectedConf := getManagedBridges()["br-0000_d8_00.0"]
+ expectedConf.Uplinks[0].Interface.Type = "test"
+
+ oldConfig := getManagedBridges()["br-0000_d8_00.0"]
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(oldConfig, nil)
+ store.EXPECT().AddManagedOVSBridge(expectedConf).Return(nil)
+
+ initialDBContent := getDefaultInitialDBContent()
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+
+ Expect(ovs.CreateOVSBridge(ctx, expectedConf)).NotTo(HaveOccurred())
+
+ dbContent := getDBContent(ctx, ovsClient)
+ validateDBConfig(dbContent, expectedConf)
+
+ Expect(dbContent.Bridge[0].UUID).To(Equal(initialDBContent.Bridge[0].UUID))
+ Expect(dbContent.Interface[0].UUID).NotTo(Equal(initialDBContent.Interface[0].UUID))
+ })
+ It("Interface has an error, should recreate interface only", func() {
+ expectedConf := getManagedBridges()["br-0000_d8_00.0"]
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(expectedConf, nil)
+
+ initialDBContent := getDefaultInitialDBContent()
+ errMsg := "test"
+ initialDBContent.Interface[0].Error = &errMsg
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+
+ Expect(ovs.CreateOVSBridge(ctx, expectedConf)).NotTo(HaveOccurred())
+
+ dbContent := getDBContent(ctx, ovsClient)
+ validateDBConfig(dbContent, expectedConf)
+
+ // keep bridge, recreate iface
+ Expect(dbContent.Bridge[0].UUID).To(Equal(initialDBContent.Bridge[0].UUID))
+ Expect(dbContent.Interface[0].UUID).NotTo(Equal(initialDBContent.Interface[0].UUID))
+ })
+ })
+ Context("GetOVSBridges", func() {
+ It("Bridge exist, but no managed bridges in config", func() {
+ createInitialDBContent(ctx, ovsClient, getDefaultInitialDBContent())
+ store.EXPECT().GetManagedOVSBridges().Return(nil, nil)
+ ret, err := ovs.GetOVSBridges(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ret).To(BeEmpty())
+ })
+ It("Managed bridge exist with the right config", func() {
+ createInitialDBContent(ctx, ovsClient, getDefaultInitialDBContent())
+ conf := getManagedBridges()
+ store.EXPECT().GetManagedOVSBridges().Return(conf, nil)
+ ret, err := ovs.GetOVSBridges(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ret).To(ContainElement(*conf["br-0000_d8_00.0"]))
+ })
+ It("Managed bridge exist, interface not found", func() {
+ initialDBContent := getDefaultInitialDBContent()
+ initialDBContent.Bridge[0].Ports = nil
+ initialDBContent.Interface = nil
+ initialDBContent.Port = nil
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+ conf := getManagedBridges()
+ store.EXPECT().GetManagedOVSBridges().Return(conf, nil)
+ ret, err := ovs.GetOVSBridges(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ret).To(HaveLen(1))
+ Expect(ret[0].Bridge).To(Equal(conf["br-0000_d8_00.0"].Bridge))
+ Expect(ret[0].Uplinks).To(BeEmpty())
+ })
+ It("Config exist, bridge not found", func() {
+ store.EXPECT().GetManagedOVSBridges().Return(getManagedBridges(), nil)
+ ret, err := ovs.GetOVSBridges(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ret).To(BeEmpty())
+ })
+ It("Should report only managed fields", func() {
+ conf := getManagedBridges()
+ store.EXPECT().GetManagedOVSBridges().Return(conf, nil)
+ initialDBContent := getDefaultInitialDBContent()
+ initialDBContent.Bridge[0].ExternalIDs["foo"] = "bar"
+ initialDBContent.Bridge[0].OtherConfig["foo"] = "bar"
+ initialDBContent.Interface[0].ExternalIDs["foo"] = "bar"
+ initialDBContent.Interface[0].OtherConfig["foo"] = "bar"
+ initialDBContent.Interface[0].Options["foo"] = "bar"
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+ ret, err := ovs.GetOVSBridges(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ret).To(ContainElement(*conf["br-0000_d8_00.0"]))
+ })
+ It("Should not report managed fields which are missing in ovsdb", func() {
+ initialDBContent := getDefaultInitialDBContent()
+ initialDBContent.Bridge[0].ExternalIDs = nil
+ initialDBContent.Bridge[0].OtherConfig = nil
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+ conf := getManagedBridges()
+ store.EXPECT().GetManagedOVSBridges().Return(conf, nil)
+ ret, err := ovs.GetOVSBridges(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ret).To(HaveLen(1))
+ Expect(ret[0].Bridge.ExternalIDs).To(BeEmpty())
+ Expect(ret[0].Bridge.OtherConfig).To(BeEmpty())
+ })
+ })
+ Context("RemoveOVSBridge", func() {
+ It("No config", func() {
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(nil, nil)
+ Expect(ovs.RemoveOVSBridge(ctx, "br-0000_d8_00.0")).NotTo(HaveOccurred())
+ })
+ It("Has config, no bridge", func() {
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(getManagedBridges()["br-0000_d8_00.0"], nil)
+ store.EXPECT().RemoveManagedOVSBridge("br-0000_d8_00.0").Return(nil)
+ Expect(ovs.RemoveOVSBridge(ctx, "br-0000_d8_00.0")).NotTo(HaveOccurred())
+ })
+ It("Remove bridge", func() {
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(getManagedBridges()["br-0000_d8_00.0"], nil)
+ store.EXPECT().RemoveManagedOVSBridge("br-0000_d8_00.0").Return(nil)
+ createInitialDBContent(ctx, ovsClient, getDefaultInitialDBContent())
+ Expect(ovs.RemoveOVSBridge(ctx, "br-0000_d8_00.0")).NotTo(HaveOccurred())
+ dbContent := getDBContent(ctx, ovsClient)
+ Expect(dbContent.Bridge).To(BeEmpty())
+ Expect(dbContent.Interface).To(BeEmpty())
+ Expect(dbContent.Port).To(BeEmpty())
+ })
+ It("Should keep unmanaged bridge", func() {
+ store.EXPECT().GetManagedOVSBridge("br-0000_d8_00.0").Return(nil, nil)
+ initialDBContent := getDefaultInitialDBContent()
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+ Expect(ovs.RemoveOVSBridge(ctx, "br-0000_d8_00.0")).NotTo(HaveOccurred())
+ Expect(getDBContent(ctx, ovsClient)).To(Equal(initialDBContent))
+ })
+ })
+ Context("RemoveInterfaceFromOVSBridge", func() {
+ It("should not remove if interface is part of unmanaged bridge", func() {
+ store.EXPECT().GetManagedOVSBridges().Return(nil, nil)
+ initialDBContent := getDefaultInitialDBContent()
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+ Expect(ovs.RemoveInterfaceFromOVSBridge(ctx, "0000:d8:00.0")).NotTo(HaveOccurred())
+ Expect(getDBContent(ctx, ovsClient)).To(Equal(initialDBContent))
+ })
+ It("should remove interface from managed bridge", func() {
+ store.EXPECT().GetManagedOVSBridges().Return(getManagedBridges(), nil)
+ initialDBContent := getDefaultInitialDBContent()
+ createInitialDBContent(ctx, ovsClient, initialDBContent)
+ Expect(ovs.RemoveInterfaceFromOVSBridge(ctx, "0000:d8:00.0")).NotTo(HaveOccurred())
+ dbContent := getDBContent(ctx, ovsClient)
+ Expect(dbContent.Bridge[0].UUID).To(Equal(initialDBContent.Bridge[0].UUID))
+ Expect(dbContent.Interface).To(BeEmpty())
+ Expect(dbContent.Port).To(BeEmpty())
+ })
+ It("bridge not found", func() {
+ store.EXPECT().GetManagedOVSBridges().Return(getManagedBridges(), nil)
+ store.EXPECT().RemoveManagedOVSBridge("br-0000_d8_00.0").Return(nil)
+ Expect(ovs.RemoveInterfaceFromOVSBridge(ctx, "0000:d8:00.0")).NotTo(HaveOccurred())
+ })
+ })
+ })
+
+})
+
// startServer starts an in-memory OVSDB server listening on the provided
// protocol/path and returns a function that stops it and waits (up to 10s)
// for the serve goroutine to exit.
func startServer(protocol, path string) func() {
	clientDBModels, err := DatabaseModel()
	Expect(err).NotTo(HaveOccurred())
	schema := getSchema()
	ovsDB := inmemory.NewDatabase(map[string]model.ClientDBModel{
		schema.Name: clientDBModels,
	})

	dbModel, errs := model.NewDatabaseModel(schema, clientDBModels)
	Expect(errs).To(BeEmpty())
	s, err := server.NewOvsdbServer(ovsDB, dbModel)
	Expect(err).NotTo(HaveOccurred())

	// closed by the serve goroutine on exit; used by the stop function below
	stopped := make(chan struct{})

	go func() {
		defer GinkgoRecover()
		defer close(stopped)
		Expect(s.Serve(protocol, path)).NotTo(HaveOccurred())
	}()
	// wait until the server reports readiness before handing it to tests
	Eventually(func(g Gomega) {
		g.Expect(s.Ready()).To(BeTrue())
	}).WithTimeout(time.Second * 5).WithPolling(time.Millisecond * 100).Should(Succeed())
	return func() {
		s.Close()
		select {
		case <-stopped:
			return
		case <-time.After(time.Second * 10):
			// NOTE(review): forced failure via an always-non-nil error;
			// ginkgo's Fail("failed to stop ovsdb server") would be clearer
			Expect(fmt.Errorf("failed to stop ovsdb server")).NotTo(HaveOccurred())
		}
	}
}
+
const (
	// schemaFile is the partial OVS schema (relative to the package
	// directory) loaded by getSchema for the in-memory test server
	schemaFile = "test_db.ovsschema"
)
+
+// getSchema returns partial OVS schema to use in test OVSDB server
+func getSchema() ovsdb.DatabaseSchema {
+ schema, err := os.ReadFile(schemaFile)
+ Expect(err).NotTo(HaveOccurred())
+ var s ovsdb.DatabaseSchema
+ err = json.Unmarshal(schema, &s)
+ Expect(err).NotTo(HaveOccurred())
+ return s
+}
diff --git a/pkg/host/internal/bridge/ovs/store/mock/mock_store.go b/pkg/host/internal/bridge/ovs/store/mock/mock_store.go
new file mode 100644
index 0000000000..2f98b96b9f
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/store/mock/mock_store.go
@@ -0,0 +1,93 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: store.go
+
+// Package mock_store is a generated GoMock package.
+package mock_store
+
+import (
+ reflect "reflect"
+
+ gomock "github.com/golang/mock/gomock"
+ v1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
+)
+
+// MockStore is a mock of Store interface.
+type MockStore struct {
+ ctrl *gomock.Controller
+ recorder *MockStoreMockRecorder
+}
+
+// MockStoreMockRecorder is the mock recorder for MockStore.
+type MockStoreMockRecorder struct {
+ mock *MockStore
+}
+
+// NewMockStore creates a new mock instance.
+func NewMockStore(ctrl *gomock.Controller) *MockStore {
+ mock := &MockStore{ctrl: ctrl}
+ mock.recorder = &MockStoreMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockStore) EXPECT() *MockStoreMockRecorder {
+ return m.recorder
+}
+
+// AddManagedOVSBridge mocks base method.
+func (m *MockStore) AddManagedOVSBridge(br *v1.OVSConfigExt) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddManagedOVSBridge", br)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddManagedOVSBridge indicates an expected call of AddManagedOVSBridge.
+func (mr *MockStoreMockRecorder) AddManagedOVSBridge(br interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddManagedOVSBridge", reflect.TypeOf((*MockStore)(nil).AddManagedOVSBridge), br)
+}
+
+// GetManagedOVSBridge mocks base method.
+func (m *MockStore) GetManagedOVSBridge(name string) (*v1.OVSConfigExt, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetManagedOVSBridge", name)
+ ret0, _ := ret[0].(*v1.OVSConfigExt)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetManagedOVSBridge indicates an expected call of GetManagedOVSBridge.
+func (mr *MockStoreMockRecorder) GetManagedOVSBridge(name interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagedOVSBridge", reflect.TypeOf((*MockStore)(nil).GetManagedOVSBridge), name)
+}
+
+// GetManagedOVSBridges mocks base method.
+func (m *MockStore) GetManagedOVSBridges() (map[string]*v1.OVSConfigExt, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetManagedOVSBridges")
+ ret0, _ := ret[0].(map[string]*v1.OVSConfigExt)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetManagedOVSBridges indicates an expected call of GetManagedOVSBridges.
+func (mr *MockStoreMockRecorder) GetManagedOVSBridges() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagedOVSBridges", reflect.TypeOf((*MockStore)(nil).GetManagedOVSBridges))
+}
+
+// RemoveManagedOVSBridge mocks base method.
+func (m *MockStore) RemoveManagedOVSBridge(name string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveManagedOVSBridge", name)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// RemoveManagedOVSBridge indicates an expected call of RemoveManagedOVSBridge.
+func (mr *MockStoreMockRecorder) RemoveManagedOVSBridge(name interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveManagedOVSBridge", reflect.TypeOf((*MockStore)(nil).RemoveManagedOVSBridge), name)
+}
diff --git a/pkg/host/internal/bridge/ovs/store/store.go b/pkg/host/internal/bridge/ovs/store/store.go
new file mode 100644
index 0000000000..40c759ed00
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/store/store.go
@@ -0,0 +1,219 @@
+package store
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/google/renameio/v2"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/consts"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/utils"
+)
+
+// Store interface provides methods to store and query information
+// about OVS bridges that are managed by the operator
+//
+//go:generate ../../../../../../bin/mockgen -destination mock/mock_store.go -source store.go
+type Store interface {
+ // GetManagedOVSBridges returns map with saved information about managed OVS bridges.
+ // Bridge name is a key in the map
+ GetManagedOVSBridges() (map[string]*sriovnetworkv1.OVSConfigExt, error)
+ // GetManagedOVSBridge returns saved information about managed OVS bridge
+ GetManagedOVSBridge(name string) (*sriovnetworkv1.OVSConfigExt, error)
+	// AddManagedOVSBridge saves information about the OVS bridge
+ AddManagedOVSBridge(br *sriovnetworkv1.OVSConfigExt) error
+ // RemoveManagedOVSBridge removes saved information about the OVS bridge
+ RemoveManagedOVSBridge(name string) error
+}
+
+// New returns default implementation of Store interfaces
+func New() Store {
+ s := &ovsStore{
+ lock: &sync.RWMutex{},
+ }
+ return s
+}
+
+type ovsStore struct {
+ lock *sync.RWMutex
+ cache map[string]sriovnetworkv1.OVSConfigExt
+}
+
+// loads data from the fs if required
+func (s *ovsStore) ensureCacheIsLoaded() error {
+ funcLog := log.Log
+ if s.cache != nil {
+ funcLog.V(2).Info("ensureCacheIsLoaded(): cache is already loaded")
+ return nil
+ }
+ s.lock.Lock()
+ defer s.lock.Unlock()
+	// check again after we got the lock to make sure that the cache was
+	// not loaded by another goroutine while we were waiting for the lock
+ if s.cache != nil {
+ return nil
+ }
+ funcLog.V(2).Info("ensureCacheIsLoaded(): load store cache from the FS")
+ var err error
+ err = s.ensureStoreDirExist()
+ if err != nil {
+ funcLog.Error(err, "ensureCacheIsLoaded(): failed to create store dir")
+ return err
+ }
+ s.cache, err = s.readStoreFile()
+ if err != nil {
+ funcLog.Error(err, "ensureCacheIsLoaded(): failed to read store file")
+ return err
+ }
+ return nil
+}
+
+// GetManagedOVSBridges returns map with saved information about managed OVS bridges.
+// Bridge name is a key in the map
+func (s *ovsStore) GetManagedOVSBridges() (map[string]*sriovnetworkv1.OVSConfigExt, error) {
+ funcLog := log.Log
+ funcLog.V(1).Info("GetManagedOVSBridges(): get information about all managed OVS bridges from the store")
+ if err := s.ensureCacheIsLoaded(); err != nil {
+ return nil, err
+ }
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+ result := make(map[string]*sriovnetworkv1.OVSConfigExt, len(s.cache))
+ for k, v := range s.cache {
+ result[k] = v.DeepCopy()
+ }
+ if funcLog.V(2).Enabled() {
+ data, _ := json.Marshal(result)
+ funcLog.V(2).Info("GetManagedOVSBridges()", "result", string(data))
+ }
+ return result, nil
+}
+
+// GetManagedOVSBridge returns saved information about managed OVS bridge
+func (s *ovsStore) GetManagedOVSBridge(name string) (*sriovnetworkv1.OVSConfigExt, error) {
+ funcLog := log.Log.WithValues("name", name)
+ funcLog.V(1).Info("GetManagedOVSBridge(): get information about managed OVS bridge from the store")
+ if err := s.ensureCacheIsLoaded(); err != nil {
+ return nil, err
+ }
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+ b, found := s.cache[name]
+ if !found {
+ funcLog.V(2).Info("GetManagedOVSBridge(): bridge info not found")
+ return nil, nil
+ }
+ if funcLog.V(2).Enabled() {
+ data, _ := json.Marshal(&b)
+ funcLog.V(2).Info("GetManagedOVSBridge()", "result", string(data))
+ }
+ return b.DeepCopy(), nil
+}
+
+// AddManagedOVSBridge saves information about the OVS bridge
+func (s *ovsStore) AddManagedOVSBridge(br *sriovnetworkv1.OVSConfigExt) error {
+ log.Log.V(1).Info("AddManagedOVSBridge(): add information about managed OVS bridge to the store", "name", br.Name)
+ if err := s.ensureCacheIsLoaded(); err != nil {
+ return err
+ }
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ revert := s.putCacheEntryToStash(br.Name)
+ s.cache[br.Name] = *br.DeepCopy()
+ if err := s.writeStoreFile(); err != nil {
+ revert()
+ return err
+ }
+ return nil
+}
+
+// RemoveManagedOVSBridge removes saved information about the OVS bridge
+func (s *ovsStore) RemoveManagedOVSBridge(name string) error {
+ log.Log.V(1).Info("RemoveManagedOVSBridge(): remove information about managed OVS bridge from the store", "name", name)
+ if err := s.ensureCacheIsLoaded(); err != nil {
+ return err
+ }
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ revert := s.putCacheEntryToStash(name)
+ delete(s.cache, name)
+ if err := s.writeStoreFile(); err != nil {
+ revert()
+ return err
+ }
+ return nil
+}
+
+// saves the current value from the cache to a temporary variable
+// and returns a function that can be used to restore it in the cache.
+// The caller of this function must hold the write lock for the store.
+func (s *ovsStore) putCacheEntryToStash(key string) func() {
+ origValue, isSet := s.cache[key]
+ return func() {
+ if isSet {
+ s.cache[key] = origValue
+ } else {
+ delete(s.cache, key)
+ }
+ }
+}
+
+func (s *ovsStore) ensureStoreDirExist() error {
+ storeDir := filepath.Dir(s.getStoreFilePath())
+ _, err := os.Stat(storeDir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = os.MkdirAll(storeDir, 0755)
+ if err != nil {
+ return fmt.Errorf("failed to create directory for store %s: %v", storeDir, err)
+ }
+ } else {
+ return fmt.Errorf("failed to check if directory for store exist %s: %v", storeDir, err)
+ }
+ }
+ return nil
+}
+
+func (s *ovsStore) readStoreFile() (map[string]sriovnetworkv1.OVSConfigExt, error) {
+ storeFilePath := s.getStoreFilePath()
+ funcLog := log.Log.WithValues("storeFilePath", storeFilePath)
+ funcLog.V(2).Info("readStoreFile(): read OVS store file")
+ result := map[string]sriovnetworkv1.OVSConfigExt{}
+ data, err := os.ReadFile(storeFilePath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ funcLog.V(2).Info("readStoreFile(): OVS store file not found")
+ return result, nil
+ }
+ return nil, err
+ }
+ if err := json.Unmarshal(data, &result); err != nil {
+ funcLog.Error(err, "readStoreFile(): failed to unmarshal content of the OVS store file")
+ return nil, err
+ }
+ return result, nil
+}
+
+func (s *ovsStore) writeStoreFile() error {
+ storeFilePath := s.getStoreFilePath()
+ funcLog := log.Log.WithValues("storeFilePath", storeFilePath)
+ data, err := json.Marshal(s.cache)
+ if err != nil {
+ funcLog.Error(err, "writeStoreFile(): can't serialize cached info about managed OVS bridges")
+ return err
+ }
+ if err := renameio.WriteFile(storeFilePath, data, 0644); err != nil {
+ funcLog.Error(err, "writeStoreFile(): can't write info about managed OVS bridge to disk")
+ return err
+ }
+ return nil
+}
+
+func (s *ovsStore) getStoreFilePath() string {
+ return utils.GetHostExtensionPath(consts.ManagedOVSBridgesPath)
+}
diff --git a/pkg/host/internal/bridge/ovs/store/store_test.go b/pkg/host/internal/bridge/ovs/store/store_test.go
new file mode 100644
index 0000000000..7e4bfb351c
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/store/store_test.go
@@ -0,0 +1,75 @@
+package store
+
+import (
+ "sync"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/consts"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/test/util/fakefilesystem"
+ "github.com/k8snetworkplumbingwg/sriov-network-operator/test/util/helpers"
+)
+
+func getStore() Store {
+ s := New()
+ Expect(s).NotTo(BeNil())
+ return s
+}
+
+var _ = Describe("OVS store", func() {
+ It("load data from disk", func() {
+ helpers.GinkgoConfigureFakeFS(&fakefilesystem.FS{
+ Dirs: []string{"/host/etc/sriov-operator/"},
+ Files: map[string][]byte{"/host" + consts.ManagedOVSBridgesPath: []byte(`{"test": {"name": "test"}}`)}})
+ s := getStore()
+ b, err := s.GetManagedOVSBridge("test")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(b).NotTo(BeNil())
+ Expect(b.Name).To(Equal("test"))
+ })
+ It("should read saved data", func() {
+ helpers.GinkgoConfigureFakeFS(&fakefilesystem.FS{})
+ s := getStore()
+ testObj := &sriovnetworkv1.OVSConfigExt{Name: "test", Bridge: sriovnetworkv1.OVSBridgeConfig{DatapathType: "test"}}
+ Expect(s.AddManagedOVSBridge(testObj)).NotTo(HaveOccurred())
+ ret, err := s.GetManagedOVSBridge("test")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ret).To(Equal(testObj))
+ retMap, err := s.GetManagedOVSBridges()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(retMap["test"]).To(Equal(testObj))
+ })
+ It("should persist writes on disk", func() {
+ helpers.GinkgoConfigureFakeFS(&fakefilesystem.FS{})
+ s := getStore()
+ testObj := &sriovnetworkv1.OVSConfigExt{Name: "test", Bridge: sriovnetworkv1.OVSBridgeConfig{DatapathType: "test"}}
+ Expect(s.AddManagedOVSBridge(testObj)).NotTo(HaveOccurred())
+ helpers.GinkgoAssertFileContentsEquals("/host"+consts.ManagedOVSBridgesPath,
+ `{"test":{"name":"test","bridge":{"datapathType":"test"}}}`)
+ Expect(s.RemoveManagedOVSBridge("test")).NotTo(HaveOccurred())
+ helpers.GinkgoAssertFileContentsEquals("/host"+consts.ManagedOVSBridgesPath, "{}")
+ })
+ It("stash/restore", func() {
+ s := &ovsStore{
+ lock: &sync.RWMutex{},
+ cache: make(map[string]sriovnetworkv1.OVSConfigExt),
+ }
+ s.cache["a"] = sriovnetworkv1.OVSConfigExt{Name: "a"}
+ s.cache["b"] = sriovnetworkv1.OVSConfigExt{Name: "b"}
+ aRestore := s.putCacheEntryToStash("a")
+ bRestore := s.putCacheEntryToStash("b")
+ cRestore := s.putCacheEntryToStash("c")
+ s.cache["a"] = sriovnetworkv1.OVSConfigExt{Name: "replaced"}
+ delete(s.cache, "b")
+ s.cache["c"] = sriovnetworkv1.OVSConfigExt{Name: "created"}
+
+ aRestore()
+ bRestore()
+ cRestore()
+ Expect(s.cache["a"].Name).To(Equal("a"))
+ Expect(s.cache["b"].Name).To(Equal("b"))
+ Expect(s.cache).NotTo(HaveKey("c"))
+ })
+})
diff --git a/pkg/host/internal/bridge/ovs/store/suite_test.go b/pkg/host/internal/bridge/ovs/store/suite_test.go
new file mode 100644
index 0000000000..2480de34c1
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/store/suite_test.go
@@ -0,0 +1,21 @@
+package store
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "go.uber.org/zap/zapcore"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+func TestStore(t *testing.T) {
+ log.SetLogger(zap.New(
+ zap.WriteTo(GinkgoWriter),
+ zap.Level(zapcore.Level(-2)),
+ zap.UseDevMode(true)))
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Package OVS Store Suite")
+}
diff --git a/pkg/host/internal/bridge/ovs/suite_test.go b/pkg/host/internal/bridge/ovs/suite_test.go
new file mode 100644
index 0000000000..17190c0215
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/suite_test.go
@@ -0,0 +1,24 @@
+package ovs
+
+import (
+ "testing"
+
+ "github.com/go-logr/stdr"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "go.uber.org/zap/zapcore"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+func TestOVS(t *testing.T) {
+ log.SetLogger(zap.New(
+ zap.WriteTo(GinkgoWriter),
+ zap.Level(zapcore.Level(-2)),
+ zap.UseDevMode(true)))
+ // to reduce verbosity of ovsdb server's logs
+ stdr.SetVerbosity(0)
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Package OVS Suite")
+}
diff --git a/pkg/host/internal/bridge/ovs/test_db.ovsschema b/pkg/host/internal/bridge/ovs/test_db.ovsschema
new file mode 100644
index 0000000000..46c59dd0c0
--- /dev/null
+++ b/pkg/host/internal/bridge/ovs/test_db.ovsschema
@@ -0,0 +1,179 @@
+{
+ "name": "Open_vSwitch",
+ "version": "8.3.0",
+ "tables": {
+ "Bridge": {
+ "columns": {
+ "datapath_type": {
+ "type": "string"
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string",
+ "mutable": false
+ },
+ "other_config": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "ports": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Port"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ]
+ },
+ "Interface": {
+ "columns": {
+ "error": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string",
+ "mutable": false
+ },
+ "options": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "other_config": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "type": {
+ "type": "string"
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ]
+ },
+ "Open_vSwitch": {
+ "columns": {
+ "bridges": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Bridge"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "isRoot": true
+ },
+ "Port": {
+ "columns": {
+ "external_ids": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ },
+ "interfaces": {
+ "type": {
+ "key": {
+ "type": "uuid",
+ "refTable": "Interface"
+ },
+ "min": 1,
+ "max": "unlimited"
+ }
+ },
+ "name": {
+ "type": "string",
+ "mutable": false
+ },
+ "other_config": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": "unlimited"
+ }
+ }
+ },
+ "indexes": [
+ [
+ "name"
+ ]
+ ]
+ }
+ }
+}
diff --git a/pkg/host/internal/bridge/suite_test.go b/pkg/host/internal/bridge/suite_test.go
new file mode 100644
index 0000000000..8ece6c2190
--- /dev/null
+++ b/pkg/host/internal/bridge/suite_test.go
@@ -0,0 +1,21 @@
+package bridge
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "go.uber.org/zap/zapcore"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+func TestBridge(t *testing.T) {
+ log.SetLogger(zap.New(
+ zap.WriteTo(GinkgoWriter),
+ zap.Level(zapcore.Level(-2)),
+ zap.UseDevMode(true)))
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Package Bridge Suite")
+}
diff --git a/pkg/host/types/interfaces.go b/pkg/host/types/interfaces.go
index e9e8a4ef02..55d2fe620d 100644
--- a/pkg/host/types/interfaces.go
+++ b/pkg/host/types/interfaces.go
@@ -193,3 +193,14 @@ type VdpaInterface interface {
// returns empty string if VDPA device not found or unknown driver is in use
DiscoverVDPAType(pciAddr string) string
}
+
+type BridgeInterface interface {
+ // DiscoverBridges returns information about managed bridges on the host
+ DiscoverBridges() (sriovnetworkv1.Bridges, error)
+	// ConfigureBridges configures managed bridges for the host
+ ConfigureBridges(bridgesSpec sriovnetworkv1.Bridges, bridgesStatus sriovnetworkv1.Bridges) error
+	// DetachInterfaceFromManagedBridge detaches the interface from a managed bridge;
+	// this step is required before applying some configurations to the PF, e.g. changing the eSwitch mode.
+	// It detaches the interface from managed bridges only.
+ DetachInterfaceFromManagedBridge(pciAddr string) error
+}
diff --git a/pkg/vars/vars.go b/pkg/vars/vars.go
index 0d2b9a39da..c4ff9b9cd7 100644
--- a/pkg/vars/vars.go
+++ b/pkg/vars/vars.go
@@ -54,6 +54,9 @@ var (
// FilesystemRoot used by test to mock interactions with filesystem
FilesystemRoot = ""
+ // OVSDBSocketPath path to OVSDB socket
+ OVSDBSocketPath = "unix:///var/run/openvswitch/db.sock"
+
//Cluster variables
Config *rest.Config = nil
Scheme *runtime.Scheme = nil
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore
new file mode 100644
index 0000000000..50d95c548b
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE
new file mode 100644
index 0000000000..89b8179965
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 0000000000..16abdfc084
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,32 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retries exponentially increase and stop increasing when a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[travis]: https://travis-ci.org/cenkalti/backoff
+[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 0000000000..3676ee405d
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+ // NextBackOff returns the duration to wait before retrying the operation,
+ // or backoff. Stop to indicate that no more retries should be made.
+ //
+ // Example usage:
+ //
+ // duration := backoff.NextBackOff();
+ // if (duration == backoff.Stop) {
+ // // Do not retry operation.
+ // } else {
+ // // Sleep for duration and retry operation.
+ // }
+ //
+ NextBackOff() time.Duration
+
+ // Reset to initial state.
+ Reset()
+}
+
+// Stop indicates that no more retries should be made for use in NextBackOff().
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+ Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset() {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+ return &ConstantBackOff{Interval: d}
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
new file mode 100644
index 0000000000..48482330eb
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/context.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+ "context"
+ "time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint
+ BackOff
+ Context() context.Context
+}
+
+type backOffContext struct {
+ BackOff
+ ctx context.Context
+}
+
+// WithContext returns a BackOffContext with context ctx
+//
+// ctx must not be nil
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
+ if ctx == nil {
+ panic("nil context")
+ }
+
+ if b, ok := b.(*backOffContext); ok {
+ return &backOffContext{
+ BackOff: b.BackOff,
+ ctx: ctx,
+ }
+ }
+
+ return &backOffContext{
+ BackOff: b,
+ ctx: ctx,
+ }
+}
+
+func getContext(b BackOff) context.Context {
+ if cb, ok := b.(BackOffContext); ok {
+ return cb.Context()
+ }
+ if tb, ok := b.(*backOffTries); ok {
+ return getContext(tb.delegate)
+ }
+ return context.Background()
+}
+
+func (b *backOffContext) Context() context.Context {
+ return b.ctx
+}
+
+func (b *backOffContext) NextBackOff() time.Duration {
+ select {
+ case <-b.ctx.Done():
+ return Stop
+ default:
+ return b.BackOff.NextBackOff()
+ }
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
new file mode 100644
index 0000000000..2c56c1e718
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -0,0 +1,161 @@
+package backoff
+
+import (
+ "math/rand"
+ "time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+ RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the following default arguments, for 10 tries the sequence will be,
+and assuming we go over the MaxElapsedTime on the 10th try:
+
+ Request # RetryInterval (seconds) Randomized Interval (seconds)
+
+ 1 0.5 [0.25, 0.75]
+ 2 0.75 [0.375, 1.125]
+ 3 1.125 [0.562, 1.687]
+ 4 1.687 [0.8435, 2.53]
+ 5 2.53 [1.265, 3.795]
+ 6 3.795 [1.897, 5.692]
+ 7 5.692 [2.846, 8.538]
+ 8 8.538 [4.269, 12.807]
+ 9 12.807 [6.403, 19.210]
+ 10 19.210 backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+ InitialInterval time.Duration
+ RandomizationFactor float64
+ Multiplier float64
+ MaxInterval time.Duration
+ // After MaxElapsedTime the ExponentialBackOff returns Stop.
+ // It never stops if MaxElapsedTime == 0.
+ MaxElapsedTime time.Duration
+ Stop time.Duration
+ Clock Clock
+
+ currentInterval time.Duration
+ startTime time.Time
+}
+
+// Clock is an interface that returns current time for BackOff.
+type Clock interface {
+ Now() time.Time
+}
+
+// Default values for ExponentialBackOff.
+const (
+ DefaultInitialInterval = 500 * time.Millisecond
+ DefaultRandomizationFactor = 0.5
+ DefaultMultiplier = 1.5
+ DefaultMaxInterval = 60 * time.Second
+ DefaultMaxElapsedTime = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff() *ExponentialBackOff {
+ b := &ExponentialBackOff{
+ InitialInterval: DefaultInitialInterval,
+ RandomizationFactor: DefaultRandomizationFactor,
+ Multiplier: DefaultMultiplier,
+ MaxInterval: DefaultMaxInterval,
+ MaxElapsedTime: DefaultMaxElapsedTime,
+ Stop: Stop,
+ Clock: SystemClock,
+ }
+ b.Reset()
+ return b
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+ return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+ b.currentInterval = b.InitialInterval
+ b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+ // Make sure we have not gone over the maximum elapsed time.
+ elapsed := b.GetElapsedTime()
+ next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+ b.incrementCurrentInterval()
+ if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+ return b.Stop
+ }
+ return next
+}
+
+// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
+// is created and is reset when Reset() is called.
+//
+// The elapsed time is computed using time.Now().UnixNano(). It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+ return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+ // Check for overflow, if overflow is detected set the current interval to the max interval.
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+ b.currentInterval = b.MaxInterval
+ } else {
+ b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+ }
+}
+
+// Returns a random value from the following interval:
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+ if randomizationFactor == 0 {
+ return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+ }
+ var delta = randomizationFactor * float64(currentInterval)
+ var minInterval = float64(currentInterval) - delta
+ var maxInterval = float64(currentInterval) + delta
+
+ // Get a random value from the range [minInterval, maxInterval].
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+ // we want a 33% chance for selecting either 1, 2 or 3.
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 0000000000..b9c0c51cd7
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+ "errors"
+ "time"
+)
+
+// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executing by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+ return func() (struct{}, error) {
+ return struct{}{}, o()
+ }
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy stated to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+ return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+ return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+ return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+ return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+ _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+ return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ var (
+ err error
+ next time.Duration
+ res T
+ )
+ if t == nil {
+ t = &defaultTimer{}
+ }
+
+ defer func() {
+ t.Stop()
+ }()
+
+ ctx := getContext(b)
+
+ b.Reset()
+ for {
+ res, err = operation()
+ if err == nil {
+ return res, nil
+ }
+
+ var permanent *PermanentError
+ if errors.As(err, &permanent) {
+ return res, permanent.Err
+ }
+
+ if next = b.NextBackOff(); next == Stop {
+ if cerr := ctx.Err(); cerr != nil {
+ return res, cerr
+ }
+
+ return res, err
+ }
+
+ if notify != nil {
+ notify(err, next)
+ }
+
+ t.Start(next)
+
+ select {
+ case <-ctx.Done():
+ return res, ctx.Err()
+ case <-t.C():
+ }
+ }
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+ Err error
+}
+
+func (e *PermanentError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+ return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+ _, ok := target.(*PermanentError)
+ return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &PermanentError{
+ Err: err,
+ }
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go
new file mode 100644
index 0000000000..df9d68bce5
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+ C <-chan time.Time
+ c chan time.Time
+ b BackOff
+ ctx context.Context
+ timer Timer
+ stop chan struct{}
+ stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once. The channel is closed when Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+ return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+ if timer == nil {
+ timer = &defaultTimer{}
+ }
+ c := make(chan time.Time)
+ t := &Ticker{
+ C: c,
+ c: c,
+ b: b,
+ ctx: getContext(b),
+ timer: timer,
+ stop: make(chan struct{}),
+ }
+ t.b.Reset()
+ go t.run()
+ return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+ t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+ c := t.c
+ defer close(c)
+
+ // Ticker is guaranteed to tick at least once.
+ afterC := t.send(time.Now())
+
+ for {
+ if afterC == nil {
+ return
+ }
+
+ select {
+ case tick := <-afterC:
+ afterC = t.send(tick)
+ case <-t.stop:
+ t.c = nil // Prevent future ticks from being sent to the channel.
+ return
+ case <-t.ctx.Done():
+ return
+ }
+ }
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+ select {
+ case t.c <- tick:
+ case <-t.stop:
+ return nil
+ }
+
+ next := t.b.NextBackOff()
+ if next == Stop {
+ t.Stop()
+ return nil
+ }
+
+ t.timer.Start(next)
+ return t.timer.C()
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go
new file mode 100644
index 0000000000..8120d0213c
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/timer.go
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+type Timer interface {
+ Start(duration time.Duration)
+ Stop()
+ C() <-chan time.Time
+}
+
+// defaultTimer implements Timer interface using time.Timer
+type defaultTimer struct {
+ timer *time.Timer
+}
+
+// C returns the timers channel which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+ return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration
+func (t *defaultTimer) Start(duration time.Duration) {
+ if t.timer == nil {
+ t.timer = time.NewTimer(duration)
+ } else {
+ t.timer.Reset(duration)
+ }
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+ if t.timer != nil {
+ t.timer.Stop()
+ }
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
new file mode 100644
index 0000000000..28d58ca37c
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/tries.go
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop if NextBackOff() has been called too many times since
+the last time Reset() was called
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+ return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+ delegate BackOff
+ maxTries uint64
+ numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+ if b.maxTries == 0 {
+ return Stop
+ }
+ if b.maxTries > 0 {
+ if b.maxTries <= b.numTries {
+ return Stop
+ }
+ b.numTries++
+ }
+ return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+ b.numTries = 0
+ b.delegate.Reset()
+}
diff --git a/vendor/github.com/cenkalti/hub/.gitignore b/vendor/github.com/cenkalti/hub/.gitignore
new file mode 100644
index 0000000000..00268614f0
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/cenkalti/hub/.travis.yml b/vendor/github.com/cenkalti/hub/.travis.yml
new file mode 100644
index 0000000000..b05e4c53fa
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go: 1.2
+
diff --git a/vendor/github.com/cenkalti/hub/LICENSE b/vendor/github.com/cenkalti/hub/LICENSE
new file mode 100644
index 0000000000..89b8179965
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/hub/README.md b/vendor/github.com/cenkalti/hub/README.md
new file mode 100644
index 0000000000..d3f2118183
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/README.md
@@ -0,0 +1,5 @@
+hub
+===
+
+[![GoDoc](https://godoc.org/github.com/cenkalti/hub?status.png)](https://godoc.org/github.com/cenkalti/hub)
+[![Build Status](https://travis-ci.org/cenkalti/hub.png)](https://travis-ci.org/cenkalti/hub)
diff --git a/vendor/github.com/cenkalti/hub/hub.go b/vendor/github.com/cenkalti/hub/hub.go
new file mode 100644
index 0000000000..24c5efa861
--- /dev/null
+++ b/vendor/github.com/cenkalti/hub/hub.go
@@ -0,0 +1,82 @@
+// Package hub provides a simple event dispatcher for publish/subscribe pattern.
+package hub
+
+import "sync"
+
+type Kind int
+
+// Event is an interface for published events.
+type Event interface {
+ Kind() Kind
+}
+
+// Hub is an event dispatcher, publishes events to the subscribers
+// which are subscribed for a specific event type.
+// Optimized for publish calls.
+// The handlers may be called in order different than they are registered.
+type Hub struct {
+ subscribers map[Kind][]handler
+ m sync.RWMutex
+ seq uint64
+}
+
+type handler struct {
+ f func(Event)
+ id uint64
+}
+
+// Subscribe registers f for the event of a specific kind.
+func (h *Hub) Subscribe(kind Kind, f func(Event)) (cancel func()) {
+ var cancelled bool
+ h.m.Lock()
+ h.seq++
+ id := h.seq
+ if h.subscribers == nil {
+ h.subscribers = make(map[Kind][]handler)
+ }
+ h.subscribers[kind] = append(h.subscribers[kind], handler{id: id, f: f})
+ h.m.Unlock()
+ return func() {
+ h.m.Lock()
+ if cancelled {
+ h.m.Unlock()
+ return
+ }
+ cancelled = true
+ a := h.subscribers[kind]
+ for i, f := range a {
+ if f.id == id {
+ a[i], h.subscribers[kind] = a[len(a)-1], a[:len(a)-1]
+ break
+ }
+ }
+ if len(a) == 0 {
+ delete(h.subscribers, kind)
+ }
+ h.m.Unlock()
+ }
+}
+
+// Publish an event to the subscribers.
+func (h *Hub) Publish(e Event) {
+ h.m.RLock()
+ if handlers, ok := h.subscribers[e.Kind()]; ok {
+ for _, h := range handlers {
+ h.f(e)
+ }
+ }
+ h.m.RUnlock()
+}
+
+// DefaultHub is the default Hub used by Publish and Subscribe.
+var DefaultHub Hub
+
+// Subscribe registers f for the event of a specific kind in the DefaultHub.
+func Subscribe(kind Kind, f func(Event)) (cancel func()) {
+ return DefaultHub.Subscribe(kind, f)
+}
+
+// Publish an event to the subscribers in DefaultHub.
+func Publish(e Event) {
+ DefaultHub.Publish(e)
+}
diff --git a/vendor/github.com/cenkalti/rpc2/.gitignore b/vendor/github.com/cenkalti/rpc2/.gitignore
new file mode 100644
index 0000000000..836562412f
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/cenkalti/rpc2/.travis.yml b/vendor/github.com/cenkalti/rpc2/.travis.yml
new file mode 100644
index 0000000000..ae8233c2bf
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+ - 1.15
+ - tip
+
+arch:
+ - amd64
+ - ppc64le
diff --git a/vendor/github.com/cenkalti/rpc2/LICENSE b/vendor/github.com/cenkalti/rpc2/LICENSE
new file mode 100644
index 0000000000..d565b1b1fb
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/cenkalti/rpc2/README.md b/vendor/github.com/cenkalti/rpc2/README.md
new file mode 100644
index 0000000000..3dffd26e43
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/README.md
@@ -0,0 +1,82 @@
+rpc2
+====
+
+[![GoDoc](https://godoc.org/github.com/cenkalti/rpc2?status.png)](https://godoc.org/github.com/cenkalti/rpc2)
+[![Build Status](https://travis-ci.org/cenkalti/rpc2.png)](https://travis-ci.org/cenkalti/rpc2)
+
+rpc2 is a fork of net/rpc package in the standard library.
+The main goal is to add bi-directional support to calls.
+That means server can call the methods of client.
+This is not possible with net/rpc package.
+In order to do this it adds a `*Client` argument to method signatures.
+
+Install
+--------
+
+ go get github.com/cenkalti/rpc2
+
+Example server
+---------------
+
+```go
+package main
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/cenkalti/rpc2"
+)
+
+type Args struct{ A, B int }
+type Reply int
+
+func main() {
+ srv := rpc2.NewServer()
+ srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error {
+
+ // Reversed call (server to client)
+ var rep Reply
+ client.Call("mult", Args{2, 3}, &rep)
+ fmt.Println("mult result:", rep)
+
+ *reply = Reply(args.A + args.B)
+ return nil
+ })
+
+ lis, _ := net.Listen("tcp", "127.0.0.1:5000")
+ srv.Accept(lis)
+}
+```
+
+Example Client
+---------------
+
+```go
+package main
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/cenkalti/rpc2"
+)
+
+type Args struct{ A, B int }
+type Reply int
+
+func main() {
+ conn, _ := net.Dial("tcp", "127.0.0.1:5000")
+
+ clt := rpc2.NewClient(conn)
+ clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error {
+ *reply = Reply(args.A * args.B)
+ return nil
+ })
+ go clt.Run()
+
+ var rep Reply
+ clt.Call("add", Args{1, 2}, &rep)
+ fmt.Println("add result:", rep)
+}
+```
diff --git a/vendor/github.com/cenkalti/rpc2/client.go b/vendor/github.com/cenkalti/rpc2/client.go
new file mode 100644
index 0000000000..cc9956976f
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/client.go
@@ -0,0 +1,364 @@
+// Package rpc2 provides bi-directional RPC client and server similar to net/rpc.
+package rpc2
+
+import (
+ "context"
+ "errors"
+ "io"
+ "log"
+ "reflect"
+ "sync"
+)
+
+// Client represents an RPC Client.
+// There may be multiple outstanding Calls associated
+// with a single Client, and a Client may be used by
+// multiple goroutines simultaneously.
+type Client struct {
+ mutex sync.Mutex // protects pending, seq, request
+ sending sync.Mutex
+ request Request // temp area used in send()
+ seq uint64
+ pending map[uint64]*Call
+ closing bool
+ shutdown bool
+ server bool
+ codec Codec
+ handlers map[string]*handler
+ disconnect chan struct{}
+ State *State // additional information to associate with client
+ blocking bool // whether to block request handling
+}
+
+// NewClient returns a new Client to handle requests to the
+// set of services at the other end of the connection.
+// It adds a buffer to the write side of the connection so
+// the header and payload are sent as a unit.
+func NewClient(conn io.ReadWriteCloser) *Client {
+ return NewClientWithCodec(NewGobCodec(conn))
+}
+
+// NewClientWithCodec is like NewClient but uses the specified
+// codec to encode requests and decode responses.
+func NewClientWithCodec(codec Codec) *Client {
+ return &Client{
+ codec: codec,
+ pending: make(map[uint64]*Call),
+ handlers: make(map[string]*handler),
+ disconnect: make(chan struct{}),
+ seq: 1, // 0 means notification.
+ }
+}
+
+// SetBlocking puts the client in blocking mode.
+// In blocking mode, received requests are processed synchronously.
+// If you have methods that may take a long time, other subsequent requests may time out.
+func (c *Client) SetBlocking(blocking bool) {
+ c.blocking = blocking
+}
+
+// Run the client's read loop.
+// You must run this method before calling any methods on the server.
+func (c *Client) Run() {
+ c.readLoop()
+}
+
+// DisconnectNotify returns a channel that is closed
+// when the client connection has gone away.
+func (c *Client) DisconnectNotify() chan struct{} {
+ return c.disconnect
+}
+
+// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics.
+func (c *Client) Handle(method string, handlerFunc interface{}) {
+ addHandler(c.handlers, method, handlerFunc)
+}
+
+// readLoop reads messages from codec.
+// It reads a request or a response to the previous request.
+// If the message is request, calls the handler function.
+// If the message is response, sends the reply to the associated call.
+func (c *Client) readLoop() {
+ var err error
+ var req Request
+ var resp Response
+ for err == nil {
+ req = Request{}
+ resp = Response{}
+ if err = c.codec.ReadHeader(&req, &resp); err != nil {
+ break
+ }
+
+ if req.Method != "" {
+ // request comes to server
+ if err = c.readRequest(&req); err != nil {
+ debugln("rpc2: error reading request:", err.Error())
+ }
+ } else {
+ // response comes to client
+ if err = c.readResponse(&resp); err != nil {
+ debugln("rpc2: error reading response:", err.Error())
+ }
+ }
+ }
+ // Terminate pending calls.
+ c.sending.Lock()
+ c.mutex.Lock()
+ c.shutdown = true
+ closing := c.closing
+ if err == io.EOF {
+ if closing {
+ err = ErrShutdown
+ } else {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ for _, call := range c.pending {
+ call.Error = err
+ call.done()
+ }
+ c.mutex.Unlock()
+ c.sending.Unlock()
+ if err != io.EOF && !closing && !c.server {
+ debugln("rpc2: client protocol error:", err)
+ }
+ close(c.disconnect)
+ if !closing {
+ c.codec.Close()
+ }
+}
+
+func (c *Client) handleRequest(req Request, method *handler, argv reflect.Value) {
+ // Invoke the method, providing a new value for the reply.
+ replyv := reflect.New(method.replyType.Elem())
+
+ returnValues := method.fn.Call([]reflect.Value{reflect.ValueOf(c), argv, replyv})
+
+ // Do not send response if request is a notification.
+ if req.Seq == 0 {
+ return
+ }
+
+ // The return value for the method is an error.
+ errInter := returnValues[0].Interface()
+ errmsg := ""
+ if errInter != nil {
+ errmsg = errInter.(error).Error()
+ }
+ resp := &Response{
+ Seq: req.Seq,
+ Error: errmsg,
+ }
+ if err := c.codec.WriteResponse(resp, replyv.Interface()); err != nil {
+ debugln("rpc2: error writing response:", err.Error())
+ }
+}
+
+func (c *Client) readRequest(req *Request) error {
+ method, ok := c.handlers[req.Method]
+ if !ok {
+ resp := &Response{
+ Seq: req.Seq,
+ Error: "rpc2: can't find method " + req.Method,
+ }
+ return c.codec.WriteResponse(resp, resp)
+ }
+
+ // Decode the argument value.
+ var argv reflect.Value
+ argIsValue := false // if true, need to indirect before calling.
+ if method.argType.Kind() == reflect.Ptr {
+ argv = reflect.New(method.argType.Elem())
+ } else {
+ argv = reflect.New(method.argType)
+ argIsValue = true
+ }
+ // argv guaranteed to be a pointer now.
+ if err := c.codec.ReadRequestBody(argv.Interface()); err != nil {
+ return err
+ }
+ if argIsValue {
+ argv = argv.Elem()
+ }
+
+ if c.blocking {
+ c.handleRequest(*req, method, argv)
+ } else {
+ go c.handleRequest(*req, method, argv)
+ }
+
+ return nil
+}
+
+func (c *Client) readResponse(resp *Response) error {
+ seq := resp.Seq
+ c.mutex.Lock()
+ call := c.pending[seq]
+ delete(c.pending, seq)
+ c.mutex.Unlock()
+
+ var err error
+ switch {
+ case call == nil:
+ // We've got no pending call. That usually means that
+ // WriteRequest partially failed, and call was already
+ // removed; response is a server telling us about an
+ // error reading request body. We should still attempt
+ // to read error body, but there's no one to give it to.
+ err = c.codec.ReadResponseBody(nil)
+ if err != nil {
+ err = errors.New("reading error body: " + err.Error())
+ }
+ case resp.Error != "":
+ // We've got an error response. Give this to the request;
+ // any subsequent requests will get the ReadResponseBody
+ // error if there is one.
+ call.Error = ServerError(resp.Error)
+ err = c.codec.ReadResponseBody(nil)
+ if err != nil {
+ err = errors.New("reading error body: " + err.Error())
+ }
+ call.done()
+ default:
+ err = c.codec.ReadResponseBody(call.Reply)
+ if err != nil {
+ call.Error = errors.New("reading body " + err.Error())
+ }
+ call.done()
+ }
+
+ return err
+}
+
+// Close waits for active calls to finish and closes the codec.
+func (c *Client) Close() error {
+ c.mutex.Lock()
+ if c.shutdown || c.closing {
+ c.mutex.Unlock()
+ return ErrShutdown
+ }
+ c.closing = true
+ c.mutex.Unlock()
+ return c.codec.Close()
+}
+
+// Go invokes the function asynchronously. It returns the Call structure representing
+// the invocation. The done channel will signal when the call is complete by returning
+// the same Call object. If done is nil, Go will allocate a new channel.
+// If non-nil, done must be buffered or Go will deliberately crash.
+func (c *Client) Go(method string, args interface{}, reply interface{}, done chan *Call) *Call {
+ call := new(Call)
+ call.Method = method
+ call.Args = args
+ call.Reply = reply
+ if done == nil {
+ done = make(chan *Call, 10) // buffered.
+ } else {
+ // If caller passes done != nil, it must arrange that
+ // done has enough buffer for the number of simultaneous
+ // RPCs that will be using that channel. If the channel
+ // is totally unbuffered, it's best not to run at all.
+ if cap(done) == 0 {
+ log.Panic("rpc2: done channel is unbuffered")
+ }
+ }
+ call.Done = done
+ c.send(call)
+ return call
+}
+
+// CallWithContext invokes the named function, waits for it to complete, and
+// returns its error status, or an error from Context timeout.
+func (c *Client) CallWithContext(ctx context.Context, method string, args interface{}, reply interface{}) error {
+ call := c.Go(method, args, reply, make(chan *Call, 1))
+ select {
+ case <-call.Done:
+ return call.Error
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return nil
+}
+
+// Call invokes the named function, waits for it to complete, and returns its error status.
+func (c *Client) Call(method string, args interface{}, reply interface{}) error {
+ return c.CallWithContext(context.Background(), method, args, reply)
+}
+
+func (call *Call) done() {
+ select {
+ case call.Done <- call:
+ // ok
+ default:
+ // We don't want to block here. It is the caller's responsibility to make
+ // sure the channel has enough buffer space. See comment in Go().
+ debugln("rpc2: discarding Call reply due to insufficient Done chan capacity")
+ }
+}
+
+// ServerError represents an error that has been returned from
+// the remote side of the RPC connection.
+type ServerError string
+
+func (e ServerError) Error() string {
+ return string(e)
+}
+
+// ErrShutdown is returned when the connection is closing or closed.
+var ErrShutdown = errors.New("connection is shut down")
+
+// Call represents an active RPC.
+type Call struct {
+ Method string // The name of the service and method to call.
+ Args interface{} // The argument to the function (*struct).
+ Reply interface{} // The reply from the function (*struct).
+ Error error // After completion, the error status.
+ Done chan *Call // Strobes when call is complete.
+}
+
+func (c *Client) send(call *Call) {
+ c.sending.Lock()
+ defer c.sending.Unlock()
+
+ // Register this call.
+ c.mutex.Lock()
+ if c.shutdown || c.closing {
+ call.Error = ErrShutdown
+ c.mutex.Unlock()
+ call.done()
+ return
+ }
+ seq := c.seq
+ c.seq++
+ c.pending[seq] = call
+ c.mutex.Unlock()
+
+ // Encode and send the request.
+ c.request.Seq = seq
+ c.request.Method = call.Method
+ err := c.codec.WriteRequest(&c.request, call.Args)
+ if err != nil {
+ c.mutex.Lock()
+ call = c.pending[seq]
+ delete(c.pending, seq)
+ c.mutex.Unlock()
+ if call != nil {
+ call.Error = err
+ call.done()
+ }
+ }
+}
+
+// Notify sends a request to the receiver but does not wait for a return value.
+func (c *Client) Notify(method string, args interface{}) error {
+ c.sending.Lock()
+ defer c.sending.Unlock()
+
+ if c.shutdown || c.closing {
+ return ErrShutdown
+ }
+
+ c.request.Seq = 0
+ c.request.Method = method
+ return c.codec.WriteRequest(&c.request, args)
+}
diff --git a/vendor/github.com/cenkalti/rpc2/codec.go b/vendor/github.com/cenkalti/rpc2/codec.go
new file mode 100644
index 0000000000..b097d9aaa6
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/codec.go
@@ -0,0 +1,125 @@
+package rpc2
+
+import (
+ "bufio"
+ "encoding/gob"
+ "io"
+ "sync"
+)
+
+// A Codec implements reading and writing of RPC requests and responses.
+// The client calls ReadHeader to read a message header.
+// The implementation must populate either Request or Response argument.
+// Depending on which argument is populated, ReadRequestBody or
+// ReadResponseBody is called right after ReadHeader.
+// ReadRequestBody and ReadResponseBody may be called with a nil
+// argument to force the body to be read and then discarded.
+type Codec interface {
+ // ReadHeader must read a message and populate either the request
+ // or the response by inspecting the incoming message.
+ ReadHeader(*Request, *Response) error
+
+ // ReadRequestBody into args argument of handler function.
+ ReadRequestBody(interface{}) error
+
+ // ReadResponseBody into reply argument of handler function.
+ ReadResponseBody(interface{}) error
+
+ // WriteRequest must be safe for concurrent use by multiple goroutines.
+ WriteRequest(*Request, interface{}) error
+
+ // WriteResponse must be safe for concurrent use by multiple goroutines.
+ WriteResponse(*Response, interface{}) error
+
+ // Close is called when client/server finished with the connection.
+ Close() error
+}
+
+// Request is a header written before every RPC call.
+type Request struct {
+ Seq uint64 // sequence number chosen by client
+ Method string
+}
+
+// Response is a header written before every RPC return.
+type Response struct {
+ Seq uint64 // echoes that of the request
+ Error string // error, if any.
+}
+
+type gobCodec struct {
+ rwc io.ReadWriteCloser
+ dec *gob.Decoder
+ enc *gob.Encoder
+ encBuf *bufio.Writer
+ mutex sync.Mutex
+}
+
+type message struct {
+ Seq uint64
+ Method string
+ Error string
+}
+
+// NewGobCodec returns a new rpc2.Codec using gob encoding/decoding on conn.
+func NewGobCodec(conn io.ReadWriteCloser) Codec {
+ buf := bufio.NewWriter(conn)
+ return &gobCodec{
+ rwc: conn,
+ dec: gob.NewDecoder(conn),
+ enc: gob.NewEncoder(buf),
+ encBuf: buf,
+ }
+}
+
+func (c *gobCodec) ReadHeader(req *Request, resp *Response) error {
+ var msg message
+ if err := c.dec.Decode(&msg); err != nil {
+ return err
+ }
+
+ if msg.Method != "" {
+ req.Seq = msg.Seq
+ req.Method = msg.Method
+ } else {
+ resp.Seq = msg.Seq
+ resp.Error = msg.Error
+ }
+ return nil
+}
+
+func (c *gobCodec) ReadRequestBody(body interface{}) error {
+ return c.dec.Decode(body)
+}
+
+func (c *gobCodec) ReadResponseBody(body interface{}) error {
+ return c.dec.Decode(body)
+}
+
+func (c *gobCodec) WriteRequest(r *Request, body interface{}) (err error) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ if err = c.enc.Encode(r); err != nil {
+ return
+ }
+ if err = c.enc.Encode(body); err != nil {
+ return
+ }
+ return c.encBuf.Flush()
+}
+
+func (c *gobCodec) WriteResponse(r *Response, body interface{}) (err error) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ if err = c.enc.Encode(r); err != nil {
+ return
+ }
+ if err = c.enc.Encode(body); err != nil {
+ return
+ }
+ return c.encBuf.Flush()
+}
+
+func (c *gobCodec) Close() error {
+ return c.rwc.Close()
+}
diff --git a/vendor/github.com/cenkalti/rpc2/debug.go b/vendor/github.com/cenkalti/rpc2/debug.go
new file mode 100644
index 0000000000..ec1b625218
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/debug.go
@@ -0,0 +1,12 @@
+package rpc2
+
+import "log"
+
+// DebugLog controls the printing of internal and I/O errors.
+var DebugLog = false
+
+func debugln(v ...interface{}) {
+ if DebugLog {
+ log.Println(v...)
+ }
+}
diff --git a/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go
new file mode 100644
index 0000000000..87e116887f
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go
@@ -0,0 +1,226 @@
+// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec for the rpc2 package.
+//
+// Beside struct types, JSONCodec allows using positional arguments.
+// Use []interface{} as the type of argument when sending and receiving methods.
+//
+// Positional arguments example:
+// server.Handle("add", func(client *rpc2.Client, args []interface{}, result *float64) error {
+// *result = args[0].(float64) + args[1].(float64)
+// return nil
+// })
+//
+// var result float64
+// client.Call("add", []interface{}{1, 2}, &result)
+//
+package jsonrpc
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sync"
+
+ "github.com/cenkalti/rpc2"
+)
+
+type jsonCodec struct {
+ dec *json.Decoder // for reading JSON values
+ enc *json.Encoder // for writing JSON values
+ c io.Closer
+
+ // temporary work space
+ msg message
+ serverRequest serverRequest
+ clientResponse clientResponse
+
+ // JSON-RPC clients can use arbitrary json values as request IDs.
+ // Package rpc expects uint64 request IDs.
+ // We assign uint64 sequence numbers to incoming requests
+ // but save the original request ID in the pending map.
+ // When rpc responds, we use the sequence number in
+ // the response to find the original request ID.
+ mutex sync.Mutex // protects seq, pending
+ pending map[uint64]*json.RawMessage
+ seq uint64
+}
+
+// NewJSONCodec returns a new rpc2.Codec using JSON-RPC on conn.
+func NewJSONCodec(conn io.ReadWriteCloser) rpc2.Codec {
+ return &jsonCodec{
+ dec: json.NewDecoder(conn),
+ enc: json.NewEncoder(conn),
+ c: conn,
+ pending: make(map[uint64]*json.RawMessage),
+ }
+}
+
+// serverRequest and clientResponse combined
+type message struct {
+ Method string `json:"method"`
+ Params *json.RawMessage `json:"params"`
+ Id *json.RawMessage `json:"id"`
+ Result *json.RawMessage `json:"result"`
+ Error interface{} `json:"error"`
+}
+
+// Unmarshal to
+type serverRequest struct {
+ Method string `json:"method"`
+ Params *json.RawMessage `json:"params"`
+ Id *json.RawMessage `json:"id"`
+}
+type clientResponse struct {
+ Id uint64 `json:"id"`
+ Result *json.RawMessage `json:"result"`
+ Error interface{} `json:"error"`
+}
+
+// to Marshal
+type serverResponse struct {
+ Id *json.RawMessage `json:"id"`
+ Result interface{} `json:"result"`
+ Error interface{} `json:"error"`
+}
+type clientRequest struct {
+ Method string `json:"method"`
+ Params interface{} `json:"params"`
+ Id *uint64 `json:"id"`
+}
+
+func (c *jsonCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error {
+ c.msg = message{}
+ if err := c.dec.Decode(&c.msg); err != nil {
+ return err
+ }
+
+ if c.msg.Method != "" {
+ // request comes to server
+ c.serverRequest.Id = c.msg.Id
+ c.serverRequest.Method = c.msg.Method
+ c.serverRequest.Params = c.msg.Params
+
+ req.Method = c.serverRequest.Method
+
+ // JSON request id can be any JSON value;
+ // RPC package expects uint64. Translate to
+ // internal uint64 and save JSON on the side.
+ if c.serverRequest.Id == nil {
+ // Notification
+ } else {
+ c.mutex.Lock()
+ c.seq++
+ c.pending[c.seq] = c.serverRequest.Id
+ c.serverRequest.Id = nil
+ req.Seq = c.seq
+ c.mutex.Unlock()
+ }
+ } else {
+ // response comes to client
+ err := json.Unmarshal(*c.msg.Id, &c.clientResponse.Id)
+ if err != nil {
+ return err
+ }
+ c.clientResponse.Result = c.msg.Result
+ c.clientResponse.Error = c.msg.Error
+
+ resp.Error = ""
+ resp.Seq = c.clientResponse.Id
+ if c.clientResponse.Error != nil || c.clientResponse.Result == nil {
+ x, ok := c.clientResponse.Error.(string)
+ if !ok {
+ return fmt.Errorf("invalid error %v", c.clientResponse.Error)
+ }
+ if x == "" {
+ x = "unspecified error"
+ }
+ resp.Error = x
+ }
+ }
+ return nil
+}
+
+var errMissingParams = errors.New("jsonrpc: request body missing params")
+
+func (c *jsonCodec) ReadRequestBody(x interface{}) error {
+ if x == nil {
+ return nil
+ }
+ if c.serverRequest.Params == nil {
+ return errMissingParams
+ }
+
+ var err error
+
+ // Check if x points to a slice of any kind
+ rt := reflect.TypeOf(x)
+ if rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Slice {
+ // If it's a slice, unmarshal as is
+ err = json.Unmarshal(*c.serverRequest.Params, x)
+ } else {
+ // Anything else unmarshal into a slice containing x
+ params := &[]interface{}{x}
+ err = json.Unmarshal(*c.serverRequest.Params, params)
+ }
+
+ return err
+}
+
+func (c *jsonCodec) ReadResponseBody(x interface{}) error {
+ if x == nil {
+ return nil
+ }
+ return json.Unmarshal(*c.clientResponse.Result, x)
+}
+
+func (c *jsonCodec) WriteRequest(r *rpc2.Request, param interface{}) error {
+ req := &clientRequest{Method: r.Method}
+
+ // Check if param is a slice of any kind
+ if param != nil && reflect.TypeOf(param).Kind() == reflect.Slice {
+ // If it's a slice, leave as is
+ req.Params = param
+ } else {
+ // Put anything else into a slice
+ req.Params = []interface{}{param}
+ }
+
+ if r.Seq == 0 {
+ // Notification
+ req.Id = nil
+ } else {
+ seq := r.Seq
+ req.Id = &seq
+ }
+ return c.enc.Encode(req)
+}
+
+var null = json.RawMessage([]byte("null"))
+
+func (c *jsonCodec) WriteResponse(r *rpc2.Response, x interface{}) error {
+ c.mutex.Lock()
+ b, ok := c.pending[r.Seq]
+ if !ok {
+ c.mutex.Unlock()
+ return errors.New("invalid sequence number in response")
+ }
+ delete(c.pending, r.Seq)
+ c.mutex.Unlock()
+
+ if b == nil {
+ // Invalid request so no id. Use JSON null.
+ b = &null
+ }
+ resp := serverResponse{Id: b}
+ if r.Error == "" {
+ resp.Result = x
+ } else {
+ resp.Error = r.Error
+ }
+ return c.enc.Encode(resp)
+}
+
+func (c *jsonCodec) Close() error {
+ return c.c.Close()
+}
diff --git a/vendor/github.com/cenkalti/rpc2/server.go b/vendor/github.com/cenkalti/rpc2/server.go
new file mode 100644
index 0000000000..2a5be7ed67
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/server.go
@@ -0,0 +1,181 @@
+package rpc2
+
+import (
+ "io"
+ "log"
+ "net"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/cenkalti/hub"
+)
+
+// Precompute the reflect type for error. Can't use error directly
+// because Typeof takes an empty interface value. This is annoying.
+var typeOfError = reflect.TypeOf((*error)(nil)).Elem()
+var typeOfClient = reflect.TypeOf((*Client)(nil))
+
+const (
+ clientConnected hub.Kind = iota
+ clientDisconnected
+)
+
+// Server responds to RPC requests made by Client.
+type Server struct {
+ handlers map[string]*handler
+ eventHub *hub.Hub
+}
+
+type handler struct {
+ fn reflect.Value
+ argType reflect.Type
+ replyType reflect.Type
+}
+
+type connectionEvent struct {
+ Client *Client
+}
+
+type disconnectionEvent struct {
+ Client *Client
+}
+
+func (connectionEvent) Kind() hub.Kind { return clientConnected }
+func (disconnectionEvent) Kind() hub.Kind { return clientDisconnected }
+
+// NewServer returns a new Server.
+func NewServer() *Server {
+ return &Server{
+ handlers: make(map[string]*handler),
+ eventHub: &hub.Hub{},
+ }
+}
+
+// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics.
+func (s *Server) Handle(method string, handlerFunc interface{}) {
+ addHandler(s.handlers, method, handlerFunc)
+}
+
+func addHandler(handlers map[string]*handler, mname string, handlerFunc interface{}) {
+ if _, ok := handlers[mname]; ok {
+ panic("rpc2: multiple registrations for " + mname)
+ }
+
+ method := reflect.ValueOf(handlerFunc)
+ mtype := method.Type()
+ // Method needs three ins: *client, *args, *reply.
+ if mtype.NumIn() != 3 {
+ log.Panicln("method", mname, "has wrong number of ins:", mtype.NumIn())
+ }
+ // First arg must be a pointer to rpc2.Client.
+ clientType := mtype.In(0)
+ if clientType.Kind() != reflect.Ptr {
+ log.Panicln("method", mname, "client type not a pointer:", clientType)
+ }
+ if clientType != typeOfClient {
+ log.Panicln("method", mname, "first argument", clientType.String(), "not *rpc2.Client")
+ }
+ // Second arg need not be a pointer.
+ argType := mtype.In(1)
+ if !isExportedOrBuiltinType(argType) {
+ log.Panicln(mname, "argument type not exported:", argType)
+ }
+ // Third arg must be a pointer.
+ replyType := mtype.In(2)
+ if replyType.Kind() != reflect.Ptr {
+ log.Panicln("method", mname, "reply type not a pointer:", replyType)
+ }
+ // Reply type must be exported.
+ if !isExportedOrBuiltinType(replyType) {
+ log.Panicln("method", mname, "reply type not exported:", replyType)
+ }
+ // Method needs one out.
+ if mtype.NumOut() != 1 {
+ log.Panicln("method", mname, "has wrong number of outs:", mtype.NumOut())
+ }
+ // The return type of the method must be error.
+ if returnType := mtype.Out(0); returnType != typeOfError {
+ log.Panicln("method", mname, "returns", returnType.String(), "not error")
+ }
+ handlers[mname] = &handler{
+ fn: method,
+ argType: argType,
+ replyType: replyType,
+ }
+}
+
+// Is this type exported or a builtin?
+func isExportedOrBuiltinType(t reflect.Type) bool {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ // PkgPath will be non-empty even for an exported type,
+ // so we need to check the type name as well.
+ return isExported(t.Name()) || t.PkgPath() == ""
+}
+
+// Is this an exported - upper case - name?
+func isExported(name string) bool {
+ rune, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(rune)
+}
+
+// OnConnect registers a function to run when a client connects.
+func (s *Server) OnConnect(f func(*Client)) {
+ s.eventHub.Subscribe(clientConnected, func(e hub.Event) {
+ go f(e.(connectionEvent).Client)
+ })
+}
+
+// OnDisconnect registers a function to run when a client disconnects.
+func (s *Server) OnDisconnect(f func(*Client)) {
+ s.eventHub.Subscribe(clientDisconnected, func(e hub.Event) {
+ go f(e.(disconnectionEvent).Client)
+ })
+}
+
+// Accept accepts connections on the listener and serves requests
+// for each incoming connection. Accept blocks; the caller typically
+// invokes it in a go statement.
+func (s *Server) Accept(lis net.Listener) {
+ for {
+ conn, err := lis.Accept()
+ if err != nil {
+ log.Print("rpc.Serve: accept:", err.Error())
+ return
+ }
+ go s.ServeConn(conn)
+ }
+}
+
+// ServeConn runs the server on a single connection.
+// ServeConn blocks, serving the connection until the client hangs up.
+// The caller typically invokes ServeConn in a go statement.
+// ServeConn uses the gob wire format (see package gob) on the
+// connection. To use an alternate codec, use ServeCodec.
+func (s *Server) ServeConn(conn io.ReadWriteCloser) {
+ s.ServeCodec(NewGobCodec(conn))
+}
+
+// ServeCodec is like ServeConn but uses the specified codec to
+// decode requests and encode responses.
+func (s *Server) ServeCodec(codec Codec) {
+ s.ServeCodecWithState(codec, NewState())
+}
+
+// ServeCodecWithState is like ServeCodec but also gives the ability to
+// associate a state variable with the client that persists across RPC calls.
+func (s *Server) ServeCodecWithState(codec Codec, state *State) {
+ defer codec.Close()
+
+ // Client also handles the incoming connections.
+ c := NewClientWithCodec(codec)
+ c.server = true
+ c.handlers = s.handlers
+ c.State = state
+
+ s.eventHub.Publish(connectionEvent{c})
+ c.Run()
+ s.eventHub.Publish(disconnectionEvent{c})
+}
diff --git a/vendor/github.com/cenkalti/rpc2/state.go b/vendor/github.com/cenkalti/rpc2/state.go
new file mode 100644
index 0000000000..7a4f23e6d9
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/state.go
@@ -0,0 +1,25 @@
+package rpc2
+
+import "sync"
+
+type State struct {
+ store map[string]interface{}
+ m sync.RWMutex
+}
+
+func NewState() *State {
+ return &State{store: make(map[string]interface{})}
+}
+
+func (s *State) Get(key string) (value interface{}, ok bool) {
+ s.m.RLock()
+ value, ok = s.store[key]
+ s.m.RUnlock()
+ return
+}
+
+func (s *State) Set(key string, value interface{}) {
+ s.m.Lock()
+ s.store[key] = value
+ s.m.Unlock()
+}
diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 0000000000..5158667890
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 0000000000..93a8aab51b
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+ "log"
+ "os"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+ old := globalVerbosity
+ globalVerbosity = v
+ return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+func New(std StdLogger) logr.Logger {
+ return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+ if std == nil {
+ // Go's log.Default() is only available in 1.16 and higher.
+ std = log.New(os.Stderr, "", log.LstdFlags)
+ }
+
+ if opts.Depth < 0 {
+ opts.Depth = 0
+ }
+
+ fopts := funcr.Options{
+ LogCaller: funcr.MessageClass(opts.LogCaller),
+ }
+
+ sl := &logger{
+ Formatter: funcr.NewFormatter(fopts),
+ std: std,
+ }
+
+ // For skipping our own logger.Info/Error.
+ sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+ return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // Depth biases the assumed number of call frames to the "true" caller.
+ // This is useful when the calling code calls a function which then calls
+ // stdr (e.g. a logging shim to another API). Values less than zero will
+ // be treated as zero.
+ Depth int
+
+ // LogCaller tells stdr to add a "caller" key to some or all log lines.
+ // Go's log package has options to log this natively, too.
+ LogCaller MessageClass
+
+ // TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+ // Output is the same as log.Output and log.Logger.Output.
+ Output(calldepth int, logline string) error
+}
+
+type logger struct {
+ funcr.Formatter
+ std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+ return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ if prefix != "" {
+ args = prefix + ": " + args
+ }
+ _ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ if prefix != "" {
+ args = prefix + ": " + args
+ }
+ _ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+ l.Formatter.AddName(name)
+ return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+ l.Formatter.AddValues(kvList)
+ return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+ l.Formatter.AddCallDepth(depth)
+ return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of way to test
+// type conversion.
+type Underlier interface {
+ GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+ return l.std
+}
diff --git a/vendor/github.com/google/renameio/v2/.golangci.yml b/vendor/github.com/google/renameio/v2/.golangci.yml
new file mode 100644
index 0000000000..abfb6ca0a0
--- /dev/null
+++ b/vendor/github.com/google/renameio/v2/.golangci.yml
@@ -0,0 +1,5 @@
+linters:
+ disable:
+ - errcheck
+ enable:
+ - gofmt
diff --git a/vendor/github.com/google/renameio/v2/CONTRIBUTING.md b/vendor/github.com/google/renameio/v2/CONTRIBUTING.md
new file mode 100644
index 0000000000..939e5341e7
--- /dev/null
+++ b/vendor/github.com/google/renameio/v2/CONTRIBUTING.md
@@ -0,0 +1,28 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows [Google's Open Source Community
+Guidelines](https://opensource.google.com/conduct/).
diff --git a/vendor/github.com/google/renameio/v2/LICENSE b/vendor/github.com/google/renameio/v2/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/github.com/google/renameio/v2/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/renameio/v2/README.md b/vendor/github.com/google/renameio/v2/README.md
new file mode 100644
index 0000000000..703884c260
--- /dev/null
+++ b/vendor/github.com/google/renameio/v2/README.md
@@ -0,0 +1,74 @@
+[![Build Status](https://github.com/google/renameio/workflows/Test/badge.svg)](https://github.com/google/renameio/actions?query=workflow%3ATest)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/google/renameio)](https://pkg.go.dev/github.com/google/renameio)
+[![Go Report Card](https://goreportcard.com/badge/github.com/google/renameio)](https://goreportcard.com/report/github.com/google/renameio)
+
+The `renameio` Go package provides a way to atomically create or replace a file or
+symbolic link.
+
+## Atomicity vs durability
+
+`renameio` concerns itself *only* with atomicity, i.e. making sure applications
+never see unexpected file content (a half-written file, or a 0-byte file).
+
+As a practical example, consider https://manpages.debian.org/: if there is a
+power outage while the site is updating, we are okay with losing the manpages
+which were being rendered at the time of the power outage. They will be added in
+a later run of the software. We are not okay with having a manpage replaced by a
+0-byte file under any circumstances, though.
+
+## Advantages of this package
+
+There are other packages for atomically replacing files, and sometimes ad-hoc
+implementations can be found in programs.
+
+A naive approach to the problem is to create a temporary file followed by a call
+to `os.Rename()`. However, there are a number of subtleties which make the
+correct sequence of operations hard to identify:
+
+* The temporary file should be removed when an error occurs, but a remove must
+ not be attempted if the rename succeeded, as a new file might have been
+ created with the same name. This renders a throwaway `defer
+ os.Remove(t.Name())` insufficient; state must be kept.
+
+* The temporary file must be created on the same file system (same mount point)
+ for the rename to work, but the TMPDIR environment variable should still be
+ respected, e.g. to direct temporary files into a separate directory outside of
+ the webserver’s document root but on the same file system.
+
+* On POSIX operating systems, the
+ [`fsync`](https://manpages.debian.org/stretch/manpages-dev/fsync.2) system
+ call must be used to ensure that the `os.Rename()` call will not result in a
+ 0-length file.
+
+This package attempts to get all of these details right, provides an intuitive,
+yet flexible API and caters to use-cases where high performance is required.
+
+## Major changes in v2
+
+With major version renameio/v2, `renameio.WriteFile` changes the way that
+permissions are handled. Before version 2, files were created with the
+permissions passed to the function, ignoring the
+[umask](https://en.wikipedia.org/wiki/Umask). From version 2 onwards, these
+permissions are further modified by process' umask (usually the user's
+preferred umask).
+
+If you were relying on the umask being ignored, add the
+`renameio.IgnoreUmask()` option to your `renameio.WriteFile` calls when
+upgrading to v2.
+
+## Windows support
+
+It is [not possible to reliably write files atomically on
+Windows](https://github.com/golang/go/issues/22397#issuecomment-498856679), and
+[`chmod` is not reliably supported by the Go standard library on
+Windows](https://github.com/google/renameio/issues/17).
+
+As it is not possible to provide a correct implementation, this package does not
+export any functions on Windows.
+
+## Disclaimer
+
+This is not an official Google product (experimental or otherwise), it
+is just code that happens to be owned by Google.
+
+This project is not affiliated with the Go project.
diff --git a/vendor/github.com/google/renameio/v2/doc.go b/vendor/github.com/google/renameio/v2/doc.go
new file mode 100644
index 0000000000..67416df481
--- /dev/null
+++ b/vendor/github.com/google/renameio/v2/doc.go
@@ -0,0 +1,21 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package renameio provides a way to atomically create or replace a file or
+// symbolic link.
+//
+// Caveat: this package requires the file system rename(2) implementation to be
+// atomic. Notably, this is not the case when using NFS with multiple clients:
+// https://stackoverflow.com/a/41396801
+package renameio
diff --git a/vendor/github.com/google/renameio/v2/option.go b/vendor/github.com/google/renameio/v2/option.go
new file mode 100644
index 0000000000..f825f6cf9f
--- /dev/null
+++ b/vendor/github.com/google/renameio/v2/option.go
@@ -0,0 +1,79 @@
+// Copyright 2021 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package renameio
+
+import "os"
+
+// Option is the interface implemented by all configuration function return
+// values.
+type Option interface {
+ apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (fn optionFunc) apply(cfg *config) {
+ fn(cfg)
+}
+
+// WithTempDir configures the directory to use for temporary, uncommitted
+// files. Suitable for using a cached directory from
+// TempDir(filepath.Base(path)).
+func WithTempDir(dir string) Option {
+ return optionFunc(func(cfg *config) {
+ cfg.dir = dir
+ })
+}
+
+// WithPermissions sets the permissions for the target file while respecting
+// the umask(2). Bits set in the umask are removed from the permissions given
+// unless IgnoreUmask is used.
+func WithPermissions(perm os.FileMode) Option {
+ perm &= os.ModePerm
+ return optionFunc(func(cfg *config) {
+ cfg.createPerm = perm
+ })
+}
+
+// IgnoreUmask causes the permissions configured using WithPermissions to be
+// applied directly without applying the umask.
+func IgnoreUmask() Option {
+ return optionFunc(func(cfg *config) {
+ cfg.ignoreUmask = true
+ })
+}
+
+// WithStaticPermissions sets the permissions for the target file ignoring the
+// umask(2). This is equivalent to calling Chmod() on the file handle or using
+// WithPermissions in combination with IgnoreUmask.
+func WithStaticPermissions(perm os.FileMode) Option {
+ perm &= os.ModePerm
+ return optionFunc(func(cfg *config) {
+ cfg.chmod = &perm
+ })
+}
+
+// WithExistingPermissions configures the file creation to try to use the
+// permissions from an already existing target file. If the target file doesn't
+// exist yet or is not a regular file the default permissions are used unless
+// overridden using WithPermissions or WithStaticPermissions.
+func WithExistingPermissions() Option {
+ return optionFunc(func(c *config) {
+ c.attemptPermCopy = true
+ })
+}
diff --git a/vendor/github.com/google/renameio/v2/tempfile.go b/vendor/github.com/google/renameio/v2/tempfile.go
new file mode 100644
index 0000000000..edc3e9871c
--- /dev/null
+++ b/vendor/github.com/google/renameio/v2/tempfile.go
@@ -0,0 +1,283 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package renameio
+
+import (
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+// Default permissions for created files
+const defaultPerm os.FileMode = 0o600
+
+// nextrandom is a function generating a random number.
+var nextrandom = rand.Int63
+
+// openTempFile creates a randomly named file and returns an open handle. It is
+// similar to ioutil.TempFile except that the directory must be given, the file
+// permissions can be controlled and patterns in the name are not supported.
+// The name is always suffixed with a random number.
+func openTempFile(dir, name string, perm os.FileMode) (*os.File, error) {
+ prefix := filepath.Join(dir, name)
+
+ for attempt := 0; ; {
+ // Generate a reasonably random name which is unlikely to already
+ // exist. O_EXCL ensures that existing files generate an error.
+ name := prefix + strconv.FormatInt(nextrandom(), 10)
+
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
+ if !os.IsExist(err) {
+ return f, err
+ }
+
+ if attempt++; attempt > 10000 {
+ return nil, &os.PathError{
+ Op: "tempfile",
+ Path: name,
+ Err: os.ErrExist,
+ }
+ }
+ }
+}
+
+// TempDir checks whether os.TempDir() can be used as a temporary directory for
+// later atomically replacing files within dest. If no (os.TempDir() resides on
+// a different mount point), dest is returned.
+//
+// Note that the returned value ceases to be valid once either os.TempDir()
+// changes (e.g. on Linux, once the TMPDIR environment variable changes) or the
+// file system is unmounted.
+func TempDir(dest string) string {
+ return tempDir("", filepath.Join(dest, "renameio-TempDir"))
+}
+
+func tempDir(dir, dest string) string {
+ if dir != "" {
+ return dir // caller-specified directory always wins
+ }
+
+ // Chose the destination directory as temporary directory so that we
+ // definitely can rename the file, for which both temporary and destination
+ // file need to point to the same mount point.
+ fallback := filepath.Dir(dest)
+
+ // The user might have overridden the os.TempDir() return value by setting
+ // the TMPDIR environment variable.
+ tmpdir := os.TempDir()
+
+ testsrc, err := ioutil.TempFile(tmpdir, "."+filepath.Base(dest))
+ if err != nil {
+ return fallback
+ }
+ cleanup := true
+ defer func() {
+ if cleanup {
+ os.Remove(testsrc.Name())
+ }
+ }()
+ testsrc.Close()
+
+ testdest, err := ioutil.TempFile(filepath.Dir(dest), "."+filepath.Base(dest))
+ if err != nil {
+ return fallback
+ }
+ defer os.Remove(testdest.Name())
+ testdest.Close()
+
+ if err := os.Rename(testsrc.Name(), testdest.Name()); err != nil {
+ return fallback
+ }
+ cleanup = false // testsrc no longer exists
+ return tmpdir
+}
+
+// PendingFile is a pending temporary file, waiting to replace the destination
+// path in a call to CloseAtomicallyReplace.
+type PendingFile struct {
+ *os.File
+
+ path string
+ done bool
+ closed bool
+}
+
+// Cleanup is a no-op if CloseAtomicallyReplace succeeded, and otherwise closes
+// and removes the temporary file.
+//
+// This method is not safe for concurrent use by multiple goroutines.
+func (t *PendingFile) Cleanup() error {
+ if t.done {
+ return nil
+ }
+ // An error occurred. Close and remove the tempfile. Errors are returned for
+ // reporting, there is nothing the caller can recover here.
+ var closeErr error
+ if !t.closed {
+ closeErr = t.Close()
+ }
+ if err := os.Remove(t.Name()); err != nil {
+ return err
+ }
+ t.done = true
+ return closeErr
+}
+
+// CloseAtomicallyReplace closes the temporary file and atomically replaces
+// the destination file with it, i.e., a concurrent open(2) call will either
+// open the file previously located at the destination path (if any), or the
+// just written file, but the file will always be present.
+//
+// This method is not safe for concurrent use by multiple goroutines.
+func (t *PendingFile) CloseAtomicallyReplace() error {
+ // Even on an ordered file system (e.g. ext4 with data=ordered) or file
+ // systems with write barriers, we cannot skip the fsync(2) call as per
+ // Theodore Ts'o (ext2/3/4 lead developer):
+ //
+ // > data=ordered only guarantees the avoidance of stale data (e.g., the previous
+ // > contents of a data block showing up after a crash, where the previous data
+ // > could be someone's love letters, medical records, etc.). Without the fsync(2)
+ // > a zero-length file is a valid and possible outcome after the rename.
+ if err := t.Sync(); err != nil {
+ return err
+ }
+ t.closed = true
+ if err := t.Close(); err != nil {
+ return err
+ }
+ if err := os.Rename(t.Name(), t.path); err != nil {
+ return err
+ }
+ t.done = true
+ return nil
+}
+
+// TempFile creates a temporary file destined to atomically creating or
+// replacing the destination file at path.
+//
+// If dir is the empty string, TempDir(filepath.Base(path)) is used. If you are
+// going to write a large number of files to the same file system, store the
+// result of TempDir(filepath.Base(path)) and pass it instead of the empty
+// string.
+//
+// The file's permissions will be 0600. You can change these by explicitly
+// calling Chmod on the returned PendingFile.
+func TempFile(dir, path string) (*PendingFile, error) {
+ return NewPendingFile(path, WithTempDir(dir), WithStaticPermissions(defaultPerm))
+}
+
+type config struct {
+ dir, path string
+ createPerm os.FileMode
+ attemptPermCopy bool
+ ignoreUmask bool
+ chmod *os.FileMode
+}
+
+// NewPendingFile creates a temporary file destined to atomically creating or
+// replacing the destination file at path.
+//
+// TempDir(filepath.Base(path)) is used to store the temporary file. If you are
+// going to write a large number of files to the same file system, use the
+// result of TempDir(filepath.Base(path)) with the WithTempDir option.
+//
+// The file's permissions will be (0600 & ^umask). Use WithPermissions,
+// IgnoreUmask, WithStaticPermissions and WithExistingPermissions to control
+// them.
+func NewPendingFile(path string, opts ...Option) (*PendingFile, error) {
+ cfg := config{
+ path: path,
+ createPerm: defaultPerm,
+ }
+
+ for _, o := range opts {
+ o.apply(&cfg)
+ }
+
+ if cfg.ignoreUmask && cfg.chmod == nil {
+ cfg.chmod = &cfg.createPerm
+ }
+
+ if cfg.attemptPermCopy {
+ // Try to determine permissions from an existing file.
+ if existing, err := os.Lstat(cfg.path); err == nil && existing.Mode().IsRegular() {
+ perm := existing.Mode() & os.ModePerm
+ cfg.chmod = &perm
+
+ // Try to already create file with desired permissions; at worst
+ // a chmod will be needed afterwards.
+ cfg.createPerm = perm
+ } else if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ }
+
+ f, err := openTempFile(tempDir(cfg.dir, cfg.path), "."+filepath.Base(cfg.path), cfg.createPerm)
+ if err != nil {
+ return nil, err
+ }
+
+ if cfg.chmod != nil {
+ if fi, err := f.Stat(); err != nil {
+ return nil, err
+ } else if fi.Mode()&os.ModePerm != *cfg.chmod {
+ if err := f.Chmod(*cfg.chmod); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return &PendingFile{File: f, path: cfg.path}, nil
+}
+
+// Symlink wraps os.Symlink, replacing an existing symlink with the same name
+// atomically (os.Symlink fails when newname already exists, at least on Linux).
+func Symlink(oldname, newname string) error {
+ // Fast path: if newname does not exist yet, we can skip the whole dance
+ // below.
+ if err := os.Symlink(oldname, newname); err == nil || !os.IsExist(err) {
+ return err
+ }
+
+ // We need to use ioutil.TempDir, as we cannot overwrite a ioutil.TempFile,
+ // and removing+symlinking creates a TOCTOU race.
+ d, err := ioutil.TempDir(filepath.Dir(newname), "."+filepath.Base(newname))
+ if err != nil {
+ return err
+ }
+ cleanup := true
+ defer func() {
+ if cleanup {
+ os.RemoveAll(d)
+ }
+ }()
+
+ symlink := filepath.Join(d, "tmp.symlink")
+ if err := os.Symlink(oldname, symlink); err != nil {
+ return err
+ }
+
+ if err := os.Rename(symlink, newname); err != nil {
+ return err
+ }
+
+ cleanup = false
+ return os.RemoveAll(d)
+}
diff --git a/vendor/github.com/google/renameio/v2/writefile.go b/vendor/github.com/google/renameio/v2/writefile.go
new file mode 100644
index 0000000000..545042102b
--- /dev/null
+++ b/vendor/github.com/google/renameio/v2/writefile.go
@@ -0,0 +1,41 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package renameio
+
+import "os"
+
+// WriteFile mirrors ioutil.WriteFile, replacing an existing file with the same
+// name atomically.
+func WriteFile(filename string, data []byte, perm os.FileMode, opts ...Option) error {
+ opts = append([]Option{
+ WithPermissions(perm),
+ WithExistingPermissions(),
+ }, opts...)
+
+ t, err := NewPendingFile(filename, opts...)
+ if err != nil {
+ return err
+ }
+ defer t.Cleanup()
+
+ if _, err := t.Write(data); err != nil {
+ return err
+ }
+
+ return t.CloseAtomicallyReplace()
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/LICENSE b/vendor/github.com/ovn-org/libovsdb/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/ovn-org/libovsdb/NOTICE b/vendor/github.com/ovn-org/libovsdb/NOTICE
new file mode 100644
index 0000000000..156dcf39f8
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/NOTICE
@@ -0,0 +1,13 @@
+libovsdb
+
+Copyright 2014-2015 Socketplane Inc.
+Copyright 2015-2018 Docker Inc.
+
+This software consists of voluntary contributions made by many individuals. For
+exact contribution history, see the commit history.
+
+Modifications Copyright 2018-2019 eBay Inc.
+
+This software contains modifications developed by eBay Inc. and voluntary contributions
+from other individuals in a fork maintained at https://github.com/eBay/libovsdb
+For details on these contributions, please consult the git history.
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/cache.go b/vendor/github.com/ovn-org/libovsdb/cache/cache.go
new file mode 100644
index 0000000000..60182071af
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/cache/cache.go
@@ -0,0 +1,1279 @@
+package cache
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/gob"
+ "encoding/hex"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/libovsdb/updates"
+)
+
+const (
+ updateEvent = "update"
+ addEvent = "add"
+ deleteEvent = "delete"
+ bufferSize = 65536
+ columnDelimiter = ","
+ keyDelimiter = "|"
+)
+
+// ErrCacheInconsistent is an error that can occur when an operation
+// would cause the cache to be inconsistent
+type ErrCacheInconsistent struct {
+ details string
+}
+
+// Error implements the error interface
+func (e *ErrCacheInconsistent) Error() string {
+ msg := "cache inconsistent"
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+func NewErrCacheInconsistent(details string) *ErrCacheInconsistent {
+ return &ErrCacheInconsistent{
+ details: details,
+ }
+}
+
+// ErrIndexExists is returned when an item in the database cannot be inserted due to existing indexes
+type ErrIndexExists struct {
+ Table string
+ Value interface{}
+ Index string
+ New string
+ Existing []string
+}
+
+func (e *ErrIndexExists) Error() string {
+ return fmt.Sprintf("cannot insert %s in the %s table. item %s has identical indexes. index: %s, value: %v", e.New, e.Table, e.Existing, e.Index, e.Value)
+}
+
+func NewIndexExistsError(table string, value interface{}, index string, new string, existing []string) *ErrIndexExists {
+ return &ErrIndexExists{
+ table, value, index, new, existing,
+ }
+}
+
+// map of unique values to uuids
+type valueToUUIDs map[interface{}]uuidset
+
+// map of column name(s) to unique values, to UUIDs
+type columnToValue map[index]valueToUUIDs
+
+// index is the type used to implement multiple cache indexes
+type index string
+
+// indexType is the type of index
+type indexType uint
+
+const (
+ schemaIndexType indexType = iota
+ clientIndexType
+)
+
+// indexSpec contains details about an index
+type indexSpec struct {
+ index index
+ columns []model.ColumnKey
+ indexType indexType
+}
+
+func (s indexSpec) isClientIndex() bool {
+ return s.indexType == clientIndexType
+}
+
+func (s indexSpec) isSchemaIndex() bool {
+ return s.indexType == schemaIndexType
+}
+
+// newIndex builds a index from a list of columns
+func newIndexFromColumns(columns ...string) index {
+ sort.Strings(columns)
+ return index(strings.Join(columns, columnDelimiter))
+}
+
+// newIndexFromColumnKeys builds a index from a list of column keys
+func newIndexFromColumnKeys(columnsKeys ...model.ColumnKey) index {
+ // RFC 7047 says that Indexes is a [<column-set>] and "Each <column-set> is a set of
+ // columns whose values, taken together within any given row, must be
+ // unique within the table". We'll store the column names, separated by comma
+ // as we'll assume (RFC is not clear), that comma isn't valid in a <id>
+ columns := make([]string, 0, len(columnsKeys))
+ columnsMap := map[string]struct{}{}
+ for _, columnKey := range columnsKeys {
+ var column string
+ if columnKey.Key != nil {
+ column = fmt.Sprintf("%s%s%v", columnKey.Column, keyDelimiter, columnKey.Key)
+ } else {
+ column = columnKey.Column
+ }
+ if _, found := columnsMap[column]; !found {
+ columns = append(columns, column)
+ columnsMap[column] = struct{}{}
+ }
+ }
+ return newIndexFromColumns(columns...)
+}
+
+// newColumnKeysFromColumns builds a list of column keys from a list of columns
+func newColumnKeysFromColumns(columns ...string) []model.ColumnKey {
+ columnKeys := make([]model.ColumnKey, len(columns))
+ for i, column := range columns {
+ columnKeys[i] = model.ColumnKey{Column: column}
+ }
+ return columnKeys
+}
+
+// RowCache is a collections of Models hashed by UUID
+type RowCache struct {
+ name string
+ dbModel model.DatabaseModel
+ dataType reflect.Type
+ cache map[string]model.Model
+ indexSpecs []indexSpec
+ indexes columnToValue
+ mutex sync.RWMutex
+}
+
+// rowByUUID returns one model from the cache by UUID. Caller must hold the row
+// cache lock.
+func (r *RowCache) rowByUUID(uuid string) model.Model {
+ if row, ok := r.cache[uuid]; ok {
+ return model.Clone(row)
+ }
+ return nil
+}
+
+// Row returns one model from the cache by UUID
+func (r *RowCache) Row(uuid string) model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ return r.rowByUUID(uuid)
+}
+
+func (r *RowCache) HasRow(uuid string) bool {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ _, found := r.cache[uuid]
+ return found
+}
+
+// rowsByModels searches the cache to find all rows matching any of the provided
+// models, either by UUID or indexes. An error is returned if the model schema
+// has no UUID field, or if the provided models are not all the same type.
+func (r *RowCache) rowsByModels(models []model.Model, useClientIndexes bool) (map[string]model.Model, error) {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+
+ results := make(map[string]model.Model, len(models))
+ for _, m := range models {
+ if reflect.TypeOf(m) != r.dataType {
+ return nil, fmt.Errorf("model type %s didn't match expected row type %s", reflect.TypeOf(m), r.dataType)
+ }
+ info, _ := r.dbModel.NewModelInfo(m)
+ field, err := info.FieldByColumn("_uuid")
+ if err != nil {
+ return nil, err
+ }
+ if uuid := field.(string); uuid != "" {
+ if _, ok := results[uuid]; !ok {
+ if row := r.rowByUUID(uuid); row != nil {
+ results[uuid] = row
+ continue
+ }
+ }
+ }
+
+ // indexSpecs are ordered, schema indexes go first, then client indexes
+ for _, indexSpec := range r.indexSpecs {
+ if indexSpec.isClientIndex() && !useClientIndexes {
+ // Given the ordered indexSpecs, we can break here if we reach the
+ // first client index
+ break
+ }
+ val, err := valueFromIndex(info, indexSpec.columns)
+ if err != nil {
+ continue
+ }
+ vals := r.indexes[indexSpec.index]
+ if uuids, ok := vals[val]; ok {
+ for uuid := range uuids {
+ if _, ok := results[uuid]; !ok {
+ results[uuid] = r.rowByUUID(uuid)
+ }
+ }
+ // Break after handling the first found index
+ // to ensure we preserve index order preference
+ break
+ }
+ }
+ }
+ if len(results) == 0 {
+ return nil, nil
+ }
+ return results, nil
+}
+
+// RowByModel searches the cache by UUID and schema indexes. UUID search is
+// performed first. Then schema indexes are evaluated in turn by the same order
+// with which they are defined in the schema. The model for the first matching
+// index is returned along with its UUID. An empty string and nil is returned if
+// no Model is found.
+func (r *RowCache) RowByModel(m model.Model) (string, model.Model, error) {
+ models, err := r.rowsByModels([]model.Model{m}, false)
+ if err != nil {
+ return "", nil, err
+ }
+ for uuid, model := range models {
+ return uuid, model, nil
+ }
+ return "", nil, nil
+}
+
+// RowsByModels searches the cache by UUID, schema indexes and client indexes.
+// UUID search is performed first. Schema indexes are evaluated next in turn by
+// the same order with which they are defined in the schema. Finally, client
+// indexes are evaluated in turn by the same order with which they are defined
+// in the client DB model. The models for the first matching index are returned,
+// which might be more than 1 if they were found through a client index since in
+// that case uniqueness is not enforced. Nil is returned if no Model is found.
+func (r *RowCache) RowsByModels(models []model.Model) (map[string]model.Model, error) {
+ return r.rowsByModels(models, true)
+}
+
+// Create writes the provided content to the cache
+func (r *RowCache) Create(uuid string, m model.Model, checkIndexes bool) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if _, ok := r.cache[uuid]; ok {
+ return NewErrCacheInconsistent(fmt.Sprintf("cannot create row %s as it already exists", uuid))
+ }
+ if reflect.TypeOf(m) != r.dataType {
+ return fmt.Errorf("expected data of type %s, but got %s", r.dataType.String(), reflect.TypeOf(m).String())
+ }
+ info, err := r.dbModel.NewModelInfo(m)
+ if err != nil {
+ return err
+ }
+ addIndexes := r.newIndexes()
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ val, err := valueFromIndex(info, indexSpec.columns)
+ if err != nil {
+ return err
+ }
+
+ uuidset := newUUIDSet(uuid)
+
+ vals := r.indexes[index]
+ existing := vals[val]
+ if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) {
+ return NewIndexExistsError(r.name, val, string(index), uuid, existing.list())
+ }
+
+ addIndexes[index][val] = uuidset
+ }
+
+ // write indexes
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ for k, v := range addIndexes[index] {
+ if indexSpec.isSchemaIndex() {
+ r.indexes[index][k] = v
+ } else {
+ r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v)
+ }
+ }
+ }
+
+ r.cache[uuid] = model.Clone(m)
+ return nil
+}
+
+// Update updates the content in the cache and returns the original (pre-update) model
+func (r *RowCache) Update(uuid string, m model.Model, checkIndexes bool) (model.Model, error) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if _, ok := r.cache[uuid]; !ok {
+ return nil, NewErrCacheInconsistent(fmt.Sprintf("cannot update row %s as it does not exist in the cache", uuid))
+ }
+ oldRow := model.Clone(r.cache[uuid])
+ oldInfo, err := r.dbModel.NewModelInfo(oldRow)
+ if err != nil {
+ return nil, err
+ }
+ newInfo, err := r.dbModel.NewModelInfo(m)
+ if err != nil {
+ return nil, err
+ }
+
+ addIndexes := r.newIndexes()
+ removeIndexes := r.newIndexes()
+ var errs []error
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ var err error
+ oldVal, err := valueFromIndex(oldInfo, indexSpec.columns)
+ if err != nil {
+ return nil, err
+ }
+ newVal, err := valueFromIndex(newInfo, indexSpec.columns)
+ if err != nil {
+ return nil, err
+ }
+
+ // if old and new values are the same, don't worry
+ if oldVal == newVal {
+ continue
+ }
+ // old and new values are NOT the same
+
+ uuidset := newUUIDSet(uuid)
+
+ // check that there are no conflicts
+ vals := r.indexes[index]
+ existing := vals[newVal]
+ if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) {
+ errs = append(errs, NewIndexExistsError(
+ r.name,
+ newVal,
+ string(index),
+ uuid,
+ existing.list(),
+ ))
+ }
+
+ addIndexes[index][newVal] = uuidset
+ removeIndexes[index][oldVal] = uuidset
+ }
+ if len(errs) > 0 {
+ return nil, fmt.Errorf("%+v", errs)
+ }
+
+ // write indexes
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ for k, v := range addIndexes[index] {
+ if indexSpec.isSchemaIndex() {
+ r.indexes[index][k] = v
+ } else {
+ r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v)
+ }
+ }
+ for k, v := range removeIndexes[index] {
+ if indexSpec.isSchemaIndex() || substractUUIDSet(r.indexes[index][k], v).empty() {
+ delete(r.indexes[index], k)
+ }
+ }
+ }
+
+ r.cache[uuid] = model.Clone(m)
+ return oldRow, nil
+}
+
+// IndexExists checks if any of the schema indexes of the provided model is
+// already in the cache under a different UUID.
+func (r *RowCache) IndexExists(row model.Model) error {
+ info, err := r.dbModel.NewModelInfo(row)
+ if err != nil {
+ return err
+ }
+ field, err := info.FieldByColumn("_uuid")
+ if err != nil {
+ return nil
+ }
+ uuid := field.(string)
+ for _, indexSpec := range r.indexSpecs {
+ if !indexSpec.isSchemaIndex() {
+ // Given the ordered indexSpecs, we can break here if we reach the
+ // first non schema index
+ break
+ }
+ index := indexSpec.index
+ val, err := valueFromIndex(info, indexSpec.columns)
+ if err != nil {
+ continue
+ }
+ vals := r.indexes[index]
+ existing := vals[val]
+ if !existing.empty() && !existing.equals(newUUIDSet(uuid)) {
+ return NewIndexExistsError(
+ r.name,
+ val,
+ string(index),
+ uuid,
+ existing.list(),
+ )
+ }
+ }
+ return nil
+}
+
+// Delete deletes a row from the cache
+func (r *RowCache) Delete(uuid string) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if _, ok := r.cache[uuid]; !ok {
+ return NewErrCacheInconsistent(fmt.Sprintf("cannot delete row %s as it does not exist in the cache", uuid))
+ }
+ oldRow := r.cache[uuid]
+ oldInfo, err := r.dbModel.NewModelInfo(oldRow)
+ if err != nil {
+ return err
+ }
+
+ removeIndexes := r.newIndexes()
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ oldVal, err := valueFromIndex(oldInfo, indexSpec.columns)
+ if err != nil {
+ return err
+ }
+
+ removeIndexes[index][oldVal] = newUUIDSet(uuid)
+ }
+
+ // write indexes
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ for k, v := range removeIndexes[index] {
+ // only remove the index if it is pointing to this uuid
+ // otherwise we can cause a consistency issue if we've processed
+ // updates out of order
+ if substractUUIDSet(r.indexes[index][k], v).empty() {
+ delete(r.indexes[index], k)
+ }
+ }
+ }
+
+ delete(r.cache, uuid)
+ return nil
+}
+
+// Rows returns a copy of all Rows in the Cache
+func (r *RowCache) Rows() map[string]model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ result := make(map[string]model.Model)
+ for k, v := range r.cache {
+ result[k] = model.Clone(v)
+ }
+ return result
+}
+
+// RowsShallow returns a cloned list of all Rows in the cache, but does not
+// clone the underlying objects. Therefore, the objects returned are READ ONLY.
+// This is, however, thread safe, as the cached objects are cloned before being updated
+// when modifications come in.
+func (r *RowCache) RowsShallow() map[string]model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+
+ result := make(map[string]model.Model, len(r.cache))
+ for k, v := range r.cache {
+ result[k] = v
+ }
+ return result
+}
+
+// uuidsByConditionsAsIndexes checks possible indexes that can be built with a
+// subset of the provided conditions and returns the uuids for the models that
+// match that subset of conditions. If no conditions could be used as indexes,
+// returns nil. Note that this method does not necessarily match all the
+// provided conditions. Thus the caller is required to evaluate all the
+// conditions against the returned candidates. This is only useful to obtain, as
+// quick as possible, via indexes, a reduced list of candidate models that might
+// match all conditions, which should be better than just evaluating all
+// conditions against all rows of a table.
+//
+//nolint:gocyclo // warns overall function is complex but ignores inner functions
+func (r *RowCache) uuidsByConditionsAsIndexes(conditions []ovsdb.Condition, nativeValues []interface{}) (uuidset, error) {
+ type indexableCondition struct {
+ column string
+ keys []interface{}
+ nativeValue interface{}
+ }
+
+ // build an indexable condition, more appropriate for our processing, from
+ // an ovsdb condition. Only equality based conditions can be used as indexes
+ // (or `includes` conditions on map values).
+ toIndexableCondition := func(condition ovsdb.Condition, nativeValue interface{}) *indexableCondition {
+ if condition.Column == "_uuid" {
+ return nil
+ }
+ if condition.Function != ovsdb.ConditionEqual && condition.Function != ovsdb.ConditionIncludes {
+ return nil
+ }
+ v := reflect.ValueOf(nativeValue)
+ if !v.IsValid() {
+ return nil
+ }
+ isSet := v.Kind() == reflect.Slice || v.Kind() == reflect.Array
+ if condition.Function == ovsdb.ConditionIncludes && isSet {
+ return nil
+ }
+ keys := []interface{}{}
+ if v.Kind() == reflect.Map && condition.Function == ovsdb.ConditionIncludes {
+ for _, key := range v.MapKeys() {
+ keys = append(keys, key.Interface())
+ }
+ }
+ return &indexableCondition{
+ column: condition.Column,
+ keys: keys,
+ nativeValue: nativeValue,
+ }
+ }
+
+ // for any given set of conditions, we need to check if an index uses the
+ // same fields as the conditions
+ indexMatchesConditions := func(spec indexSpec, conditions []*indexableCondition) bool {
+ columnKeys := []model.ColumnKey{}
+ for _, condition := range conditions {
+ if len(condition.keys) == 0 {
+ columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column})
+ continue
+ }
+ for _, key := range condition.keys {
+ columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column, Key: key})
+ }
+ }
+ index := newIndexFromColumnKeys(columnKeys...)
+ return index == spec.index
+ }
+
+ // for a specific set of conditions, check if an index can be built from
+ // them and return the associated UUIDs
+ evaluateConditionSetAsIndex := func(conditions []*indexableCondition) (uuidset, error) {
+ // build a model with the values from the conditions
+ m, err := r.dbModel.NewModel(r.name)
+ if err != nil {
+ return nil, err
+ }
+ info, err := r.dbModel.NewModelInfo(m)
+ if err != nil {
+ return nil, err
+ }
+ for _, conditions := range conditions {
+ err := info.SetField(conditions.column, conditions.nativeValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, spec := range r.indexSpecs {
+ if !indexMatchesConditions(spec, conditions) {
+ continue
+ }
+ // if we have an index for those conditions, calculate the index
+ // value. The models mapped to that value match the conditions.
+ v, err := valueFromIndex(info, spec.columns)
+ if err != nil {
+ return nil, err
+ }
+ if v != nil {
+ uuids := r.indexes[spec.index][v]
+ if uuids == nil {
+ // this set of conditions was represented by an index but
+ // had no matches, return an empty set
+ uuids = uuidset{}
+ }
+ return uuids, nil
+ }
+ }
+ return nil, nil
+ }
+
+ // set of uuids that match the conditions as we evaluate them
+ var matching uuidset
+
+ // attempt to evaluate a set of conditions via indexes and intersect the
+ // results against matches of previous sets
+ intersectUUIDsFromConditionSet := func(indexableConditions []*indexableCondition) (bool, error) {
+ uuids, err := evaluateConditionSetAsIndex(indexableConditions)
+ if err != nil {
+ return true, err
+ }
+ if matching == nil {
+ matching = uuids
+ } else if uuids != nil {
+ matching = intersectUUIDSets(matching, uuids)
+ }
+ if matching != nil && len(matching) <= 1 {
+ // if we had no matches or a single match, no point in continuing
+ // searching for additional indexes. If we had a single match, it's
+ // cheaper to just evaluate all conditions on it.
+ return true, nil
+ }
+ return false, nil
+ }
+
+ // First, filter out conditions that cannot be matched against indexes. With
+ // the remaining conditions build all possible subsets (the power set of all
+ // conditions) and for any subset that is an index, intersect the obtained
+ // uuids with the ones obtained from previous subsets
+ matchUUIDsFromConditionsPowerSet := func() error {
+ ps := [][]*indexableCondition{}
+ // prime the power set with a first empty subset
+ ps = append(ps, []*indexableCondition{})
+ for i, condition := range conditions {
+ nativeValue := nativeValues[i]
+ iCondition := toIndexableCondition(condition, nativeValue)
+ // this is not a condition we can use as an index, skip it
+ if iCondition == nil {
+ continue
+ }
+ // the power set is built appending the subsets that result from
+ // adding each item to each of the previous subsets
+ ss := make([][]*indexableCondition, len(ps))
+ for j := range ss {
+ ss[j] = make([]*indexableCondition, len(ps[j]), len(ps[j])+1)
+ copy(ss[j], ps[j])
+ ss[j] = append(ss[j], iCondition)
+ // as we add them to the power set, attempt to evaluate this
+ // subset of conditions as indexes
+ stop, err := intersectUUIDsFromConditionSet(ss[j])
+ if stop || err != nil {
+ return err
+ }
+ }
+ ps = append(ps, ss...)
+ }
+ return nil
+ }
+
+ // finally
+ err := matchUUIDsFromConditionsPowerSet()
+ return matching, err
+}
+
+// RowsByCondition searches models in the cache that match all conditions.
+// Matching happens in two phases: first, conditions that map onto declared
+// indexes are used to narrow the candidate set cheaply (uuidsByConditionsAsIndexes),
+// then every condition is evaluated explicitly against the remaining candidates.
+func (r *RowCache) RowsByCondition(conditions []ovsdb.Condition) (map[string]model.Model, error) {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	results := make(map[string]model.Model)
+	schema := r.dbModel.Schema.Table(r.name)
+
+	// no conditions matches all rows
+	if len(conditions) == 0 {
+		for uuid := range r.cache {
+			results[uuid] = r.rowByUUID(uuid)
+		}
+		return results, nil
+	}
+
+	// one pass to obtain the native values
+	nativeValues := make([]interface{}, 0, len(conditions))
+	for _, condition := range conditions {
+		tSchema := schema.Column(condition.Column)
+		nativeValue, err := ovsdb.OvsToNative(tSchema, condition.Value)
+		if err != nil {
+			return nil, err
+		}
+		nativeValues = append(nativeValues, nativeValue)
+	}
+
+	// obtain all possible matches using conditions as indexes
+	matching, err := r.uuidsByConditionsAsIndexes(conditions, nativeValues)
+	if err != nil {
+		return nil, err
+	}
+
+	// From the matches obtained with indexes, which might have not used all
+	// conditions, continue trimming down the list explicitly evaluating the
+	// conditions.
+	// A nil `matching` means no index narrowed the candidates yet; in that
+	// case the first explicitly evaluated condition scans the whole table.
+	for i, condition := range conditions {
+		matchingCondition := uuidset{}
+
+		// Fast path: equality on _uuid resolves with a direct cache lookup
+		// instead of evaluating the condition row by row.
+		if condition.Column == "_uuid" && (condition.Function == ovsdb.ConditionEqual || condition.Function == ovsdb.ConditionIncludes) {
+			uuid, ok := nativeValues[i].(string)
+			if !ok {
+				panic(fmt.Sprintf("%+v is not a uuid", nativeValues[i]))
+			}
+			if _, found := r.cache[uuid]; found {
+				matchingCondition.add(uuid)
+			}
+		} else {
+			// matchCondition evaluates this condition against a single cached
+			// row, adding its uuid to matchingCondition when it passes.
+			matchCondition := func(uuid string) error {
+				row := r.cache[uuid]
+				info, err := r.dbModel.NewModelInfo(row)
+				if err != nil {
+					return err
+				}
+				value, err := info.FieldByColumn(condition.Column)
+				if err != nil {
+					return err
+				}
+				ok, err := condition.Function.Evaluate(value, nativeValues[i])
+				if err != nil {
+					return err
+				}
+				if ok {
+					matchingCondition.add(uuid)
+				}
+				return nil
+			}
+			if matching != nil {
+				// we just need to consider rows that matched previous
+				// conditions
+				for uuid := range matching {
+					err = matchCondition(uuid)
+					if err != nil {
+						return nil, err
+					}
+				}
+			} else {
+				// If this is the first condition we are able to check, just run
+				// it by whole table
+				for uuid := range r.cache {
+					err = matchCondition(uuid)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+		if matching == nil {
+			matching = matchingCondition
+		} else {
+			matching = intersectUUIDSets(matching, matchingCondition)
+		}
+		if matching.empty() {
+			// no models match the conditions checked up to now, no need to
+			// check remaining conditions
+			break
+		}
+	}
+
+	for uuid := range matching {
+		results[uuid] = r.rowByUUID(uuid)
+	}
+
+	return results, nil
+}
+
+// Len returns the number of rows currently held in the cache.
+func (r *RowCache) Len() int {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	size := len(r.cache)
+	return size
+}
+
+// Index returns the contents of the index defined over the given columns as a
+// map from index value to the UUIDs that share it, or an error if no such
+// index exists.
+func (r *RowCache) Index(columns ...string) (map[interface{}][]string, error) {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	values, ok := r.indexes[newIndexFromColumns(columns...)]
+	if !ok {
+		return nil, fmt.Errorf("%v is not an index", columns)
+	}
+	result := make(map[interface{}][]string, len(values))
+	for value, uuids := range values {
+		result[value] = uuids.list()
+	}
+	return result, nil
+}
+
+// EventHandler can handle events when the contents of the cache change
+type EventHandler interface {
+	// OnAdd is called when a model is inserted into the cache.
+	OnAdd(table string, model model.Model)
+	// OnUpdate is called when a cached model is replaced by a new version.
+	OnUpdate(table string, old model.Model, new model.Model)
+	// OnDelete is called when a model is removed from the cache.
+	OnDelete(table string, model model.Model)
+}
+
+// EventHandlerFuncs is a wrapper for the EventHandler interface
+// It allows a caller to only implement the functions they need
+type EventHandlerFuncs struct {
+	AddFunc    func(table string, model model.Model)                  // called on insert, may be nil
+	UpdateFunc func(table string, old model.Model, new model.Model)   // called on update, may be nil
+	DeleteFunc func(table string, model model.Model)                  // called on delete, may be nil
+}
+
+// OnAdd calls AddFunc if it is not nil
+func (e *EventHandlerFuncs) OnAdd(table string, model model.Model) {
+	if e.AddFunc == nil {
+		return
+	}
+	e.AddFunc(table, model)
+}
+
+// OnUpdate calls UpdateFunc if it is not nil
+func (e *EventHandlerFuncs) OnUpdate(table string, old, new model.Model) {
+	if e.UpdateFunc == nil {
+		return
+	}
+	e.UpdateFunc(table, old, new)
+}
+
+// OnDelete calls DeleteFunc if it is not nil
+func (e *EventHandlerFuncs) OnDelete(table string, model model.Model) {
+	if e.DeleteFunc == nil {
+		return
+	}
+	e.DeleteFunc(table, model)
+}
+
+// TableCache contains a collection of RowCaches, hashed by name,
+// and an array of EventHandlers that respond to cache updates
+// It implements the ovsdb.NotificationHandler interface so it may
+// handle update notifications
+type TableCache struct {
+	cache          map[string]*RowCache // per-table row caches, keyed by table name
+	eventProcessor *eventProcessor      // queues and dispatches add/update/delete events
+	dbModel        model.DatabaseModel
+	ovsdb.NotificationHandler
+	mutex  sync.RWMutex // guards cache
+	logger *logr.Logger
+}
+
+// Data is the type for data that can be prepopulated in the cache,
+// nested as: table name -> row UUID -> model.
+type Data map[string]map[string]model.Model
+
+// NewTableCache creates a new TableCache
+func NewTableCache(dbModel model.DatabaseModel, data Data, logger *logr.Logger) (*TableCache, error) {
+ if !dbModel.Valid() {
+ return nil, fmt.Errorf("tablecache without valid databasemodel cannot be populated")
+ }
+ if logger == nil {
+ l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("cache")
+ logger = &l
+ } else {
+ l := logger.WithName("cache")
+ logger = &l
+ }
+ eventProcessor := newEventProcessor(bufferSize, logger)
+ cache := make(map[string]*RowCache)
+ tableTypes := dbModel.Types()
+ for name := range dbModel.Schema.Tables {
+ cache[name] = newRowCache(name, dbModel, tableTypes[name])
+ }
+ for table, rowData := range data {
+ if _, ok := dbModel.Schema.Tables[table]; !ok {
+ return nil, fmt.Errorf("table %s is not in schema", table)
+ }
+ rowCache := cache[table]
+ for uuid, row := range rowData {
+ if err := rowCache.Create(uuid, row, true); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return &TableCache{
+ cache: cache,
+ eventProcessor: eventProcessor,
+ dbModel: dbModel,
+ mutex: sync.RWMutex{},
+ logger: logger,
+ }, nil
+}
+
+// Mapper returns the mapper of the underlying database model.
+func (t *TableCache) Mapper() mapper.Mapper {
+	m := t.dbModel.Mapper
+	return m
+}
+
+// DatabaseModel returns the database model this cache was built from.
+func (t *TableCache) DatabaseModel() model.DatabaseModel {
+	dbModel := t.dbModel
+	return dbModel
+}
+
+// Table returns the RowCache for the named table, or nil when the table is
+// not known to the cache.
+func (t *TableCache) Table(name string) *RowCache {
+	t.mutex.RLock()
+	defer t.mutex.RUnlock()
+	// a missing key yields the zero value, which is nil for *RowCache
+	return t.cache[name]
+}
+
+// Tables returns the names of all tables held by the cache, in no particular
+// order. The result is nil when the cache holds no tables.
+func (t *TableCache) Tables() []string {
+	t.mutex.RLock()
+	defer t.mutex.RUnlock()
+	var names []string
+	for name := range t.cache {
+		names = append(names, name)
+	}
+	return names
+}
+
+// Update implements the update method of the NotificationHandler interface
+// this populates a channel with updates so they can be processed after the initial
+// state has been Populated
+func (t *TableCache) Update(context interface{}, tableUpdates ovsdb.TableUpdates) error {
+	if len(tableUpdates) == 0 {
+		return nil
+	}
+	err := t.Populate(tableUpdates)
+	if err != nil {
+		t.logger.Error(err, "during libovsdb cache populate")
+	}
+	return err
+}
+
+// Update2 implements the update method of the NotificationHandler interface
+// this populates a channel with updates so they can be processed after the initial
+// state has been Populated
+func (t *TableCache) Update2(context interface{}, tableUpdates ovsdb.TableUpdates2) error {
+	if len(tableUpdates) == 0 {
+		return nil
+	}
+	err := t.Populate2(tableUpdates)
+	if err != nil {
+		t.logger.Error(err, "during libovsdb cache populate2")
+	}
+	return err
+}
+
+// Locked implements the locked method of the NotificationHandler interface
+// It is intentionally a no-op: the cache takes no action on lock notifications.
+func (t *TableCache) Locked([]interface{}) {
+}
+
+// Stolen implements the stolen method of the NotificationHandler interface
+// It is intentionally a no-op.
+func (t *TableCache) Stolen([]interface{}) {
+}
+
+// Echo implements the echo method of the NotificationHandler interface
+// It is intentionally a no-op.
+func (t *TableCache) Echo([]interface{}) {
+}
+
+// Disconnected implements the disconnected method of the NotificationHandler interface
+// It is intentionally a no-op; the cache keeps its contents on disconnect.
+func (t *TableCache) Disconnected() {
+}
+
+// Populate adds data to the cache and places an event on the channel
+func (t *TableCache) Populate(tableUpdates ovsdb.TableUpdates) error {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	// Only tables known to the client database model are processed; updates
+	// for any other table are ignored.
+	for table := range t.dbModel.Types() {
+		tu, ok := tableUpdates[table]
+		if !ok {
+			continue
+		}
+		tCache := t.cache[table]
+		for uuid, row := range tu {
+			t.logger.V(5).Info("processing update", "table", table, "uuid", uuid)
+			update := updates.ModelUpdates{}
+			// current is nil when the row is not yet cached (an insert)
+			current := tCache.cache[uuid]
+			err := update.AddRowUpdate(t.dbModel, table, uuid, current, *row)
+			if err != nil {
+				return err
+			}
+			// apply the computed change to the cache and queue events
+			err = t.ApplyCacheUpdate(update)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Populate2 adds data to the cache and places an event on the channel
+func (t *TableCache) Populate2(tableUpdates ovsdb.TableUpdates2) error {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	// Only tables known to the client database model are processed.
+	for table := range t.dbModel.Types() {
+		tu, ok := tableUpdates[table]
+		if !ok {
+			continue
+		}
+		tCache := t.cache[table]
+		for uuid, row := range tu {
+			t.logger.V(5).Info("processing update", "table", table, "uuid", uuid)
+			update := updates.ModelUpdates{}
+			current := tCache.cache[uuid]
+			// A modify/delete for a row we never saw means the cache has
+			// diverged from the server; surface that as an explicit error.
+			if row.Initial == nil && row.Insert == nil && current == nil {
+				return NewErrCacheInconsistent(fmt.Sprintf("row with uuid %s does not exist", uuid))
+			}
+			err := update.AddRowUpdate2(t.dbModel, table, uuid, current, *row)
+			if err != nil {
+				return err
+			}
+			err = t.ApplyCacheUpdate(update)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Purge drops all data in the cache and reinitializes it using the
+// provided database model
+func (t *TableCache) Purge(dbModel model.DatabaseModel) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.dbModel = dbModel
+	types := t.dbModel.Types()
+	// replace every row cache with a fresh, empty one
+	for table := range t.dbModel.Schema.Tables {
+		t.cache[table] = newRowCache(table, t.dbModel, types[table])
+	}
+}
+
+// AddEventHandler registers the supplied EventHandler to receive cache events
+// It simply delegates to the underlying eventProcessor.
+func (t *TableCache) AddEventHandler(handler EventHandler) {
+	t.eventProcessor.AddEventHandler(handler)
+}
+
+// Run starts the event processing loop.
+// It blocks until the stop channel is closed.
+func (t *TableCache) Run(stopCh <-chan struct{}) {
+	// eventProcessor.Run itself blocks until stopCh is closed, so it can be
+	// called directly. The previous goroutine + WaitGroup indirection added
+	// no behavior, and the old doc comment mentioned clearing updates
+	// channels that this type does not have.
+	t.eventProcessor.Run(stopCh)
+}
+
+// newRowCache creates a new row cache with the provided data
+// if the data is nil, an empty RowCache will be created
+func newRowCache(name string, dbModel model.DatabaseModel, dataType reflect.Type) *RowCache {
+	schemaIndexes := dbModel.Schema.Table(name).Indexes
+	clientIndexes := dbModel.Client().Indexes(name)
+
+	r := &RowCache{
+		name:       name,
+		dbModel:    dbModel,
+		indexSpecs: make([]indexSpec, 0, len(schemaIndexes)+len(clientIndexes)),
+		dataType:   dataType,
+		cache:      make(map[string]model.Model),
+		mutex:      sync.RWMutex{},
+	}
+
+	// respect the order of indexes, add first schema indexes, then client
+	// indexes
+	// the `indexes` map is only used here, to deduplicate client indexes that
+	// repeat a schema index
+	indexes := map[index]indexSpec{}
+	for _, columns := range schemaIndexes {
+		columnKeys := newColumnKeysFromColumns(columns...)
+		index := newIndexFromColumnKeys(columnKeys...)
+		spec := indexSpec{index: index, columns: columnKeys, indexType: schemaIndexType}
+		r.indexSpecs = append(r.indexSpecs, spec)
+		indexes[index] = spec
+	}
+	for _, clientIndex := range clientIndexes {
+		columnKeys := clientIndex.Columns
+		index := newIndexFromColumnKeys(columnKeys...)
+		// if this is already a DB index, ignore
+		if _, ok := indexes[index]; ok {
+			continue
+		}
+		spec := indexSpec{index: index, columns: columnKeys, indexType: clientIndexType}
+		r.indexSpecs = append(r.indexSpecs, spec)
+		indexes[index] = spec
+	}
+
+	r.indexes = r.newIndexes()
+	return r
+}
+
+// newIndexes builds an empty value-to-UUIDs map for every index spec
+// configured on this row cache.
+func (r *RowCache) newIndexes() columnToValue {
+	indexes := make(columnToValue)
+	for _, spec := range r.indexSpecs {
+		indexes[spec.index] = make(valueToUUIDs)
+	}
+	return indexes
+}
+
+// event encapsulates a cache event
+type event struct {
+	eventType string      // addEvent, updateEvent or deleteEvent
+	table     string      // table the affected row belongs to
+	old       model.Model // previous version of the row (nil on add)
+	new       model.Model // new version of the row (nil on delete)
+}
+
+// eventProcessor handles the queueing and processing of cache events
+type eventProcessor struct {
+	events chan *event // buffered queue of pending events
+	// handlersMutex locks the handlers array when we add a handler or dispatch events
+	// we don't need a RWMutex in this case as we only have one thread reading and the write
+	// volume is very low (i.e only when AddEventHandler is called)
+	handlersMutex sync.Mutex
+	handlers      []EventHandler
+	logger        *logr.Logger
+}
+
+// newEventProcessor returns an eventProcessor whose event channel buffers up
+// to capacity pending events.
+func newEventProcessor(capacity int, logger *logr.Logger) *eventProcessor {
+	ep := eventProcessor{
+		events:   make(chan *event, capacity),
+		handlers: []EventHandler{},
+		logger:   logger,
+	}
+	return &ep
+}
+
+// AddEventHandler registers the supplied EventHandler with the eventProcessor
+// EventHandlers MUST process events quickly, for example, pushing them to a queue
+// to be processed by the client. Long Running handler functions adversely affect
+// other handlers and MAY cause loss of data if the channel buffer is full
+func (e *eventProcessor) AddEventHandler(handler EventHandler) {
+	e.handlersMutex.Lock()
+	e.handlers = append(e.handlers, handler)
+	e.handlersMutex.Unlock()
+}
+
+// AddEvent queues an event for dispatch. The send is non-blocking: when the
+// buffer is full the event is dropped (and logged) rather than stalling the
+// single writer, which runs RPC in blocking mode.
+func (e *eventProcessor) AddEvent(eventType string, table string, old model.Model, new model.Model) {
+	ev := &event{
+		eventType: eventType,
+		table:     table,
+		old:       old,
+		new:       new,
+	}
+	select {
+	case e.events <- ev:
+	default:
+		e.logger.V(0).Info("dropping event because event buffer is full")
+	}
+}
+
+// Run runs the eventProcessor loop.
+// It will block until the stopCh has been closed
+// Otherwise it will wait for events to arrive on the event channel
+// Once received, it will dispatch the event to each registered handler
+func (e *eventProcessor) Run(stopCh <-chan struct{}) {
+	for {
+		select {
+		case <-stopCh:
+			return
+		case ev := <-e.events:
+			// handlers are invoked synchronously while holding the mutex
+			e.handlersMutex.Lock()
+			for _, h := range e.handlers {
+				switch ev.eventType {
+				case addEvent:
+					h.OnAdd(ev.table, ev.new)
+				case updateEvent:
+					h.OnUpdate(ev.table, ev.old, ev.new)
+				case deleteEvent:
+					h.OnDelete(ev.table, ev.old)
+				}
+			}
+			e.handlersMutex.Unlock()
+		}
+	}
+}
+
+// cacheUpdate is the subset of updates.ModelUpdates that ApplyCacheUpdate
+// needs: the list of affected tables and the old/new model pair per row.
+type cacheUpdate interface {
+	GetUpdatedTables() []string
+	ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error
+}
+
+// ApplyCacheUpdate applies the given update to the cache. Depending on
+// whether the old and/or new model of each entry is set, rows are created,
+// updated or deleted, and a matching event is queued for the handlers.
+func (t *TableCache) ApplyCacheUpdate(update cacheUpdate) error {
+	tables := update.GetUpdatedTables()
+	for _, table := range tables {
+		tCache := t.cache[table]
+		err := update.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error {
+			switch {
+			case old == nil && new != nil:
+				// no previous version: insert
+				t.logger.V(5).Info("inserting model", "table", table, "uuid", uuid, "model", new)
+				err := tCache.Create(uuid, new, false)
+				if err != nil {
+					return err
+				}
+				t.eventProcessor.AddEvent(addEvent, table, nil, new)
+			case old != nil && new != nil:
+				// both versions present: update in place
+				t.logger.V(5).Info("updating model", "table", table, "uuid", uuid, "old", old, "new", new)
+				_, err := tCache.Update(uuid, new, false)
+				if err != nil {
+					return err
+				}
+				t.eventProcessor.AddEvent(updateEvent, table, old, new)
+			case new == nil:
+				// no new version: delete
+				t.logger.V(5).Info("deleting model", "table", table, "uuid", uuid, "model", old)
+				err := tCache.Delete(uuid)
+				if err != nil {
+					return err
+				}
+				t.eventProcessor.AddEvent(deleteEvent, table, old, nil)
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// valueFromIndex computes the map key that represents a model's value for an
+// index (a set of column keys). Single-column indexes use the column value
+// itself as the key; multi-column indexes gob-encode the values in order and
+// reduce them to a fixed-size, hex-encoded SHA-256 digest.
+func valueFromIndex(info *mapper.Info, columnKeys []model.ColumnKey) (interface{}, error) {
+	if len(columnKeys) > 1 {
+		var buf bytes.Buffer
+		enc := gob.NewEncoder(&buf)
+		for _, columnKey := range columnKeys {
+			val, err := valueFromColumnKey(info, columnKey)
+			if err != nil {
+				return "", err
+			}
+			// if object is nil dont try to encode it
+			value := reflect.ValueOf(val)
+			if value.Kind() == reflect.Invalid {
+				continue
+			}
+			// if object is a nil pointer dont try to encode it
+			if value.Kind() == reflect.Pointer && value.IsNil() {
+				continue
+			}
+			err = enc.Encode(val)
+			if err != nil {
+				return "", err
+			}
+		}
+		// Hash the encoded values. The previous code called h.Sum(buf.Bytes())
+		// without ever writing to the hash, which appended the digest of the
+		// empty input to the whole encoding, producing keys that grew with
+		// the size of the column values instead of a fixed-size digest.
+		// These keys are only used internally and are always produced by this
+		// function, so the key format stays self-consistent.
+		h := sha256.New()
+		h.Write(buf.Bytes()) // hash.Hash.Write never returns an error
+		return hex.EncodeToString(h.Sum(nil)), nil
+	}
+	val, err := valueFromColumnKey(info, columnKeys[0])
+	if err != nil {
+		return "", err
+	}
+	return val, err
+}
+
+// valueFromColumnKey returns the model's value for a column, narrowed down to
+// a single map key when the column key specifies one.
+func valueFromColumnKey(info *mapper.Info, columnKey model.ColumnKey) (interface{}, error) {
+	value, err := info.FieldByColumn(columnKey.Column)
+	if err != nil {
+		return nil, err
+	}
+	if columnKey.Key == nil {
+		return value, nil
+	}
+	value, err = valueFromMap(value, columnKey.Key)
+	if err != nil {
+		return "", fmt.Errorf("can't get key value from map: %v", err)
+	}
+	return value, nil
+}
+
+// valueFromMap looks key up in aMap, returning the zero value of the map's
+// value type when the key is absent. An error is returned when aMap is not
+// actually a map.
+func valueFromMap(aMap interface{}, key interface{}) (interface{}, error) {
+	mapValue := reflect.ValueOf(aMap)
+	if mapValue.Kind() != reflect.Map {
+		return nil, fmt.Errorf("expected map but got %s", mapValue.Kind())
+	}
+	entry := mapValue.MapIndex(reflect.ValueOf(key))
+	if entry.IsValid() {
+		return entry.Interface(), nil
+	}
+	// return the zero value for the map value type
+	return reflect.Indirect(reflect.New(mapValue.Type().Elem())).Interface(), nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/doc.go b/vendor/github.com/ovn-org/libovsdb/cache/doc.go
new file mode 100644
index 0000000000..3b176f2775
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/cache/doc.go
@@ -0,0 +1,16 @@
+/*
+Package cache provides a cache of model.Model elements that can be used in an OVSDB client or server.
+
+The cache can be accessed using a simple API:
+
+ cache.Table("Open_vSwitch").Row("")
+
+It implements the ovsdb.NotificationHandler interface
+such that it can be populated automatically by
+update notifications
+
+It also contains an eventProcessor where callers
+may register functions that will get called on
+every Add/Update/Delete event.
+*/
+package cache
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go b/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go
new file mode 100644
index 0000000000..f7c1397378
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go
@@ -0,0 +1,101 @@
+package cache
+
+// void is a zero-size placeholder used as the value type of set maps.
+type void struct{}
+
+// uuidset is a set of row UUIDs.
+type uuidset map[string]void
+
+// newUUIDSet builds a uuidset containing the given UUIDs.
+func newUUIDSet(uuids ...string) uuidset {
+	set := make(uuidset, len(uuids))
+	for _, uuid := range uuids {
+		set[uuid] = void{}
+	}
+	return set
+}
+
+// add inserts uuid into the set; adding an existing member is a no-op.
+func (s uuidset) add(uuid string) {
+	s[uuid] = void{}
+}
+
+// remove deletes uuid from the set; removing a missing member is a no-op.
+func (s uuidset) remove(uuid string) {
+	delete(s, uuid)
+}
+
+// has reports whether uuid is a member of the set.
+func (s uuidset) has(uuid string) bool {
+	_, found := s[uuid]
+	return found
+}
+
+// equals reports whether both sets contain exactly the same members.
+func (s uuidset) equals(o uuidset) bool {
+	if len(s) != len(o) {
+		return false
+	}
+	// equal sizes, so one-way containment implies equality
+	for uuid := range o {
+		if !s.has(uuid) {
+			return false
+		}
+	}
+	return true
+}
+
+// getAny returns an arbitrary member of the set, or "" when it is empty.
+func (s uuidset) getAny() string {
+	for uuid := range s {
+		return uuid
+	}
+	return ""
+}
+
+// list returns the members of the set as a slice, in no particular order.
+func (s uuidset) list() []string {
+	result := make([]string, 0, len(s))
+	for uuid := range s {
+		result = append(result, uuid)
+	}
+	return result
+}
+
+// empty reports whether the set has no members (including a nil set).
+func (s uuidset) empty() bool {
+	return len(s) == 0
+}
+
+// addUUIDSet adds every member of s2 into s1 and returns the result. A nil
+// s1 is allocated on demand, unless s2 is empty in which case s1 is returned
+// unchanged.
+func addUUIDSet(s1, s2 uuidset) uuidset {
+	if len(s2) == 0 {
+		return s1
+	}
+	if s1 == nil {
+		s1 = uuidset{}
+	}
+	for uuid := range s2 {
+		s1[uuid] = void{}
+	}
+	return s1
+}
+
+// substractUUIDSet removes every member of s2 from s1, modifying s1 in place,
+// and returns it. (The "substract" spelling is kept: renaming would break
+// callers elsewhere in the package.)
+func substractUUIDSet(s1, s2 uuidset) uuidset {
+	if len(s1) == 0 || len(s2) == 0 {
+		return s1
+	}
+	for uuid := range s2 {
+		delete(s1, uuid)
+	}
+	return s1
+}
+
+func intersectUUIDSets(s1, s2 uuidset) uuidset {
+ if len(s1) == 0 || len(s2) == 0 {
+ return nil
+ }
+ var big uuidset
+ var small uuidset
+ if len(s1) > len(s2) {
+ big = s1
+ small = s2
+ } else {
+ big = s2
+ small = s1
+ }
+ f := uuidset{}
+ for uuid := range small {
+ if big.has(uuid) {
+ f.add(uuid)
+ }
+ }
+ return f
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/api.go b/vendor/github.com/ovn-org/libovsdb/client/api.go
new file mode 100644
index 0000000000..4977589442
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/api.go
@@ -0,0 +1,593 @@
+package client
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/go-logr/logr"
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// API defines basic operations to interact with the database
+type API interface {
+	// List populates a slice of Models objects based on their type
+	// The function parameter must be a pointer to a slice of Models
+	// Models can be structs or pointers to structs
+	// If the slice is null, the entire cache will be copied into the slice
+	// If it has a capacity != 0, only 'capacity' elements will be filled in
+	List(ctx context.Context, result interface{}) error
+
+	// Create a Conditional API from a Function that is used to filter cached data
+	// The function must accept a Model implementation and return a boolean. E.g:
+	// ConditionFromFunc(func(l *LogicalSwitch) bool { return l.Enabled })
+	WhereCache(predicate interface{}) ConditionalAPI
+
+	// Create a ConditionalAPI from a Model's index data, where operations
+	// apply to elements that match the values provided in one or more
+	// model.Models according to the indexes. All provided Models must be
+	// the same type or an error will be generated when operations
+	// are performed on the ConditionalAPI.
+	Where(...model.Model) ConditionalAPI
+
+	// WhereAny creates a ConditionalAPI from a list of Conditions where
+	// operations apply to elements that match any (eg, logical OR) of the
+	// conditions.
+	WhereAny(model.Model, ...model.Condition) ConditionalAPI
+
+	// WhereAll creates a ConditionalAPI from a list of Conditions where
+	// operations apply to elements that match all (eg, logical AND) of the
+	// conditions.
+	WhereAll(model.Model, ...model.Condition) ConditionalAPI
+
+	// Get retrieves a model from the cache
+	// The way the object will be fetched depends on the data contained in the
+	// provided model and the indexes defined in the associated schema
+	// For more complex ways of searching for elements in the cache, the
+	// preferred way is Where({condition}).List()
+	Get(context.Context, model.Model) error
+
+	// Create returns the operation needed to add the model(s) to the Database
+	// Only fields with non-default values will be added to the transaction. If
+	// the field associated with column "_uuid" has some content other than a
+	// UUID, it will be treated as named-uuid
+	Create(...model.Model) ([]ovsdb.Operation, error)
+}
+
+// ConditionalAPI is an interface used to perform operations that require / use Conditions
+type ConditionalAPI interface {
+	// List uses the condition to search on the cache and populates
+	// the slice of Models objects based on their type
+	List(ctx context.Context, result interface{}) error
+
+	// Mutate returns the operations needed to perform the mutation specified
+	// by the model and the list of Mutation objects
+	// Depending on the Condition, it might return one or many operations
+	Mutate(model.Model, ...model.Mutation) ([]ovsdb.Operation, error)
+
+	// Update returns the operations needed to update any number of rows according
+	// to the data in the given model.
+	// By default, all the non-default values contained in model will be updated.
+	// Optional fields can be passed (pointer to fields in the model) to select
+	// the fields to be updated
+	Update(model.Model, ...interface{}) ([]ovsdb.Operation, error)
+
+	// Delete returns the Operations needed to delete the models selected via the condition
+	Delete() ([]ovsdb.Operation, error)
+
+	// Wait returns the operations needed to perform the wait specified
+	// by the until condition, timeout, row and columns based on provided parameters.
+	Wait(ovsdb.WaitCondition, *int, model.Model, ...interface{}) ([]ovsdb.Operation, error)
+}
+
+// ErrWrongType is used to report the user provided parameter has the wrong type
+type ErrWrongType struct {
+	inputType reflect.Type // the type the caller actually passed
+	reason    string       // description of what was expected instead
+}
+
+// Error formats the offending type together with the expectation that it
+// violated.
+func (e *ErrWrongType) Error() string {
+	msg := fmt.Sprintf("Wrong parameter type (%s): %s", e.inputType, e.reason)
+	return msg
+}
+
+// ErrNotFound is used to inform the object or table was not found in the cache
+// Callers can detect it with errors.Is(err, ErrNotFound).
+var ErrNotFound = errors.New("object not found")
+
+// api struct implements both API and ConditionalAPI
+// Where() can be used to create a ConditionalAPI api
+type api struct {
+	cache  *cache.TableCache // cache the operations are evaluated against
+	cond   Conditional       // nil for the unconditional API entry points
+	logger *logr.Logger
+}
+
+// List populates a slice of Models given as parameter based on the configured Condition
+func (a api) List(ctx context.Context, result interface{}) error {
+	resultPtr := reflect.ValueOf(result)
+	if resultPtr.Type().Kind() != reflect.Ptr {
+		return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"}
+	}
+
+	resultVal := reflect.Indirect(resultPtr)
+	if resultVal.Type().Kind() != reflect.Slice {
+		return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"}
+	}
+
+	// List accepts a slice of Models that can be either structs or pointer to
+	// structs
+	var appendValue func(reflect.Value)
+	var m model.Model
+	if resultVal.Type().Elem().Kind() == reflect.Ptr {
+		// result is *[]*T: append the row pointer as-is
+		m = reflect.New(resultVal.Type().Elem().Elem()).Interface()
+		appendValue = func(v reflect.Value) {
+			resultVal.Set(reflect.Append(resultVal, v))
+		}
+	} else {
+		// result is *[]T: dereference the row before appending
+		m = reflect.New(resultVal.Type().Elem()).Interface()
+		appendValue = func(v reflect.Value) {
+			resultVal.Set(reflect.Append(resultVal, reflect.Indirect(v)))
+		}
+	}
+
+	table, err := a.getTableFromModel(m)
+	if err != nil {
+		return err
+	}
+
+	if a.cond != nil && a.cond.Table() != table {
+		return &ErrWrongType{resultPtr.Type(),
+			fmt.Sprintf("Table derived from input type (%s) does not match Table from Condition (%s)", table, a.cond.Table())}
+	}
+
+	tableCache := a.cache.Table(table)
+	if tableCache == nil {
+		return ErrNotFound
+	}
+
+	var rows map[string]model.Model
+	if a.cond != nil {
+		rows, err = a.cond.Matches()
+		if err != nil {
+			return err
+		}
+	} else {
+		rows = tableCache.Rows()
+	}
+	// If given a null slice, fill it in the cache table completely, if not, just up to
+	// its capability.
+	if resultVal.IsNil() || resultVal.Cap() == 0 {
+		resultVal.Set(reflect.MakeSlice(resultVal.Type(), 0, len(rows)))
+	}
+	i := resultVal.Len()
+	maxCap := resultVal.Cap()
+
+	// NOTE(review): map iteration order is random, so which rows end up in a
+	// capacity-limited result slice is nondeterministic.
+	for _, row := range rows {
+		if i >= maxCap {
+			break
+		}
+		appendValue(reflect.ValueOf(row))
+		i++
+	}
+
+	return nil
+}
+
+// Where returns a conditionalAPI based on model indexes. All provided models
+// must be the same type.
+func (a api) Where(models ...model.Model) ConditionalAPI {
+	cond := a.conditionFromModels(models)
+	return newConditionalAPI(a.cache, cond, a.logger)
+}
+
+// WhereAny returns a conditionalAPI that selects models matching at least one
+// of the given conditions.
+func (a api) WhereAny(m model.Model, cond ...model.Condition) ConditionalAPI {
+	conditional := a.conditionFromExplicitConditions(false, m, cond...)
+	return newConditionalAPI(a.cache, conditional, a.logger)
+}
+
+// WhereAll returns a conditionalAPI that selects models matching every one of
+// the given conditions.
+func (a api) WhereAll(m model.Model, cond ...model.Condition) ConditionalAPI {
+	conditional := a.conditionFromExplicitConditions(true, m, cond...)
+	return newConditionalAPI(a.cache, conditional, a.logger)
+}
+
+// WhereCache returns a conditionalAPI that filters cached models with the
+// given predicate function.
+func (a api) WhereCache(predicate interface{}) ConditionalAPI {
+	cond := a.conditionFromFunc(predicate)
+	return newConditionalAPI(a.cache, cond, a.logger)
+}
+
+// conditionFromFunc builds a Conditional from a predicate function; any
+// failure is wrapped in an error Conditional rather than returned directly.
+func (a api) conditionFromFunc(predicate interface{}) Conditional {
+	table, err := a.getTableFromFunc(predicate)
+	if err != nil {
+		return newErrorConditional(err)
+	}
+	cond, err := newPredicateConditional(table, a.cache, predicate)
+	if err != nil {
+		return newErrorConditional(err)
+	}
+	return cond
+}
+
+// conditionFromModels returns a Conditional from one or more models; any
+// failure is wrapped in an error Conditional.
+func (a api) conditionFromModels(models []model.Model) Conditional {
+	if len(models) == 0 {
+		return newErrorConditional(fmt.Errorf("at least one model required"))
+	}
+	table, err := a.getTableFromModel(models[0])
+	if table == "" {
+		return newErrorConditional(err)
+	}
+	cond, err := newEqualityConditional(table, a.cache, models)
+	if err != nil {
+		return newErrorConditional(err)
+	}
+	return cond
+}
+
+// conditionFromExplicitConditions returns a Conditional from a model and a set
+// of explicit conditions. With matchAll true, only models matching every
+// condition are selected; otherwise matching any single condition suffices.
+func (a api) conditionFromExplicitConditions(matchAll bool, m model.Model, cond ...model.Condition) Conditional {
+	if len(cond) == 0 {
+		return newErrorConditional(fmt.Errorf("at least one condition is required"))
+	}
+	table, err := a.getTableFromModel(m)
+	if table == "" {
+		return newErrorConditional(err)
+	}
+	conditional, err := newExplicitConditional(table, a.cache, matchAll, m, cond...)
+	if err != nil {
+		return newErrorConditional(err)
+	}
+	return conditional
+}
+
+// Get is a generic Get function capable of returning (through a provided pointer)
+// an instance of any row in the cache.
+// 'result' must be a pointer to an Model that exists in the ClientDBModel
+//
+// The way the cache is searched depends on the fields already populated in 'result'
+// Any table index (including _uuid) will be used for comparison
+func (a api) Get(ctx context.Context, m model.Model) error {
+	table, err := a.getTableFromModel(m)
+	if err != nil {
+		return err
+	}
+
+	tableCache := a.cache.Table(table)
+	if tableCache == nil {
+		return ErrNotFound
+	}
+
+	_, found, err := tableCache.RowByModel(m)
+	if err != nil {
+		return err
+	} else if found == nil {
+		return ErrNotFound
+	}
+
+	// copy the cached row into the caller's model
+	model.CloneInto(found, m)
+
+	return nil
+}
+
+// Create is a generic function capable of creating any row in the DB
+// A valid Model (pointer to object) must be provided.
+func (a api) Create(models ...model.Model) ([]ovsdb.Operation, error) {
+	var operations []ovsdb.Operation
+
+	for _, model := range models {
+		var realUUID, namedUUID string
+		var err error
+
+		tableName, err := a.getTableFromModel(model)
+		if err != nil {
+			return nil, err
+		}
+
+		// Read _uuid field, and use it as named-uuid
+		info, err := a.cache.DatabaseModel().NewModelInfo(model)
+		if err != nil {
+			return nil, err
+		}
+		if uuid, err := info.FieldByColumn("_uuid"); err == nil {
+			tmpUUID := uuid.(string)
+			if ovsdb.IsNamedUUID(tmpUUID) {
+				namedUUID = tmpUUID
+			} else if ovsdb.IsValidUUID(tmpUUID) {
+				realUUID = tmpUUID
+			}
+			// content that is neither a named-uuid nor a valid UUID leaves
+			// both fields empty
+		} else {
+			return nil, err
+		}
+
+		row, err := a.cache.Mapper().NewRow(info)
+		if err != nil {
+			return nil, err
+		}
+		// UUID is given in the operation, not the object
+		delete(row, "_uuid")
+
+		operations = append(operations, ovsdb.Operation{
+			Op:       ovsdb.OperationInsert,
+			Table:    tableName,
+			Row:      row,
+			UUID:     realUUID,
+			UUIDName: namedUUID,
+		})
+	}
+	return operations, nil
+}
+
+// Mutate returns the operations needed to transform the one Model into another one
+func (a api) Mutate(model model.Model, mutationObjs ...model.Mutation) ([]ovsdb.Operation, error) {
+	var mutations []ovsdb.Mutation
+	var operations []ovsdb.Operation
+
+	if len(mutationObjs) < 1 {
+		return nil, fmt.Errorf("at least one Mutation must be provided")
+	}
+
+	tableName := a.cache.DatabaseModel().FindTable(reflect.ValueOf(model).Type())
+	if tableName == "" {
+		return nil, fmt.Errorf("table not found for object")
+	}
+	table := a.cache.Mapper().Schema.Table(tableName)
+	if table == nil {
+		return nil, fmt.Errorf("schema error: table not found in Database Model for type %s", reflect.TypeOf(model))
+	}
+
+	// the configured Conditional decides which rows the mutations apply to
+	conditions, err := a.cond.Generate()
+	if err != nil {
+		return nil, err
+	}
+
+	info, err := a.cache.DatabaseModel().NewModelInfo(model)
+	if err != nil {
+		return nil, err
+	}
+
+	// translate each model.Mutation into an ovsdb.Mutation on its column
+	for _, mobj := range mutationObjs {
+		col, err := info.ColumnByPtr(mobj.Field)
+		if err != nil {
+			return nil, err
+		}
+
+		mutation, err := a.cache.Mapper().NewMutation(info, col, mobj.Mutator, mobj.Value)
+		if err != nil {
+			return nil, err
+		}
+		mutations = append(mutations, *mutation)
+	}
+	// one mutate operation is emitted per generated condition
+	for _, condition := range conditions {
+		operations = append(operations,
+			ovsdb.Operation{
+				Op:        ovsdb.OperationMutate,
+				Table:     tableName,
+				Mutations: mutations,
+				Where:     condition,
+			},
+		)
+	}
+
+	return operations, nil
+}
+
+// Update is a generic function capable of updating any mutable field in any row in the database
+// Additional fields can be passed (variadic opts) to indicate fields to be updated
+// All immutable fields will be ignored
+func (a api) Update(model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) {
+ var operations []ovsdb.Operation
+ table, err := a.getTableFromModel(model)
+ if err != nil {
+ return nil, err
+ }
+ tableSchema := a.cache.Mapper().Schema.Table(table)
+ info, err := a.cache.DatabaseModel().NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(fields) > 0 {
+ for _, f := range fields {
+ colName, err := info.ColumnByPtr(f)
+ if err != nil {
+ return nil, err
+ }
+ if !tableSchema.Columns[colName].Mutable() {
+ return nil, fmt.Errorf("unable to update field %s of table %s as it is not mutable", colName, table)
+ }
+ }
+ }
+
+ conditions, err := a.cond.Generate()
+ if err != nil {
+ return nil, err
+ }
+
+ row, err := a.cache.Mapper().NewRow(info, fields...)
+ if err != nil {
+ return nil, err
+ }
+
+ for colName, column := range tableSchema.Columns {
+ if !column.Mutable() {
+ a.logger.V(2).Info("removing immutable field", "name", colName)
+ delete(row, colName)
+ }
+ }
+ delete(row, "_uuid")
+
+ if len(row) == 0 {
+ return nil, fmt.Errorf("attempted to update using an empty row. please check that all fields you wish to update are mutable")
+ }
+
+ for _, condition := range conditions {
+ operations = append(operations,
+ ovsdb.Operation{
+ Op: ovsdb.OperationUpdate,
+ Table: table,
+ Row: row,
+ Where: condition,
+ },
+ )
+ }
+ return operations, nil
+}
+
+// Delete returns the Operations needed to delete the selected models from the database
+func (a api) Delete() ([]ovsdb.Operation, error) {
+ var operations []ovsdb.Operation
+ conditions, err := a.cond.Generate()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, condition := range conditions {
+ operations = append(operations,
+ ovsdb.Operation{
+ Op: ovsdb.OperationDelete,
+ Table: a.cond.Table(),
+ Where: condition,
+ },
+ )
+ }
+
+ return operations, nil
+}
+
+func (a api) Wait(untilConFun ovsdb.WaitCondition, timeout *int, model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) {
+ var operations []ovsdb.Operation
+
+ /*
+ Ref: https://datatracker.ietf.org/doc/html/rfc7047.txt#section-5.2.6
+
+ lb := &nbdb.LoadBalancer{}
+ condition := model.Condition{
+ Field: &lb.Name,
+ Function: ovsdb.ConditionEqual,
+ Value: "lbName",
+ }
+ timeout0 := 0
+ client.Where(lb, condition).Wait(
+ ovsdb.WaitConditionNotEqual, // Until
+ &timeout0, // Timeout
+ &lb, // Row (and Table)
+ &lb.Name, // Cols (aka fields)
+ )
+ */
+
+ conditions, err := a.cond.Generate()
+ if err != nil {
+ return nil, err
+ }
+
+ table, err := a.getTableFromModel(model)
+ if err != nil {
+ return nil, err
+ }
+
+ info, err := a.cache.DatabaseModel().NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+
+ var columnNames []string
+ if len(fields) > 0 {
+ columnNames = make([]string, 0, len(fields))
+ for _, f := range fields {
+ colName, err := info.ColumnByPtr(f)
+ if err != nil {
+ return nil, err
+ }
+ columnNames = append(columnNames, colName)
+ }
+ }
+
+ row, err := a.cache.Mapper().NewRow(info, fields...)
+ if err != nil {
+ return nil, err
+ }
+ rows := []ovsdb.Row{row}
+
+ for _, condition := range conditions {
+ operation := ovsdb.Operation{
+ Op: ovsdb.OperationWait,
+ Table: table,
+ Where: condition,
+ Until: string(untilConFun),
+ Columns: columnNames,
+ Rows: rows,
+ }
+
+ if timeout != nil {
+ operation.Timeout = timeout
+ }
+
+ operations = append(operations, operation)
+ }
+
+ return operations, nil
+}
+
+// getTableFromModel returns the table name from a Model object after performing
+// type verifications on the model
+func (a api) getTableFromModel(m interface{}) (string, error) {
+ if _, ok := m.(model.Model); !ok {
+ return "", &ErrWrongType{reflect.TypeOf(m), "Type does not implement Model interface"}
+ }
+ table := a.cache.DatabaseModel().FindTable(reflect.TypeOf(m))
+ if table == "" {
+ return "", &ErrWrongType{reflect.TypeOf(m), "Model not found in Database Model"}
+ }
+ return table, nil
+}
+
+// getTableFromFunc returns the table name from the predicate after performing
+// type verifications
+func (a api) getTableFromFunc(predicate interface{}) (string, error) {
+ predType := reflect.TypeOf(predicate)
+ if predType == nil || predType.Kind() != reflect.Func {
+ return "", &ErrWrongType{predType, "Expected function"}
+ }
+ if predType.NumIn() != 1 || predType.NumOut() != 1 || predType.Out(0).Kind() != reflect.Bool {
+ return "", &ErrWrongType{predType, "Expected func(Model) bool"}
+ }
+
+ modelInterface := reflect.TypeOf((*model.Model)(nil)).Elem()
+ modelType := predType.In(0)
+ if !modelType.Implements(modelInterface) {
+ return "", &ErrWrongType{predType,
+ fmt.Sprintf("Type %s does not implement Model interface", modelType.String())}
+ }
+
+ table := a.cache.DatabaseModel().FindTable(modelType)
+ if table == "" {
+ return "", &ErrWrongType{predType,
+ fmt.Sprintf("Model %s not found in Database Model", modelType.String())}
+ }
+ return table, nil
+}
+
+// newAPI returns a new API to interact with the database
+func newAPI(cache *cache.TableCache, logger *logr.Logger) API {
+ return api{
+ cache: cache,
+ logger: logger,
+ }
+}
+
+// newConditionalAPI returns a new ConditionalAPI to interact with the database
+func newConditionalAPI(cache *cache.TableCache, cond Conditional, logger *logr.Logger) ConditionalAPI {
+ return api{
+ cache: cache,
+ cond: cond,
+ logger: logger,
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go b/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go
new file mode 100644
index 0000000000..36ea476e08
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go
@@ -0,0 +1,167 @@
+package client
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/stretchr/testify/assert"
+)
+
+var apiTestSchema = []byte(`{
+ "name": "OVN_Northbound",
+ "version": "5.31.0",
+ "cksum": "2352750632 28701",
+ "tables": {
+ "Logical_Switch": {
+ "columns": {
+ "name": {"type": "string"},
+ "ports": {"type": {"key": {"type": "uuid",
+ "refTable": "Logical_Switch_Port",
+ "refType": "strong"},
+ "min": 0,
+ "max": "unlimited"}},
+ "acls": {"type": {"key": {"type": "uuid",
+ "refTable": "ACL",
+ "refType": "strong"},
+ "min": 0,
+ "max": "unlimited"}},
+ "qos_rules": {"type": {"key": {"type": "uuid",
+ "refTable": "QoS",
+ "refType": "strong"},
+ "min": 0,
+ "max": "unlimited"}},
+ "load_balancer": {"type": {"key": {"type": "uuid",
+ "refTable": "Load_Balancer",
+ "refType": "weak"},
+ "min": 0,
+ "max": "unlimited"}},
+ "dns_records": {"type": {"key": {"type": "uuid",
+ "refTable": "DNS",
+ "refType": "weak"},
+ "min": 0,
+ "max": "unlimited"}},
+ "other_config": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}},
+ "external_ids": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}},
+ "forwarding_groups": {
+ "type": {"key": {"type": "uuid",
+ "refTable": "Forwarding_Group",
+ "refType": "strong"},
+ "min": 0, "max": "unlimited"}}},
+ "isRoot": true},
+ "Logical_Switch_Port": {
+ "columns": {
+ "name": {"type": "string"},
+ "type": {"type": "string"},
+ "options": {
+ "type": {"key": "string",
+ "value": "string",
+ "min": 0,
+ "max": "unlimited"}},
+ "parent_name": {"type": {"key": "string", "min": 0, "max": 1}},
+ "tag_request": {
+ "type": {"key": {"type": "integer",
+ "minInteger": 0,
+ "maxInteger": 4095},
+ "min": 0, "max": 1}},
+ "tag": {
+ "type": {"key": {"type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4095},
+ "min": 0, "max": 1}},
+ "addresses": {"type": {"key": "string",
+ "min": 0,
+ "max": "unlimited"}},
+ "dynamic_addresses": {"type": {"key": "string",
+ "min": 0,
+ "max": 1}},
+ "port_security": {"type": {"key": "string",
+ "min": 0,
+ "max": "unlimited"}},
+ "up": {"type": {"key": "boolean", "min": 0, "max": 1}},
+ "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}},
+ "dhcpv4_options": {"type": {"key": {"type": "uuid",
+ "refTable": "DHCP_Options",
+ "refType": "weak"},
+ "min": 0,
+ "max": 1}},
+ "dhcpv6_options": {"type": {"key": {"type": "uuid",
+ "refTable": "DHCP_Options",
+ "refType": "weak"},
+ "min": 0,
+ "max": 1}},
+ "ha_chassis_group": {
+ "type": {"key": {"type": "uuid",
+ "refTable": "HA_Chassis_Group",
+ "refType": "strong"},
+ "min": 0,
+ "max": 1}},
+ "external_ids": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}}},
+ "indexes": [["name"]],
+ "isRoot": false}
+ }
+ }`)
+
+type testLogicalSwitch struct {
+ UUID string `ovsdb:"_uuid"`
+ Ports []string `ovsdb:"ports"`
+ ExternalIds map[string]string `ovsdb:"external_ids"`
+ Name string `ovsdb:"name"`
+ QosRules []string `ovsdb:"qos_rules"`
+ LoadBalancer []string `ovsdb:"load_balancer"`
+ DNSRecords []string `ovsdb:"dns_records"`
+ OtherConfig map[string]string `ovsdb:"other_config"`
+ ForwardingGroups []string `ovsdb:"forwarding_groups"`
+ Acls []string `ovsdb:"acls"`
+}
+
+// Table returns the table name. It's part of the Model interface
+func (*testLogicalSwitch) Table() string {
+ return "Logical_Switch"
+}
+
+// testLogicalSwitchPort struct defines an object in the Logical_Switch_Port table
+type testLogicalSwitchPort struct {
+ UUID string `ovsdb:"_uuid"`
+ Up *bool `ovsdb:"up"`
+ Dhcpv4Options *string `ovsdb:"dhcpv4_options"`
+ Name string `ovsdb:"name"`
+ DynamicAddresses *string `ovsdb:"dynamic_addresses"`
+ HaChassisGroup *string `ovsdb:"ha_chassis_group"`
+ Options map[string]string `ovsdb:"options"`
+ Enabled *bool `ovsdb:"enabled"`
+ Addresses []string `ovsdb:"addresses"`
+ Dhcpv6Options *string `ovsdb:"dhcpv6_options"`
+ TagRequest *int `ovsdb:"tag_request"`
+ Tag *int `ovsdb:"tag"`
+ PortSecurity []string `ovsdb:"port_security"`
+ ExternalIds map[string]string `ovsdb:"external_ids"`
+ Type string `ovsdb:"type"`
+ ParentName *string `ovsdb:"parent_name"`
+}
+
+// Table returns the table name. It's part of the Model interface
+func (*testLogicalSwitchPort) Table() string {
+ return "Logical_Switch_Port"
+}
+
+func apiTestCache(t testing.TB, data map[string]map[string]model.Model) *cache.TableCache {
+ var schema ovsdb.DatabaseSchema
+ err := json.Unmarshal(apiTestSchema, &schema)
+ assert.Nil(t, err)
+ db, err := model.NewClientDBModel("OVN_Northbound", map[string]model.Model{"Logical_Switch": &testLogicalSwitch{}, "Logical_Switch_Port": &testLogicalSwitchPort{}})
+ assert.Nil(t, err)
+ dbModel, errs := model.NewDatabaseModel(schema, db)
+ assert.Empty(t, errs)
+ cache, err := cache.NewTableCache(dbModel, data, nil)
+ assert.Nil(t, err)
+ return cache
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/client.go b/vendor/github.com/ovn-org/libovsdb/client/client.go
new file mode 100644
index 0000000000..10ea757ec7
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/client.go
@@ -0,0 +1,1480 @@
+package client
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "net/url"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/cenkalti/rpc2"
+ "github.com/cenkalti/rpc2/jsonrpc"
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/libovsdb/ovsdb/serverdb"
+)
+
+// Constants defined for libovsdb
+const (
+ SSL = "ssl"
+ TCP = "tcp"
+ UNIX = "unix"
+)
+
+const serverDB = "_Server"
+
+// ErrNotConnected is an error returned when the client is not connected
+var ErrNotConnected = errors.New("not connected")
+
+// ErrAlreadyConnected is an error returned when the client is already connected
+var ErrAlreadyConnected = errors.New("already connected")
+
+// ErrUnsupportedRPC is an error returned when an unsupported RPC method is called
+var ErrUnsupportedRPC = errors.New("unsupported rpc")
+
+// Client represents an OVSDB Client Connection
+// It provides all the necessary functionality to Connect to a server,
+// perform transactions, and build your own replica of the database with
+// Monitor or MonitorAll. It also provides a Cache that is populated from OVSDB
+// update notifications.
+type Client interface {
+ Connect(context.Context) error
+ Disconnect()
+ Close()
+ Schema() ovsdb.DatabaseSchema
+ Cache() *cache.TableCache
+ UpdateEndpoints([]string)
+ SetOption(Option) error
+ Connected() bool
+ DisconnectNotify() chan struct{}
+ Echo(context.Context) error
+ Transact(context.Context, ...ovsdb.Operation) ([]ovsdb.OperationResult, error)
+ Monitor(context.Context, *Monitor) (MonitorCookie, error)
+ MonitorAll(context.Context) (MonitorCookie, error)
+ MonitorCancel(ctx context.Context, cookie MonitorCookie) error
+ NewMonitor(...MonitorOption) *Monitor
+ CurrentEndpoint() string
+ API
+}
+
+type bufferedUpdate struct {
+ updates *ovsdb.TableUpdates
+ updates2 *ovsdb.TableUpdates2
+ lastTxnID string
+}
+
+type epInfo struct {
+ address string
+ serverID string
+}
+
+// ovsdbClient is an OVSDB client
+type ovsdbClient struct {
+ options *options
+ metrics metrics
+ connected bool
+ rpcClient *rpc2.Client
+ rpcMutex sync.RWMutex
+ // endpoints contains all possible endpoints; the first element is
+ // the active endpoint if connected=true
+ endpoints []*epInfo
+
+ // The name of the "primary" database - that is to say, the DB
+ // that the user expects to interact with.
+ primaryDBName string
+ databases map[string]*database
+
+ errorCh chan error
+ stopCh chan struct{}
+ disconnect chan struct{}
+ shutdown bool
+ shutdownMutex sync.Mutex
+
+ handlerShutdown *sync.WaitGroup
+
+ trafficSeen chan struct{}
+
+ logger *logr.Logger
+}
+
+// database is everything needed to map between go types and an ovsdb Database
+type database struct {
+ // model encapsulates the database schema and model of the database we're connecting to
+ model model.DatabaseModel
+ // modelMutex protects model from being replaced (via reconnect) while in use
+ modelMutex sync.RWMutex
+
+ // cache is used to store the updates for monitored tables
+ cache *cache.TableCache
+ // cacheMutex protects cache from being replaced (via reconnect) while in use
+ cacheMutex sync.RWMutex
+
+ api API
+
+ // any ongoing monitors, so we can re-create them if we disconnect
+ monitors map[string]*Monitor
+ monitorsMutex sync.Mutex
+
+ // tracks any outstanding updates while waiting for a monitor response
+ deferUpdates bool
+ deferredUpdates []*bufferedUpdate
+}
+
+// NewOVSDBClient creates a new OVSDB Client with the provided
+// database model. The client can be configured using one or more Option(s),
+// like WithTLSConfig. If no WithEndpoint option is supplied, the default of
+// unix:/var/run/openvswitch/ovsdb.sock is used
+func NewOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (Client, error) {
+ return newOVSDBClient(clientDBModel, opts...)
+}
+
+// newOVSDBClient creates a new ovsdbClient
+func newOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (*ovsdbClient, error) {
+ ovs := &ovsdbClient{
+ primaryDBName: clientDBModel.Name(),
+ databases: map[string]*database{
+ clientDBModel.Name(): {
+ model: model.NewPartialDatabaseModel(clientDBModel),
+ monitors: make(map[string]*Monitor),
+ deferUpdates: true,
+ deferredUpdates: make([]*bufferedUpdate, 0),
+ },
+ },
+ errorCh: make(chan error),
+ handlerShutdown: &sync.WaitGroup{},
+ disconnect: make(chan struct{}),
+ }
+ var err error
+ ovs.options, err = newOptions(opts...)
+ if err != nil {
+ return nil, err
+ }
+ for _, address := range ovs.options.endpoints {
+ ovs.endpoints = append(ovs.endpoints, &epInfo{address: address})
+ }
+
+ if ovs.options.logger == nil {
+ // create a new logger to log to stdout
+ l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("libovsdb").WithValues(
+ "database", ovs.primaryDBName,
+ )
+ stdr.SetVerbosity(5)
+ ovs.logger = &l
+ } else {
+ // add the "database" value to the structured logger
+ // to make it easier to tell between different DBs (e.g. ovn nbdb vs. sbdb)
+ l := ovs.options.logger.WithValues(
+ "database", ovs.primaryDBName,
+ )
+ ovs.logger = &l
+ }
+ ovs.metrics.init(clientDBModel.Name(), ovs.options.metricNamespace, ovs.options.metricSubsystem)
+ ovs.registerMetrics()
+
+ // if we should only connect to the leader, then add the special "_Server" database as well
+ if ovs.options.leaderOnly {
+ sm, err := serverdb.FullDatabaseModel()
+ if err != nil {
+ return nil, fmt.Errorf("could not initialize model _Server: %w", err)
+ }
+ ovs.databases[serverDB] = &database{
+ model: model.NewPartialDatabaseModel(sm),
+ monitors: make(map[string]*Monitor),
+ }
+ }
+
+ return ovs, nil
+}
+
+// Connect opens a connection to an OVSDB Server using the
+// endpoint provided when the Client was created.
+// The connection can be configured using one or more Option(s), like WithTLSConfig
+// If no WithEndpoint option is supplied, the default of unix:/var/run/openvswitch/ovsdb.sock is used
+func (o *ovsdbClient) Connect(ctx context.Context) error {
+ if err := o.connect(ctx, false); err != nil {
+ if err == ErrAlreadyConnected {
+ return nil
+ }
+ return err
+ }
+ if o.options.leaderOnly {
+ if err := o.watchForLeaderChange(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// moveEndpointFirst makes the endpoint requested by active the first element
+// in the endpoints slice, indicating it is the active endpoint
+func (o *ovsdbClient) moveEndpointFirst(i int) {
+ firstEp := o.endpoints[i]
+ othereps := append(o.endpoints[:i], o.endpoints[i+1:]...)
+ o.endpoints = append([]*epInfo{firstEp}, othereps...)
+}
+
+// moveEndpointLast moves the requested endpoint to the end of the list
+func (o *ovsdbClient) moveEndpointLast(i int) {
+ lastEp := o.endpoints[i]
+ othereps := append(o.endpoints[:i], o.endpoints[i+1:]...)
+ o.endpoints = append(othereps, lastEp)
+}
+
+func (o *ovsdbClient) resetRPCClient() {
+ if o.rpcClient != nil {
+ o.rpcClient.Close()
+ o.rpcClient = nil
+ }
+}
+
+func (o *ovsdbClient) connect(ctx context.Context, reconnect bool) error {
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ if o.rpcClient != nil {
+ return ErrAlreadyConnected
+ }
+
+ connected := false
+ connectErrors := []error{}
+ for i, endpoint := range o.endpoints {
+ u, err := url.Parse(endpoint.address)
+ if err != nil {
+ return err
+ }
+ if sid, err := o.tryEndpoint(ctx, u); err != nil {
+ o.resetRPCClient()
+ connectErrors = append(connectErrors,
+ fmt.Errorf("failed to connect to %s: %w", endpoint.address, err))
+ continue
+ } else {
+ o.logger.V(3).Info("successfully connected", "endpoint", endpoint.address, "sid", sid)
+ endpoint.serverID = sid
+ o.moveEndpointFirst(i)
+ connected = true
+ break
+ }
+ }
+
+ if !connected {
+ if len(connectErrors) == 1 {
+ return connectErrors[0]
+ }
+ var combined []string
+ for _, e := range connectErrors {
+ combined = append(combined, e.Error())
+ }
+
+ return fmt.Errorf("unable to connect to any endpoints: %s", strings.Join(combined, ". "))
+ }
+
+ // if we're reconnecting, re-start all the monitors
+ if reconnect {
+ o.logger.V(3).Info("reconnected - restarting monitors")
+ for dbName, db := range o.databases {
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+
+ // Purge entire cache if no monitors exist to update dynamically
+ if len(db.monitors) == 0 {
+ db.cache.Purge(db.model)
+ continue
+ }
+
+ // Restart all monitors; each monitor will handle purging
+ // the cache if necessary
+ for id, request := range db.monitors {
+ err := o.monitor(ctx, MonitorCookie{DatabaseName: dbName, ID: id}, true, request)
+ if err != nil {
+ o.resetRPCClient()
+ return err
+ }
+ }
+ }
+ }
+
+ go o.handleDisconnectNotification()
+ if o.options.inactivityTimeout > 0 {
+ o.handlerShutdown.Add(1)
+ go o.handleInactivityProbes()
+ }
+ for _, db := range o.databases {
+ o.handlerShutdown.Add(1)
+ eventStopChan := make(chan struct{})
+ go o.handleClientErrors(eventStopChan)
+ o.handlerShutdown.Add(1)
+ go func(db *database) {
+ defer o.handlerShutdown.Done()
+ db.cache.Run(o.stopCh)
+ close(eventStopChan)
+ }(db)
+ }
+
+ o.connected = true
+ return nil
+}
+
+// tryEndpoint connects to a single database endpoint. Returns the
+// server ID (if clustered) on success, or an error.
+func (o *ovsdbClient) tryEndpoint(ctx context.Context, u *url.URL) (string, error) {
+ o.logger.V(3).Info("trying to connect", "endpoint", fmt.Sprintf("%v", u))
+ var dialer net.Dialer
+ var err error
+ var c net.Conn
+
+ switch u.Scheme {
+ case UNIX:
+ c, err = dialer.DialContext(ctx, u.Scheme, u.Path)
+ case TCP:
+ c, err = dialer.DialContext(ctx, u.Scheme, u.Opaque)
+ case SSL:
+ dialer := tls.Dialer{
+ Config: o.options.tlsConfig,
+ }
+ c, err = dialer.DialContext(ctx, "tcp", u.Opaque)
+ default:
+ err = fmt.Errorf("unknown network protocol %s", u.Scheme)
+ }
+ if err != nil {
+ return "", fmt.Errorf("failed to open connection: %w", err)
+ }
+
+ o.createRPC2Client(c)
+
+ serverDBNames, err := o.listDbs(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ // for every requested database, ensure the DB exists in the server and
+ // that the schema matches what we expect.
+ for dbName, db := range o.databases {
+ // check the server has what we want
+ found := false
+ for _, name := range serverDBNames {
+ if name == dbName {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return "", fmt.Errorf("target database %s not found", dbName)
+ }
+
+ // load and validate the schema
+ schema, err := o.getSchema(ctx, dbName)
+ if err != nil {
+ return "", err
+ }
+
+ db.modelMutex.Lock()
+ var errors []error
+ db.model, errors = model.NewDatabaseModel(schema, db.model.Client())
+ db.modelMutex.Unlock()
+ if len(errors) > 0 {
+ var combined []string
+ for _, err := range errors {
+ combined = append(combined, err.Error())
+ }
+ return "", fmt.Errorf("database %s validation error (%d): %s",
+ dbName, len(errors), strings.Join(combined, ". "))
+ }
+
+ db.cacheMutex.Lock()
+ if db.cache == nil {
+ db.cache, err = cache.NewTableCache(db.model, nil, o.logger)
+ if err != nil {
+ db.cacheMutex.Unlock()
+ return "", err
+ }
+ db.api = newAPI(db.cache, o.logger)
+ }
+ db.cacheMutex.Unlock()
+ }
+
+ // check that this is the leader
+ var sid string
+ if o.options.leaderOnly {
+ var leader bool
+ leader, sid, err = o.isEndpointLeader(ctx)
+ if err != nil {
+ return "", err
+ }
+ if !leader {
+ return "", fmt.Errorf("endpoint is not leader")
+ }
+ }
+ return sid, nil
+}
+
+// createRPC2Client creates an rpcClient using the provided connection
+// It is also responsible for setting up go routines for client-side event handling
+// Should only be called when the mutex is held
+func (o *ovsdbClient) createRPC2Client(conn net.Conn) {
+ o.stopCh = make(chan struct{})
+ if o.options.inactivityTimeout > 0 {
+ o.trafficSeen = make(chan struct{})
+ }
+ o.rpcClient = rpc2.NewClientWithCodec(jsonrpc.NewJSONCodec(conn))
+ o.rpcClient.SetBlocking(true)
+ o.rpcClient.Handle("echo", func(_ *rpc2.Client, args []interface{}, reply *[]interface{}) error {
+ return o.echo(args, reply)
+ })
+ o.rpcClient.Handle("update", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error {
+ return o.update(args, reply)
+ })
+ o.rpcClient.Handle("update2", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error {
+ return o.update2(args, reply)
+ })
+ o.rpcClient.Handle("update3", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error {
+ return o.update3(args, reply)
+ })
+ go o.rpcClient.Run()
+}
+
+// isEndpointLeader returns true if the currently connected endpoint is leader,
+// otherwise false or an error. If the currently connected endpoint is the leader
+// and the database is clustered, also returns the database's Server ID.
+// Assumes rpcMutex is held.
+func (o *ovsdbClient) isEndpointLeader(ctx context.Context) (bool, string, error) {
+ op := ovsdb.Operation{
+ Op: ovsdb.OperationSelect,
+ Table: "Database",
+ Columns: []string{"name", "model", "leader", "sid"},
+ }
+ results, err := o.transact(ctx, serverDB, true, op)
+ if err != nil {
+ return false, "", fmt.Errorf("could not check if server was leader: %w", err)
+ }
+ // for now, if no rows are returned, just accept this server
+ if len(results) != 1 {
+ return true, "", nil
+ }
+ result := results[0]
+ if len(result.Rows) == 0 {
+ return true, "", nil
+ }
+
+ for _, row := range result.Rows {
+ dbName, ok := row["name"].(string)
+ if !ok {
+ return false, "", fmt.Errorf("could not parse name")
+ }
+ if dbName != o.primaryDBName {
+ continue
+ }
+
+ model, ok := row["model"].(string)
+ if !ok {
+ return false, "", fmt.Errorf("could not parse model")
+ }
+
+ // the database reports whether or not it is part of a cluster via the
+ // "model" column. If it's not clustered, it is by definition leader.
+ if model != serverdb.DatabaseModelClustered {
+ return true, "", nil
+ }
+
+ // Clustered database must have a Server ID
+ sid, ok := row["sid"].(ovsdb.UUID)
+ if !ok {
+ return false, "", fmt.Errorf("could not parse server id")
+ }
+
+ leader, ok := row["leader"].(bool)
+ if !ok {
+ return false, "", fmt.Errorf("could not parse leader")
+ }
+
+ return leader, sid.GoUUID, nil
+ }
+
+ // Extremely unlikely: there is no _Server row for the desired DB (which we made sure existed)
+ // for now, just continue
+ o.logger.V(3).Info("Couldn't find a row in _Server for our database. Continuing without leader detection", "database", o.primaryDBName)
+ return true, "", nil
+}
+
+func (o *ovsdbClient) primaryDB() *database {
+ return o.databases[o.primaryDBName]
+}
+
+// Schema returns the DatabaseSchema that is being used by the client
+// it will be nil until a connection has been established
+func (o *ovsdbClient) Schema() ovsdb.DatabaseSchema {
+ db := o.primaryDB()
+ db.modelMutex.RLock()
+ defer db.modelMutex.RUnlock()
+ return db.model.Schema
+}
+
+// Cache returns the TableCache that is populated from
+// ovsdb update notifications. It will be nil until a connection
+// has been established, and empty unless you call Monitor
+func (o *ovsdbClient) Cache() *cache.TableCache {
+ db := o.primaryDB()
+ db.cacheMutex.RLock()
+ defer db.cacheMutex.RUnlock()
+ return db.cache
+}
+
+// UpdateEndpoints sets client endpoints
+// It is intended to be called at runtime
+func (o *ovsdbClient) UpdateEndpoints(endpoints []string) {
+ o.logger.V(3).Info("update endpoints", "endpoints", endpoints)
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ if len(endpoints) == 0 {
+ endpoints = []string{defaultUnixEndpoint}
+ }
+ o.options.endpoints = endpoints
+ originEps := o.endpoints[:]
+ var newEps []*epInfo
+ activeIdx := -1
+ for i, address := range o.options.endpoints {
+ var serverID string
+ for j, origin := range originEps {
+ if address == origin.address {
+ if j == 0 {
+ activeIdx = i
+ }
+ serverID = origin.serverID
+ break
+ }
+ }
+ newEps = append(newEps, &epInfo{address: address, serverID: serverID})
+ }
+ o.endpoints = newEps
+ if activeIdx > 0 {
+ o.moveEndpointFirst(activeIdx)
+ } else if activeIdx == -1 {
+ o._disconnect()
+ }
+}
+
+// SetOption sets a new value for an option.
+// It may only be called when the client is not connected
+func (o *ovsdbClient) SetOption(opt Option) error {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ if o.rpcClient != nil {
+ return fmt.Errorf("cannot set option when client is connected")
+ }
+ return opt(o.options)
+}
+
+// Connected returns whether or not the client is currently connected to the server
+func (o *ovsdbClient) Connected() bool {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ return o.connected
+}
+
+func (o *ovsdbClient) CurrentEndpoint() string {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ if o.rpcClient == nil {
+ return ""
+ }
+ return o.endpoints[0].address
+}
+
+// DisconnectNotify returns a channel which will notify the caller when the
+// server has disconnected
+func (o *ovsdbClient) DisconnectNotify() chan struct{} {
+ return o.disconnect
+}
+
+// RFC 7047 : Section 4.1.6 : Echo
+func (o *ovsdbClient) echo(args []interface{}, reply *[]interface{}) error {
+ *reply = args
+ return nil
+}
+
+// RFC 7047 : Update Notification Section 4.1.6
+// params is an array of length 2: [json-value, table-updates]
+// - json-value: the arbitrary json-value passed when creating the Monitor, i.e. the "cookie"
+// - table-updates: map of table name to table-update. Table-update is a map of uuid to (old, new) row pairs
+func (o *ovsdbClient) update(params []json.RawMessage, reply *[]interface{}) error {
+ cookie := MonitorCookie{}
+ *reply = []interface{}{}
+ if len(params) > 2 {
+ return fmt.Errorf("update requires exactly 2 args")
+ }
+ err := json.Unmarshal(params[0], &cookie)
+ if err != nil {
+ return err
+ }
+ var updates ovsdb.TableUpdates
+ err = json.Unmarshal(params[1], &updates)
+ if err != nil {
+ return err
+ }
+ db := o.databases[cookie.DatabaseName]
+ if db == nil {
+ return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName)
+ }
+ o.metrics.numUpdates.WithLabelValues(cookie.DatabaseName).Inc()
+ for tableName := range updates {
+ o.metrics.numTableUpdates.WithLabelValues(cookie.DatabaseName, tableName).Inc()
+ }
+
+ db.cacheMutex.Lock()
+ if db.deferUpdates {
+ db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{&updates, nil, ""})
+ db.cacheMutex.Unlock()
+ return nil
+ }
+ db.cacheMutex.Unlock()
+
+ // Update the local DB cache with the tableUpdates
+ db.cacheMutex.RLock()
+ err = db.cache.Update(cookie.ID, updates)
+ db.cacheMutex.RUnlock()
+
+ if err != nil {
+ o.errorCh <- err
+ }
+
+ return err
+}
+
+// update2 handling from ovsdb-server.7
+func (o *ovsdbClient) update2(params []json.RawMessage, reply *[]interface{}) error {
+ cookie := MonitorCookie{}
+ *reply = []interface{}{}
+ if len(params) > 2 {
+ return fmt.Errorf("update2 requires exactly 2 args")
+ }
+ err := json.Unmarshal(params[0], &cookie)
+ if err != nil {
+ return err
+ }
+ var updates ovsdb.TableUpdates2
+ err = json.Unmarshal(params[1], &updates)
+ if err != nil {
+ return err
+ }
+ db := o.databases[cookie.DatabaseName]
+ if db == nil {
+ return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName)
+ }
+
+ db.cacheMutex.Lock()
+ if db.deferUpdates {
+ db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, ""})
+ db.cacheMutex.Unlock()
+ return nil
+ }
+ db.cacheMutex.Unlock()
+
+ // Update the local DB cache with the tableUpdates
+ db.cacheMutex.RLock()
+ err = db.cache.Update2(cookie, updates)
+ db.cacheMutex.RUnlock()
+
+ if err != nil {
+ o.errorCh <- err
+ }
+
+ return err
+}
+
+// update3 handling from ovsdb-server.7
+func (o *ovsdbClient) update3(params []json.RawMessage, reply *[]interface{}) error {
+ cookie := MonitorCookie{}
+ *reply = []interface{}{}
+ if len(params) > 3 {
+ return fmt.Errorf("update requires exactly 3 args")
+ }
+ err := json.Unmarshal(params[0], &cookie)
+ if err != nil {
+ return err
+ }
+ var lastTransactionID string
+ err = json.Unmarshal(params[1], &lastTransactionID)
+ if err != nil {
+ return err
+ }
+ var updates ovsdb.TableUpdates2
+ err = json.Unmarshal(params[2], &updates)
+ if err != nil {
+ return err
+ }
+
+ db := o.databases[cookie.DatabaseName]
+ if db == nil {
+ return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName)
+ }
+
+ db.cacheMutex.Lock()
+ if db.deferUpdates {
+ db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, lastTransactionID})
+ db.cacheMutex.Unlock()
+ return nil
+ }
+ db.cacheMutex.Unlock()
+
+ // Update the local DB cache with the tableUpdates
+ db.cacheMutex.RLock()
+ err = db.cache.Update2(cookie, updates)
+ db.cacheMutex.RUnlock()
+
+ if err == nil {
+ db.monitorsMutex.Lock()
+ mon := db.monitors[cookie.ID]
+ mon.LastTransactionID = lastTransactionID
+ db.monitorsMutex.Unlock()
+ }
+
+ return err
+}
+
+// getSchema returns the schema in use for the provided database name
+// RFC 7047 : get_schema
+// Should only be called when mutex is held
+func (o *ovsdbClient) getSchema(ctx context.Context, dbName string) (ovsdb.DatabaseSchema, error) {
+ args := ovsdb.NewGetSchemaArgs(dbName)
+ var reply ovsdb.DatabaseSchema
+ err := o.rpcClient.CallWithContext(ctx, "get_schema", args, &reply)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return ovsdb.DatabaseSchema{}, ErrNotConnected
+ }
+ return ovsdb.DatabaseSchema{}, err
+ }
+ return reply, err
+}
+
+// listDbs returns the list of databases on the server
+// RFC 7047 : list_dbs
+// Should only be called when mutex is held
+func (o *ovsdbClient) listDbs(ctx context.Context) ([]string, error) {
+ var dbs []string
+ err := o.rpcClient.CallWithContext(ctx, "list_dbs", nil, &dbs)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return nil, ErrNotConnected
+ }
+ return nil, fmt.Errorf("listdbs failure - %v", err)
+ }
+ return dbs, err
+}
+
+// logFromContext returns a Logger from ctx or return the default logger
+func (o *ovsdbClient) logFromContext(ctx context.Context) *logr.Logger {
+ if logger, err := logr.FromContext(ctx); err == nil {
+ return &logger
+ }
+ return o.logger
+}
+
+// Transact performs the provided Operations on the database
+// RFC 7047 : transact
+func (o *ovsdbClient) Transact(ctx context.Context, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) {
+ logger := o.logFromContext(ctx)
+ o.rpcMutex.RLock()
+ if o.rpcClient == nil || !o.connected {
+ o.rpcMutex.RUnlock()
+ if o.options.reconnect {
+ logger.V(5).Info("blocking transaction until reconnected", "operations",
+ fmt.Sprintf("%+v", operation))
+ ticker := time.NewTicker(50 * time.Millisecond)
+ defer ticker.Stop()
+ ReconnectWaitLoop:
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, fmt.Errorf("%w: while awaiting reconnection", ctx.Err())
+ case <-ticker.C:
+ o.rpcMutex.RLock()
+ if o.rpcClient != nil && o.connected {
+ break ReconnectWaitLoop
+ }
+ o.rpcMutex.RUnlock()
+ }
+ }
+ } else {
+ return nil, ErrNotConnected
+ }
+ }
+ defer o.rpcMutex.RUnlock()
+ return o.transact(ctx, o.primaryDBName, false, operation...)
+}
+
+// transact validates the operations against the database schema and sends the
+// "transact" RPC. Caller must hold rpcMutex (read). skipChWrite suppresses the
+// trafficSeen notification (used for internal transactions).
+func (o *ovsdbClient) transact(ctx context.Context, dbName string, skipChWrite bool, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) {
+	logger := o.logFromContext(ctx)
+	var reply []ovsdb.OperationResult
+	db := o.databases[dbName]
+	db.modelMutex.RLock()
+	schema := o.databases[dbName].model.Schema
+	db.modelMutex.RUnlock()
+	// A zero-valued schema means we never fetched it; refuse to transact blind.
+	if reflect.DeepEqual(schema, ovsdb.DatabaseSchema{}) {
+		return nil, fmt.Errorf("cannot transact to database %s: schema unknown", dbName)
+	}
+	if ok := schema.ValidateOperations(operation...); !ok {
+		return nil, fmt.Errorf("validation failed for the operation")
+	}
+
+	args := ovsdb.NewTransactArgs(dbName, operation...)
+	if o.rpcClient == nil {
+		return nil, ErrNotConnected
+	}
+	// Only format the (potentially large) operations string if V(4) is on.
+	dbgLogger := logger.WithValues("database", dbName).V(4)
+	if dbgLogger.Enabled() {
+		dbgLogger.Info("transacting operations", "operations", fmt.Sprintf("%+v", operation))
+	}
+	err := o.rpcClient.CallWithContext(ctx, "transact", args, &reply)
+	if err != nil {
+		if err == rpc2.ErrShutdown {
+			return nil, ErrNotConnected
+		}
+		return nil, err
+	}
+
+	// Tell the inactivity-probe loop we saw server traffic.
+	if !skipChWrite && o.trafficSeen != nil {
+		o.trafficSeen <- struct{}{}
+	}
+	return reply, nil
+}
+
+// MonitorAll is a convenience method to monitor every table/column
+func (o *ovsdbClient) MonitorAll(ctx context.Context) (MonitorCookie, error) {
+ m := newMonitor()
+ for name := range o.primaryDB().model.Types() {
+ m.Tables = append(m.Tables, TableMonitor{Table: name})
+ }
+ return o.Monitor(ctx, m)
+}
+
+// MonitorCancel will request cancel a previously issued monitor request
+// RFC 7047 : monitor_cancel
+func (o *ovsdbClient) MonitorCancel(ctx context.Context, cookie MonitorCookie) error {
+ var reply ovsdb.OperationResult
+ args := ovsdb.NewMonitorCancelArgs(cookie)
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ if o.rpcClient == nil {
+ return ErrNotConnected
+ }
+ err := o.rpcClient.CallWithContext(ctx, "monitor_cancel", args, &reply)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return ErrNotConnected
+ }
+ return err
+ }
+ if reply.Error != "" {
+ return fmt.Errorf("error while executing transaction: %s", reply.Error)
+ }
+ o.primaryDB().monitorsMutex.Lock()
+ defer o.primaryDB().monitorsMutex.Unlock()
+ delete(o.primaryDB().monitors, cookie.ID)
+ o.metrics.numMonitors.Dec()
+ return nil
+}
+
+// Monitor will provide updates for a given table/column
+// and populate the cache with them. Subsequent updates will be processed
+// by the Update Notifications
+// RFC 7047 : monitor
+func (o *ovsdbClient) Monitor(ctx context.Context, monitor *Monitor) (MonitorCookie, error) {
+ cookie := newMonitorCookie(o.primaryDBName)
+ db := o.databases[o.primaryDBName]
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+ return cookie, o.monitor(ctx, cookie, false, monitor)
+}
+
+// If fields is provided, the request will be constrained to the provided columns
+// If no fields are provided, all columns will be used
+func newMonitorRequest(data *mapper.Info, fields []string, conditions []ovsdb.Condition) (*ovsdb.MonitorRequest, error) {
+ var columns []string
+ if len(fields) > 0 {
+ columns = append(columns, fields...)
+ } else {
+ for c := range data.Metadata.TableSchema.Columns {
+ columns = append(columns, c)
+ }
+ }
+ return &ovsdb.MonitorRequest{Columns: columns, Where: conditions, Select: ovsdb.NewDefaultMonitorSelect()}, nil
+}
+
+// monitor must only be called with a lock on monitorsMutex
+//
+//gocyclo:ignore
+func (o *ovsdbClient) monitor(ctx context.Context, cookie MonitorCookie, reconnecting bool, monitor *Monitor) error {
+ // if we're reconnecting, we already hold the rpcMutex
+ if !reconnecting {
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ }
+ if o.rpcClient == nil {
+ return ErrNotConnected
+ }
+ if len(monitor.Errors) != 0 {
+ var errString []string
+ for _, err := range monitor.Errors {
+ errString = append(errString, err.Error())
+ }
+ return fmt.Errorf(strings.Join(errString, ". "))
+ }
+ if len(monitor.Tables) == 0 {
+ return fmt.Errorf("at least one table should be monitored")
+ }
+ dbName := cookie.DatabaseName
+ db := o.databases[dbName]
+ db.modelMutex.RLock()
+ typeMap := db.model.Types()
+ requests := make(map[string]ovsdb.MonitorRequest)
+ for _, o := range monitor.Tables {
+ _, ok := typeMap[o.Table]
+ if !ok {
+ return fmt.Errorf("type for table %s does not exist in model", o.Table)
+ }
+ model, err := db.model.NewModel(o.Table)
+ if err != nil {
+ return err
+ }
+ info, err := db.model.NewModelInfo(model)
+ if err != nil {
+ return err
+ }
+ request, err := newMonitorRequest(info, o.Fields, o.Conditions)
+ if err != nil {
+ return err
+ }
+ requests[o.Table] = *request
+ }
+ db.modelMutex.RUnlock()
+
+ var args []interface{}
+ if monitor.Method == ovsdb.ConditionalMonitorSinceRPC {
+ // If we are reconnecting a CondSince monitor that is the only
+ // monitor, then we can use its LastTransactionID since it is
+ // valid (because we're reconnecting) and we can safely keep
+ // the cache intact (because it's the only monitor).
+ transactionID := emptyUUID
+ if reconnecting && len(db.monitors) == 1 {
+ transactionID = monitor.LastTransactionID
+ }
+ args = ovsdb.NewMonitorCondSinceArgs(dbName, cookie, requests, transactionID)
+ } else {
+ args = ovsdb.NewMonitorArgs(dbName, cookie, requests)
+ }
+ var err error
+ var tableUpdates interface{}
+
+ var lastTransactionFound bool
+ switch monitor.Method {
+ case ovsdb.MonitorRPC:
+ var reply ovsdb.TableUpdates
+ err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply)
+ tableUpdates = reply
+ case ovsdb.ConditionalMonitorRPC:
+ var reply ovsdb.TableUpdates2
+ err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply)
+ tableUpdates = reply
+ case ovsdb.ConditionalMonitorSinceRPC:
+ var reply ovsdb.MonitorCondSinceReply
+ err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply)
+ if err == nil && reply.Found {
+ monitor.LastTransactionID = reply.LastTransactionID
+ lastTransactionFound = true
+ }
+ tableUpdates = reply.Updates
+ default:
+ return fmt.Errorf("unsupported monitor method: %v", monitor.Method)
+ }
+
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return ErrNotConnected
+ }
+ if err.Error() == "unknown method" {
+ if monitor.Method == ovsdb.ConditionalMonitorSinceRPC {
+ o.logger.V(3).Error(err, "method monitor_cond_since not supported, falling back to monitor_cond")
+ monitor.Method = ovsdb.ConditionalMonitorRPC
+ return o.monitor(ctx, cookie, reconnecting, monitor)
+ }
+ if monitor.Method == ovsdb.ConditionalMonitorRPC {
+ o.logger.V(3).Error(err, "method monitor_cond not supported, falling back to monitor")
+ monitor.Method = ovsdb.MonitorRPC
+ return o.monitor(ctx, cookie, reconnecting, monitor)
+ }
+ }
+ return err
+ }
+
+ if !reconnecting {
+ db.monitors[cookie.ID] = monitor
+ o.metrics.numMonitors.Inc()
+ }
+
+ db.cacheMutex.Lock()
+ defer db.cacheMutex.Unlock()
+
+ // On reconnect, purge the cache _unless_ the only monitor is a
+ // MonitorCondSince one, whose LastTransactionID was known to the
+ // server. In this case the reply contains only updates to the existing
+ // cache data, while otherwise it includes complete DB data so we must
+ // purge to get rid of old rows.
+ if reconnecting && (len(db.monitors) > 1 || !lastTransactionFound) {
+ db.cache.Purge(db.model)
+ }
+
+ if monitor.Method == ovsdb.MonitorRPC {
+ u := tableUpdates.(ovsdb.TableUpdates)
+ err = db.cache.Populate(u)
+ } else {
+ u := tableUpdates.(ovsdb.TableUpdates2)
+ err = db.cache.Populate2(u)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ // populate any deferred updates
+ db.deferUpdates = false
+ for _, update := range db.deferredUpdates {
+ if update.updates != nil {
+ if err = db.cache.Populate(*update.updates); err != nil {
+ return err
+ }
+ }
+
+ if update.updates2 != nil {
+ if err = db.cache.Populate2(*update.updates2); err != nil {
+ return err
+ }
+ }
+ if len(update.lastTxnID) > 0 {
+ db.monitors[cookie.ID].LastTransactionID = update.lastTxnID
+ }
+ }
+ // clear deferred updates for next time
+ db.deferredUpdates = make([]*bufferedUpdate, 0)
+
+ return err
+}
+
+// Echo tests the liveness of the OVSDB connetion
+func (o *ovsdbClient) Echo(ctx context.Context) error {
+ args := ovsdb.NewEchoArgs()
+ var reply []interface{}
+ o.rpcMutex.RLock()
+ defer o.rpcMutex.RUnlock()
+ if o.rpcClient == nil {
+ return ErrNotConnected
+ }
+ err := o.rpcClient.CallWithContext(ctx, "echo", args, &reply)
+ if err != nil {
+ if err == rpc2.ErrShutdown {
+ return ErrNotConnected
+ }
+ }
+ if !reflect.DeepEqual(args, reply) {
+ return fmt.Errorf("incorrect server response: %v, %v", args, reply)
+ }
+ return nil
+}
+
+// watchForLeaderChange will trigger a reconnect if the connected endpoint
+// ever loses leadership
+func (o *ovsdbClient) watchForLeaderChange() error {
+ updates := make(chan model.Model)
+ o.databases[serverDB].cache.AddEventHandler(&cache.EventHandlerFuncs{
+ UpdateFunc: func(table string, _, new model.Model) {
+ if table == "Database" {
+ updates <- new
+ }
+ },
+ })
+
+ m := newMonitor()
+ // NOTE: _Server does not support monitor_cond_since
+ m.Method = ovsdb.ConditionalMonitorRPC
+ m.Tables = []TableMonitor{{Table: "Database"}}
+ db := o.databases[serverDB]
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+ err := o.monitor(context.Background(), newMonitorCookie(serverDB), false, m)
+ if err != nil {
+ return err
+ }
+
+ go func() {
+ for m := range updates {
+ dbInfo, ok := m.(*serverdb.Database)
+ if !ok {
+ continue
+ }
+
+ // Ignore the dbInfo for _Server
+ if dbInfo.Name != o.primaryDBName {
+ continue
+ }
+
+ // Only handle leadership changes for clustered databases
+ if dbInfo.Model != serverdb.DatabaseModelClustered {
+ continue
+ }
+
+ // Clustered database servers must have a valid Server ID
+ var sid string
+ if dbInfo.Sid != nil {
+ sid = *dbInfo.Sid
+ }
+ if sid == "" {
+ o.logger.V(3).Info("clustered database update contained invalid server ID")
+ continue
+ }
+
+ o.rpcMutex.Lock()
+ if !dbInfo.Leader && o.connected {
+ activeEndpoint := o.endpoints[0]
+ if sid == activeEndpoint.serverID {
+ o.logger.V(3).Info("endpoint lost leader, reconnecting",
+ "endpoint", activeEndpoint.address, "sid", sid)
+ // don't immediately reconnect to the active endpoint since it's no longer leader
+ o.moveEndpointLast(0)
+ o._disconnect()
+ } else {
+ o.logger.V(3).Info("endpoint lost leader but had unexpected server ID",
+ "endpoint", activeEndpoint.address,
+ "expected", activeEndpoint.serverID, "found", sid)
+ }
+ }
+ o.rpcMutex.Unlock()
+ }
+ }()
+ return nil
+}
+
+// handleClientErrors consumes cache-update errors from errorCh until stopCh
+// closes. Schema-mismatch errors are only logged; cache-consistency errors
+// trigger a disconnect (and, with WithReconnect, a cache rebuild).
+func (o *ovsdbClient) handleClientErrors(stopCh <-chan struct{}) {
+	defer o.handlerShutdown.Done()
+	var errColumnNotFound *mapper.ErrColumnNotFound
+	var errCacheInconsistent *cache.ErrCacheInconsistent
+	var errIndexExists *cache.ErrIndexExists
+	for {
+		select {
+		case <-stopCh:
+			return
+		case err := <-o.errorCh:
+			if errors.As(err, &errColumnNotFound) {
+				o.logger.V(3).Error(err, "error updating cache, DB schema may be newer than client!")
+			} else if errors.As(err, &errCacheInconsistent) || errors.As(err, &errIndexExists) {
+				// trigger a reconnect, which will purge the cache
+				// hopefully a rebuild will fix any inconsistency
+				o.logger.V(3).Error(err, "triggering reconnect to rebuild cache")
+				// for rebuilding cache with mon_cond_since (not yet fully supported in libovsdb) we
+				// need to reset the last txn ID
+				for _, db := range o.databases {
+					db.monitorsMutex.Lock()
+					for _, mon := range db.monitors {
+						mon.LastTransactionID = emptyUUID
+					}
+					db.monitorsMutex.Unlock()
+				}
+				o.Disconnect()
+			} else {
+				o.logger.V(3).Error(err, "error updating cache")
+			}
+		}
+	}
+}
+
+// sendEcho issues an asynchronous "echo" RPC and returns the in-flight Call,
+// or nil if the client is disconnected. Used by the inactivity-probe loop,
+// which cannot use the blocking Echo().
+func (o *ovsdbClient) sendEcho(args []interface{}, reply *[]interface{}) *rpc2.Call {
+	o.rpcMutex.RLock()
+	defer o.rpcMutex.RUnlock()
+	if o.rpcClient == nil {
+		return nil
+	}
+	return o.rpcClient.Go("echo", args, reply, make(chan *rpc2.Call, 1))
+}
+
+// handleInactivityProbes disconnects the client when the server goes silent:
+// if no traffic is seen within inactivityTimeout an echo is sent, and a
+// missing/late/incorrect echo reply forces a Disconnect().
+func (o *ovsdbClient) handleInactivityProbes() {
+	defer o.handlerShutdown.Done()
+	echoReplied := make(chan string)
+	var lastEcho string
+	stopCh := o.stopCh
+	trafficSeen := o.trafficSeen
+	for {
+		select {
+		case <-stopCh:
+			return
+		case <-trafficSeen:
+			// We got some traffic from the server, restart our timer
+		case ts := <-echoReplied:
+			// Got a response from the server, check it against lastEcho; if same clear lastEcho; if not same Disconnect()
+			if ts != lastEcho {
+				o.Disconnect()
+				return
+			}
+			lastEcho = ""
+		case <-time.After(o.options.inactivityTimeout):
+			// If there's a lastEcho already, then we didn't get a server reply, disconnect
+			if lastEcho != "" {
+				o.Disconnect()
+				return
+			}
+			// Otherwise send an echo
+			thisEcho := fmt.Sprintf("%d", time.Now().UnixMicro())
+			args := []interface{}{"libovsdb echo", thisEcho}
+			var reply []interface{}
+			// Can't use o.Echo() because it blocks; we need the Call object direct from o.rpcClient.Go()
+			call := o.sendEcho(args, &reply)
+			if call == nil {
+				o.Disconnect()
+				return
+			}
+			lastEcho = thisEcho
+			// Await the reply asynchronously so the main loop keeps
+			// observing traffic and stop signals.
+			go func() {
+				// Wait for the echo reply
+				select {
+				case <-stopCh:
+					return
+				case <-call.Done:
+					if call.Error != nil {
+						// RPC timeout; disconnect
+						o.logger.V(3).Error(call.Error, "server echo reply error")
+						o.Disconnect()
+					} else if !reflect.DeepEqual(args, reply) {
+						o.logger.V(3).Info("warning: incorrect server echo reply",
+							"expected", args, "reply", reply)
+						o.Disconnect()
+					} else {
+						// Otherwise stuff thisEcho into the echoReplied channel
+						echoReplied <- thisEcho
+					}
+				}
+			}()
+		}
+	}
+}
+
+// handleDisconnectNotification runs once per connection: it blocks until the
+// rpc2 client disconnects, shuts down the per-connection handlers, and then
+// either reconnects (WithReconnect and not an explicit Close) or tears down
+// all client state and signals the disconnect channel.
+func (o *ovsdbClient) handleDisconnectNotification() {
+	<-o.rpcClient.DisconnectNotify()
+	// close the stopCh, which will stop the cache event processor
+	close(o.stopCh)
+	if o.trafficSeen != nil {
+		close(o.trafficSeen)
+	}
+	o.metrics.numDisconnects.Inc()
+	// wait for client related handlers to shutdown
+	o.handlerShutdown.Wait()
+	o.rpcMutex.Lock()
+	if o.options.reconnect && !o.shutdown {
+		o.rpcClient = nil
+		o.rpcMutex.Unlock()
+		suppressionCounter := 1
+		connect := func() error {
+			// need to ensure deferredUpdates is cleared on every reconnect attempt
+			for _, db := range o.databases {
+				db.cacheMutex.Lock()
+				db.deferredUpdates = make([]*bufferedUpdate, 0)
+				db.deferUpdates = true
+				db.cacheMutex.Unlock()
+			}
+			ctx, cancel := context.WithTimeout(context.Background(), o.options.timeout)
+			defer cancel()
+			err := o.connect(ctx, true)
+			if err != nil {
+				// Log only the first few failures to avoid log spam during
+				// a long outage.
+				if suppressionCounter < 5 {
+					o.logger.V(2).Error(err, "failed to reconnect")
+				} else if suppressionCounter == 5 {
+					o.logger.V(2).Error(err, "reconnect has failed 5 times, suppressing logging "+
+						"for future attempts")
+				}
+			}
+			suppressionCounter++
+			return err
+		}
+		o.logger.V(3).Info("connection lost, reconnecting", "endpoint", o.endpoints[0].address)
+		err := backoff.Retry(connect, o.options.backoff)
+		if err != nil {
+			// TODO: We should look at passing this back to the
+			// caller to handle
+			panic(err)
+		}
+		// this goroutine finishes, and is replaced with a new one (from Connect)
+		return
+	}
+
+	// clear connection state
+	o.rpcClient = nil
+	o.rpcMutex.Unlock()
+
+	// NOTE(review): the defers below accumulate across loop iterations, so
+	// all databases' locks are held until this function returns — presumably
+	// intentional so state is reset atomically; confirm against upstream.
+	for _, db := range o.databases {
+		db.cacheMutex.Lock()
+		defer db.cacheMutex.Unlock()
+		db.cache = nil
+		// need to defer updates if/when we reconnect and clear any stale updates
+		db.deferUpdates = true
+		db.deferredUpdates = make([]*bufferedUpdate, 0)
+
+		db.modelMutex.Lock()
+		defer db.modelMutex.Unlock()
+		db.model = model.NewPartialDatabaseModel(db.model.Client())
+
+		db.monitorsMutex.Lock()
+		defer db.monitorsMutex.Unlock()
+		db.monitors = make(map[string]*Monitor)
+	}
+	o.metrics.numMonitors.Set(0)
+
+	o.shutdownMutex.Lock()
+	defer o.shutdownMutex.Unlock()
+	o.shutdown = false
+
+	// Non-blocking notification so an inattentive client can't wedge us.
+	select {
+	case o.disconnect <- struct{}{}:
+		// sent disconnect notification to client
+	default:
+		// client is not listening to the channel
+	}
+}
+
+// _disconnect will close the connection to the OVSDB server
+// If the client was created with WithReconnect then the client
+// will reconnect afterwards. Assumes rpcMutex is held.
+func (o *ovsdbClient) _disconnect() {
+ o.connected = false
+ if o.rpcClient == nil {
+ return
+ }
+ o.rpcClient.Close()
+}
+
+// Disconnect will close the connection to the OVSDB server
+// If the client was created with WithReconnect then the client
+// will reconnect afterwards
+func (o *ovsdbClient) Disconnect() {
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ o._disconnect()
+}
+
+// Close will close the connection to the OVSDB server
+// It will remove all stored state ready for the next connection
+// Even If the client was created with WithReconnect it will not reconnect afterwards
+func (o *ovsdbClient) Close() {
+ o.rpcMutex.Lock()
+ defer o.rpcMutex.Unlock()
+ o.connected = false
+ if o.rpcClient == nil {
+ return
+ }
+ o.shutdownMutex.Lock()
+ defer o.shutdownMutex.Unlock()
+ o.shutdown = true
+ o.rpcClient.Close()
+}
+
+// Ensures the cache is consistent by evaluating that the client is connected
+// and the monitor is fully setup, with the cache populated. Caller must hold
+// the database's cache mutex for reading.
+func isCacheConsistent(db *database) bool {
+ // This works because when a client is disconnected the deferUpdates variable
+ // will be set to true. deferUpdates is also protected by the db.cacheMutex.
+ // When the client reconnects and then re-establishes the monitor; the final step
+ // is to process all deferred updates, set deferUpdates back to false, and unlock cacheMutex
+ return !db.deferUpdates
+}
+
+// best effort to ensure cache is in a good state for reading. RLocks the
+// database's cache before returning; caller must always unlock.
+func waitForCacheConsistent(ctx context.Context, db *database, logger *logr.Logger, dbName string) {
+ if !hasMonitors(db) {
+ db.cacheMutex.RLock()
+ return
+ }
+ // Check immediately as a fastpath
+ db.cacheMutex.RLock()
+ if isCacheConsistent(db) {
+ return
+ }
+ db.cacheMutex.RUnlock()
+
+ ticker := time.NewTicker(50 * time.Millisecond)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ logger.V(3).Info("warning: unable to ensure cache consistency for reading",
+ "database", dbName)
+ db.cacheMutex.RLock()
+ return
+ case <-ticker.C:
+ db.cacheMutex.RLock()
+ if isCacheConsistent(db) {
+ return
+ }
+ db.cacheMutex.RUnlock()
+ }
+ }
+}
+
+func hasMonitors(db *database) bool {
+ db.monitorsMutex.Lock()
+ defer db.monitorsMutex.Unlock()
+ return len(db.monitors) > 0
+}
+
+// Client API interface wrapper functions
+// We add this wrapper to allow users to access the API directly on the
+// client object
+
+// Get implements the API interface's Get function
+func (o *ovsdbClient) Get(ctx context.Context, model model.Model) error {
+ primaryDB := o.primaryDB()
+ waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName)
+ defer primaryDB.cacheMutex.RUnlock()
+ return primaryDB.api.Get(ctx, model)
+}
+
+// Create implements the API interface's Create function
+func (o *ovsdbClient) Create(models ...model.Model) ([]ovsdb.Operation, error) {
+ return o.primaryDB().api.Create(models...)
+}
+
+// List implements the API interface's List function
+func (o *ovsdbClient) List(ctx context.Context, result interface{}) error {
+ primaryDB := o.primaryDB()
+ waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName)
+ defer primaryDB.cacheMutex.RUnlock()
+ return primaryDB.api.List(ctx, result)
+}
+
+// Where implements the API interface's Where function
+func (o *ovsdbClient) Where(models ...model.Model) ConditionalAPI {
+ return o.primaryDB().api.Where(models...)
+}
+
+// WhereAny implements the API interface's WhereAny function
+func (o *ovsdbClient) WhereAny(m model.Model, conditions ...model.Condition) ConditionalAPI {
+ return o.primaryDB().api.WhereAny(m, conditions...)
+}
+
+// WhereAll implements the API interface's WhereAll function
+func (o *ovsdbClient) WhereAll(m model.Model, conditions ...model.Condition) ConditionalAPI {
+ return o.primaryDB().api.WhereAll(m, conditions...)
+}
+
+// WhereCache implements the API interface's WhereCache function
+func (o *ovsdbClient) WhereCache(predicate interface{}) ConditionalAPI {
+ return o.primaryDB().api.WhereCache(predicate)
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/condition.go b/vendor/github.com/ovn-org/libovsdb/client/condition.go
new file mode 100644
index 0000000000..1dfabda02e
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/condition.go
@@ -0,0 +1,248 @@
+package client
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// Conditional is the interface used by the ConditionalAPI to match on cache objects
+// and generate ovsdb conditions
+type Conditional interface {
+	// Generate returns a list of lists of conditions to be used in Operations
+	// Each element in the (outer) list corresponds to an operation
+	Generate() ([][]ovsdb.Condition, error)
+	// Returns the models that match the conditions
+	Matches() (map[string]model.Model, error)
+	// returns the table that this condition is associated with
+	Table() string
+}
+
+// generateConditionsFromModels produces one equality-condition list per model
+// (map keys are ignored; only the values are used).
+func generateConditionsFromModels(dbModel model.DatabaseModel, models map[string]model.Model) ([][]ovsdb.Condition, error) {
+	anyConditions := make([][]ovsdb.Condition, 0, len(models))
+	for _, model := range models {
+		info, err := dbModel.NewModelInfo(model)
+		if err != nil {
+			return nil, err
+		}
+		allConditions, err := dbModel.Mapper.NewEqualityCondition(info)
+		if err != nil {
+			return nil, err
+		}
+		anyConditions = append(anyConditions, allConditions)
+	}
+	return anyConditions, nil
+}
+
+// generateOvsdbConditionsFromModelConditions converts model.Conditions into
+// ovsdb conditions. With singleOp all conditions are ANDed into one operation;
+// otherwise each condition becomes its own operation (OR semantics).
+func generateOvsdbConditionsFromModelConditions(dbModel model.DatabaseModel, info *mapper.Info, conditions []model.Condition, singleOp bool) ([][]ovsdb.Condition, error) {
+	anyConditions := [][]ovsdb.Condition{}
+	if singleOp {
+		anyConditions = append(anyConditions, []ovsdb.Condition{})
+	}
+	for _, condition := range conditions {
+		ovsdbCond, err := dbModel.Mapper.NewCondition(info, condition.Field, condition.Function, condition.Value)
+		if err != nil {
+			return nil, err
+		}
+		allConditions := []ovsdb.Condition{*ovsdbCond}
+		if singleOp {
+			anyConditions[0] = append(anyConditions[0], allConditions...)
+		} else {
+			anyConditions = append(anyConditions, allConditions)
+		}
+	}
+	return anyConditions, nil
+}
+
+// equalityConditional uses the indexes available in a provided model to find a
+// matching model in the database.
+type equalityConditional struct {
+	tableName string
+	models    []model.Model
+	cache     *cache.TableCache
+}
+
+// Table returns the table this conditional targets.
+func (c *equalityConditional) Table() string {
+	return c.tableName
+}
+
+// Returns the models that match the indexes available through the provided
+// model.
+func (c *equalityConditional) Matches() (map[string]model.Model, error) {
+	tableCache := c.cache.Table(c.tableName)
+	if tableCache == nil {
+		return nil, ErrNotFound
+	}
+	return tableCache.RowsByModels(c.models)
+}
+
+// Generate conditions based on the equality of the first available index. If
+// the index can be matched against a model in the cache, the condition will be
+// based on the UUID of the found model. Otherwise, the conditions will be based
+// on the index.
+func (c *equalityConditional) Generate() ([][]ovsdb.Condition, error) {
+	models, err := c.Matches()
+	// ErrNotFound is not fatal here: we fall back to the given models.
+	if err != nil && err != ErrNotFound {
+		return nil, err
+	}
+	if len(models) == 0 {
+		// no cache hits, generate condition from models we were given
+		modelMap := make(map[string]model.Model, len(c.models))
+		for i, m := range c.models {
+			// generateConditionsFromModels() ignores the map keys
+			// so just use the range index
+			modelMap[fmt.Sprintf("%d", i)] = m
+		}
+		return generateConditionsFromModels(c.cache.DatabaseModel(), modelMap)
+	}
+	return generateConditionsFromModels(c.cache.DatabaseModel(), models)
+}
+
+// NewEqualityCondition creates a new equalityConditional
+func newEqualityConditional(table string, cache *cache.TableCache, models []model.Model) (Conditional, error) {
+	return &equalityConditional{
+		tableName: table,
+		models:    models,
+		cache:     cache,
+	}, nil
+}
+
+// explicitConditional generates conditions based on the provided Condition list
+type explicitConditional struct {
+	tableName     string
+	anyConditions [][]ovsdb.Condition
+	cache         *cache.TableCache
+}
+
+// Table returns the table this conditional targets.
+func (c *explicitConditional) Table() string {
+	return c.tableName
+}
+
+// Returns the models that match the conditions
+func (c *explicitConditional) Matches() (map[string]model.Model, error) {
+	tableCache := c.cache.Table(c.tableName)
+	if tableCache == nil {
+		return nil, ErrNotFound
+	}
+	found := map[string]model.Model{}
+	// Union of the rows matched by each condition list (OR semantics).
+	for _, allConditions := range c.anyConditions {
+		models, err := tableCache.RowsByCondition(allConditions)
+		if err != nil {
+			return nil, err
+		}
+		for uuid, model := range models {
+			found[uuid] = model
+		}
+	}
+	return found, nil
+}
+
+// Generate returns conditions based on the provided Condition list
+func (c *explicitConditional) Generate() ([][]ovsdb.Condition, error) {
+	models, err := c.Matches()
+	// ErrNotFound is not fatal: fall back to the explicit conditions.
+	if err != nil && err != ErrNotFound {
+		return nil, err
+	}
+	if len(models) == 0 {
+		// no cache hits, return conditions we were given
+		return c.anyConditions, nil
+	}
+	return generateConditionsFromModels(c.cache.DatabaseModel(), models)
+}
+
+// newExplicitConditional creates a new explicitConditional
+// matchAll=true ANDs all conditions into a single operation.
+func newExplicitConditional(table string, cache *cache.TableCache, matchAll bool, model model.Model, cond ...model.Condition) (Conditional, error) {
+	dbModel := cache.DatabaseModel()
+	info, err := dbModel.NewModelInfo(model)
+	if err != nil {
+		return nil, err
+	}
+	anyConditions, err := generateOvsdbConditionsFromModelConditions(dbModel, info, cond, matchAll)
+	if err != nil {
+		return nil, err
+	}
+	return &explicitConditional{
+		tableName:     table,
+		anyConditions: anyConditions,
+		cache:         cache,
+	}, nil
+}
+
+// predicateConditional is a Conditional that calls a provided function pointer
+// to match on models.
+type predicateConditional struct {
+	tableName string
+	predicate interface{}
+	cache     *cache.TableCache
+}
+
+// matches returns the result of the execution of the predicate
+// Type verifications are not performed
+// Returns the models that match the conditions
+func (c *predicateConditional) Matches() (map[string]model.Model, error) {
+	tableCache := c.cache.Table(c.tableName)
+	if tableCache == nil {
+		return nil, ErrNotFound
+	}
+	found := map[string]model.Model{}
+	// run the predicate on a shallow copy of the models for speed and only
+	// clone the matches
+	for u, m := range tableCache.RowsShallow() {
+		// Invoke the untyped predicate via reflection; it must take one
+		// model argument and return a bool.
+		ret := reflect.ValueOf(c.predicate).Call([]reflect.Value{reflect.ValueOf(m)})
+		if ret[0].Bool() {
+			found[u] = model.Clone(m)
+		}
+	}
+	return found, nil
+}
+
+// Table returns the table this conditional targets.
+func (c *predicateConditional) Table() string {
+	return c.tableName
+}
+
+// generate returns a list of conditions that match, by _uuid equality, all the objects that
+// match the predicate
+func (c *predicateConditional) Generate() ([][]ovsdb.Condition, error) {
+	models, err := c.Matches()
+	if err != nil {
+		return nil, err
+	}
+	return generateConditionsFromModels(c.cache.DatabaseModel(), models)
+}
+
+// newPredicateConditional creates a new predicateConditional
+func newPredicateConditional(table string, cache *cache.TableCache, predicate interface{}) (Conditional, error) {
+	return &predicateConditional{
+		tableName: table,
+		predicate: predicate,
+		cache:     cache,
+	}, nil
+}
+
+// errorConditional is a conditional that encapsulates an error
+// It is used to delay the reporting of errors from conditional creation to API method call
+type errorConditional struct {
+	err error
+}
+
+// Matches always returns the stored error.
+func (e *errorConditional) Matches() (map[string]model.Model, error) {
+	return nil, e.err
+}
+
+// Table returns an empty table name: no real table is associated.
+func (e *errorConditional) Table() string {
+	return ""
+}
+
+// Generate always returns the stored error.
+func (e *errorConditional) Generate() ([][]ovsdb.Condition, error) {
+	return nil, e.err
+}
+
+// newErrorConditional wraps err so it surfaces on first API use.
+func newErrorConditional(err error) Conditional {
+	return &errorConditional{
+		err: fmt.Errorf("conditionerror: %s", err.Error()),
+	}
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/config.go b/vendor/github.com/ovn-org/libovsdb/client/config.go
new file mode 100644
index 0000000000..a9c00f56a9
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/config.go
@@ -0,0 +1,27 @@
+/**
+ * Copyright (c) 2019 eBay Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package client
+
+import (
+ "crypto/tls"
+)
+
+// Config is a structure used in provisioning a connection to ovsdb.
+type Config struct {
+	// Addr is the OVSDB endpoint address to connect to.
+	Addr      string
+	// TLSConfig, when non-nil, is used for TLS connections.
+	TLSConfig *tls.Config
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/doc.go b/vendor/github.com/ovn-org/libovsdb/client/doc.go
new file mode 100644
index 0000000000..90e409ee70
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/doc.go
@@ -0,0 +1,164 @@
+/*
+Package client connects to, monitors and interacts with OVSDB servers (RFC7047).
+
+This package uses structs, that contain the 'ovsdb' field tag to determine which field goes to
+which column in the database. We refer to pointers to these structs as Models. Example:
+
+ type MyLogicalSwitch struct {
+ UUID string `ovsdb:"_uuid"` // _uuid tag is mandatory
+ Name string `ovsdb:"name"`
+ Ports []string `ovsdb:"ports"`
+ Config map[string]string `ovsdb:"other_config"`
+ }
+
+Based on these Models a Database Model (see ClientDBModel type) is built to represent
+the entire OVSDB:
+
+ clientDBModel, _ := client.NewClientDBModel("OVN_Northbound",
+ map[string]client.Model{
+ "Logical_Switch": &MyLogicalSwitch{},
+ })
+
+
+The ClientDBModel represents the entire Database (or the part of it we're interested in).
+Using it, the libovsdb.client package is able to properly encode and decode OVSDB messages
+and store them in Model instances.
+A client instance is created by simply specifying the connection information and the database model:
+
+ ovs, _ := client.Connect(context.Background(), clientDBModel)
+
+Main API
+
+After creating an OvsdbClient using the Connect() function, we can use a number of CRUD-like functions
+to interact with the database:
+List(), Get(), Create(), Update(), Mutate(), Delete().
+
+The specific database table that the operation targets is automatically determined based on the type
+of the parameter.
+
+In terms of return values, some of these functions like Create(), Update(), Mutate() and Delete(),
+interact with the database so they return list of ovsdb.Operation objects that can be grouped together
+and passed to client.Transact().
+
+Others, such as List() and Get(), interact with the client's internal cache and are able to
+return Model instances (or a list thereof) directly.
+
+Conditions
+
+Some API functions (Create() and Get()), can be run directly. Others, require us to use
+a ConditionalAPI. The ConditionalAPI injects RFC7047 Conditions into ovsdb Operations as well as
+uses the Conditions to search the internal cache.
+
+The ConditionalAPI is created using the Where(), WhereCache() and WhereAll() functions.
+
+Where() accepts a Model (pointer to a struct with ovs tags) and a number of Condition instances.
+Conditions must refer to fields of the provided Model (via pointer to fields). Example:
+
+ ls = &MyLogicalSwitch {}
+ ovs.Where(ls, client.Condition {
+ Field: &ls.Ports,
+ Function: ovsdb.ConditionIncludes,
+ Value: []string{"portUUID"},
+ })
+
+If no client.Condition is provided, the client will use any of the fields that correspond to indexes to
+generate an appropriate condition. Therefore the following two statements are equivalent:
+
+ ls = &MyLogicalSwitch {UUID:"myUUID"}
+
+ ovs.Where(ls)
+
+ ovs.Where(ls, client.Condition {
+ Field: &ls.UUID,
+ Function: ovsdb.ConditionEqual,
+ Value: "myUUID",
+ })
+
+Where() accepts multiple Condition instances (through variadic arguments).
+If provided, the client will generate multiple operations each matching one condition.
+For example, the following operation will delete all the Logical Switches named "foo" OR "bar":
+
+ ops, err := ovs.Where(ls,
+ client.Condition {
+ Field: &ls.Name
+ Function: ovsdb.ConditionEqual,
+ Value: "foo",
+ },client.Condition {
+ Field: &ls.Port,
+ Function: ovsdb.ConditionIncludes,
+ Value: "bar",
+ }).Delete()
+
+To create a Condition that matches all of the conditions simultaneously (i.e: AND semantics), use WhereAll().
+
+Where() or WhereAll() evaluate the provided index values or explicit conditions against the cache and generate
+conditions based on the UUIDs of matching models. If no matches are found in the cache, the generated conditions
+will be based on the index or condition fields themselves.
+
+A more flexible mechanism to search the cache is available: WhereCache()
+
+WhereCache() accepts a function that takes any Model as argument and returns a boolean.
+It is used to search the cache so commonly used with List() function. For example:
+
+ lsList := &[]LogicalSwitch{}
+ err := ovs.WhereCache(
+ func(ls *LogicalSwitch) bool {
+ return strings.HasPrefix(ls.Name, "ext_")
+ }).List(lsList)
+
+Server side operations can be executed using WhereCache() conditions but it's not recommended. For each matching
+cache element, an operation will be created matching on the "_uuid" column. The number of operations can be
+quite large depending on the cache size and the provided function. Most likely there is a way to express the
+same condition using Where() or WhereAll() which will be more efficient.
+
+Get
+
+Get() operation is a simple operation capable of retrieving one Model based on some of its schema indexes. E.g:
+
+ ls := &LogicalSwitch{UUID:"myUUID"}
+ err := ovs.Get(ls)
+ fmt.Printf("Name of the switch is: %s", ls.Name)
+
+List
+
+List() searches the cache and populates a slice of Models. It can be used directly or using WhereCache()
+
+ lsList := &[]LogicalSwitch{}
+ err := ovs.List(lsList) // List all elements
+
+ err := ovs.WhereCache(
+ func(ls *LogicalSwitch) bool {
+ return strings.HasPrefix(ls.Name, "ext_")
+ }).List(lsList)
+
+Create
+
+Create returns a list of operations to create the models provided. E.g:
+
+ ops, err := ovs.Create(&LogicalSwitch{Name:"foo"}, &LogicalSwitch{Name:"bar"})
+
+Update
+Update returns a list of operations to update the matching rows to match the values of the provided model. E.g:
+
+ ls := &LogicalSwitch{ExternalIDs: map[string]string {"foo": "bar"}}
+ ops, err := ovs.Where(...).Update(ls, &ls.ExternalIDs)
+
+Mutate
+
+Mutate returns a list of operations needed to mutate the matching rows as described by the list of Mutation objects. E.g:
+
+ ls := &LogicalSwitch{}
+ ops, err := ovs.Where(...).Mutate(ls, client.Mutation {
+ Field: &ls.Config,
+ Mutator: ovsdb.MutateOperationInsert,
+ Value: map[string]string{"foo":"bar"},
+ })
+
+Delete
+
+Delete returns a list of operations needed to delete the matching rows. E.g:
+
+ ops, err := ovs.Where(...).Delete()
+
+*/
+package client
diff --git a/vendor/github.com/ovn-org/libovsdb/client/metrics.go b/vendor/github.com/ovn-org/libovsdb/client/metrics.go
new file mode 100644
index 0000000000..8c4e5f6f2d
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/metrics.go
@@ -0,0 +1,88 @@
+package client
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const libovsdbName = "libovsdb"
+
+type metrics struct {
+ numUpdates *prometheus.CounterVec
+ numTableUpdates *prometheus.CounterVec
+ numDisconnects prometheus.Counter
+ numMonitors prometheus.Gauge
+ registerOnce sync.Once
+}
+
+func (m *metrics) init(modelName string, namespace, subsystem string) {
+ // labels that are the same across all metrics
+ constLabels := prometheus.Labels{"primary_model": modelName}
+
+ if namespace == "" {
+ namespace = libovsdbName
+ subsystem = ""
+ }
+
+ m.numUpdates = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "update_messages_total",
+ Help: "Count of libovsdb monitor update messages processed, partitioned by database",
+ ConstLabels: constLabels,
+ },
+ []string{"database"},
+ )
+
+ m.numTableUpdates = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "table_updates_total",
+ Help: "Count of libovsdb monitor update messages per table",
+ ConstLabels: constLabels,
+ },
+ []string{"database", "table"},
+ )
+
+ m.numDisconnects = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "disconnects_total",
+ Help: "Count of libovsdb disconnects encountered",
+ ConstLabels: constLabels,
+ },
+ )
+
+ m.numMonitors = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "monitors",
+ Help: "Number of running libovsdb ovsdb monitors",
+ ConstLabels: constLabels,
+ },
+ )
+}
+
+func (m *metrics) register(r prometheus.Registerer) {
+ m.registerOnce.Do(func() {
+ r.MustRegister(
+ m.numUpdates,
+ m.numTableUpdates,
+ m.numDisconnects,
+ m.numMonitors,
+ )
+ })
+}
+
+func (o *ovsdbClient) registerMetrics() {
+ if !o.options.shouldRegisterMetrics || o.options.registry == nil {
+ return
+ }
+ o.metrics.register(o.options.registry)
+ o.options.shouldRegisterMetrics = false
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/monitor.go b/vendor/github.com/ovn-org/libovsdb/client/monitor.go
new file mode 100644
index 0000000000..4a0270a87a
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/monitor.go
@@ -0,0 +1,136 @@
+package client
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+const emptyUUID = "00000000-0000-0000-0000-000000000000"
+
+// Monitor represents a monitor
+type Monitor struct {
+ Method string
+ Tables []TableMonitor
+ Errors []error
+ LastTransactionID string
+}
+
+// newMonitor creates a new *Monitor with default values
+func newMonitor() *Monitor {
+ return &Monitor{
+ Method: ovsdb.ConditionalMonitorSinceRPC,
+ Errors: make([]error, 0),
+ LastTransactionID: emptyUUID,
+ }
+}
+
+// NewMonitor creates a new Monitor with the provided options
+func (o *ovsdbClient) NewMonitor(opts ...MonitorOption) *Monitor {
+ m := newMonitor()
+ for _, opt := range opts {
+ err := opt(o, m)
+ if err != nil {
+ m.Errors = append(m.Errors, err)
+ }
+ }
+ return m
+}
+
+// MonitorOption adds Tables to a Monitor
+type MonitorOption func(o *ovsdbClient, m *Monitor) error
+
+// MonitorCookie is the struct we pass to correlate from updates back to their
+// originating Monitor request.
+type MonitorCookie struct {
+ DatabaseName string `json:"databaseName"`
+ ID string `json:"id"`
+}
+
+func newMonitorCookie(dbName string) MonitorCookie {
+ return MonitorCookie{
+ DatabaseName: dbName,
+ ID: uuid.NewString(),
+ }
+}
+
+// TableMonitor is a table to be monitored
+type TableMonitor struct {
+ // Table is the table to be monitored
+ Table string
+ // Conditions are the conditions under which the table should be monitored
+ Conditions []ovsdb.Condition
+ // Fields are the fields in the model to monitor
+ // If none are supplied, all fields will be used
+ Fields []string
+}
+
+func newTableMonitor(o *ovsdbClient, m model.Model, conditions []model.Condition, fields []interface{}) (*TableMonitor, error) {
+ dbModel := o.primaryDB().model
+ tableName := dbModel.FindTable(reflect.TypeOf(m))
+ if tableName == "" {
+ return nil, fmt.Errorf("object of type %s is not part of the ClientDBModel", reflect.TypeOf(m))
+ }
+
+ var columns []string
+ var ovsdbConds []ovsdb.Condition
+
+ if len(fields) == 0 && len(conditions) == 0 {
+ return &TableMonitor{
+ Table: tableName,
+ Conditions: ovsdbConds,
+ Fields: columns,
+ }, nil
+ }
+
+ data, err := dbModel.NewModelInfo(m)
+ if err != nil {
+ return nil, fmt.Errorf("unable to obtain info from model %v: %v", m, err)
+ }
+ for _, f := range fields {
+ column, err := data.ColumnByPtr(f)
+ if err != nil {
+ return nil, fmt.Errorf("unable to obtain column from model %v: %v", data, err)
+ }
+ columns = append(columns, column)
+ }
+ db := o.databases[o.primaryDBName]
+ mmapper := db.model.Mapper
+ for _, modelCond := range conditions {
+ ovsdbCond, err := mmapper.NewCondition(data, modelCond.Field, modelCond.Function, modelCond.Value)
+ if err != nil {
+ return nil, fmt.Errorf("unable to convert condition %v: %v", modelCond, err)
+ }
+ ovsdbConds = append(ovsdbConds, *ovsdbCond)
+ }
+ return &TableMonitor{
+ Table: tableName,
+ Conditions: ovsdbConds,
+ Fields: columns,
+ }, nil
+}
+
+func WithTable(m model.Model, fields ...interface{}) MonitorOption {
+ return func(o *ovsdbClient, monitor *Monitor) error {
+ tableMonitor, err := newTableMonitor(o, m, []model.Condition{}, fields)
+ if err != nil {
+ return err
+ }
+ monitor.Tables = append(monitor.Tables, *tableMonitor)
+ return nil
+ }
+}
+
+func WithConditionalTable(m model.Model, conditions []model.Condition, fields ...interface{}) MonitorOption {
+ return func(o *ovsdbClient, monitor *Monitor) error {
+ tableMonitor, err := newTableMonitor(o, m, conditions, fields)
+ if err != nil {
+ return err
+ }
+ monitor.Tables = append(monitor.Tables, *tableMonitor)
+ return nil
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/options.go b/vendor/github.com/ovn-org/libovsdb/client/options.go
new file mode 100644
index 0000000000..81ccffe203
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/options.go
@@ -0,0 +1,164 @@
+package client
+
+import (
+ "crypto/tls"
+ "net/url"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/go-logr/logr"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ defaultTCPEndpoint = "tcp:127.0.0.1:6640"
+ defaultSSLEndpoint = "ssl:127.0.0.1:6640"
+ defaultUnixEndpoint = "unix:/var/run/openvswitch/ovsdb.sock"
+)
+
+type options struct {
+ endpoints []string
+ tlsConfig *tls.Config
+ reconnect bool
+ leaderOnly bool
+ timeout time.Duration
+ backoff backoff.BackOff
+ logger *logr.Logger
+ registry prometheus.Registerer
+ shouldRegisterMetrics bool // in case metrics are changed after-the-fact
+ metricNamespace string // prometheus metric namespace
+ metricSubsystem string // prometheus metric subsystem
+ inactivityTimeout time.Duration
+}
+
+type Option func(o *options) error
+
+func newOptions(opts ...Option) (*options, error) {
+ o := &options{}
+ for _, opt := range opts {
+ if err := opt(o); err != nil {
+ return nil, err
+ }
+ }
+ // if no endpoints are supplied, use the default unix socket
+ if len(o.endpoints) == 0 {
+ o.endpoints = []string{defaultUnixEndpoint}
+ }
+ return o, nil
+}
+
+// WithTLSConfig sets the tls.Config for use by the client
+func WithTLSConfig(cfg *tls.Config) Option {
+ return func(o *options) error {
+ o.tlsConfig = cfg
+ return nil
+ }
+}
+
+// WithEndpoint sets the endpoint to be used by the client
+// It can be used multiple times, and the first endpoint that
+// successfully connects will be used.
+// Endpoints are specified in OVSDB Connection Format
+// For more details, see the ovsdb(7) man page
+func WithEndpoint(endpoint string) Option {
+ return func(o *options) error {
+ ep, err := url.Parse(endpoint)
+ if err != nil {
+ return err
+ }
+ switch ep.Scheme {
+ case UNIX:
+ if len(ep.Path) == 0 {
+ o.endpoints = append(o.endpoints, defaultUnixEndpoint)
+ return nil
+ }
+ case TCP:
+ if len(ep.Opaque) == 0 {
+ o.endpoints = append(o.endpoints, defaultTCPEndpoint)
+ return nil
+ }
+ case SSL:
+ if len(ep.Opaque) == 0 {
+ o.endpoints = append(o.endpoints, defaultSSLEndpoint)
+ return nil
+ }
+ }
+ o.endpoints = append(o.endpoints, endpoint)
+ return nil
+ }
+}
+
+// WithLeaderOnly tells the client to treat endpoints that are clustered
+// and not the leader as down.
+func WithLeaderOnly(leaderOnly bool) Option {
+ return func(o *options) error {
+ o.leaderOnly = leaderOnly
+ return nil
+ }
+}
+
+// WithReconnect tells the client to automatically reconnect when
+// disconnected. The timeout is used to construct the context on
+// each call to Connect, while backoff dictates the backoff
+// algorithm to use. Using WithReconnect implies that
+// requested transactions will block until the client has fully reconnected,
+// rather than immediately returning an error if there is no connection.
+func WithReconnect(timeout time.Duration, backoff backoff.BackOff) Option {
+ return func(o *options) error {
+ o.reconnect = true
+ o.timeout = timeout
+ o.backoff = backoff
+ return nil
+ }
+}
+
+// WithInactivityCheck tells the client to send Echo request to ovsdb server periodically
+// upon inactivityTimeout. When Echo request fails, then it attempts to reconnect
+// with server. The inactivity check is performed as long as the connection is established.
+// The reconnectTimeout argument is used to construct the context on each call to Connect,
+// while reconnectBackoff dictates the backoff algorithm to use.
+func WithInactivityCheck(inactivityTimeout, reconnectTimeout time.Duration,
+ reconnectBackoff backoff.BackOff) Option {
+ return func(o *options) error {
+ o.reconnect = true
+ o.timeout = reconnectTimeout
+ o.backoff = reconnectBackoff
+ o.inactivityTimeout = inactivityTimeout
+ return nil
+ }
+}
+
+// WithLogger allows setting a specific log sink. Otherwise, the default
+// go log package is used.
+func WithLogger(l *logr.Logger) Option {
+ return func(o *options) error {
+ o.logger = l
+ return nil
+ }
+}
+
+// WithMetricsRegistry allows the user to specify a Prometheus metrics registry.
+// If supplied, the metrics as defined in metrics.go will be registered.
+func WithMetricsRegistry(r prometheus.Registerer) Option {
+ return func(o *options) error {
+ o.registry = r
+ o.shouldRegisterMetrics = (r != nil)
+ return nil
+ }
+}
+
+// WithMetricsRegistryNamespaceSubsystem allows the user to specify a Prometheus metrics registry
+// and Prometheus metric namespace and subsystem of the component utilizing libovsdb.
+// If supplied, the metrics as defined in metrics.go will be registered.
+func WithMetricsRegistryNamespaceSubsystem(r prometheus.Registerer, namespace, subsystem string) Option {
+ if namespace == "" || subsystem == "" {
+ panic("libovsdb function WithMetricsRegistryNamespaceSubsystem arguments 'namespace' and 'subsystem' must not be empty")
+ }
+ return func(o *options) error {
+ o.registry = r
+ o.shouldRegisterMetrics = (r != nil)
+ o.metricNamespace = namespace
+ o.metricSubsystem = subsystem
+ return nil
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/database/database.go b/vendor/github.com/ovn-org/libovsdb/database/database.go
new file mode 100644
index 0000000000..12f1222f19
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/database.go
@@ -0,0 +1,33 @@
+package database
+
+import (
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// Database abstracts a database that a server can use to store and transact data
+type Database interface {
+ CreateDatabase(database string, model ovsdb.DatabaseSchema) error
+ Exists(database string) bool
+ NewTransaction(database string) Transaction
+ Commit(database string, id uuid.UUID, update Update) error
+ CheckIndexes(database string, table string, m model.Model) error
+ List(database, table string, conditions ...ovsdb.Condition) (map[string]model.Model, error)
+ Get(database, table string, uuid string) (model.Model, error)
+ GetReferences(database, table, row string) (References, error)
+}
+
+// Transaction abstracts a database transaction that can generate database
+// updates
+type Transaction interface {
+ Transact(operations ...ovsdb.Operation) ([]*ovsdb.OperationResult, Update)
+}
+
+// Update abstracts an update that can be committed to a database
+type Update interface {
+ GetUpdatedTables() []string
+ ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error
+ ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error
+ ForReferenceUpdates(do func(references References) error) error
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/database/doc.go b/vendor/github.com/ovn-org/libovsdb/database/doc.go
new file mode 100644
index 0000000000..c0a858c208
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/doc.go
@@ -0,0 +1,5 @@
+/*
+Package database collects database related types, interfaces and
+implementations.
+*/
+package database
diff --git a/vendor/github.com/ovn-org/libovsdb/database/inmemory/doc.go b/vendor/github.com/ovn-org/libovsdb/database/inmemory/doc.go
new file mode 100644
index 0000000000..bde3ffc06f
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/inmemory/doc.go
@@ -0,0 +1,4 @@
+/*
+Package inmemory provides an in-memory database implementation
+*/
+package inmemory
diff --git a/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go b/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go
new file mode 100644
index 0000000000..6c1dce9e79
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go
@@ -0,0 +1,145 @@
+package inmemory
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "sync"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/cache"
+ dbase "github.com/ovn-org/libovsdb/database"
+ "github.com/ovn-org/libovsdb/database/transaction"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+type inMemoryDatabase struct {
+ databases map[string]*cache.TableCache
+ models map[string]model.ClientDBModel
+ references map[string]dbase.References
+ logger *logr.Logger
+ mutex sync.RWMutex
+}
+
+func NewDatabase(models map[string]model.ClientDBModel) dbase.Database {
+ logger := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("database")
+ return &inMemoryDatabase{
+ databases: make(map[string]*cache.TableCache),
+ models: models,
+ references: make(map[string]dbase.References),
+ mutex: sync.RWMutex{},
+ logger: &logger,
+ }
+}
+
+func (db *inMemoryDatabase) NewTransaction(dbName string) dbase.Transaction {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+ var model model.DatabaseModel
+ if database, ok := db.databases[dbName]; ok {
+ model = database.DatabaseModel()
+ }
+ transaction := transaction.NewTransaction(model, dbName, db, db.logger)
+ return &transaction
+}
+
+func (db *inMemoryDatabase) CreateDatabase(name string, schema ovsdb.DatabaseSchema) error {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+ var mo model.ClientDBModel
+ var ok bool
+ if mo, ok = db.models[schema.Name]; !ok {
+ return fmt.Errorf("no db model provided for schema with name %s", name)
+ }
+ dbModel, errs := model.NewDatabaseModel(schema, mo)
+ if len(errs) > 0 {
+ return fmt.Errorf("failed to create DatabaseModel: %#+v", errs)
+ }
+ database, err := cache.NewTableCache(dbModel, nil, nil)
+ if err != nil {
+ return err
+ }
+ db.databases[name] = database
+ db.references[name] = make(dbase.References)
+ return nil
+}
+
+func (db *inMemoryDatabase) Exists(name string) bool {
+ db.mutex.RLock()
+ defer db.mutex.RUnlock()
+ _, ok := db.databases[name]
+ return ok
+}
+
+func (db *inMemoryDatabase) Commit(database string, id uuid.UUID, update dbase.Update) error {
+ if !db.Exists(database) {
+ return fmt.Errorf("db does not exist")
+ }
+ db.mutex.RLock()
+ targetDb := db.databases[database]
+ db.mutex.RUnlock()
+
+ err := targetDb.ApplyCacheUpdate(update)
+ if err != nil {
+ return err
+ }
+
+ return update.ForReferenceUpdates(func(references dbase.References) error {
+ db.references[database].UpdateReferences(references)
+ return nil
+ })
+}
+
+func (db *inMemoryDatabase) CheckIndexes(database string, table string, m model.Model) error {
+ if !db.Exists(database) {
+ return nil
+ }
+ db.mutex.RLock()
+ targetDb := db.databases[database]
+ db.mutex.RUnlock()
+ targetTable := targetDb.Table(table)
+ return targetTable.IndexExists(m)
+}
+
+func (db *inMemoryDatabase) List(database, table string, conditions ...ovsdb.Condition) (map[string]model.Model, error) {
+ if !db.Exists(database) {
+ return nil, fmt.Errorf("db does not exist")
+ }
+ db.mutex.RLock()
+ targetDb := db.databases[database]
+ db.mutex.RUnlock()
+
+ targetTable := targetDb.Table(table)
+ if targetTable == nil {
+ return nil, fmt.Errorf("table does not exist")
+ }
+
+ return targetTable.RowsByCondition(conditions)
+}
+
+func (db *inMemoryDatabase) Get(database, table string, uuid string) (model.Model, error) {
+ if !db.Exists(database) {
+ return nil, fmt.Errorf("db does not exist")
+ }
+ db.mutex.RLock()
+ targetDb := db.databases[database]
+ db.mutex.RUnlock()
+
+ targetTable := targetDb.Table(table)
+ if targetTable == nil {
+ return nil, fmt.Errorf("table does not exist")
+ }
+ return targetTable.Row(uuid), nil
+}
+
+func (db *inMemoryDatabase) GetReferences(database, table, row string) (dbase.References, error) {
+ if !db.Exists(database) {
+ return nil, fmt.Errorf("db does not exist")
+ }
+ db.mutex.RLock()
+ defer db.mutex.RUnlock()
+ return db.references[database].GetReferences(table, row), nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/database/references.go b/vendor/github.com/ovn-org/libovsdb/database/references.go
new file mode 100644
index 0000000000..d8181a7a51
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/references.go
@@ -0,0 +1,71 @@
+package database
+
+// References tracks the references to rows from other rows at specific
+// locations in the schema.
+type References map[ReferenceSpec]Reference
+
+// ReferenceSpec specifies details about where in the schema a reference occurs.
+type ReferenceSpec struct {
+ // ToTable is the table of the row to which the reference is made
+ ToTable string
+
+ // FromTable is the table of the row from which the reference is made
+ FromTable string
+
+ // FromColumn is the column of the row from which the reference is made
+ FromColumn string
+
+ // FromValue flags if the reference is made on a map key or map value when
+ // the column is a map
+ FromValue bool
+}
+
+// Reference maps the UUIDs of rows to which the reference is made to the
+// rows it is made from
+type Reference map[string][]string
+
+// GetReferences gets references to a row
+func (rs References) GetReferences(table, uuid string) References {
+ refs := References{}
+ for spec, values := range rs {
+ if spec.ToTable != table {
+ continue
+ }
+ if _, ok := values[uuid]; ok {
+ refs[spec] = Reference{uuid: values[uuid]}
+ }
+ }
+ return refs
+}
+
+// UpdateReferences updates the references with the provided ones. Dangling
+// references, that is, the references of rows that are no longer referenced
+// from anywhere, are cleaned up.
+func (rs References) UpdateReferences(other References) {
+ for spec, otherRefs := range other {
+ for to, from := range otherRefs {
+ rs.updateReference(spec, to, from)
+ }
+ }
+}
+
+// updateReference updates the references to a row at a specific location in the
+// schema
+func (rs References) updateReference(spec ReferenceSpec, to string, from []string) {
+ thisRefs, ok := rs[spec]
+ if !ok && len(from) > 0 {
+ // add references from a previously untracked location
+ rs[spec] = Reference{to: from}
+ return
+ }
+ if len(from) > 0 {
+ // replace references to this row at this specific location
+ thisRefs[to] = from
+ return
+ }
+ // otherwise remove previously tracked references
+ delete(thisRefs, to)
+ if len(thisRefs) == 0 {
+ delete(rs, spec)
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/database/transaction/doc.go b/vendor/github.com/ovn-org/libovsdb/database/transaction/doc.go
new file mode 100644
index 0000000000..36d35aa7f7
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/transaction/doc.go
@@ -0,0 +1,4 @@
+/*
+Package transaction provides a transaction implementation
+*/
+package transaction
diff --git a/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go b/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go
new file mode 100644
index 0000000000..35e47c7294
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go
@@ -0,0 +1,17 @@
+package transaction
+
+import (
+ "fmt"
+
+ "github.com/ovn-org/libovsdb/cache"
+)
+
+func newIndexExistsDetails(err cache.ErrIndexExists) string {
+ return fmt.Sprintf("operation would cause rows in the \"%s\" table to have identical values (%v) for index on column \"%s\". First row, with UUID %s, was inserted by this transaction. Second row, with UUID %s, existed in the database before this operation and was not modified",
+ err.Table,
+ err.Value,
+ err.Index,
+ err.New,
+ err.Existing,
+ )
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go b/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go
new file mode 100644
index 0000000000..69736d0048
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go
@@ -0,0 +1,496 @@
+package transaction
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/go-logr/logr"
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/database"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+ "github.com/ovn-org/libovsdb/updates"
+)
+
+type Transaction struct {
+ ID uuid.UUID
+ Cache *cache.TableCache
+ DeletedRows map[string]struct{}
+ Model model.DatabaseModel
+ DbName string
+ Database database.Database
+ logger *logr.Logger
+}
+
+func NewTransaction(model model.DatabaseModel, dbName string, database database.Database, logger *logr.Logger) Transaction {
+ if logger != nil {
+ l := logger.WithName("transaction")
+ logger = &l
+ }
+
+ return Transaction{
+ ID: uuid.New(),
+ DeletedRows: make(map[string]struct{}),
+ Model: model,
+ DbName: dbName,
+ Database: database,
+ logger: logger,
+ }
+}
+
+func (t *Transaction) Transact(operations ...ovsdb.Operation) ([]*ovsdb.OperationResult, database.Update) {
+ results := make([]*ovsdb.OperationResult, len(operations), len(operations)+1)
+ update := updates.ModelUpdates{}
+
+ if !t.Database.Exists(t.DbName) {
+ r := ovsdb.ResultFromError(fmt.Errorf("database does not exist"))
+ results[0] = &r
+ return results, updates.NewDatabaseUpdate(update, nil)
+ }
+
+ err := t.initializeCache()
+ if err != nil {
+ r := ovsdb.ResultFromError(err)
+ results[0] = &r
+ return results, updates.NewDatabaseUpdate(update, nil)
+ }
+
+ // Every Insert operation must have a UUID
+ for i := range operations {
+ op := &operations[i]
+ if op.Op == ovsdb.OperationInsert && op.UUID == "" {
+ op.UUID = uuid.NewString()
+ }
+ }
+
+ // Ensure Named UUIDs are expanded in all operations
+ operations, err = ovsdb.ExpandNamedUUIDs(operations, &t.Model.Schema)
+ if err != nil {
+ r := ovsdb.ResultFromError(err)
+ results[0] = &r
+ return results, updates.NewDatabaseUpdate(update, nil)
+ }
+
+ var r ovsdb.OperationResult
+ for i, op := range operations {
+ var u *updates.ModelUpdates
+ switch op.Op {
+ case ovsdb.OperationInsert:
+ r, u = t.Insert(&op)
+ case ovsdb.OperationSelect:
+ r = t.Select(op.Table, op.Where, op.Columns)
+ case ovsdb.OperationUpdate:
+ r, u = t.Update(&op)
+ case ovsdb.OperationMutate:
+ r, u = t.Mutate(&op)
+ case ovsdb.OperationDelete:
+ r, u = t.Delete(&op)
+ case ovsdb.OperationWait:
+ r = t.Wait(op.Table, op.Timeout, op.Where, op.Columns, op.Until, op.Rows)
+ case ovsdb.OperationCommit:
+ durable := op.Durable
+ r = t.Commit(*durable)
+ case ovsdb.OperationAbort:
+ r = t.Abort()
+ case ovsdb.OperationComment:
+ r = t.Comment(*op.Comment)
+ case ovsdb.OperationAssert:
+ r = t.Assert(*op.Lock)
+ default:
+ r = ovsdb.ResultFromError(&ovsdb.NotSupported{})
+ }
+
+ if r.Error == "" && u != nil {
+ err := update.Merge(t.Model, *u)
+ if err != nil {
+ r = ovsdb.ResultFromError(err)
+ }
+ if err := t.Cache.ApplyCacheUpdate(*u); err != nil {
+ r = ovsdb.ResultFromError(err)
+ }
+ u = nil
+ }
+
+ result := r
+ results[i] = &result
+
+ // if an operation failed, no need to process any further operation
+ if r.Error != "" {
+ break
+ }
+ }
+
+ // if an operation failed, no need to do any further validation
+ if r.Error != "" {
+ return results, updates.NewDatabaseUpdate(update, nil)
+ }
+
+ // if there is no updates, no need to do any further validation
+ if len(update.GetUpdatedTables()) == 0 {
+ return results, updates.NewDatabaseUpdate(update, nil)
+ }
+
+ // check & update references
+ update, refUpdates, refs, err := updates.ProcessReferences(t.Model, t.Database, update)
+ if err != nil {
+ r = ovsdb.ResultFromError(err)
+ results = append(results, &r)
+ return results, updates.NewDatabaseUpdate(update, refs)
+ }
+
+ // apply updates resulting from referential integrity to the transaction
+ // caches so they are accounted for when checking index constraints
+ err = t.applyReferenceUpdates(refUpdates)
+ if err != nil {
+ r = ovsdb.ResultFromError(err)
+ results = append(results, &r)
+ return results, updates.NewDatabaseUpdate(update, refs)
+ }
+
+ // check index constraints
+ if err := t.checkIndexes(); err != nil {
+ if indexExists, ok := err.(*cache.ErrIndexExists); ok {
+ err = ovsdb.NewConstraintViolation(newIndexExistsDetails(*indexExists))
+ r := ovsdb.ResultFromError(err)
+ results = append(results, &r)
+ } else {
+ r := ovsdb.ResultFromError(err)
+ results = append(results, &r)
+ }
+
+ return results, updates.NewDatabaseUpdate(update, refs)
+ }
+
+ return results, updates.NewDatabaseUpdate(update, refs)
+}
+
+func (t *Transaction) applyReferenceUpdates(update updates.ModelUpdates) error {
+ tables := update.GetUpdatedTables()
+ for _, table := range tables {
+ err := update.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error {
+ // track deleted rows due to reference updates
+ if old != nil && new == nil {
+ t.DeletedRows[uuid] = struct{}{}
+ }
+ // warm the cache with updated and deleted rows due to reference
+ // updates
+ if old != nil && !t.Cache.Table(table).HasRow(uuid) {
+ row, err := t.Database.Get(t.DbName, table, uuid)
+ if err != nil {
+ return err
+ }
+ err = t.Cache.Table(table).Create(uuid, row, false)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ // apply reference updates to the cache
+ return t.Cache.ApplyCacheUpdate(update)
+}
+
+func (t *Transaction) initializeCache() error {
+ if t.Cache != nil {
+ return nil
+ }
+ var err error
+ t.Cache, err = cache.NewTableCache(t.Model, nil, t.logger)
+ return err
+}
+
+func (t *Transaction) rowsFromTransactionCacheAndDatabase(table string, where []ovsdb.Condition) (map[string]model.Model, error) {
+ err := t.initializeCache()
+ if err != nil {
+ return nil, err
+ }
+
+ txnRows, err := t.Cache.Table(table).RowsByCondition(where)
+ if err != nil {
+ return nil, fmt.Errorf("failed getting rows for table %s from transaction cache: %v", table, err)
+ }
+ rows, err := t.Database.List(t.DbName, table, where...)
+ if err != nil {
+ return nil, fmt.Errorf("failed getting rows for table %s from database: %v", table, err)
+ }
+
+ // prefer rows from transaction cache while copying into cache
+ // rows that are in the db.
+ for rowUUID, row := range rows {
+ if txnRow, found := txnRows[rowUUID]; found {
+ rows[rowUUID] = txnRow
+ // delete txnRows so that only inserted rows remain in txnRows
+ delete(txnRows, rowUUID)
+ } else {
+ // warm the transaction cache with the current contents of the row
+ if err := t.Cache.Table(table).Create(rowUUID, row, false); err != nil {
+ return nil, fmt.Errorf("failed warming transaction cache row %s %v for table %s: %v", rowUUID, row, table, err)
+ }
+ }
+ }
+ // add rows that have been inserted in this transaction
+ for rowUUID, row := range txnRows {
+ rows[rowUUID] = row
+ }
+ // exclude deleted rows
+ for rowUUID := range t.DeletedRows {
+ delete(rows, rowUUID)
+ }
+ return rows, nil
+}
+
+// checkIndexes checks that there are no index conflicts:
+// - no duplicate indexes among any two rows operated with in the transaction
+// - no duplicate indexes of any transaction row with any database row
+func (t *Transaction) checkIndexes() error {
+ // check for index conflicts.
+ tables := t.Cache.Tables()
+ for _, table := range tables {
+ tc := t.Cache.Table(table)
+ for _, row := range tc.RowsShallow() {
+ err := tc.IndexExists(row)
+ if err != nil {
+ return err
+ }
+ err = t.Database.CheckIndexes(t.DbName, table, row)
+ errIndexExists, isErrIndexExists := err.(*cache.ErrIndexExists)
+ if err == nil {
+ continue
+ }
+ if !isErrIndexExists {
+ return err
+ }
+ for _, existing := range errIndexExists.Existing {
+ if _, isDeleted := t.DeletedRows[existing]; isDeleted {
+ // this model is deleted in the transaction, ignore it
+ continue
+ }
+ if tc.HasRow(existing) {
+ // this model is updated in the transaction and was not
+ // detected as a duplicate, so an index must have been
+ // updated, ignore it
+ continue
+ }
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (t *Transaction) Insert(op *ovsdb.Operation) (ovsdb.OperationResult, *updates.ModelUpdates) {
+ if err := ovsdb.ValidateUUID(op.UUID); err != nil {
+ return ovsdb.ResultFromError(err), nil
+ }
+
+ update := updates.ModelUpdates{}
+ err := update.AddOperation(t.Model, op.Table, op.UUID, nil, op)
+ if err != nil {
+ return ovsdb.ResultFromError(err), nil
+ }
+
+ result := ovsdb.OperationResult{
+ UUID: ovsdb.UUID{GoUUID: op.UUID},
+ }
+
+ return result, &update
+}
+
+func (t *Transaction) Select(table string, where []ovsdb.Condition, columns []string) ovsdb.OperationResult {
+ var results []ovsdb.Row
+ dbModel := t.Model
+
+ rows, err := t.rowsFromTransactionCacheAndDatabase(table, where)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+
+ m := dbModel.Mapper
+ for _, row := range rows {
+ info, err := dbModel.NewModelInfo(row)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+ resultRow, err := m.NewRow(info)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+ results = append(results, resultRow)
+ }
+ return ovsdb.OperationResult{
+ Rows: results,
+ }
+}
+
+func (t *Transaction) Update(op *ovsdb.Operation) (ovsdb.OperationResult, *updates.ModelUpdates) {
+ rows, err := t.rowsFromTransactionCacheAndDatabase(op.Table, op.Where)
+ if err != nil {
+ return ovsdb.ResultFromError(err), nil
+ }
+
+ update := updates.ModelUpdates{}
+ for uuid, old := range rows {
+ err := update.AddOperation(t.Model, op.Table, uuid, old, op)
+ if err != nil {
+ return ovsdb.ResultFromError(err), nil
+ }
+ }
+
+ // FIXME: We need to filter the returned columns
+ return ovsdb.OperationResult{Count: len(rows)}, &update
+}
+
+func (t *Transaction) Mutate(op *ovsdb.Operation) (ovsdb.OperationResult, *updates.ModelUpdates) {
+ rows, err := t.rowsFromTransactionCacheAndDatabase(op.Table, op.Where)
+ if err != nil {
+ return ovsdb.ResultFromError(err), nil
+ }
+
+ update := updates.ModelUpdates{}
+ for uuid, old := range rows {
+ err := update.AddOperation(t.Model, op.Table, uuid, old, op)
+ if err != nil {
+ return ovsdb.ResultFromError(err), nil
+ }
+ }
+
+ return ovsdb.OperationResult{Count: len(rows)}, &update
+}
+
+func (t *Transaction) Delete(op *ovsdb.Operation) (ovsdb.OperationResult, *updates.ModelUpdates) {
+ rows, err := t.rowsFromTransactionCacheAndDatabase(op.Table, op.Where)
+ if err != nil {
+ return ovsdb.ResultFromError(err), nil
+ }
+
+ update := updates.ModelUpdates{}
+ for uuid, row := range rows {
+ err := update.AddOperation(t.Model, op.Table, uuid, row, op)
+ if err != nil {
+ return ovsdb.ResultFromError(err), nil
+ }
+
+ // track delete operation in transaction to complement cache
+ t.DeletedRows[uuid] = struct{}{}
+ }
+
+ return ovsdb.OperationResult{Count: len(rows)}, &update
+}
+
+func (t *Transaction) Wait(table string, timeout *int, where []ovsdb.Condition, columns []string, until string, rows []ovsdb.Row) ovsdb.OperationResult {
+ start := time.Now()
+
+ if until != "!=" && until != "==" {
+ return ovsdb.ResultFromError(&ovsdb.NotSupported{})
+ }
+
+ dbModel := t.Model
+ realTable := dbModel.Schema.Table(table)
+ if realTable == nil {
+ return ovsdb.ResultFromError(&ovsdb.NotSupported{})
+ }
+ model, err := dbModel.NewModel(table)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+
+Loop:
+ for {
+ var filteredRows []ovsdb.Row
+ foundRowModels, err := t.rowsFromTransactionCacheAndDatabase(table, where)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+
+ m := dbModel.Mapper
+ for _, rowModel := range foundRowModels {
+ info, err := dbModel.NewModelInfo(rowModel)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+
+ foundMatch := true
+ for _, column := range columns {
+ columnSchema := info.Metadata.TableSchema.Column(column)
+ for _, r := range rows {
+ i, err := dbModel.NewModelInfo(model)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+ err = dbModel.Mapper.GetRowData(&r, i)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+ x, err := i.FieldByColumn(column)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+
+ // check to see if field value is default for given rows
+ // if it is default (not provided) we shouldn't try to compare
+ // for equality
+ if ovsdb.IsDefaultValue(columnSchema, x) {
+ continue
+ }
+ y, err := info.FieldByColumn(column)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+ if !reflect.DeepEqual(x, y) {
+ foundMatch = false
+ }
+ }
+ }
+
+ if foundMatch {
+ resultRow, err := m.NewRow(info)
+ if err != nil {
+ return ovsdb.ResultFromError(err)
+ }
+ filteredRows = append(filteredRows, resultRow)
+ }
+
+ }
+
+ if until == "==" && len(filteredRows) == len(rows) {
+ return ovsdb.OperationResult{}
+ } else if until == "!=" && len(filteredRows) != len(rows) {
+ return ovsdb.OperationResult{}
+ }
+
+ if timeout != nil {
+ // TODO(trozet): this really shouldn't just break and loop on a time interval
+ // Really this client handler should pause, wait for another handler to update the DB
+ // and then try again. However the server is single threaded for now and not capable of
+ // doing something like that.
+ if time.Since(start) > time.Duration(*timeout)*time.Millisecond {
+ break Loop
+ }
+ }
+ time.Sleep(200 * time.Millisecond)
+ }
+
+ return ovsdb.ResultFromError(&ovsdb.TimedOut{})
+}
+
+func (t *Transaction) Commit(durable bool) ovsdb.OperationResult {
+ return ovsdb.ResultFromError(&ovsdb.NotSupported{})
+}
+
+func (t *Transaction) Abort() ovsdb.OperationResult {
+ return ovsdb.ResultFromError(&ovsdb.NotSupported{})
+}
+
+func (t *Transaction) Comment(comment string) ovsdb.OperationResult {
+ return ovsdb.ResultFromError(&ovsdb.NotSupported{})
+}
+
+func (t *Transaction) Assert(lock string) ovsdb.OperationResult {
+ return ovsdb.ResultFromError(&ovsdb.NotSupported{})
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/mapper/info.go b/vendor/github.com/ovn-org/libovsdb/mapper/info.go
new file mode 100644
index 0000000000..8ac436c790
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/mapper/info.go
@@ -0,0 +1,179 @@
+package mapper
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// ErrColumnNotFound is an error that can occur when the column does not exist for a table
+type ErrColumnNotFound struct {
+ column string
+ table string
+}
+
+// Error implements the error interface
+func (e *ErrColumnNotFound) Error() string {
+ return fmt.Sprintf("column: %s not found in table: %s", e.column, e.table)
+}
+
+func NewErrColumnNotFound(column, table string) *ErrColumnNotFound {
+ return &ErrColumnNotFound{
+ column: column,
+ table: table,
+ }
+}
+
+// Info is a struct that wraps an object with its metadata
+type Info struct {
+ // FieldName indexed by column
+ Obj interface{}
+ Metadata Metadata
+}
+
+// Metadata represents the information needed to know how to map OVSDB columns into an object's fields
+type Metadata struct {
+ Fields map[string]string // Map of ColumnName -> FieldName
+ TableSchema *ovsdb.TableSchema // TableSchema associated
+ TableName string // Table name
+}
+
+// FieldByColumn returns the field value that corresponds to a column
+func (i *Info) FieldByColumn(column string) (interface{}, error) {
+ fieldName, ok := i.Metadata.Fields[column]
+ if !ok {
+ return nil, NewErrColumnNotFound(column, i.Metadata.TableName)
+ }
+ return reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName).Interface(), nil
+}
+
+// hasColumn reports whether the given column is present in the metadata field map
+func (i *Info) hasColumn(column string) bool {
+ _, ok := i.Metadata.Fields[column]
+ return ok
+}
+
+// SetField sets the field in the column to the specified value
+func (i *Info) SetField(column string, value interface{}) error {
+ fieldName, ok := i.Metadata.Fields[column]
+ if !ok {
+ return fmt.Errorf("SetField: column %s not found in orm info", column)
+ }
+ fieldValue := reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName)
+
+ if !fieldValue.Type().AssignableTo(reflect.TypeOf(value)) {
+ return fmt.Errorf("column %s: native value %v (%s) is not assignable to field %s (%s)",
+ column, value, reflect.TypeOf(value), fieldName, fieldValue.Type())
+ }
+ fieldValue.Set(reflect.ValueOf(value))
+ return nil
+}
+
+// ColumnByPtr returns the column name that corresponds to the field by the field's pointer
+func (i *Info) ColumnByPtr(fieldPtr interface{}) (string, error) {
+ fieldPtrVal := reflect.ValueOf(fieldPtr)
+ if fieldPtrVal.Kind() != reflect.Ptr {
+ return "", ovsdb.NewErrWrongType("ColumnByPointer", "pointer to a field in the struct", fieldPtr)
+ }
+ offset := fieldPtrVal.Pointer() - reflect.ValueOf(i.Obj).Pointer()
+ objType := reflect.TypeOf(i.Obj).Elem()
+ for j := 0; j < objType.NumField(); j++ {
+ if objType.Field(j).Offset == offset {
+ column := objType.Field(j).Tag.Get("ovsdb")
+ if _, ok := i.Metadata.Fields[column]; !ok {
+ return "", fmt.Errorf("field does not have orm column information")
+ }
+ return column, nil
+ }
+ }
+ return "", fmt.Errorf("field pointer does not correspond to orm struct")
+}
+
+// getValidIndexes inspects the object and returns a list of indexes (sets of columns) for which
+// the object has non-default values
+func (i *Info) getValidIndexes() ([][]string, error) {
+ var validIndexes [][]string
+ var possibleIndexes [][]string
+
+ possibleIndexes = append(possibleIndexes, []string{"_uuid"})
+ possibleIndexes = append(possibleIndexes, i.Metadata.TableSchema.Indexes...)
+
+ // Iterate through indexes and validate them
+OUTER:
+ for _, idx := range possibleIndexes {
+ for _, col := range idx {
+ if !i.hasColumn(col) {
+ continue OUTER
+ }
+ columnSchema := i.Metadata.TableSchema.Column(col)
+ if columnSchema == nil {
+ continue OUTER
+ }
+ field, err := i.FieldByColumn(col)
+ if err != nil {
+ return nil, err
+ }
+ if !reflect.ValueOf(field).IsValid() || ovsdb.IsDefaultValue(columnSchema, field) {
+ continue OUTER
+ }
+ }
+ validIndexes = append(validIndexes, idx)
+ }
+ return validIndexes, nil
+}
+
+// NewInfo creates a MapperInfo structure around an object based on a given table schema
+func NewInfo(tableName string, table *ovsdb.TableSchema, obj interface{}) (*Info, error) {
+ objPtrVal := reflect.ValueOf(obj)
+ if objPtrVal.Type().Kind() != reflect.Ptr {
+ return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj)
+ }
+ objVal := reflect.Indirect(objPtrVal)
+ if objVal.Kind() != reflect.Struct {
+ return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj)
+ }
+ objType := objVal.Type()
+
+ fields := make(map[string]string, objType.NumField())
+ for i := 0; i < objType.NumField(); i++ {
+ field := objType.Field(i)
+ colName := field.Tag.Get("ovsdb")
+ if colName == "" {
+ // Untagged fields are ignored
+ continue
+ }
+ column := table.Column(colName)
+ if column == nil {
+ return nil, &ErrMapper{
+ objType: objType.String(),
+ field: field.Name,
+ fieldType: field.Type.String(),
+ fieldTag: colName,
+ reason: "Column does not exist in schema",
+ }
+ }
+
+ // Perform schema-based type checking
+ expType := ovsdb.NativeType(column)
+ if expType != field.Type {
+ return nil, &ErrMapper{
+ objType: objType.String(),
+ field: field.Name,
+ fieldType: field.Type.String(),
+ fieldTag: colName,
+ reason: fmt.Sprintf("Wrong type, column expects %s", expType),
+ }
+ }
+ fields[colName] = field.Name
+ }
+
+ return &Info{
+ Obj: obj,
+ Metadata: Metadata{
+ Fields: fields,
+ TableSchema: table,
+ TableName: tableName,
+ },
+ }, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go b/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go
new file mode 100644
index 0000000000..5ca7a412bb
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go
@@ -0,0 +1,317 @@
+package mapper
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// Mapper offers functions to interact with libovsdb through user-provided native structs.
+// The way to specify what field of the struct goes
+// to what column in the database is through a field tag.
+// The tag used is "ovsdb" and has the following structure
+// 'ovsdb:"${COLUMN_NAME}"'
+// where COLUMN_NAME is the name of the column and must match the schema
+//
+// Example:
+// type MyObj struct {
+// Name string `ovsdb:"name"`
+// }
+type Mapper struct {
+ Schema ovsdb.DatabaseSchema
+}
+
+// ErrMapper describes an error in an Mapper type
+type ErrMapper struct {
+ objType string
+ field string
+ fieldType string
+ fieldTag string
+ reason string
+}
+
+func (e *ErrMapper) Error() string {
+ return fmt.Sprintf("Mapper Error. Object type %s contains field %s (%s) ovs tag %s: %s",
+ e.objType, e.field, e.fieldType, e.fieldTag, e.reason)
+}
+
+// NewMapper returns a new mapper
+func NewMapper(schema ovsdb.DatabaseSchema) Mapper {
+ return Mapper{
+ Schema: schema,
+ }
+}
+
+// GetRowData transforms a Row to a struct based on its tags
+// The result object must be given as pointer to an object with the right tags
+func (m Mapper) GetRowData(row *ovsdb.Row, result *Info) error {
+ if row == nil {
+ return nil
+ }
+ return m.getData(*row, result)
+}
+
+// getData transforms a map[string]interface{} containing OvS types (e.g: a ResultRow
+// has this format) to orm struct
+// The result object must be given as pointer to an object with the right tags
+func (m Mapper) getData(ovsData ovsdb.Row, result *Info) error {
+ for name, column := range result.Metadata.TableSchema.Columns {
+ if !result.hasColumn(name) {
+ // If provided struct does not have a field to hold this value, skip it
+ continue
+ }
+
+ ovsElem, ok := ovsData[name]
+ if !ok {
+ // Ignore missing columns
+ continue
+ }
+
+ nativeElem, err := ovsdb.OvsToNative(column, ovsElem)
+ if err != nil {
+ return fmt.Errorf("table %s, column %s: failed to extract native element: %s",
+ result.Metadata.TableName, name, err.Error())
+ }
+
+ if err := result.SetField(name, nativeElem); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewRow transforms an orm struct to a map[string] interface{} that can be used as libovsdb.Row
+// By default, default or null values are skipped. This behavior can be modified by specifying
+// a list of fields (pointers to fields in the struct) to be added to the row
+func (m Mapper) NewRow(data *Info, fields ...interface{}) (ovsdb.Row, error) {
+ columns := make(map[string]*ovsdb.ColumnSchema)
+ for k, v := range data.Metadata.TableSchema.Columns {
+ columns[k] = v
+ }
+ columns["_uuid"] = &ovsdb.UUIDColumn
+ ovsRow := make(map[string]interface{}, len(columns))
+ for name, column := range columns {
+ nativeElem, err := data.FieldByColumn(name)
+ if err != nil {
+ // If provided struct does not have a field to hold this value, skip it
+ continue
+ }
+
+ // add specific fields
+ if len(fields) > 0 {
+ found := false
+ for _, f := range fields {
+ col, err := data.ColumnByPtr(f)
+ if err != nil {
+ return nil, err
+ }
+ if col == name {
+ found = true
+ break
+ }
+ }
+ if !found {
+ continue
+ }
+ }
+ if len(fields) == 0 && ovsdb.IsDefaultValue(column, nativeElem) {
+ continue
+ }
+ ovsElem, err := ovsdb.NativeToOvs(column, nativeElem)
+ if err != nil {
+ return nil, fmt.Errorf("table %s, column %s: failed to generate ovs element. %s", data.Metadata.TableName, name, err.Error())
+ }
+ ovsRow[name] = ovsElem
+ }
+ return ovsRow, nil
+}
+
+// NewEqualityCondition returns a list of equality conditions that match a given object
+// A list of valid columns that shall be used as a index can be provided.
+// If none are provided, we will try to use object's field that matches the '_uuid' ovsdb tag
+// If it does not exist or is null (""), then we will traverse all of the table indexes and
+// use the first index (list of simultaneously unique columns) for which the provided mapper
+// object has valid data. The order in which they are traversed matches the order defined
+// in the schema.
+// By `valid data` we mean non-default data.
+func (m Mapper) NewEqualityCondition(data *Info, fields ...interface{}) ([]ovsdb.Condition, error) {
+ var conditions []ovsdb.Condition
+ var condIndex [][]string
+
+ // If index is provided, use it. If not, obtain the valid indexes from the mapper info
+ if len(fields) > 0 {
+ providedIndex := []string{}
+ for i := range fields {
+ if col, err := data.ColumnByPtr(fields[i]); err == nil {
+ providedIndex = append(providedIndex, col)
+ } else {
+ return nil, err
+ }
+ }
+ condIndex = append(condIndex, providedIndex)
+ } else {
+ var err error
+ condIndex, err = data.getValidIndexes()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(condIndex) == 0 {
+ return nil, fmt.Errorf("failed to find a valid index")
+ }
+
+ // Pick the first valid index
+ for _, col := range condIndex[0] {
+ field, err := data.FieldByColumn(col)
+ if err != nil {
+ return nil, err
+ }
+
+ column := data.Metadata.TableSchema.Column(col)
+ if column == nil {
+ return nil, fmt.Errorf("column %s not found", col)
+ }
+ ovsVal, err := ovsdb.NativeToOvs(column, field)
+ if err != nil {
+ return nil, err
+ }
+ conditions = append(conditions, ovsdb.NewCondition(col, ovsdb.ConditionEqual, ovsVal))
+ }
+ return conditions, nil
+}
+
+// EqualFields compares two mapped objects.
+// The indexes to use for comparison are, the _uuid, the table indexes and the columns that correspond
+// to the mapped fields pointed to by 'fields'. They must be pointers to fields on the first mapped element (i.e: one)
+func (m Mapper) EqualFields(one, other *Info, fields ...interface{}) (bool, error) {
+ indexes := []string{}
+ for _, f := range fields {
+ col, err := one.ColumnByPtr(f)
+ if err != nil {
+ return false, err
+ }
+ indexes = append(indexes, col)
+ }
+ return m.equalIndexes(one, other, indexes...)
+}
+
+// NewCondition returns a ovsdb.Condition based on the model
+func (m Mapper) NewCondition(data *Info, field interface{}, function ovsdb.ConditionFunction, value interface{}) (*ovsdb.Condition, error) {
+ column, err := data.ColumnByPtr(field)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that the condition is valid
+ columnSchema := data.Metadata.TableSchema.Column(column)
+ if columnSchema == nil {
+ return nil, fmt.Errorf("column %s not found", column)
+ }
+ if err := ovsdb.ValidateCondition(columnSchema, function, value); err != nil {
+ return nil, err
+ }
+
+ ovsValue, err := ovsdb.NativeToOvs(columnSchema, value)
+ if err != nil {
+ return nil, err
+ }
+
+ ovsdbCondition := ovsdb.NewCondition(column, function, ovsValue)
+
+ return &ovsdbCondition, nil
+
+}
+
+// NewMutation creates a RFC7047 mutation object based on an ORM object and the mutation fields (in native format)
+// It takes care of field validation against the column type
+func (m Mapper) NewMutation(data *Info, column string, mutator ovsdb.Mutator, value interface{}) (*ovsdb.Mutation, error) {
+ // Check the column exists in the object
+ if !data.hasColumn(column) {
+ return nil, fmt.Errorf("mutation contains column %s that does not exist in object %v", column, data)
+ }
+ // Check that the mutation is valid
+ columnSchema := data.Metadata.TableSchema.Column(column)
+ if columnSchema == nil {
+ return nil, fmt.Errorf("column %s not found", column)
+ }
+ if err := ovsdb.ValidateMutation(columnSchema, mutator, value); err != nil {
+ return nil, err
+ }
+
+ var ovsValue interface{}
+ var err error
+ // Usually a mutation value is of the same type of the value being mutated
+ // except for delete mutation of maps where it can also be a list of same type of
+ // keys (rfc7047 5.1). Handle this special case here.
+ if mutator == "delete" && columnSchema.Type == ovsdb.TypeMap && reflect.TypeOf(value).Kind() != reflect.Map {
+ // It's OK to cast the value to a list of elements because validation has passed
+ ovsSet, err := ovsdb.NewOvsSet(value)
+ if err != nil {
+ return nil, err
+ }
+ ovsValue = ovsSet
+ } else {
+ ovsValue, err = ovsdb.NativeToOvs(columnSchema, value)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &ovsdb.Mutation{Column: column, Mutator: mutator, Value: ovsValue}, nil
+}
+
+// equalIndexes returns whether both models are equal from the DB point of view
+// Two objects are considered equal if any of the following conditions is true
+// They have a field tagged with column name '_uuid' and their values match
+// For any of the indexes defined in the Table Schema, the values of all of its columns are simultaneously equal
+// (as per RFC7047)
+// The values of all of the optional indexes passed as variadic parameter to this function are equal.
+func (m Mapper) equalIndexes(one, other *Info, indexes ...string) (bool, error) {
+ match := false
+
+ oneIndexes, err := one.getValidIndexes()
+ if err != nil {
+ return false, err
+ }
+
+ otherIndexes, err := other.getValidIndexes()
+ if err != nil {
+ return false, err
+ }
+
+ oneIndexes = append(oneIndexes, indexes)
+ otherIndexes = append(otherIndexes, indexes)
+
+ for _, lidx := range oneIndexes {
+ for _, ridx := range otherIndexes {
+ if reflect.DeepEqual(ridx, lidx) {
+ // All columns in an index must be simultaneously equal
+ for _, col := range lidx {
+ if !one.hasColumn(col) || !other.hasColumn(col) {
+ break
+ }
+ lfield, err := one.FieldByColumn(col)
+ if err != nil {
+ return false, err
+ }
+ rfield, err := other.FieldByColumn(col)
+ if err != nil {
+ return false, err
+ }
+ if reflect.DeepEqual(lfield, rfield) {
+ match = true
+ } else {
+ match = false
+ break
+ }
+ }
+ if match {
+ return true, nil
+ }
+ }
+ }
+ }
+ return false, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/model/client.go b/vendor/github.com/ovn-org/libovsdb/model/client.go
new file mode 100644
index 0000000000..5eb686244a
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/model/client.go
@@ -0,0 +1,171 @@
+package model
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// ColumnKey addresses a column and optionally a key within a column
+type ColumnKey struct {
+	Column string
+	Key    interface{}
+}
+
+// ClientIndex defines a client index by a set of columns
+type ClientIndex struct {
+	Columns []ColumnKey
+}
+
+// ClientDBModel contains the client information needed to build a DatabaseModel
+type ClientDBModel struct {
+	name    string
+	types   map[string]reflect.Type
+	indexes map[string][]ClientIndex
+}
+
+// newModel returns a new instance of a model from a specific string
+func (db ClientDBModel) newModel(table string) (Model, error) {
+	mtype, ok := db.types[table]
+	if !ok {
+		return nil, fmt.Errorf("table %s not found in database model", string(table))
+	}
+	// Allocate a fresh zero value of the model's underlying struct type.
+	model := reflect.New(mtype.Elem())
+	return model.Interface().(Model), nil
+}
+
+// Name returns the database name
+func (db ClientDBModel) Name() string {
+	return db.name
+}
+
+// Indexes returns a deep copy of the client indexes defined for a table, or
+// nil when the table has none. A copy is returned so callers cannot mutate
+// the model's internal index definitions.
+func (db ClientDBModel) Indexes(table string) []ClientIndex {
+	indexSets, ok := db.indexes[table]
+	if !ok {
+		return nil
+	}
+	// Copy only the requested table's indexes instead of deep-copying the
+	// whole map just to pick one entry out of the result.
+	copied := make([]ClientIndex, 0, len(indexSets))
+	for _, indexSet := range indexSets {
+		indexSetCopy := ClientIndex{
+			Columns: make([]ColumnKey, len(indexSet.Columns)),
+		}
+		copy(indexSetCopy.Columns, indexSet.Columns)
+		copied = append(copied, indexSetCopy)
+	}
+	return copied
+}
+
+// SetIndexes sets the client indexes. Client indexes are optional, similar to
+// schema indexes and are only tracked in the specific client instances that are
+// provided with this client model. A client index may point to multiple models
+// as uniqueness is not enforced. They are defined per table and multiple
+// indexes can be defined for a table. Each index consists of a set of columns.
+// If the column is a map, specific keys of that map can be addressed for the
+// index.
+func (db *ClientDBModel) SetIndexes(indexes map[string][]ClientIndex) {
+	// Deep-copy so later mutations by the caller do not affect the model.
+	db.indexes = copyIndexes(indexes)
+}
+
+// validate validates the ClientDBModel against the input schema
+// Returns all the errors detected
+func (db ClientDBModel) validate(schema ovsdb.DatabaseSchema) []error {
+	var errors []error
+	if db.name != schema.Name {
+		errors = append(errors, fmt.Errorf("database model name (%s) does not match schema (%s)",
+			db.name, schema.Name))
+	}
+
+	// First pass: check every model maps cleanly onto its table schema.
+	infos := make(map[string]*mapper.Info, len(db.types))
+	for tableName := range db.types {
+		tableSchema := schema.Table(tableName)
+		if tableSchema == nil {
+			errors = append(errors, fmt.Errorf("database model contains a model for table %s that does not exist in schema", tableName))
+			continue
+		}
+		model, err := db.newModel(tableName)
+		if err != nil {
+			errors = append(errors, err)
+			continue
+		}
+		info, err := mapper.NewInfo(tableName, tableSchema, model)
+		if err != nil {
+			errors = append(errors, err)
+			continue
+		}
+		infos[tableName] = info
+	}
+
+	// Second pass: check every client index addresses an existing column,
+	// and that key-addressed index columns are maps.
+	for tableName, indexSets := range db.indexes {
+		info, ok := infos[tableName]
+		if !ok {
+			errors = append(errors, fmt.Errorf("database model contains a client index for table %s that does not exist in schema", tableName))
+			continue
+		}
+		for _, indexSet := range indexSets {
+			for _, indexColumn := range indexSet.Columns {
+				f, err := info.FieldByColumn(indexColumn.Column)
+				if err != nil {
+					errors = append(
+						errors,
+						fmt.Errorf("database model contains a client index for column %s that does not exist in table %s",
+							indexColumn.Column,
+							tableName))
+					continue
+				}
+				if indexColumn.Key != nil && reflect.ValueOf(f).Kind() != reflect.Map {
+					errors = append(
+						errors,
+						fmt.Errorf("database model contains a client index for key %s in column %s of table %s that is not a map",
+							indexColumn.Key,
+							indexColumn.Column,
+							tableName))
+					continue
+				}
+			}
+		}
+	}
+	return errors
+}
+
+// NewClientDBModel constructs a ClientDBModel based on a database name and dictionary of models indexed by table name
+func NewClientDBModel(name string, models map[string]Model) (ClientDBModel, error) {
+	types := make(map[string]reflect.Type, len(models))
+	for table, model := range models {
+		modelType := reflect.TypeOf(model)
+		if modelType.Kind() != reflect.Ptr || modelType.Elem().Kind() != reflect.Struct {
+			return ClientDBModel{}, fmt.Errorf("model is expected to be a pointer to struct")
+		}
+		// Every model must expose a string field tagged `ovsdb:"_uuid"`.
+		hasUUID := false
+		for i := 0; i < modelType.Elem().NumField(); i++ {
+			if field := modelType.Elem().Field(i); field.Tag.Get("ovsdb") == "_uuid" &&
+				field.Type.Kind() == reflect.String {
+				hasUUID = true
+				break
+			}
+		}
+		if !hasUUID {
+			return ClientDBModel{}, fmt.Errorf("model is expected to have a string field called uuid")
+		}
+
+		types[table] = modelType
+	}
+	return ClientDBModel{
+		types: types,
+		name:  name,
+	}, nil
+}
+
+// copyIndexes returns a deep copy of the provided client index definitions,
+// or nil when there is nothing to copy.
+func copyIndexes(src map[string][]ClientIndex) map[string][]ClientIndex {
+	if len(src) == 0 {
+		return nil
+	}
+	dst := make(map[string][]ClientIndex, len(src))
+	for table, indexSets := range src {
+		copies := make([]ClientIndex, 0, len(indexSets))
+		for _, indexSet := range indexSets {
+			columns := make([]ColumnKey, len(indexSet.Columns))
+			copy(columns, indexSet.Columns)
+			copies = append(copies, ClientIndex{Columns: columns})
+		}
+		dst[table] = copies
+	}
+	return dst
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/model/database.go b/vendor/github.com/ovn-org/libovsdb/model/database.go
new file mode 100644
index 0000000000..0857d903f3
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/model/database.go
@@ -0,0 +1,118 @@
+package model
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// A DatabaseModel represents libovsdb's metadata about the database.
+// It's the result of combining the client's ClientDBModel and the server's Schema
+type DatabaseModel struct {
+	client   ClientDBModel
+	Schema   ovsdb.DatabaseSchema
+	Mapper   mapper.Mapper
+	metadata map[reflect.Type]mapper.Metadata
+}
+
+// NewDatabaseModel returns a new DatabaseModel
+func NewDatabaseModel(schema ovsdb.DatabaseSchema, client ClientDBModel) (DatabaseModel, []error) {
+	dbModel := &DatabaseModel{
+		Schema: schema,
+		client: client,
+	}
+	// Validate the client model against the schema before deriving metadata;
+	// all validation errors are reported as a batch.
+	errs := client.validate(schema)
+	if len(errs) > 0 {
+		return DatabaseModel{}, errs
+	}
+	dbModel.Mapper = mapper.NewMapper(schema)
+	var metadata map[reflect.Type]mapper.Metadata
+	metadata, errs = generateModelInfo(schema, client.types)
+	if len(errs) > 0 {
+		return DatabaseModel{}, errs
+	}
+	dbModel.metadata = metadata
+	return *dbModel, nil
+}
+
+// NewPartialDatabaseModel returns a DatabaseModel that does not have a schema yet
+func NewPartialDatabaseModel(client ClientDBModel) DatabaseModel {
+	return DatabaseModel{
+		client: client,
+	}
+}
+
+// Valid returns whether the DatabaseModel is fully functional
+func (db DatabaseModel) Valid() bool {
+	// A zero-valued schema means only the partial (client-only) model is set.
+	return !reflect.DeepEqual(db.Schema, ovsdb.DatabaseSchema{})
+}
+
+// Client returns the DatabaseModel's client dbModel
+func (db DatabaseModel) Client() ClientDBModel {
+	return db.client
+}
+
+// NewModel returns a new instance of a model from a specific string
+func (db DatabaseModel) NewModel(table string) (Model, error) {
+	// Delegate to the ClientDBModel implementation (same package) instead of
+	// duplicating the reflection logic and error message here.
+	return db.client.newModel(table)
+}
+
+// Types returns the DatabaseModel Types
+// the DatabaseModel types is a map of reflect.Types indexed by string
+// The reflect.Type is a pointer to a struct that contains 'ovs' tags
+// as described above. Such pointer to struct also implements the Model interface
+func (db DatabaseModel) Types() map[string]reflect.Type {
+	return db.client.types
+}
+
+// FindTable returns the string associated with a reflect.Type or ""
+func (db DatabaseModel) FindTable(mType reflect.Type) string {
+	for table, tType := range db.client.types {
+		if tType == mType {
+			return table
+		}
+	}
+	return ""
+}
+
+// generateModelInfo creates metadata objects from all models included in the
+// database and caches them for future re-use
+func generateModelInfo(dbSchema ovsdb.DatabaseSchema, modelTypes map[string]reflect.Type) (map[reflect.Type]mapper.Metadata, []error) {
+	errors := []error{}
+	metadata := make(map[reflect.Type]mapper.Metadata, len(modelTypes))
+	for tableName, tType := range modelTypes {
+		tableSchema := dbSchema.Table(tableName)
+		if tableSchema == nil {
+			errors = append(errors, fmt.Errorf("database Model contains model for table %s which is not present in schema", tableName))
+			continue
+		}
+
+		// Build a throwaway instance only to derive the table's field metadata.
+		obj := reflect.New(tType.Elem()).Interface().(Model)
+		info, err := mapper.NewInfo(tableName, tableSchema, obj)
+		if err != nil {
+			errors = append(errors, err)
+			continue
+		}
+		metadata[tType] = info.Metadata
+	}
+	return metadata, errors
+}
+
+// NewModelInfo returns a mapper.Info object based on a provided model
+func (db DatabaseModel) NewModelInfo(obj interface{}) (*mapper.Info, error) {
+	meta, ok := db.metadata[reflect.TypeOf(obj)]
+	if !ok {
+		return nil, ovsdb.NewErrWrongType("NewModelInfo", "type that is part of the DatabaseModel", obj)
+	}
+	return &mapper.Info{
+		Obj:      obj,
+		Metadata: meta,
+	}, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/model/model.go b/vendor/github.com/ovn-org/libovsdb/model/model.go
new file mode 100644
index 0000000000..c8575f5bf3
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/model/model.go
@@ -0,0 +1,130 @@
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// A Model is the base interface used to build Database Models. It is used
+// to express how data from a specific Database Table shall be translated into structs
+// A Model is a struct with at least one (most likely more) field tagged with the 'ovs' tag
+// The value of 'ovs' field must be a valid column name in the OVS Database
+// A field associated with the "_uuid" column mandatory. The rest of the columns are optional
+// The struct may also have non-tagged fields (which will be ignored by the API calls)
+// The Model interface must be implemented by the pointer to such type
+// Example:
+//type MyLogicalRouter struct {
+//	UUID string `ovsdb:"_uuid"`
+//	Name string `ovsdb:"name"`
+//	ExternalIDs map[string]string `ovsdb:"external_ids"`
+//	LoadBalancers []string `ovsdb:"load_balancer"`
+//}
+type Model interface{}
+
+// CloneableModel is implemented by models that provide their own deep-copy
+// logic; Clone and CloneInto prefer it over the JSON round-trip fallback.
+type CloneableModel interface {
+	CloneModel() Model
+	CloneModelInto(Model)
+}
+
+// ComparableModel is implemented by models that define their own equality;
+// Equal prefers it over reflect.DeepEqual.
+type ComparableModel interface {
+	EqualsModel(Model) bool
+}
+
+// Clone creates a deep copy of a model
+func Clone(a Model) Model {
+	if cloner, ok := a.(CloneableModel); ok {
+		return cloner.CloneModel()
+	}
+
+	// Fallback: deep copy through a JSON round-trip.
+	// NOTE(review): marshal/unmarshal errors are ignored, so fields that do
+	// not survive JSON serialization are silently dropped — confirm all
+	// models are JSON round-trippable.
+	val := reflect.Indirect(reflect.ValueOf(a))
+	b := reflect.New(val.Type()).Interface()
+	aBytes, _ := json.Marshal(a)
+	_ = json.Unmarshal(aBytes, b)
+	return b
+}
+
+// CloneInto deep copies a model into another one
+func CloneInto(src, dst Model) {
+	if cloner, ok := src.(CloneableModel); ok {
+		cloner.CloneModelInto(dst)
+		return
+	}
+
+	// Fallback: JSON round-trip copy; errors intentionally ignored (see Clone).
+	aBytes, _ := json.Marshal(src)
+	_ = json.Unmarshal(aBytes, dst)
+}
+
+// Equal returns whether both models are equal, delegating to the model's own
+// EqualsModel implementation when available.
+func Equal(l, r Model) bool {
+	if comparator, ok := l.(ComparableModel); ok {
+		return comparator.EqualsModel(r)
+	}
+
+	return reflect.DeepEqual(l, r)
+}
+
+// modelSetUUID sets the model's field tagged `ovsdb:"_uuid"` to uuid.
+func modelSetUUID(model Model, uuid string) error {
+	modelVal := reflect.ValueOf(model).Elem()
+	for i := 0; i < modelVal.NumField(); i++ {
+		if field := modelVal.Type().Field(i); field.Tag.Get("ovsdb") == "_uuid" &&
+			field.Type.Kind() == reflect.String {
+			modelVal.Field(i).Set(reflect.ValueOf(uuid))
+			return nil
+		}
+	}
+	return fmt.Errorf("model is expected to have a string field mapped to column _uuid")
+}
+
+// Condition is a model-based representation of an OVSDB Condition
+type Condition struct {
+	// Pointer to the field of the model where the operation applies
+	Field interface{}
+	// Condition function
+	Function ovsdb.ConditionFunction
+	// Value to use in the condition
+	Value interface{}
+}
+
+// Mutation is a model-based representation of an OVSDB Mutation
+type Mutation struct {
+	// Pointer to the field of the model that shall be mutated
+	Field interface{}
+	// String representing the mutator (as per RFC7047)
+	Mutator ovsdb.Mutator
+	// Value to use in the mutation
+	Value interface{}
+}
+
+// CreateModel creates a new Model instance based on an OVSDB Row information
+func CreateModel(dbModel DatabaseModel, tableName string, row *ovsdb.Row, uuid string) (Model, error) {
+	if !dbModel.Valid() {
+		return nil, fmt.Errorf("database model not valid")
+	}
+
+	table := dbModel.Schema.Table(tableName)
+	if table == nil {
+		return nil, fmt.Errorf("table %s not found", tableName)
+	}
+	model, err := dbModel.NewModel(tableName)
+	if err != nil {
+		return nil, err
+	}
+	info, err := dbModel.NewModelInfo(model)
+	if err != nil {
+		return nil, err
+	}
+	// Populate the model's tagged fields from the OVSDB row.
+	err = dbModel.Mapper.GetRowData(row, info)
+	if err != nil {
+		return nil, err
+	}
+
+	// An empty uuid leaves the _uuid field untouched.
+	if uuid != "" {
+		if err := info.SetField("_uuid", uuid); err != nil {
+			return nil, err
+		}
+	}
+
+	return model, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go
new file mode 100644
index 0000000000..aebe2c2d0a
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go
@@ -0,0 +1,427 @@
+package ovsdb
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Native Go types used to hold each OVSDB atomic type.
+var (
+	intType  = reflect.TypeOf(0)
+	realType = reflect.TypeOf(0.0)
+	boolType = reflect.TypeOf(true)
+	strType  = reflect.TypeOf("")
+)
+
+// ErrWrongType describes typing error
+type ErrWrongType struct {
+	from     string
+	expected string
+	got      interface{}
+}
+
+// Error implements the error interface.
+func (e *ErrWrongType) Error() string {
+	return fmt.Sprintf("Wrong Type (%s): expected %s but got %+v (%s)",
+		e.from, e.expected, e.got, reflect.TypeOf(e.got))
+}
+
+// NewErrWrongType creates a new ErrWrongType
+func NewErrWrongType(from, expected string, got interface{}) error {
+	return &ErrWrongType{
+		from:     from,
+		expected: expected,
+		got:      got,
+	}
+}
+
+// NativeTypeFromAtomic returns the native type that can hold a value of an
+// AtomicType. Panics on an unknown atomic type, which indicates a programming
+// error (callers pass schema-validated types).
+func NativeTypeFromAtomic(basicType string) reflect.Type {
+	switch basicType {
+	case TypeInteger:
+		return intType
+	case TypeReal:
+		return realType
+	case TypeBoolean:
+		return boolType
+	case TypeString:
+		return strType
+	case TypeUUID:
+		return strType
+	default:
+		// The %s placeholder was previously part of a plain string literal
+		// ("Unknown basic type %s basicType") and never interpolated; format
+		// the offending type name into the panic message instead.
+		panic(fmt.Sprintf("unknown basic type %s", basicType))
+	}
+}
+
+// NativeType returns the reflect.Type that can hold the value of a column
+// OVS Type to Native Type convertions:
+//
+// OVS sets -> go slices or a go native type depending on the key
+// OVS uuid -> go strings
+// OVS map -> go map
+// OVS enum -> go native type depending on the type of the enum key
+func NativeType(column *ColumnSchema) reflect.Type {
+	switch column.Type {
+	case TypeInteger, TypeReal, TypeBoolean, TypeUUID, TypeString:
+		return NativeTypeFromAtomic(column.Type)
+	case TypeEnum:
+		return NativeTypeFromAtomic(column.TypeObj.Key.Type)
+	case TypeMap:
+		keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type)
+		valueType := NativeTypeFromAtomic(column.TypeObj.Value.Type)
+		return reflect.MapOf(keyType, valueType)
+	case TypeSet:
+		keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type)
+		// optional type (min 0, max 1) is represented as a pointer
+		if column.TypeObj.Min() == 0 && column.TypeObj.Max() == 1 {
+			return reflect.PtrTo(keyType)
+		}
+		// non-optional type with max 1
+		if column.TypeObj.Min() == 1 && column.TypeObj.Max() == 1 {
+			return keyType
+		}
+		return reflect.SliceOf(keyType)
+	default:
+		panic(fmt.Errorf("unknown extended type %s", column.Type))
+	}
+}
+
+// OvsToNativeAtomic returns the native type of the basic ovs type
+func OvsToNativeAtomic(basicType string, ovsElem interface{}) (interface{}, error) {
+	switch basicType {
+	case TypeReal, TypeString, TypeBoolean:
+		naType := NativeTypeFromAtomic(basicType)
+		if reflect.TypeOf(ovsElem) != naType {
+			return nil, NewErrWrongType("OvsToNativeAtomic", naType.String(), ovsElem)
+		}
+		return ovsElem, nil
+	case TypeInteger:
+		naType := NativeTypeFromAtomic(basicType)
+		// Default decoding of numbers is float64, convert them to int
+		if !reflect.TypeOf(ovsElem).ConvertibleTo(naType) {
+			return nil, NewErrWrongType("OvsToNativeAtomic", fmt.Sprintf("Convertible to %s", naType), ovsElem)
+		}
+		return reflect.ValueOf(ovsElem).Convert(naType).Interface(), nil
+	case TypeUUID:
+		uuid, ok := ovsElem.(UUID)
+		if !ok {
+			return nil, NewErrWrongType("OvsToNativeAtomic", "UUID", ovsElem)
+		}
+		return uuid.GoUUID, nil
+	default:
+		panic(fmt.Errorf("unknown atomic type %s", basicType))
+	}
+}
+
+// OvsToNativeSlice converts an OVS set value (or a bare atom, treated as a
+// single-element set) into a native slice of the given base type.
+func OvsToNativeSlice(baseType string, ovsElem interface{}) (interface{}, error) {
+	naType := NativeTypeFromAtomic(baseType)
+	var nativeSet reflect.Value
+	switch ovsSet := ovsElem.(type) {
+	case OvsSet:
+		nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, len(ovsSet.GoSet))
+		for _, v := range ovsSet.GoSet {
+			nv, err := OvsToNativeAtomic(baseType, v)
+			if err != nil {
+				return nil, err
+			}
+			nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv))
+		}
+
+	default:
+		// A bare atom becomes a single-element slice.
+		nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, 1)
+		nv, err := OvsToNativeAtomic(baseType, ovsElem)
+		if err != nil {
+			return nil, err
+		}
+
+		nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv))
+	}
+	return nativeSet.Interface(), nil
+}
+
+// OvsToNative transforms an ovs type to native one based on the column type information
+func OvsToNative(column *ColumnSchema, ovsElem interface{}) (interface{}, error) {
+	switch column.Type {
+	case TypeReal, TypeString, TypeBoolean, TypeInteger, TypeUUID:
+		return OvsToNativeAtomic(column.Type, ovsElem)
+	case TypeEnum:
+		return OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem)
+	case TypeSet:
+		naType := NativeType(column)
+		// The inner slice is []interface{}
+		// We need to convert it to the real type of the slice
+		switch naType.Kind() {
+		case reflect.Ptr:
+			// Optional column (min 0, max 1): represented as a pointer.
+			switch ovsSet := ovsElem.(type) {
+			case OvsSet:
+				if len(ovsSet.GoSet) > 1 {
+					return nil, fmt.Errorf("expected a slice of len =< 1, but got a slice with %d elements", len(ovsSet.GoSet))
+				}
+				if len(ovsSet.GoSet) == 0 {
+					// Empty set -> nil pointer.
+					return reflect.Zero(naType).Interface(), nil
+				}
+				native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsSet.GoSet[0])
+				if err != nil {
+					return nil, err
+				}
+				pv := reflect.New(naType.Elem())
+				pv.Elem().Set(reflect.ValueOf(native))
+				return pv.Interface(), nil
+			default:
+				// Bare atom -> pointer to the converted value.
+				native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem)
+				if err != nil {
+					return nil, err
+				}
+				pv := reflect.New(naType.Elem())
+				pv.Elem().Set(reflect.ValueOf(native))
+				return pv.Interface(), nil
+			}
+		case reflect.Slice:
+			return OvsToNativeSlice(column.TypeObj.Key.Type, ovsElem)
+		default:
+			// NOTE(review): %d prints the numeric value of reflect.Kind; %s
+			// would print its name — confirm which is intended here.
+			return nil, fmt.Errorf("native type was not slice or pointer. got %d", naType.Kind())
+		}
+	case TypeMap:
+		naType := NativeType(column)
+		ovsMap, ok := ovsElem.(OvsMap)
+		if !ok {
+			return nil, NewErrWrongType("OvsToNative", "OvsMap", ovsElem)
+		}
+		// The inner map is map[interface{}]interface{}
+		// We need to convert it to the real type of the map
+		nativeMap := reflect.MakeMapWithSize(naType, len(ovsMap.GoMap))
+		for k, v := range ovsMap.GoMap {
+			nk, err := OvsToNativeAtomic(column.TypeObj.Key.Type, k)
+			if err != nil {
+				return nil, err
+			}
+			nv, err := OvsToNativeAtomic(column.TypeObj.Value.Type, v)
+			if err != nil {
+				return nil, err
+			}
+			nativeMap.SetMapIndex(reflect.ValueOf(nk), reflect.ValueOf(nv))
+		}
+		return nativeMap.Interface(), nil
+	default:
+		panic(fmt.Sprintf("Unknown Type: %v", column.Type))
+	}
+}
+
+// NativeToOvsAtomic returns the OVS type of the atomic native value
+func NativeToOvsAtomic(basicType string, nativeElem interface{}) (interface{}, error) {
+	naType := NativeTypeFromAtomic(basicType)
+	if reflect.TypeOf(nativeElem) != naType {
+		return nil, NewErrWrongType("NativeToOvsAtomic", naType.String(), nativeElem)
+	}
+	switch basicType {
+	case TypeUUID:
+		// UUID strings are wrapped in the UUID notation type.
+		return UUID{GoUUID: nativeElem.(string)}, nil
+	default:
+		return nativeElem, nil
+	}
+}
+
+// NativeToOvs transforms an native type to a ovs type based on the column type information
+func NativeToOvs(column *ColumnSchema, rawElem interface{}) (interface{}, error) {
+	naType := NativeType(column)
+	if t := reflect.TypeOf(rawElem); t != naType {
+		return nil, NewErrWrongType("NativeToOvs", naType.String(), rawElem)
+	}
+
+	switch column.Type {
+	case TypeInteger, TypeReal, TypeString, TypeBoolean, TypeEnum:
+		return rawElem, nil
+	case TypeUUID:
+		return UUID{GoUUID: rawElem.(string)}, nil
+	case TypeSet:
+		var ovsSet OvsSet
+		if column.TypeObj.Key.Type == TypeUUID {
+			// UUID sets need each element wrapped in the UUID notation type.
+			// A *string covers the optional (min 0, max 1) representation.
+			ovsSlice := []interface{}{}
+			if _, ok := rawElem.([]string); ok {
+				for _, v := range rawElem.([]string) {
+					uuid := UUID{GoUUID: v}
+					ovsSlice = append(ovsSlice, uuid)
+				}
+			} else if _, ok := rawElem.(*string); ok {
+				v := rawElem.(*string)
+				if v != nil {
+					uuid := UUID{GoUUID: *v}
+					ovsSlice = append(ovsSlice, uuid)
+				}
+			} else {
+				return nil, fmt.Errorf("uuid slice was neither []string or *string")
+			}
+			ovsSet = OvsSet{GoSet: ovsSlice}
+
+		} else {
+			var err error
+			ovsSet, err = NewOvsSet(rawElem)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return ovsSet, nil
+	case TypeMap:
+		nativeMapVal := reflect.ValueOf(rawElem)
+		ovsMap := make(map[interface{}]interface{}, nativeMapVal.Len())
+		for _, key := range nativeMapVal.MapKeys() {
+			ovsKey, err := NativeToOvsAtomic(column.TypeObj.Key.Type, key.Interface())
+			if err != nil {
+				return nil, err
+			}
+			ovsVal, err := NativeToOvsAtomic(column.TypeObj.Value.Type, nativeMapVal.MapIndex(key).Interface())
+			if err != nil {
+				return nil, err
+			}
+			ovsMap[ovsKey] = ovsVal
+		}
+		return OvsMap{GoMap: ovsMap}, nil
+
+	default:
+		panic(fmt.Sprintf("Unknown Type: %v", column.Type))
+	}
+}
+
+// IsDefaultValue checks if a provided native element corresponds to the default value of its
+// designated column type
+func IsDefaultValue(column *ColumnSchema, nativeElem interface{}) bool {
+	switch column.Type {
+	case TypeEnum:
+		return isDefaultBaseValue(nativeElem, column.TypeObj.Key.Type)
+	default:
+		return isDefaultBaseValue(nativeElem, column.Type)
+	}
+}
+
+// validateMutationAtomic checks if the mutation is valid for a specific AtomicType
+func validateMutationAtomic(atype string, mutator Mutator, value interface{}) error {
+	nType := NativeTypeFromAtomic(atype)
+	if reflect.TypeOf(value) != nType {
+		return NewErrWrongType(fmt.Sprintf("Mutation of atomic type %s", atype), nType.String(), value)
+	}
+
+	switch atype {
+	case TypeUUID, TypeString, TypeBoolean:
+		return fmt.Errorf("atomictype %s does not support mutation", atype)
+	case TypeReal:
+		// Reals support all arithmetic mutators except modulo.
+		switch mutator {
+		case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide:
+			return nil
+		default:
+			return fmt.Errorf("wrong mutator for real type %s", mutator)
+		}
+	case TypeInteger:
+		switch mutator {
+		case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo:
+			return nil
+		default:
+			return fmt.Errorf("wrong mutator for integer type: %s", mutator)
+		}
+	default:
+		panic("Unsupported Atomic Type")
+	}
+}
+
+// ValidateMutation checks if the mutation value and mutator string area appropriate
+// for a given column based on the rules specified RFC7047
+func ValidateMutation(column *ColumnSchema, mutator Mutator, value interface{}) error {
+	if !column.Mutable() {
+		return fmt.Errorf("column is not mutable")
+	}
+	switch column.Type {
+	case TypeSet:
+		switch mutator {
+		case MutateOperationInsert, MutateOperationDelete:
+			// RFC7047 says a <set> may be an <atom> with a single
+			// element. Check if we can store this value in our column
+			if reflect.TypeOf(value).Kind() != reflect.Slice {
+				if NativeType(column) != reflect.SliceOf(reflect.TypeOf(value)) {
+					return NewErrWrongType(fmt.Sprintf("Mutation %s of single value in to column %s", mutator, column),
+						NativeType(column).String(), reflect.SliceOf(reflect.TypeOf(value)).String())
+				}
+				return nil
+			}
+			if NativeType(column) != reflect.TypeOf(value) {
+				return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column),
+					NativeType(column).String(), value)
+			}
+			return nil
+		default:
+			// Arithmetic mutators apply element-wise to the set's key type.
+			return validateMutationAtomic(column.TypeObj.Key.Type, mutator, value)
+		}
+	case TypeMap:
+		switch mutator {
+		case MutateOperationInsert:
+			// Value must be a map of the same kind
+			if reflect.TypeOf(value) != NativeType(column) {
+				return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column),
+					NativeType(column).String(), value)
+			}
+			return nil
+		case MutateOperationDelete:
+			// Value must be a map of the same kind or a set of keys to delete
+			if reflect.TypeOf(value) != NativeType(column) &&
+				reflect.TypeOf(value) != reflect.SliceOf(NativeTypeFromAtomic(column.TypeObj.Key.Type)) {
+				return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column),
+					"compatible map type", value)
+			}
+			return nil
+
+		default:
+			return fmt.Errorf("wrong mutator for map type: %s", mutator)
+		}
+	case TypeEnum:
+		// RFC does not clarify what to do with enums.
+		return fmt.Errorf("enums do not support mutation")
+	default:
+		return validateMutationAtomic(column.Type, mutator, value)
+	}
+}
+
+// ValidateCondition checks that the condition function and the native value
+// are appropriate for the given column as per RFC7047.
+func ValidateCondition(column *ColumnSchema, function ConditionFunction, nativeValue interface{}) error {
+	if NativeType(column) != reflect.TypeOf(nativeValue) {
+		return NewErrWrongType(fmt.Sprintf("Condition for column %s", column),
+			NativeType(column).String(), nativeValue)
+	}
+
+	switch column.Type {
+	case TypeSet, TypeMap, TypeBoolean, TypeString, TypeUUID:
+		// Only equality and inclusion functions apply to non-numeric types.
+		switch function {
+		case ConditionEqual, ConditionNotEqual, ConditionIncludes, ConditionExcludes:
+			return nil
+		default:
+			return fmt.Errorf("wrong condition function %s for type: %s", function, column.Type)
+		}
+	case TypeInteger, TypeReal:
+		// All functions are valid
+		return nil
+	default:
+		panic("Unsupported Type")
+	}
+}
+
+// isDefaultBaseValue reports whether elem is the default value for the given
+// extended type (zero, empty, or the all-zeros UUID).
+func isDefaultBaseValue(elem interface{}, etype ExtendedType) bool {
+	value := reflect.ValueOf(elem)
+	if !value.IsValid() {
+		return true
+	}
+	// Optional (pointer) values default to nil.
+	if reflect.TypeOf(elem).Kind() == reflect.Ptr {
+		return reflect.ValueOf(elem).IsZero()
+	}
+	switch etype {
+	case TypeUUID:
+		return elem.(string) == "00000000-0000-0000-0000-000000000000" || elem.(string) == ""
+	case TypeMap, TypeSet:
+		if value.Kind() == reflect.Array {
+			return value.Len() == 0
+		}
+		return value.IsNil() || value.Len() == 0
+	case TypeString:
+		return elem.(string) == ""
+	case TypeInteger:
+		return elem.(int) == 0
+	case TypeReal:
+		return elem.(float64) == 0
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go
new file mode 100644
index 0000000000..783ac0f554
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go
@@ -0,0 +1,223 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// ConditionFunction represents an OVSDB condition function (RFC 7047: 5.1).
+type ConditionFunction string
+// WaitCondition represents the function of a "wait" operation condition.
+type WaitCondition string
+
+const (
+	// ConditionLessThan is the less than condition
+	ConditionLessThan ConditionFunction = "<"
+	// ConditionLessThanOrEqual is the less than or equal condition
+	ConditionLessThanOrEqual ConditionFunction = "<="
+	// ConditionEqual is the equal condition
+	ConditionEqual ConditionFunction = "=="
+	// ConditionNotEqual is the not equal condition
+	ConditionNotEqual ConditionFunction = "!="
+	// ConditionGreaterThan is the greater than condition
+	ConditionGreaterThan ConditionFunction = ">"
+	// ConditionGreaterThanOrEqual is the greater than or equal condition
+	ConditionGreaterThanOrEqual ConditionFunction = ">="
+	// ConditionIncludes is the includes condition
+	ConditionIncludes ConditionFunction = "includes"
+	// ConditionExcludes is the excludes condition
+	ConditionExcludes ConditionFunction = "excludes"
+
+	// WaitConditionEqual is the equal condition
+	WaitConditionEqual WaitCondition = "=="
+	// WaitConditionNotEqual is the not equal condition
+	WaitConditionNotEqual WaitCondition = "!="
+)
+
+// Condition is described in RFC 7047: 5.1
+type Condition struct {
+	Column   string
+	Function ConditionFunction
+	Value    interface{}
+}
+
+// String implements fmt.Stringer for logging/debugging.
+func (c Condition) String() string {
+	return fmt.Sprintf("where column %s %s %v", c.Column, c.Function, c.Value)
+}
+
+// NewCondition returns a new condition
+func NewCondition(column string, function ConditionFunction, value interface{}) Condition {
+	return Condition{
+		Column:   column,
+		Function: function,
+		Value:    value,
+	}
+}
+
+// MarshalJSON marshals a condition to a 3 element JSON array
+func (c Condition) MarshalJSON() ([]byte, error) {
+	v := []interface{}{c.Column, c.Function, c.Value}
+	return json.Marshal(v)
+}
+
+// UnmarshalJSON converts a 3 element JSON array to a Condition
+func (c *Condition) UnmarshalJSON(b []byte) error {
+	var v []interface{}
+	err := json.Unmarshal(b, &v)
+	if err != nil {
+		return err
+	}
+	if len(v) != 3 {
+		return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v))
+	}
+	c.Column = v[0].(string)
+	// Reject any function string not defined by RFC 7047.
+	function := ConditionFunction(v[1].(string))
+	switch function {
+	case ConditionEqual,
+		ConditionNotEqual,
+		ConditionIncludes,
+		ConditionExcludes,
+		ConditionGreaterThan,
+		ConditionGreaterThanOrEqual,
+		ConditionLessThan,
+		ConditionLessThanOrEqual:
+		c.Function = function
+	default:
+		return fmt.Errorf("%s is not a valid function", function)
+	}
+	// Convert the raw JSON value into OVSDB Go notation (sets, maps, UUIDs).
+	vv, err := ovsSliceToGoNotation(v[2])
+	if err != nil {
+		return err
+	}
+	c.Value = vv
+	return nil
+}
+
+// Evaluate will evaluate the condition on the two provided values
+// The conditions operate differently depending on the type of
+// the provided values. The behavior is as described in RFC7047
+func (c ConditionFunction) Evaluate(a interface{}, b interface{}) (bool, error) {
+	x := reflect.ValueOf(a)
+	y := reflect.ValueOf(b)
+	if x.Kind() != y.Kind() {
+		return false, fmt.Errorf("comparison between %s and %s not supported", x.Kind(), y.Kind())
+	}
+	switch c {
+	case ConditionEqual:
+		return reflect.DeepEqual(a, b), nil
+	case ConditionNotEqual:
+		return !reflect.DeepEqual(a, b), nil
+	case ConditionIncludes:
+		switch x.Kind() {
+		case reflect.Slice:
+			return sliceContains(x, y), nil
+		case reflect.Map:
+			return mapContains(x, y), nil
+		case reflect.Int, reflect.Float64, reflect.Bool, reflect.String:
+			return reflect.DeepEqual(a, b), nil
+		default:
+			return false, fmt.Errorf("condition not supported on %s", x.Kind())
+		}
+	case ConditionExcludes:
+		switch x.Kind() {
+		case reflect.Slice:
+			return !sliceContains(x, y), nil
+		case reflect.Map:
+			return !mapContains(x, y), nil
+		case reflect.Int, reflect.Float64, reflect.Bool, reflect.String:
+			return !reflect.DeepEqual(a, b), nil
+		default:
+			return false, fmt.Errorf("condition not supported on %s", x.Kind())
+		}
+	case ConditionGreaterThan:
+		switch x.Kind() {
+		case reflect.Int:
+			return x.Int() > y.Int(), nil
+		case reflect.Float64:
+			return x.Float() > y.Float(), nil
+		// NOTE(review): in this and the three relational cases below, these
+		// kinds have an empty case body and fall through to the trailing
+		// "unreachable condition" error — confirm that is intended rather
+		// than a "condition not supported" error.
+		case reflect.Bool, reflect.String, reflect.Slice, reflect.Map:
+		default:
+			return false, fmt.Errorf("condition not supported on %s", x.Kind())
+		}
+	case ConditionGreaterThanOrEqual:
+		switch x.Kind() {
+		case reflect.Int:
+			return x.Int() >= y.Int(), nil
+		case reflect.Float64:
+			return x.Float() >= y.Float(), nil
+		case reflect.Bool, reflect.String, reflect.Slice, reflect.Map:
+		default:
+			return false, fmt.Errorf("condition not supported on %s", x.Kind())
+		}
+	case ConditionLessThan:
+		switch x.Kind() {
+		case reflect.Int:
+			return x.Int() < y.Int(), nil
+		case reflect.Float64:
+			return x.Float() < y.Float(), nil
+		case reflect.Bool, reflect.String, reflect.Slice, reflect.Map:
+		default:
+			return false, fmt.Errorf("condition not supported on %s", x.Kind())
+		}
+	case ConditionLessThanOrEqual:
+		switch x.Kind() {
+		case reflect.Int:
+			return x.Int() <= y.Int(), nil
+		case reflect.Float64:
+			return x.Float() <= y.Float(), nil
+		case reflect.Bool, reflect.String, reflect.Slice, reflect.Map:
+		default:
+			return false, fmt.Errorf("condition not supported on %s", x.Kind())
+		}
+	default:
+		return false, fmt.Errorf("unsupported condition function %s", c)
+	}
+	// we should never get here
+	return false, fmt.Errorf("unreachable condition")
+}
+
+// sliceContains reports whether every element of slice y is present in
+// slice x (set inclusion, as used by the "includes"/"excludes" conditions).
+func sliceContains(x, y reflect.Value) bool {
+	for i := 0; i < y.Len(); i++ {
+		found := false
+		vy := y.Index(i)
+		for j := 0; j < x.Len(); j++ {
+			vx := x.Index(j)
+			// Interface elements are compared by their dynamic values.
+			if vy.Kind() == reflect.Interface {
+				if vy.Elem() == vx.Elem() {
+					found = true
+					break
+				}
+			} else {
+				if vy.Interface() == vx.Interface() {
+					found = true
+					break
+				}
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+	return true
+}
+
+// mapContains reports whether every key/value pair of map y is present in
+// map x with an equal value.
+func mapContains(x, y reflect.Value) bool {
+	iter := y.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		vx := x.MapIndex(k)
+		if !vx.IsValid() {
+			return false
+		}
+		if v.Kind() != reflect.Interface {
+			if v.Interface() != vx.Interface() {
+				return false
+			}
+		} else {
+			if v.Elem() != vx.Elem() {
+				return false
+			}
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go
new file mode 100644
index 0000000000..4a85c541ce
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go
@@ -0,0 +1,373 @@
+package ovsdb
+
+import "fmt"
+
+const (
+ referentialIntegrityViolation = "referential integrity violation"
+ constraintViolation = "constraint violation"
+ resourcesExhausted = "resources exhausted"
+ ioError = "I/O error"
+ duplicateUUIDName = "duplicate uuid name"
+ domainError = "domain error"
+ rangeError = "range error"
+ timedOut = "timed out"
+ notSupported = "not supported"
+ aborted = "aborted"
+ notOwner = "not owner"
+)
+
+// errorFromResult returns an specific OVSDB error type from
+// an OperationResult
+func errorFromResult(op *Operation, r OperationResult) OperationError {
+ if r.Error == "" {
+ return nil
+ }
+ switch r.Error {
+ case referentialIntegrityViolation:
+ return &ReferentialIntegrityViolation{r.Details, op}
+ case constraintViolation:
+ return &ConstraintViolation{r.Details, op}
+ case resourcesExhausted:
+ return &ResourcesExhausted{r.Details, op}
+ case ioError:
+ return &IOError{r.Details, op}
+ case duplicateUUIDName:
+ return &DuplicateUUIDName{r.Details, op}
+ case domainError:
+ return &DomainError{r.Details, op}
+ case rangeError:
+ return &RangeError{r.Details, op}
+ case timedOut:
+ return &TimedOut{r.Details, op}
+ case notSupported:
+ return &NotSupported{r.Details, op}
+ case aborted:
+ return &Aborted{r.Details, op}
+ case notOwner:
+ return &NotOwner{r.Details, op}
+ default:
+ return &Error{r.Error, r.Details, op}
+ }
+}
+
+func ResultFromError(err error) OperationResult {
+ if err == nil {
+ panic("Program error: passed nil error to resultFromError")
+ }
+ switch e := err.(type) {
+ case *ReferentialIntegrityViolation:
+ return OperationResult{Error: referentialIntegrityViolation, Details: e.details}
+ case *ConstraintViolation:
+ return OperationResult{Error: constraintViolation, Details: e.details}
+ case *ResourcesExhausted:
+ return OperationResult{Error: resourcesExhausted, Details: e.details}
+ case *IOError:
+ return OperationResult{Error: ioError, Details: e.details}
+ case *DuplicateUUIDName:
+ return OperationResult{Error: duplicateUUIDName, Details: e.details}
+ case *DomainError:
+ return OperationResult{Error: domainError, Details: e.details}
+ case *RangeError:
+ return OperationResult{Error: rangeError, Details: e.details}
+ case *TimedOut:
+ return OperationResult{Error: timedOut, Details: e.details}
+ case *NotSupported:
+ return OperationResult{Error: notSupported, Details: e.details}
+ case *Aborted:
+ return OperationResult{Error: aborted, Details: e.details}
+ case *NotOwner:
+ return OperationResult{Error: notOwner, Details: e.details}
+ default:
+ return OperationResult{Error: e.Error()}
+ }
+}
+
+// CheckOperationResults checks whether the provided operation was a success
+// If the operation was a success, it will return nil, nil
+// If the operation failed, due to a error committing the transaction it will
+// return nil, error.
+// Finally, in the case where one or more of the operations in the transaction
+// failed, we return []OperationErrors, error
+// Within []OperationErrors, the OperationErrors.Index() corresponds to the same index in
+// the original Operations struct. You may also perform type assertions against
+// the error so the caller can decide how best to handle it
+func CheckOperationResults(result []OperationResult, ops []Operation) ([]OperationError, error) {
+ // this shouldn't happen, but we'll cover the case to be certain
+ if len(result) < len(ops) {
+ return nil, fmt.Errorf("ovsdb transaction error. %d operations submitted but only %d results received", len(ops), len(result))
+ }
+ var errs []OperationError
+ for i, op := range result {
+ // RFC 7047: if all of the operations succeed, but the results cannot
+ // be committed, then "result" will have one more element than "params",
+ // with the additional element being an <error>.
+ if i >= len(ops) {
+ return errs, errorFromResult(nil, op)
+ }
+ if err := errorFromResult(&ops[i], op); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) > 0 {
+ return errs, fmt.Errorf("%d ovsdb operations failed", len(errs))
+ }
+ return nil, nil
+}
+
+// OperationError represents an error that occurred as part of an
+// OVSDB Operation
+type OperationError interface {
+ error
+ // Operation is a pointer to the operation which caused the error
+ Operation() *Operation
+}
+
+// ReferentialIntegrityViolation is explained in RFC 7047 4.1.3
+type ReferentialIntegrityViolation struct {
+ details string
+ operation *Operation
+}
+
+func NewReferentialIntegrityViolation(details string) *ReferentialIntegrityViolation {
+ return &ReferentialIntegrityViolation{details: details}
+}
+
+// Error implements the error interface
+func (e *ReferentialIntegrityViolation) Error() string {
+ msg := referentialIntegrityViolation
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *ReferentialIntegrityViolation) Operation() *Operation {
+ return e.operation
+}
+
+// ConstraintViolation is described in RFC 7047: 4.1.3
+type ConstraintViolation struct {
+ details string
+ operation *Operation
+}
+
+func NewConstraintViolation(details string) *ConstraintViolation {
+ return &ConstraintViolation{details: details}
+}
+
+// Error implements the error interface
+func (e *ConstraintViolation) Error() string {
+ msg := constraintViolation
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *ConstraintViolation) Operation() *Operation {
+ return e.operation
+}
+
+// ResourcesExhausted is described in RFC 7047: 4.1.3
+type ResourcesExhausted struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *ResourcesExhausted) Error() string {
+ msg := resourcesExhausted
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *ResourcesExhausted) Operation() *Operation {
+ return e.operation
+}
+
+// IOError is described in RFC7047: 4.1.3
+type IOError struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *IOError) Error() string {
+ msg := ioError
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *IOError) Operation() *Operation {
+ return e.operation
+}
+
+// DuplicateUUIDName is described in RFC7047 5.2.1
+type DuplicateUUIDName struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *DuplicateUUIDName) Error() string {
+ msg := duplicateUUIDName
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *DuplicateUUIDName) Operation() *Operation {
+ return e.operation
+}
+
+// DomainError is described in RFC 7047: 5.2.4
+type DomainError struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *DomainError) Error() string {
+ msg := domainError
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *DomainError) Operation() *Operation {
+ return e.operation
+}
+
+// RangeError is described in RFC 7047: 5.2.4
+type RangeError struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *RangeError) Error() string {
+ msg := rangeError
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *RangeError) Operation() *Operation {
+ return e.operation
+}
+
+// TimedOut is described in RFC 7047: 5.2.6
+type TimedOut struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *TimedOut) Error() string {
+ msg := timedOut
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *TimedOut) Operation() *Operation {
+ return e.operation
+}
+
+// NotSupported is described in RFC 7047: 5.2.7
+type NotSupported struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *NotSupported) Error() string {
+ msg := notSupported
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *NotSupported) Operation() *Operation {
+ return e.operation
+}
+
+// Aborted is described in RFC 7047: 5.2.8
+type Aborted struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *Aborted) Error() string {
+ msg := aborted
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *Aborted) Operation() *Operation {
+ return e.operation
+}
+
+// NotOwner is described in RFC 7047: 5.2.9
+type NotOwner struct {
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *NotOwner) Error() string {
+ msg := notOwner
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *NotOwner) Operation() *Operation {
+ return e.operation
+}
+
+// Error is a generic OVSDB Error type that implements the
+// OperationError and error interfaces
+type Error struct {
+ name string
+ details string
+ operation *Operation
+}
+
+// Error implements the error interface
+func (e *Error) Error() string {
+ msg := e.name
+ if e.details != "" {
+ msg += ": " + e.details
+ }
+ return msg
+}
+
+// Operation implements the OperationError interface
+func (e *Error) Operation() *Operation {
+ return e.operation
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go
new file mode 100644
index 0000000000..893a9774fc
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go
@@ -0,0 +1,92 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// OvsMap is the JSON map structure used for OVSDB
+// RFC 7047 uses the following notation for map as JSON doesn't support non-string keys for maps.
+// A 2-element JSON array that represents a database map value. The
+// first element of the array must be the string "map", and the
+// second element must be an array of zero or more <pair>s giving the
+// values in the map. All of the <pair>s must have the same key and
+// value types.
+type OvsMap struct {
+ GoMap map[interface{}]interface{}
+}
+
+// MarshalJSON marshalls an OVSDB style Map to a byte array
+func (o OvsMap) MarshalJSON() ([]byte, error) {
+ if len(o.GoMap) > 0 {
+ var ovsMap, innerMap []interface{}
+ ovsMap = append(ovsMap, "map")
+ for key, val := range o.GoMap {
+ var mapSeg []interface{}
+ mapSeg = append(mapSeg, key)
+ mapSeg = append(mapSeg, val)
+ innerMap = append(innerMap, mapSeg)
+ }
+ ovsMap = append(ovsMap, innerMap)
+ return json.Marshal(ovsMap)
+ }
+ return []byte("[\"map\",[]]"), nil
+}
+
+// UnmarshalJSON unmarshals an OVSDB style Map from a byte array
+func (o *OvsMap) UnmarshalJSON(b []byte) (err error) {
+ var oMap []interface{}
+ o.GoMap = make(map[interface{}]interface{})
+ if err := json.Unmarshal(b, &oMap); err == nil && len(oMap) > 1 {
+ innerSlice := oMap[1].([]interface{})
+ for _, val := range innerSlice {
+ f := val.([]interface{})
+ var k interface{}
+ switch f[0].(type) {
+ case []interface{}:
+ vSet := f[0].([]interface{})
+ if len(vSet) != 2 || vSet[0] == "map" {
+ return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)}
+ }
+ goSlice, err := ovsSliceToGoNotation(vSet)
+ if err != nil {
+ return err
+ }
+ k = goSlice
+ default:
+ k = f[0]
+ }
+ switch f[1].(type) {
+ case []interface{}:
+ vSet := f[1].([]interface{})
+ if len(vSet) != 2 || vSet[0] == "map" {
+ return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)}
+ }
+ goSlice, err := ovsSliceToGoNotation(vSet)
+ if err != nil {
+ return err
+ }
+ o.GoMap[k] = goSlice
+ default:
+ o.GoMap[k] = f[1]
+ }
+ }
+ }
+ return err
+}
+
+// NewOvsMap will return an OVSDB style map from a provided Golang Map
+func NewOvsMap(goMap interface{}) (OvsMap, error) {
+ v := reflect.ValueOf(goMap)
+ if v.Kind() != reflect.Map {
+ return OvsMap{}, fmt.Errorf("ovsmap supports only go map types")
+ }
+
+ genMap := make(map[interface{}]interface{})
+ keys := v.MapKeys()
+ for _, key := range keys {
+ genMap[key.Interface()] = v.MapIndex(key).Interface()
+ }
+ return OvsMap{genMap}, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go
new file mode 100644
index 0000000000..b97e062857
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go
@@ -0,0 +1,88 @@
+package ovsdb
+
+import "encoding/json"
+
+// MonitorSelect represents a monitor select according to RFC7047
+type MonitorSelect struct {
+ initial *bool
+ insert *bool
+ delete *bool
+ modify *bool
+}
+
+// NewMonitorSelect returns a new MonitorSelect with the provided values
+func NewMonitorSelect(initial, insert, delete, modify bool) *MonitorSelect {
+ return &MonitorSelect{
+ initial: &initial,
+ insert: &insert,
+ delete: &delete,
+ modify: &modify,
+ }
+}
+
+// NewDefaultMonitorSelect returns a new MonitorSelect with default values
+func NewDefaultMonitorSelect() *MonitorSelect {
+ return NewMonitorSelect(true, true, true, true)
+}
+
+// Initial returns whether or not an initial response will be sent
+func (m MonitorSelect) Initial() bool {
+ if m.initial == nil {
+ return true
+ }
+ return *m.initial
+}
+
+// Insert returns whether we will receive updates for inserts
+func (m MonitorSelect) Insert() bool {
+ if m.insert == nil {
+ return true
+ }
+ return *m.insert
+}
+
+// Delete returns whether we will receive updates for deletions
+func (m MonitorSelect) Delete() bool {
+ if m.delete == nil {
+ return true
+ }
+ return *m.delete
+}
+
+// Modify returns whether we will receive updates for modifications
+func (m MonitorSelect) Modify() bool {
+ if m.modify == nil {
+ return true
+ }
+ return *m.modify
+}
+
+type monitorSelect struct {
+ Initial *bool `json:"initial,omitempty"`
+ Insert *bool `json:"insert,omitempty"`
+ Delete *bool `json:"delete,omitempty"`
+ Modify *bool `json:"modify,omitempty"`
+}
+
+func (m MonitorSelect) MarshalJSON() ([]byte, error) {
+ ms := monitorSelect{
+ Initial: m.initial,
+ Insert: m.insert,
+ Delete: m.delete,
+ Modify: m.modify,
+ }
+ return json.Marshal(ms)
+}
+
+func (m *MonitorSelect) UnmarshalJSON(data []byte) error {
+ var ms monitorSelect
+ err := json.Unmarshal(data, &ms)
+ if err != nil {
+ return err
+ }
+ m.initial = ms.Initial
+ m.insert = ms.Insert
+ m.delete = ms.Delete
+ m.modify = ms.Modify
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go
new file mode 100644
index 0000000000..dc8b0f6d4d
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go
@@ -0,0 +1,87 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type Mutator string
+
+const (
+ // MutateOperationDelete is the delete mutator
+ MutateOperationDelete Mutator = "delete"
+ // MutateOperationInsert is the insert mutator
+ MutateOperationInsert Mutator = "insert"
+ // MutateOperationAdd is the add mutator
+ MutateOperationAdd Mutator = "+="
+ // MutateOperationSubtract is the subtract mutator
+ MutateOperationSubtract Mutator = "-="
+ // MutateOperationMultiply is the multiply mutator
+ MutateOperationMultiply Mutator = "*="
+ // MutateOperationDivide is the divide mutator
+ MutateOperationDivide Mutator = "/="
+ // MutateOperationModulo is the modulo mutator
+ MutateOperationModulo Mutator = "%="
+)
+
+// Mutation is described in RFC 7047: 5.1
+type Mutation struct {
+ Column string
+ Mutator Mutator
+ Value interface{}
+}
+
+// NewMutation returns a new mutation
+func NewMutation(column string, mutator Mutator, value interface{}) *Mutation {
+ return &Mutation{
+ Column: column,
+ Mutator: mutator,
+ Value: value,
+ }
+}
+
+// MarshalJSON marshals a mutation to a 3 element JSON array
+func (m Mutation) MarshalJSON() ([]byte, error) {
+ v := []interface{}{m.Column, m.Mutator, m.Value}
+ return json.Marshal(v)
+}
+
+// UnmarshalJSON converts a 3 element JSON array to a Mutation
+func (m *Mutation) UnmarshalJSON(b []byte) error {
+ var v []interface{}
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+ if len(v) != 3 {
+ return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v))
+ }
+ ok := false
+ m.Column, ok = v[0].(string)
+ if !ok {
+ return fmt.Errorf("expected column name %v to be a valid string", v[0])
+ }
+ mutatorString, ok := v[1].(string)
+ if !ok {
+ return fmt.Errorf("expected mutator %v to be a valid string", v[1])
+ }
+ mutator := Mutator(mutatorString)
+ switch mutator {
+ case MutateOperationDelete,
+ MutateOperationInsert,
+ MutateOperationAdd,
+ MutateOperationSubtract,
+ MutateOperationMultiply,
+ MutateOperationDivide,
+ MutateOperationModulo:
+ m.Mutator = mutator
+ default:
+ return fmt.Errorf("%s is not a valid mutator", mutator)
+ }
+ vv, err := ovsSliceToGoNotation(v[2])
+ if err != nil {
+ return err
+ }
+ m.Value = vv
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go
new file mode 100644
index 0000000000..29034ee9d6
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go
@@ -0,0 +1,165 @@
+package ovsdb
+
+import (
+ "fmt"
+)
+
+// ExpandNamedUUIDs replaces named UUIDs in columns that contain UUID types
+// throughout the operation. The caller must ensure each input operation has
+// a valid UUID, which may be replaced if a previous operation created a
+// matching named UUID mapping. Returns the updated operations or an error.
+func ExpandNamedUUIDs(ops []Operation, schema *DatabaseSchema) ([]Operation, error) {
+ uuidMap := make(map[string]string)
+
+ // Pass 1: replace the named UUID with a real UUID for each operation and
+ // build the substitution map
+ for i := range ops {
+ op := &ops[i]
+ if op.Op != OperationInsert {
+ // Only Insert operations can specify a Named UUID
+ continue
+ }
+
+ if err := ValidateUUID(op.UUID); err != nil {
+ return nil, fmt.Errorf("operation UUID %q invalid: %v", op.UUID, err)
+ }
+
+ if op.UUIDName != "" {
+ if uuid, ok := uuidMap[op.UUIDName]; ok {
+ if op.UUID != "" && op.UUID != uuid {
+ return nil, fmt.Errorf("named UUID %q maps to UUID %q but found existing UUID %q",
+ op.UUIDName, uuid, op.UUID)
+ }
+ // If there's already a mapping for this named UUID use it
+ op.UUID = uuid
+ } else {
+ uuidMap[op.UUIDName] = op.UUID
+ }
+ op.UUIDName = ""
+ }
+ }
+
+ // Pass 2: replace named UUIDs in operation fields with the real UUID
+ for i := range ops {
+ op := &ops[i]
+ tableSchema := schema.Table(op.Table)
+ if tableSchema == nil {
+ return nil, fmt.Errorf("table %q not found in schema %q", op.Table, schema.Name)
+ }
+
+ for i, condition := range op.Where {
+ newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, condition.Column, condition.Value, uuidMap)
+ if err != nil {
+ return nil, err
+ }
+ op.Where[i].Value = newVal
+ }
+ for i, mutation := range op.Mutations {
+ newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, mutation.Column, mutation.Value, uuidMap)
+ if err != nil {
+ return nil, err
+ }
+ op.Mutations[i].Value = newVal
+ }
+ for _, row := range op.Rows {
+ for k, v := range row {
+ newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap)
+ if err != nil {
+ return nil, err
+ }
+ row[k] = newVal
+ }
+ }
+ for k, v := range op.Row {
+ newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap)
+ if err != nil {
+ return nil, err
+ }
+ op.Row[k] = newVal
+ }
+ }
+
+ return ops, nil
+}
+
+func expandColumnNamedUUIDs(tableSchema *TableSchema, tableName, columnName string, value interface{}, uuidMap map[string]string) (interface{}, error) {
+ column := tableSchema.Column(columnName)
+ if column == nil {
+ return nil, fmt.Errorf("column %q not found in table %q", columnName, tableName)
+ }
+ return expandNamedUUID(column, value, uuidMap), nil
+}
+
+func expandNamedUUID(column *ColumnSchema, value interface{}, namedUUIDs map[string]string) interface{} {
+ var keyType, valType ExtendedType
+
+ switch column.Type {
+ case TypeUUID:
+ keyType = column.Type
+ case TypeSet:
+ keyType = column.TypeObj.Key.Type
+ case TypeMap:
+ keyType = column.TypeObj.Key.Type
+ valType = column.TypeObj.Value.Type
+ }
+
+ if valType == TypeUUID {
+ if m, ok := value.(OvsMap); ok {
+ for k, v := range m.GoMap {
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, k, namedUUIDs); ok {
+ m.GoMap[newUUID] = m.GoMap[k]
+ delete(m.GoMap, k)
+ k = newUUID
+ }
+ if newUUID, ok := expandNamedUUIDAtomic(valType, v, namedUUIDs); ok {
+ m.GoMap[k] = newUUID
+ }
+ }
+ }
+ } else if keyType == TypeUUID {
+ if ovsSet, ok := value.(OvsSet); ok {
+ for i, s := range ovsSet.GoSet {
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok {
+ ovsSet.GoSet[i] = newUUID
+ }
+ }
+ return value
+ } else if strSet, ok := value.([]string); ok {
+ for i, s := range strSet {
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok {
+ strSet[i] = newUUID.(string)
+ }
+ }
+ return value
+ } else if uuidSet, ok := value.([]UUID); ok {
+ for i, s := range uuidSet {
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok {
+ uuidSet[i] = newUUID.(UUID)
+ }
+ }
+ return value
+ }
+
+ if newUUID, ok := expandNamedUUIDAtomic(keyType, value, namedUUIDs); ok {
+ return newUUID
+ }
+ }
+
+ // No expansion required; return original value
+ return value
+}
+
+func expandNamedUUIDAtomic(valueType ExtendedType, value interface{}, namedUUIDs map[string]string) (interface{}, bool) {
+ if valueType == TypeUUID {
+ if uuid, ok := value.(UUID); ok {
+ if newUUID, ok := namedUUIDs[uuid.GoUUID]; ok {
+ return UUID{GoUUID: newUUID}, true
+ }
+ } else if uuid, ok := value.(string); ok {
+ if newUUID, ok := namedUUIDs[uuid]; ok {
+ return newUUID, true
+ }
+ }
+ }
+ return value, false
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go
new file mode 100644
index 0000000000..afad87cdc4
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go
@@ -0,0 +1,129 @@
+package ovsdb
+
+import (
+ "encoding/json"
+)
+
+const (
+ // OperationInsert is an insert operation
+ OperationInsert = "insert"
+ // OperationSelect is a select operation
+ OperationSelect = "select"
+ // OperationUpdate is an update operation
+ OperationUpdate = "update"
+ // OperationMutate is a mutate operation
+ OperationMutate = "mutate"
+ // OperationDelete is a delete operation
+ OperationDelete = "delete"
+ // OperationWait is a wait operation
+ OperationWait = "wait"
+ // OperationCommit is a commit operation
+ OperationCommit = "commit"
+ // OperationAbort is an abort operation
+ OperationAbort = "abort"
+ // OperationComment is a comment operation
+ OperationComment = "comment"
+ // OperationAssert is an assert operation
+ OperationAssert = "assert"
+)
+
+// Operation represents an operation according to RFC7047 section 5.2
+type Operation struct {
+ Op string `json:"op"`
+ Table string `json:"table,omitempty"`
+ Row Row `json:"row,omitempty"`
+ Rows []Row `json:"rows,omitempty"`
+ Columns []string `json:"columns,omitempty"`
+ Mutations []Mutation `json:"mutations,omitempty"`
+ Timeout *int `json:"timeout,omitempty"`
+ Where []Condition `json:"where,omitempty"`
+ Until string `json:"until,omitempty"`
+ Durable *bool `json:"durable,omitempty"`
+ Comment *string `json:"comment,omitempty"`
+ Lock *string `json:"lock,omitempty"`
+ UUID string `json:"uuid,omitempty"`
+ UUIDName string `json:"uuid-name,omitempty"`
+}
+
+// MarshalJSON marshalls 'Operation' to a byte array
+// For 'select' operations, we don't omit the 'Where' field
+// to allow selecting all rows of a table
+func (o Operation) MarshalJSON() ([]byte, error) {
+ type OpAlias Operation
+ switch o.Op {
+ case "select":
+ where := o.Where
+ if where == nil {
+ where = make([]Condition, 0)
+ }
+ return json.Marshal(&struct {
+ Where []Condition `json:"where"`
+ OpAlias
+ }{
+ Where: where,
+ OpAlias: (OpAlias)(o),
+ })
+ default:
+ return json.Marshal(&struct {
+ OpAlias
+ }{
+ OpAlias: (OpAlias)(o),
+ })
+ }
+}
+
+// MonitorRequests represents a group of monitor requests according to RFC7047
+// We cannot use MonitorRequests by inlining the MonitorRequest Map structure till GoLang issue #6213 makes it.
+// The only option is to go with raw map[string]interface{} option :-( that sucks !
+// Refer to client.go : MonitorAll() function for more details
+type MonitorRequests struct {
+ Requests map[string]MonitorRequest `json:"requests"`
+}
+
+// MonitorRequest represents a monitor request according to RFC7047
+type MonitorRequest struct {
+ Columns []string `json:"columns,omitempty"`
+ Where []Condition `json:"where,omitempty"`
+ Select *MonitorSelect `json:"select,omitempty"`
+}
+
+// TransactResponse represents the response to a Transact Operation
+type TransactResponse struct {
+ Result []OperationResult `json:"result"`
+ Error string `json:"error"`
+}
+
+// OperationResult is the result of an Operation
+type OperationResult struct {
+ Count int `json:"count,omitempty"`
+ Error string `json:"error,omitempty"`
+ Details string `json:"details,omitempty"`
+ UUID UUID `json:"uuid,omitempty"`
+ Rows []Row `json:"rows,omitempty"`
+}
+
+func ovsSliceToGoNotation(val interface{}) (interface{}, error) {
+ switch sl := val.(type) {
+ case []interface{}:
+ bsliced, err := json.Marshal(sl)
+ if err != nil {
+ return nil, err
+ }
+ switch sl[0] {
+ case "uuid", "named-uuid":
+ var uuid UUID
+ err = json.Unmarshal(bsliced, &uuid)
+ return uuid, err
+ case "set":
+ var oSet OvsSet
+ err = json.Unmarshal(bsliced, &oSet)
+ return oSet, err
+ case "map":
+ var oMap OvsMap
+ err = json.Unmarshal(bsliced, &oMap)
+ return oMap, err
+ }
+ return val, nil
+ }
+ return val, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go
new file mode 100644
index 0000000000..9a253f74f1
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go
@@ -0,0 +1,26 @@
+package ovsdb
+
+import "encoding/json"
+
+// Row is a table Row according to RFC7047
+type Row map[string]interface{}
+
+// UnmarshalJSON unmarshalls a byte array to an OVSDB Row
+func (r *Row) UnmarshalJSON(b []byte) (err error) {
+ *r = make(map[string]interface{})
+ var raw map[string]interface{}
+ err = json.Unmarshal(b, &raw)
+ for key, val := range raw {
+ val, err = ovsSliceToGoNotation(val)
+ if err != nil {
+ return err
+ }
+ (*r)[key] = val
+ }
+ return err
+}
+
+// NewRow returns a new empty row
+func NewRow() Row {
+ return Row(make(map[string]interface{}))
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go
new file mode 100644
index 0000000000..f1e5980058
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go
@@ -0,0 +1,79 @@
+package ovsdb
+
+const (
+ // MonitorRPC is the monitor RPC method
+ MonitorRPC = "monitor"
+ // ConditionalMonitorRPC is the monitor_cond RPC method
+ ConditionalMonitorRPC = "monitor_cond"
+ // ConditionalMonitorSinceRPC is the monitor_cond_since RPC method
+ ConditionalMonitorSinceRPC = "monitor_cond_since"
+)
+
+// NewEchoArgs creates a new set of arguments for an echo RPC
+func NewEchoArgs() []interface{} {
+ return []interface{}{"libovsdb echo"}
+}
+
+// NewGetSchemaArgs creates a new set of arguments for a get_schemas RPC
+func NewGetSchemaArgs(schema string) []interface{} {
+ return []interface{}{schema}
+}
+
+// NewTransactArgs creates a new set of arguments for a transact RPC
+func NewTransactArgs(database string, operations ...Operation) []interface{} {
+ dbSlice := make([]interface{}, 1)
+ dbSlice[0] = database
+
+ opsSlice := make([]interface{}, len(operations))
+ for i, d := range operations {
+ opsSlice[i] = d
+ }
+
+ ops := append(dbSlice, opsSlice...)
+ return ops
+}
+
+// NewCancelArgs creates a new set of arguments for a cancel RPC
+func NewCancelArgs(id interface{}) []interface{} {
+ return []interface{}{id}
+}
+
+// NewMonitorArgs creates a new set of arguments for a monitor RPC
+func NewMonitorArgs(database string, value interface{}, requests map[string]MonitorRequest) []interface{} {
+ return []interface{}{database, value, requests}
+}
+
+// NewMonitorCondSinceArgs creates a new set of arguments for a monitor_cond_since RPC
+func NewMonitorCondSinceArgs(database string, value interface{}, requests map[string]MonitorRequest, lastTransactionID string) []interface{} {
+ return []interface{}{database, value, requests, lastTransactionID}
+}
+
+// NewMonitorCancelArgs creates a new set of arguments for a monitor_cancel RPC
+func NewMonitorCancelArgs(value interface{}) []interface{} {
+ return []interface{}{value}
+}
+
+// NewLockArgs creates a new set of arguments for a lock, steal or unlock RPC
+func NewLockArgs(id interface{}) []interface{} {
+ return []interface{}{id}
+}
+
+// NotificationHandler is the interface that must be implemented to receive notifications
+type NotificationHandler interface {
+ // RFC 7047 section 4.1.6 Update Notification
+ Update(context interface{}, tableUpdates TableUpdates)
+
+ // ovsdb-server.7 update2 notifications
+ Update2(context interface{}, tableUpdates TableUpdates2)
+
+ // RFC 7047 section 4.1.9 Locked Notification
+ Locked([]interface{})
+
+ // RFC 7047 section 4.1.10 Stolen Notification
+ Stolen([]interface{})
+
+ // RFC 7047 section 4.1.11 Echo Notification
+ Echo([]interface{})
+
+ Disconnected()
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go
new file mode 100644
index 0000000000..285d1e02a0
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go
@@ -0,0 +1,641 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "strings"
+)
+
+// DatabaseSchema is a database schema according to RFC7047
+type DatabaseSchema struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Tables map[string]TableSchema `json:"tables"`
+ // allTablesRoot lazily caches whether no table declares isRoot, in
+ // which case every table is considered part of the root set; see IsRoot.
+ allTablesRoot *bool
+}
+
+// UUIDColumn is a static column that represents the _uuid column, common to all tables
+var UUIDColumn = ColumnSchema{
+ Type: TypeUUID,
+}
+
+// Table looks up the TableSchema for tableName; it returns nil when the
+// table is not part of the schema. The returned pointer addresses a copy
+// of the map entry.
+func (schema DatabaseSchema) Table(tableName string) *TableSchema {
+ table, ok := schema.Tables[tableName]
+ if !ok {
+ return nil
+ }
+ return &table
+}
+
+// IsRoot reports whether the given table belongs to the schema's root set.
+// It returns an error if the table is unknown to the schema.
+func (schema DatabaseSchema) IsRoot(tableName string) (bool, error) {
+ t := schema.Table(tableName)
+ if t == nil {
+ // Fix: error message previously misspelled "schema" as "schame".
+ return false, fmt.Errorf("table %s not in schema", tableName)
+ }
+ if t.IsRoot {
+ return true, nil
+ }
+ // As per RFC7047, for compatibility with schemas created before
+ // "isRoot" was introduced: if "isRoot" is omitted or false in every
+ // <table-schema> in a given <database-schema>, then every table is
+ // part of the root set.
+ // NOTE(review): the receiver is a value, so this cached result is
+ // written to a copy and recomputed on every call — confirm whether a
+ // pointer receiver was intended before changing the method set.
+ if schema.allTablesRoot == nil {
+ allTablesRoot := true
+ for _, tSchema := range schema.Tables {
+ if tSchema.IsRoot {
+ allTablesRoot = false
+ break
+ }
+ }
+ schema.allTablesRoot = &allTablesRoot
+ }
+ return *schema.allTablesRoot, nil
+}
+
+// Print writes a human-readable summary of the schema (name, version,
+// tables with their indexes and columns) to w.
+func (schema DatabaseSchema) Print(w io.Writer) {
+ fmt.Fprintf(w, "%s, (%s)\n", schema.Name, schema.Version)
+ for table, tableSchema := range schema.Tables {
+ fmt.Fprintf(w, "\t %s", table)
+ if len(tableSchema.Indexes) == 0 {
+ fmt.Fprintf(w, "\n")
+ } else {
+ fmt.Fprintf(w, "(%v)\n", tableSchema.Indexes)
+ }
+ for column, columnSchema := range tableSchema.Columns {
+ fmt.Fprintf(w, "\t\t %s => %s\n", column, columnSchema)
+ }
+ }
+}
+
+// SchemaFromFile reads an open schema file and parses it into a
+// DatabaseSchema; the zero DatabaseSchema is returned on any error.
+func SchemaFromFile(f *os.File) (DatabaseSchema, error) {
+ var schema DatabaseSchema
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return DatabaseSchema{}, err
+ }
+ if err := json.Unmarshal(data, &schema); err != nil {
+ return DatabaseSchema{}, err
+ }
+ return schema, nil
+}
+
+// ValidateOperations performs basic validation for operations against a DatabaseSchema
+// It checks that each table-bearing operation targets a known table and
+// that every column named in Row, Rows and Columns exists in that table,
+// allowing the implicit _uuid and _version columns.
+// NOTE(review): operations whose Op code matches none of the cases below
+// fall through the switch and count as valid — confirm that is intended.
+func (schema DatabaseSchema) ValidateOperations(operations ...Operation) bool {
+ for _, op := range operations {
+ switch op.Op {
+ case OperationAbort, OperationAssert, OperationComment, OperationCommit, OperationWait:
+ // These operations carry no table/column payload to validate.
+ continue
+ case OperationInsert, OperationSelect, OperationUpdate, OperationMutate, OperationDelete:
+ table, ok := schema.Tables[op.Table]
+ if ok {
+ // Validate the single-row payload.
+ for column := range op.Row {
+ if _, ok := table.Columns[column]; !ok {
+ if column != "_uuid" && column != "_version" {
+ return false
+ }
+ }
+ }
+ // Validate multi-row payloads.
+ for _, row := range op.Rows {
+ for column := range row {
+ if _, ok := table.Columns[column]; !ok {
+ if column != "_uuid" && column != "_version" {
+ return false
+ }
+ }
+ }
+ }
+ // Validate explicitly requested column names.
+ for _, column := range op.Columns {
+ if _, ok := table.Columns[column]; !ok {
+ if column != "_uuid" && column != "_version" {
+ return false
+ }
+ }
+ }
+ } else {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// TableSchema is a table schema according to RFC7047
+type TableSchema struct {
+ Columns map[string]*ColumnSchema `json:"columns"`
+ Indexes [][]string `json:"indexes,omitempty"`
+ IsRoot bool `json:"isRoot,omitempty"`
+}
+
+// Column returns the ColumnSchema for columnName, resolving the implicit
+// _uuid column that every table has; nil if the column does not exist.
+func (t TableSchema) Column(columnName string) *ColumnSchema {
+ if columnName == "_uuid" {
+ return &UUIDColumn
+ }
+ column, ok := t.Columns[columnName]
+ if !ok {
+ return nil
+ }
+ return column
+}
+
+/*RFC7047 defines some atomic-types (e.g: integer, string, etc). However, the Column's type
+can also hold other more complex types such as set, enum and map. The way to determine the type
+depends on internal, not directly marshallable fields. Therefore, in order to simplify the usage
+of this library, we define an ExtendedType that includes all possible column types (including
+atomic fields).
+*/
+
+// ExtendedType includes atomic types as defined in the RFC plus Enum, Map and Set
+type ExtendedType = string
+
+// RefType is used to define the possible RefTypes
+type RefType = string
+
+// unlimited is not constant as we can't take the address of int constants
+var (
+ // Unlimited is used to express unlimited "Max"
+ Unlimited = -1
+)
+
+const (
+ unlimitedString = "unlimited"
+ //Strong RefType
+ Strong RefType = "strong"
+ //Weak RefType
+ Weak RefType = "weak"
+
+ //ExtendedType associated with Atomic Types
+
+ //TypeInteger is equivalent to 'int'
+ TypeInteger ExtendedType = "integer"
+ //TypeReal is equivalent to 'float64'
+ TypeReal ExtendedType = "real"
+ //TypeBoolean is equivalent to 'bool'
+ TypeBoolean ExtendedType = "boolean"
+ //TypeString is equivalent to 'string'
+ TypeString ExtendedType = "string"
+ //TypeUUID is equivalent to 'libovsdb.UUID'
+ TypeUUID ExtendedType = "uuid"
+
+ //Extended Types used to summarize the internal type of the field.
+
+ //TypeEnum is an enumerator of type defined by Key.Type
+ TypeEnum ExtendedType = "enum"
+ //TypeMap is a map whose type depend on Key.Type and Value.Type
+ TypeMap ExtendedType = "map"
+ //TypeSet is a set whose type depend on Key.Type
+ TypeSet ExtendedType = "set"
+)
+
+// BaseType is a base-type structure as per RFC7047
+type BaseType struct {
+ Type string
+ Enum []interface{}
+ minReal *float64
+ maxReal *float64
+ minInteger *int
+ maxInteger *int
+ minLength *int
+ maxLength *int
+ refTable *string
+ refType *RefType
+}
+
+// simpleAtomic reports whether b is a bare atomic type carrying no enum,
+// range/length constraints or reference metadata, so it can be marshalled
+// back as a plain JSON string.
+func (b *BaseType) simpleAtomic() bool {
+ if !isAtomicType(b.Type) || b.Enum != nil {
+ return false
+ }
+ if b.minReal != nil || b.maxReal != nil || b.minInteger != nil || b.maxInteger != nil {
+ return false
+ }
+ return b.minLength == nil && b.maxLength == nil && b.refTable == nil && b.refType == nil
+}
+
+// MinReal returns the minimum real value
+// RFC7047 does not define a default, but we assume this to be
+// the smallest non zero value a float64 could hold
+func (b *BaseType) MinReal() (float64, error) {
+ if b.Type != TypeReal {
+ return 0, fmt.Errorf("%s is not a real", b.Type)
+ }
+ if b.minReal != nil {
+ return *b.minReal, nil
+ }
+ return math.SmallestNonzeroFloat64, nil
+}
+
+// MaxReal returns the maximum real value
+// RFC7047 does not define a default, but this would be the maximum
+// value held by a float64
+func (b *BaseType) MaxReal() (float64, error) {
+ if b.Type != TypeReal {
+ return 0, fmt.Errorf("%s is not a real", b.Type)
+ }
+ if b.maxReal != nil {
+ return *b.maxReal, nil
+ }
+ return math.MaxFloat64, nil
+}
+
+// MinInteger returns the minimum integer value
+// RFC7047 specifies the minimum to be -2^63
+func (b *BaseType) MinInteger() (int, error) {
+ if b.Type != TypeInteger {
+ return 0, fmt.Errorf("%s is not an integer", b.Type)
+ }
+ if b.minInteger != nil {
+ return *b.minInteger, nil
+ }
+ // math.MinInt64 == -2^63; avoids the former float round-trip through
+ // math.Pow and its float->int conversion. Assumes a 64-bit int, as
+ // the original expression effectively did.
+ return math.MinInt64, nil
+}
+
+// MaxInteger returns the maximum integer value
+// RFC7047 specifies the maximum to be 2^63-1
+func (b *BaseType) MaxInteger() (int, error) {
+ if b.Type != TypeInteger {
+ return 0, fmt.Errorf("%s is not an integer", b.Type)
+ }
+ if b.maxInteger != nil {
+ return *b.maxInteger, nil
+ }
+ // Fix: int(math.Pow(2, 63)) overflows int64 in the float->int
+ // conversion, whose result is implementation-dependent per the Go
+ // spec. math.MaxInt64 == 2^63-1 is the intended value.
+ return math.MaxInt64, nil
+}
+
+// MinLength returns the minimum string length
+// RFC7047 doesn't specify a default, but we assume
+// that it must be >= 0
+func (b *BaseType) MinLength() (int, error) {
+ if b.Type != TypeString {
+ return 0, fmt.Errorf("%s is not an string", b.Type)
+ }
+ if b.minLength != nil {
+ return *b.minLength, nil
+ }
+ return 0, nil
+}
+
+// MaxLength returns the maximum string length
+// RFC7047 doesn't specify a default, but we assume
+// that it must be 2^63-1
+func (b *BaseType) MaxLength() (int, error) {
+ if b.Type != TypeString {
+ return 0, fmt.Errorf("%s is not an string", b.Type)
+ }
+ if b.maxLength != nil {
+ return *b.maxLength, nil
+ }
+ // Fix: int(math.Pow(2, 63)) overflows int64 in the float->int
+ // conversion (implementation-dependent result); use the exact constant.
+ return math.MaxInt64, nil
+}
+
+// RefTable returns the table to which a UUID type refers
+// It will return an empty string if not set
+func (b *BaseType) RefTable() (string, error) {
+ if b.Type != TypeUUID {
+ return "", fmt.Errorf("%s is not a uuid", b.Type)
+ }
+ if b.refTable != nil {
+ return *b.refTable, nil
+ }
+ return "", nil
+}
+
+// RefType returns the reference type for a UUID field
+// RFC7047 infers the RefType is strong if omitted
+func (b *BaseType) RefType() (RefType, error) {
+ if b.Type != TypeUUID {
+ return "", fmt.Errorf("%s is not a uuid", b.Type)
+ }
+ if b.refType != nil {
+ return *b.refType, nil
+ }
+ return Strong, nil
+}
+
+// UnmarshalJSON unmarshals a json-formatted base type. A base type is
+// either a bare JSON string naming an atomic type, or an object carrying
+// the type plus optional enum/range/reference constraints.
+func (b *BaseType) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err == nil {
+ if isAtomicType(s) {
+ b.Type = s
+ } else {
+ return fmt.Errorf("non atomic type %s in <base-type>", s)
+ }
+ return nil
+ }
+ // temporary type to avoid recursive call to unmarshal
+ var bt struct {
+ Type string `json:"type"`
+ Enum interface{} `json:"enum,omitempty"`
+ MinReal *float64 `json:"minReal,omitempty"`
+ MaxReal *float64 `json:"maxReal,omitempty"`
+ MinInteger *int `json:"minInteger,omitempty"`
+ MaxInteger *int `json:"maxInteger,omitempty"`
+ MinLength *int `json:"minLength,omitempty"`
+ MaxLength *int `json:"maxLength,omitempty"`
+ RefTable *string `json:"refTable,omitempty"`
+ RefType *RefType `json:"refType,omitempty"`
+ }
+ err := json.Unmarshal(data, &bt)
+ if err != nil {
+ return err
+ }
+
+ if bt.Enum != nil {
+ // 'enum' is a list or a single element representing a list of exactly one element
+ switch bt.Enum.(type) {
+ case []interface{}:
+ // it's an OvsSet: ["set", [elem, ...]]
+ // NOTE(review): assumes the well-formed 2-element set encoding;
+ // a malformed enum array would panic on these indexes/assertions
+ // — confirm inputs are server-produced schema documents.
+ oSet := bt.Enum.([]interface{})
+ innerSet := oSet[1].([]interface{})
+ b.Enum = make([]interface{}, len(innerSet))
+ copy(b.Enum, innerSet)
+ default:
+ b.Enum = []interface{}{bt.Enum}
+ }
+ }
+ b.Type = bt.Type
+ b.minReal = bt.MinReal
+ b.maxReal = bt.MaxReal
+ b.minInteger = bt.MinInteger
+ b.maxInteger = bt.MaxInteger
+ // Fix: minLength was previously copied from bt.MaxLength, silently
+ // dropping any "minLength" constraint present in the schema.
+ b.minLength = bt.MinLength
+ b.maxLength = bt.MaxLength
+ b.refTable = bt.RefTable
+ b.refType = bt.RefType
+ return nil
+}
+
+// MarshalJSON marshals a base type to JSON, emitting the optional
+// constraint fields only when set and encoding any enum as an OVSDB set.
+func (b BaseType) MarshalJSON() ([]byte, error) {
+ j := struct {
+ Type string `json:"type,omitempty"`
+ Enum *OvsSet `json:"enum,omitempty"`
+ MinReal *float64 `json:"minReal,omitempty"`
+ MaxReal *float64 `json:"maxReal,omitempty"`
+ MinInteger *int `json:"minInteger,omitempty"`
+ MaxInteger *int `json:"maxInteger,omitempty"`
+ MinLength *int `json:"minLength,omitempty"`
+ MaxLength *int `json:"maxLength,omitempty"`
+ RefTable *string `json:"refTable,omitempty"`
+ RefType *RefType `json:"refType,omitempty"`
+ }{
+ Type: b.Type,
+ MinReal: b.minReal,
+ MaxReal: b.maxReal,
+ MinInteger: b.minInteger,
+ MaxInteger: b.maxInteger,
+ // Fix: MinLength was previously populated from b.maxLength, so a
+ // marshal/unmarshal round trip corrupted the minLength constraint.
+ MinLength: b.minLength,
+ MaxLength: b.maxLength,
+ RefTable: b.refTable,
+ RefType: b.refType,
+ }
+ if len(b.Enum) > 0 {
+ set, err := NewOvsSet(b.Enum)
+ if err != nil {
+ return nil, err
+ }
+ j.Enum = &set
+ }
+ return json.Marshal(j)
+}
+
+// ColumnType is a type object as per RFC7047
+// "key": required
+// "value": optional
+// "min": optional (default: 1)
+// "max": or "unlimited" optional (default: 1)
+type ColumnType struct {
+ Key *BaseType
+ Value *BaseType
+ min *int
+ max *int
+}
+
+// Max returns the maximum value of a ColumnType. -1 is Unlimited
+func (c *ColumnType) Max() int {
+ if c.max == nil {
+ return 1
+ }
+ return *c.max
+}
+
+// Min returns the minimum value of a ColumnType
+func (c *ColumnType) Min() int {
+ if c.min == nil {
+ return 1
+ }
+ return *c.min
+}
+
+// UnmarshalJSON unmarshals a json-formatted column type. A column type is
+// either a bare atomic-type string (shorthand for a key-only type) or an
+// object with key/value base types and optional min/max cardinality,
+// where max may be the string "unlimited".
+func (c *ColumnType) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err == nil {
+ if isAtomicType(s) {
+ c.Key = &BaseType{Type: s}
+ } else {
+ // Fix: the error string previously ended in a dangling "in ".
+ return fmt.Errorf("non atomic type %s in <type>", s)
+ }
+ return nil
+ }
+ var colType struct {
+ Key *BaseType `json:"key"`
+ Value *BaseType `json:"value"`
+ Min *int `json:"min"`
+ Max interface{} `json:"max"`
+ }
+ err := json.Unmarshal(data, &colType)
+ if err != nil {
+ return err
+ }
+ c.Key = colType.Key
+ c.Value = colType.Value
+ c.min = colType.Min
+ switch v := colType.Max.(type) {
+ case string:
+ if v == unlimitedString {
+ // NOTE(review): this aliases the package-level Unlimited var;
+ // writing through c.max would mutate it globally — confirm
+ // callers never do that.
+ c.max = &Unlimited
+ } else {
+ return fmt.Errorf("unexpected string value in max field")
+ }
+ case float64:
+ // JSON numbers decode as float64; truncate to the integer bound.
+ i := int(v)
+ c.max = &i
+ default:
+ c.max = nil
+ }
+ return nil
+}
+
+// MarshalJSON marshalls a column type to JSON
+func (c ColumnType) MarshalJSON() ([]byte, error) {
+ if c.Value == nil && c.max == nil && c.min == nil && c.Key.simpleAtomic() {
+ return json.Marshal(c.Key.Type)
+ }
+ if c.Max() == Unlimited {
+ colType := struct {
+ Key *BaseType `json:"key"`
+ Value *BaseType `json:"value,omitempty"`
+ Min *int `json:"min,omitempty"`
+ Max string `json:"max,omitempty"`
+ }{
+ Key: c.Key,
+ Value: c.Value,
+ Min: c.min,
+ Max: unlimitedString,
+ }
+ return json.Marshal(&colType)
+ }
+ colType := struct {
+ Key *BaseType `json:"key"`
+ Value *BaseType `json:"value,omitempty"`
+ Min *int `json:"min,omitempty"`
+ Max *int `json:"max,omitempty"`
+ }{
+ Key: c.Key,
+ Value: c.Value,
+ Min: c.min,
+ Max: c.max,
+ }
+ return json.Marshal(&colType)
+}
+
+// ColumnSchema is a column schema according to RFC7047
+type ColumnSchema struct {
+ // According to RFC7047, the "type" field can be either an <atomic-type>
+ // or a ColumnType defined below. To try to simplify the usage, the
+ // json message will be parsed manually and Type will indicate the "extended"
+ // type. Depending on its value, more information may be available in TypeObj.
+ // E.g: If Type == TypeEnum, TypeObj.Key.Enum contains the possible values
+ Type ExtendedType
+ TypeObj *ColumnType
+ // ephemeral/mutable mirror the optional RFC7047 column flags; nil means
+ // "not specified", with defaults applied by Ephemeral() and Mutable().
+ ephemeral *bool
+ mutable *bool
+}
+
+// Mutable returns whether a column is mutable
+func (c *ColumnSchema) Mutable() bool {
+ if c.mutable != nil {
+ return *c.mutable
+ }
+ // default true
+ return true
+}
+
+// Ephemeral returns whether a column is ephemeral
+func (c *ColumnSchema) Ephemeral() bool {
+ if c.ephemeral != nil {
+ return *c.ephemeral
+ }
+ // default false
+ return false
+}
+
+// UnmarshalJSON unmarshals a json-formatted column and derives the
+// extended Type (map/set/enum/atomic) from the parsed ColumnType.
+func (c *ColumnSchema) UnmarshalJSON(data []byte) error {
+ // ColumnJSON represents the known json values for a Column
+ var colJSON struct {
+ Type *ColumnType `json:"type"`
+ Ephemeral *bool `json:"ephemeral,omitempty"`
+ Mutable *bool `json:"mutable,omitempty"`
+ }
+
+ // Unmarshal known keys
+ if err := json.Unmarshal(data, &colJSON); err != nil {
+ return fmt.Errorf("cannot parse column object %s", err)
+ }
+
+ c.ephemeral = colJSON.Ephemeral
+ c.mutable = colJSON.Mutable
+ c.TypeObj = colJSON.Type
+
+ // Infer the ExtendedType from the TypeObj
+ // NOTE(review): assumes the column object always carries a "type" key
+ // (required by RFC7047); a missing key leaves TypeObj nil and would
+ // panic below — confirm inputs are schema documents before relying on it.
+ if c.TypeObj.Value != nil {
+ c.Type = TypeMap
+ } else if c.TypeObj.Min() != 1 || c.TypeObj.Max() != 1 {
+ c.Type = TypeSet
+ } else if len(c.TypeObj.Key.Enum) > 0 {
+ c.Type = TypeEnum
+ } else {
+ c.Type = c.TypeObj.Key.Type
+ }
+ return nil
+}
+
+// MarshalJSON marshalls a column schema to JSON
+func (c ColumnSchema) MarshalJSON() ([]byte, error) {
+ type colJSON struct {
+ Type *ColumnType `json:"type"`
+ Ephemeral *bool `json:"ephemeral,omitempty"`
+ Mutable *bool `json:"mutable,omitempty"`
+ }
+ column := colJSON{
+ Type: c.TypeObj,
+ Ephemeral: c.ephemeral,
+ Mutable: c.mutable,
+ }
+ return json.Marshal(column)
+}
+
+// String returns a string representation of the (native) column type,
+// annotated with [E]phemeral/[M]utable flags and, for uuid-based types,
+// the referenced table and reference strength.
+func (c *ColumnSchema) String() string {
+ var flags []string
+ var flagStr string
+ var typeStr string
+ if c.Ephemeral() {
+ flags = append(flags, "E")
+ }
+ if c.Mutable() {
+ flags = append(flags, "M")
+ }
+ if len(flags) > 0 {
+ flagStr = fmt.Sprintf("[%s]", strings.Join(flags, ","))
+ }
+
+ switch c.Type {
+ case TypeInteger, TypeReal, TypeBoolean, TypeString:
+ typeStr = string(c.Type)
+ case TypeUUID:
+ if c.TypeObj != nil && c.TypeObj.Key != nil {
+ // ignore err as we've already asserted this is a uuid
+ reftable, _ := c.TypeObj.Key.RefTable()
+ reftype := ""
+ // Fix: the condition was inverted (err != nil), so reftype was
+ // never populated even though RefType succeeds for uuid keys.
+ if s, err := c.TypeObj.Key.RefType(); err == nil {
+ reftype = s
+ }
+ typeStr = fmt.Sprintf("uuid [%s (%s)]", reftable, reftype)
+ } else {
+ typeStr = "uuid"
+ }
+
+ case TypeEnum:
+ typeStr = fmt.Sprintf("enum (type: %s): %v", c.TypeObj.Key.Type, c.TypeObj.Key.Enum)
+ case TypeMap:
+ typeStr = fmt.Sprintf("[%s]%s", c.TypeObj.Key.Type, c.TypeObj.Value.Type)
+ case TypeSet:
+ var keyStr string
+ if c.TypeObj.Key.Type == TypeUUID {
+ // ignore err as we've already asserted this is a uuid
+ reftable, _ := c.TypeObj.Key.RefTable()
+ reftype, _ := c.TypeObj.Key.RefType()
+ keyStr = fmt.Sprintf(" [%s (%s)]", reftable, reftype)
+ } else {
+ keyStr = string(c.TypeObj.Key.Type)
+ }
+ typeStr = fmt.Sprintf("[]%s (min: %d, max: %d)", keyStr, c.TypeObj.Min(), c.TypeObj.Max())
+ default:
+ panic(fmt.Sprintf("Unsupported type %s", c.Type))
+ }
+
+ return strings.Join([]string{typeStr, flagStr}, " ")
+}
+
+// isAtomicType reports whether atype names one of the five RFC7047
+// atomic types (integer, real, boolean, string, uuid).
+func isAtomicType(atype string) bool {
+ switch atype {
+ case TypeInteger, TypeReal, TypeBoolean, TypeString, TypeUUID:
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore
new file mode 100644
index 0000000000..33f8bff56f
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore
@@ -0,0 +1 @@
+*.ovsschema
\ No newline at end of file
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go
new file mode 100644
index 0000000000..274a7164fe
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go
@@ -0,0 +1,182 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package serverdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const DatabaseTable = "Database"
+
+type (
+ DatabaseModel = string
+)
+
+var (
+ DatabaseModelStandalone DatabaseModel = "standalone"
+ DatabaseModelClustered DatabaseModel = "clustered"
+ DatabaseModelRelay DatabaseModel = "relay"
+)
+
+// Database defines an object in Database table
+type Database struct {
+ UUID string `ovsdb:"_uuid"`
+ Cid *string `ovsdb:"cid"`
+ Connected bool `ovsdb:"connected"`
+ Index *int `ovsdb:"index"`
+ Leader bool `ovsdb:"leader"`
+ Model DatabaseModel `ovsdb:"model"`
+ Name string `ovsdb:"name"`
+ Schema *string `ovsdb:"schema"`
+ Sid *string `ovsdb:"sid"`
+}
+
+func (a *Database) GetUUID() string {
+ return a.UUID
+}
+
+func (a *Database) GetCid() *string {
+ return a.Cid
+}
+
+func copyDatabaseCid(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDatabaseCid(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Database) GetConnected() bool {
+ return a.Connected
+}
+
+func (a *Database) GetIndex() *int {
+ return a.Index
+}
+
+func copyDatabaseIndex(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDatabaseIndex(a, b *int) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Database) GetLeader() bool {
+ return a.Leader
+}
+
+func (a *Database) GetModel() DatabaseModel {
+ return a.Model
+}
+
+func (a *Database) GetName() string {
+ return a.Name
+}
+
+func (a *Database) GetSchema() *string {
+ return a.Schema
+}
+
+func copyDatabaseSchema(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDatabaseSchema(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Database) GetSid() *string {
+ return a.Sid
+}
+
+func copyDatabaseSid(a *string) *string {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+func equalDatabaseSid(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == b {
+ return true
+ }
+ return *a == *b
+}
+
+func (a *Database) DeepCopyInto(b *Database) {
+ *b = *a
+ b.Cid = copyDatabaseCid(a.Cid)
+ b.Index = copyDatabaseIndex(a.Index)
+ b.Schema = copyDatabaseSchema(a.Schema)
+ b.Sid = copyDatabaseSid(a.Sid)
+}
+
+func (a *Database) DeepCopy() *Database {
+ b := new(Database)
+ a.DeepCopyInto(b)
+ return b
+}
+
+func (a *Database) CloneModelInto(b model.Model) {
+ c := b.(*Database)
+ a.DeepCopyInto(c)
+}
+
+func (a *Database) CloneModel() model.Model {
+ return a.DeepCopy()
+}
+
+func (a *Database) Equals(b *Database) bool {
+ return a.UUID == b.UUID &&
+ equalDatabaseCid(a.Cid, b.Cid) &&
+ a.Connected == b.Connected &&
+ equalDatabaseIndex(a.Index, b.Index) &&
+ a.Leader == b.Leader &&
+ a.Model == b.Model &&
+ a.Name == b.Name &&
+ equalDatabaseSchema(a.Schema, b.Schema) &&
+ equalDatabaseSid(a.Sid, b.Sid)
+}
+
+func (a *Database) EqualsModel(b model.Model) bool {
+ c := b.(*Database)
+ return a.Equals(c)
+}
+
+var _ model.CloneableModel = &Database{}
+var _ model.ComparableModel = &Database{}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go
new file mode 100644
index 0000000000..5923af60ab
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go
@@ -0,0 +1,6 @@
+package serverdb
+
+// server_model is a database model for the special _Server database that all
+// ovsdb instances export. It reports back status of the server process itself.
+
+//go:generate ../../bin/modelgen --extended -p serverdb -o . _server.ovsschema
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go
new file mode 100644
index 0000000000..3c117faa26
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go
@@ -0,0 +1,99 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package serverdb
+
+import (
+ "encoding/json"
+
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb
+func FullDatabaseModel() (model.ClientDBModel, error) {
+ return model.NewClientDBModel("_Server", map[string]model.Model{
+ "Database": &Database{},
+ })
+}
+
+var schema = `{
+ "name": "_Server",
+ "version": "1.2.0",
+ "tables": {
+ "Database": {
+ "columns": {
+ "cid": {
+ "type": {
+ "key": {
+ "type": "uuid"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "connected": {
+ "type": "boolean"
+ },
+ "index": {
+ "type": {
+ "key": {
+ "type": "integer"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "leader": {
+ "type": "boolean"
+ },
+ "model": {
+ "type": {
+ "key": {
+ "type": "string",
+ "enum": [
+ "set",
+ [
+ "standalone",
+ "clustered",
+ "relay"
+ ]
+ ]
+ }
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "schema": {
+ "type": {
+ "key": {
+ "type": "string"
+ },
+ "min": 0,
+ "max": 1
+ }
+ },
+ "sid": {
+ "type": {
+ "key": {
+ "type": "uuid"
+ },
+ "min": 0,
+ "max": 1
+ }
+ }
+ },
+ "isRoot": true
+ }
+ }
+}`
+
+func Schema() ovsdb.DatabaseSchema {
+ var s ovsdb.DatabaseSchema
+ err := json.Unmarshal([]byte(schema), &s)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go
new file mode 100644
index 0000000000..ae1ec59ae2
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go
@@ -0,0 +1,109 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// OvsSet is an OVSDB style set
+// RFC 7047 has a weird (but understandable) notation for set as described as :
+// Either an <atom>, representing a set with exactly one element, or
+// a 2-element JSON array that represents a database set value. The
+// first element of the array must be the string "set", and the
+// second element must be an array of zero or more <atom>s giving the
+// values in the set. All of the <atom>s must have the same type.
+type OvsSet struct {
+ GoSet []interface{}
+}
+
+// NewOvsSet creates a new OVSDB style set from a Go interface (object)
+// Supported inputs are slices, strings, numeric/bool scalars, UUID values,
+// and pointers to any of those; a nil pointer yields an empty set.
+// NOTE(review): a nil interface argument would make reflect.TypeOf(obj)
+// return nil and panic on .Kind() — confirm callers never pass nil.
+func NewOvsSet(obj interface{}) (OvsSet, error) {
+ ovsSet := make([]interface{}, 0)
+ var v reflect.Value
+ if reflect.TypeOf(obj).Kind() == reflect.Ptr {
+ v = reflect.ValueOf(obj).Elem()
+ if v.Kind() == reflect.Invalid {
+ // must be a nil pointer, so just return an empty set
+ return OvsSet{ovsSet}, nil
+ }
+ } else {
+ v = reflect.ValueOf(obj)
+ }
+
+ switch v.Kind() {
+ case reflect.Slice:
+ // Each slice element becomes a member of the set.
+ for i := 0; i < v.Len(); i++ {
+ ovsSet = append(ovsSet, v.Index(i).Interface())
+ }
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64, reflect.Bool:
+ // A scalar becomes a one-element set.
+ ovsSet = append(ovsSet, v.Interface())
+ case reflect.Struct:
+ // The only struct type accepted is the OVSDB UUID wrapper.
+ if v.Type() == reflect.TypeOf(UUID{}) {
+ ovsSet = append(ovsSet, v.Interface())
+ } else {
+ return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types")
+ }
+ default:
+ return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types")
+ }
+ return OvsSet{ovsSet}, nil
+}
+
+// MarshalJSON wil marshal an OVSDB style Set in to a JSON byte array
+func (o OvsSet) MarshalJSON() ([]byte, error) {
+ switch l := len(o.GoSet); {
+ case l == 1:
+ return json.Marshal(o.GoSet[0])
+ case l > 0:
+ var oSet []interface{}
+ oSet = append(oSet, "set")
+ oSet = append(oSet, o.GoSet)
+ return json.Marshal(oSet)
+ }
+ return []byte("[\"set\",[]]"), nil
+}
+
+// UnmarshalJSON will unmarshal a JSON byte array to an OVSDB style Set.
+// It accepts the three RFC7047 encodings: a single <atom>, a 2-element
+// ["uuid"/"named-uuid", id] pair, or ["set", [elements...]].
+func (o *OvsSet) UnmarshalJSON(b []byte) error {
+ o.GoSet = make([]interface{}, 0)
+ addToSet := func(o *OvsSet, v interface{}) error {
+ goVal, err := ovsSliceToGoNotation(v)
+ if err == nil {
+ o.GoSet = append(o.GoSet, goVal)
+ }
+ return err
+ }
+ typeErr := func(inter interface{}) error {
+ return &json.UnmarshalTypeError{Value: reflect.ValueOf(inter).String(), Type: reflect.TypeOf(*o)}
+ }
+
+ var inter interface{}
+ if err := json.Unmarshal(b, &inter); err != nil {
+ return err
+ }
+ switch parsed := inter.(type) {
+ case []interface{}:
+ oSet := parsed
+ // Fix: guard malformed arrays ([], ["set"], non-string uuid id,
+ // non-array set body) that previously panicked on index/assertion.
+ if len(oSet) == 0 {
+ return typeErr(inter)
+ }
+ // it's a single uuid object
+ if len(oSet) == 2 && (oSet[0] == "uuid" || oSet[0] == "named-uuid") {
+ id, ok := oSet[1].(string)
+ if !ok {
+ return typeErr(inter)
+ }
+ return addToSet(o, UUID{GoUUID: id})
+ }
+ if oSet[0] != "set" {
+ // it is a slice, but is not a set
+ return typeErr(inter)
+ }
+ if len(oSet) != 2 {
+ return typeErr(inter)
+ }
+ innerSet, ok := oSet[1].([]interface{})
+ if !ok {
+ return typeErr(inter)
+ }
+ for _, val := range innerSet {
+ if err := addToSet(o, val); err != nil {
+ return err
+ }
+ }
+ return nil
+ default:
+ // it is a single object
+ return addToSet(o, inter)
+ }
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go
new file mode 100644
index 0000000000..a24ce64ad5
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go
@@ -0,0 +1,51 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type MonitorCondSinceReply struct {
+ Found bool
+ LastTransactionID string
+ Updates TableUpdates2
+}
+
+func (m MonitorCondSinceReply) MarshalJSON() ([]byte, error) {
+ v := []interface{}{m.Found, m.LastTransactionID, m.Updates}
+ return json.Marshal(v)
+}
+
+func (m *MonitorCondSinceReply) UnmarshalJSON(b []byte) error {
+ var v []json.RawMessage
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+ if len(v) != 3 {
+ return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v))
+ }
+
+ var found bool
+ err = json.Unmarshal(v[0], &found)
+ if err != nil {
+ return err
+ }
+
+ var lastTransactionID string
+ err = json.Unmarshal(v[1], &lastTransactionID)
+ if err != nil {
+ return err
+ }
+
+ var updates TableUpdates2
+ err = json.Unmarshal(v[2], &updates)
+ if err != nil {
+ return err
+ }
+
+ m.Found = found
+ m.LastTransactionID = lastTransactionID
+ m.Updates = updates
+ return nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go
new file mode 100644
index 0000000000..5a47d0c44a
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go
@@ -0,0 +1,35 @@
+package ovsdb
+
+// TableUpdates is an object that maps from a table name to a
+// TableUpdate
+type TableUpdates map[string]TableUpdate
+
+// TableUpdate is an object that maps from the row's UUID to a
+// RowUpdate
+type TableUpdate map[string]*RowUpdate
+
+// RowUpdate represents a row update according to RFC7047
+type RowUpdate struct {
+ New *Row `json:"new,omitempty"`
+ Old *Row `json:"old,omitempty"`
+}
+
+// Insert returns true if this is an update for an insert operation
+func (r RowUpdate) Insert() bool {
+ return r.New != nil && r.Old == nil
+}
+
+// Modify returns true if this is an update for a modify operation
+func (r RowUpdate) Modify() bool {
+ return r.New != nil && r.Old != nil
+}
+
+// Delete returns true if this is an update for a delete operation
+func (r RowUpdate) Delete() bool {
+ return r.New == nil && r.Old != nil
+}
+
+func (r *RowUpdate) FromRowUpdate2(ru2 RowUpdate2) {
+ r.Old = ru2.Old
+ r.New = ru2.New
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go
new file mode 100644
index 0000000000..a040894c97
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go
@@ -0,0 +1,19 @@
+package ovsdb
+
+// TableUpdates2 is an object that maps from a table name to a
+// TableUpdate2
+type TableUpdates2 map[string]TableUpdate2
+
+// TableUpdate2 is an object that maps from the row's UUID to a
+// RowUpdate2
+type TableUpdate2 map[string]*RowUpdate2
+
+// RowUpdate2 represents a row update according to ovsdb-server.7
+type RowUpdate2 struct {
+ Initial *Row `json:"initial,omitempty"`
+ Insert *Row `json:"insert,omitempty"`
+ Modify *Row `json:"modify,omitempty"`
+ Delete *Row `json:"delete,omitempty"`
+ Old *Row `json:"-"`
+ New *Row `json:"-"`
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go
new file mode 100644
index 0000000000..6bc4636537
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go
@@ -0,0 +1,59 @@
+package ovsdb
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+)
+
+var validUUID = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`)
+
+// UUID is a UUID according to RFC7047
+type UUID struct {
+ GoUUID string `json:"uuid"`
+}
+
+// MarshalJSON will marshal an OVSDB style UUID to a JSON encoded byte array
+func (u UUID) MarshalJSON() ([]byte, error) {
+ var uuidSlice []string
+ err := ValidateUUID(u.GoUUID)
+ if err == nil {
+ uuidSlice = []string{"uuid", u.GoUUID}
+ } else {
+ uuidSlice = []string{"named-uuid", u.GoUUID}
+ }
+
+ return json.Marshal(uuidSlice)
+}
+
+// UnmarshalJSON will unmarshal a JSON encoded byte array to a OVSDB style UUID
+// The wire form is a 2-element array: ["uuid"|"named-uuid", <id>].
+func (u *UUID) UnmarshalJSON(b []byte) error {
+ var ovsUUID []string
+ if err := json.Unmarshal(b, &ovsUUID); err != nil {
+ // Fix: the previous version shadowed the named return value, so
+ // unmarshal errors were silently swallowed and nil was returned.
+ return err
+ }
+ if len(ovsUUID) != 2 {
+ // Fix: previously an unguarded ovsUUID[1] could panic on short input.
+ return fmt.Errorf("expected a 2 element uuid array, got %d elements", len(ovsUUID))
+ }
+ u.GoUUID = ovsUUID[1]
+ return nil
+}
+
+// ValidateUUID checks that uuid is a well-formed 36-character RFC 4122
+// UUID string; it returns nil when the uuid is valid.
+func ValidateUUID(uuid string) error {
+ if len(uuid) != 36 {
+ // Fix: the message previously claimed the uuid "exceeds 36
+ // characters" even when it was shorter.
+ return fmt.Errorf("uuid is not 36 characters long")
+ }
+
+ if !validUUID.MatchString(uuid) {
+ return fmt.Errorf("uuid does not match regexp")
+ }
+
+ return nil
+}
+
+func IsNamedUUID(uuid string) bool {
+ return len(uuid) > 0 && !validUUID.MatchString(uuid)
+}
+
+// IsValidUUID reports whether uuid is a well-formed UUID string.
+func IsValidUUID(uuid string) bool {
+ return ValidateUUID(uuid) == nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/server/doc.go b/vendor/github.com/ovn-org/libovsdb/server/doc.go
new file mode 100644
index 0000000000..a4af0953cd
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/server/doc.go
@@ -0,0 +1,8 @@
+/*
+Package server provides an alpha-quality implementation of an OVSDB Server
+
+It is designed only to be used for testing the functionality of the client
+library such that assertions can be made on the cache that backs the
+client's monitor or the server
+*/
+package server
diff --git a/vendor/github.com/ovn-org/libovsdb/server/monitor.go b/vendor/github.com/ovn-org/libovsdb/server/monitor.go
new file mode 100644
index 0000000000..2dedf992b0
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/server/monitor.go
@@ -0,0 +1,213 @@
+package server
+
+import (
+ "encoding/json"
+ "log"
+ "sync"
+
+ "github.com/cenkalti/rpc2"
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/database"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// connectionMonitors maps a connection to a map or monitors
+type connectionMonitors struct {
+ monitors map[string]*monitor
+ mu sync.RWMutex
+}
+
+func newConnectionMonitors() *connectionMonitors {
+ return &connectionMonitors{
+ monitors: make(map[string]*monitor),
+ mu: sync.RWMutex{},
+ }
+}
+
// monitor represents a connection to a client where db changes
// will be reflected
type monitor struct {
	id      string                           // monitor id ("value" in the RPC), echoed back in notifications
	kind    monitorKind                      // which monitor RPC flavor the client requested
	request map[string]*ovsdb.MonitorRequest // per-table column and select filters
	client  *rpc2.Client                     // connection to notify on changes
}

// monitorKind distinguishes the three OVSDB monitor RPC flavors.
type monitorKind int

const (
	monitorKindOriginal monitorKind = iota // "monitor"
	monitorKindConditional                 // "monitor_cond"
	monitorKindConditionalSince            // "monitor_cond_since"
)
+
+func newMonitor(id string, request map[string]*ovsdb.MonitorRequest, client *rpc2.Client) *monitor {
+ m := &monitor{
+ id: id,
+ kind: monitorKindOriginal,
+ request: request,
+ client: client,
+ }
+ return m
+}
+
+func newConditionalMonitor(id string, request map[string]*ovsdb.MonitorRequest, client *rpc2.Client) *monitor {
+ m := &monitor{
+ id: id,
+ kind: monitorKindConditional,
+ request: request,
+ client: client,
+ }
+ return m
+}
+
+func newConditionalSinceMonitor(id string, request map[string]*ovsdb.MonitorRequest, client *rpc2.Client) *monitor {
+ m := &monitor{
+ id: id,
+ kind: monitorKindConditional,
+ request: request,
+ client: client,
+ }
+ return m
+}
+
// Send will send an update if it matches the tables and monitor select arguments
// we take the update by value (not reference) so we can mutate it in place before
// queuing it for dispatch
func (m *monitor) Send(update database.Update) {
	// remove updates for tables that we aren't watching
	tu := m.filter(update)
	if len(tu) == 0 {
		return
	}
	args := []interface{}{json.RawMessage([]byte(m.id)), tu}
	var reply interface{}
	// NOTE(review): this is the plain "monitor" flavor, yet the notification
	// is sent with method "update2"; RFC 7047 specifies method "update" for
	// plain monitors. Confirm the client side expects "update2" here.
	err := m.client.Call("update2", args, &reply)
	if err != nil {
		log.Printf("client error handling update rpc: %v", err)
	}
}
+
+// Send2 will send an update if it matches the tables and monitor select arguments
+// we take the update by value (not reference) so we can mutate it in place before
+// queuing it for dispatch
+func (m *monitor) Send2(update database.Update) {
+ // remove updates for tables that we aren't watching
+ tu := m.filter2(update)
+ if len(tu) == 0 {
+ return
+ }
+ args := []interface{}{json.RawMessage([]byte(m.id)), tu}
+ var reply interface{}
+ err := m.client.Call("update2", args, &reply)
+ if err != nil {
+ log.Printf("client error handling update2 rpc: %v", err)
+ }
+}
+
// Send3 will send an update if it matches the tables and monitor select arguments
// we take the update by value (not reference) so we can mutate it in place before
// queuing it for dispatch
func (m *monitor) Send3(id uuid.UUID, update database.Update) {
	// remove updates for tables that we aren't watching
	tu := m.filter2(update)
	if len(tu) == 0 {
		return
	}
	// monitor_cond_since notifications additionally carry the transaction id
	args := []interface{}{json.RawMessage([]byte(m.id)), id.String(), tu}
	var reply interface{}
	// NOTE(review): ovsdb-server(7) specifies method "update3" for
	// monitor_cond_since notifications, but "update2" is used here (only the
	// log message says update3). Confirm against the client implementation.
	err := m.client.Call("update2", args, &reply)
	if err != nil {
		log.Printf("client error handling update3 rpc: %v", err)
	}
}
+
+func filterColumns(row *ovsdb.Row, columns map[string]bool) *ovsdb.Row {
+ if row == nil {
+ return nil
+ }
+ new := make(ovsdb.Row, len(*row))
+ for k, v := range *row {
+ if _, ok := columns[k]; ok {
+ new[k] = v
+ }
+ }
+ return &new
+}
+
// filter converts a database update into classic "update" notation,
// restricted to the tables, columns and operation kinds (insert/modify/
// delete) that this monitor subscribed to.
func (m *monitor) filter(update database.Update) ovsdb.TableUpdates {
	// remove updates for tables that we aren't watching
	tables := update.GetUpdatedTables()
	tus := make(ovsdb.TableUpdates, len(tables))
	for _, table := range tables {
		if _, ok := m.request[table]; len(m.request) > 0 && !ok {
			// only remove updates for tables that were not requested if other
			// tables were requested, otherwise all tables are watched.
			continue
		}
		tu := ovsdb.TableUpdate{}
		// always include the row uuid plus any explicitly requested columns
		cols := make(map[string]bool)
		cols["_uuid"] = true
		// NOTE(review): when m.request is empty, m.request[table] is a nil
		// *MonitorRequest and the accesses below would panic; confirm callers
		// always populate the request map.
		for _, c := range m.request[table].Columns {
			cols[c] = true
		}
		_ = update.ForEachRowUpdate(table, func(uuid string, ru2 ovsdb.RowUpdate2) error {
			// down-convert the update2 notation to the classic RowUpdate
			ru := &ovsdb.RowUpdate{}
			ru.FromRowUpdate2(ru2)
			switch {
			case ru.Insert() && m.request[table].Select.Insert():
				fallthrough
			case ru.Modify() && m.request[table].Select.Modify():
				fallthrough
			case ru.Delete() && m.request[table].Select.Delete():
				// cols always contains "_uuid", so this guard never fires
				if len(cols) == 0 {
					return nil
				}
				ru.New = filterColumns(ru.New, cols)
				ru.Old = filterColumns(ru.Old, cols)
				tu[uuid] = ru
			}
			return nil
		})
		tus[table] = tu
	}
	return tus
}
+
// filter2 converts a database update into "update2" notation, restricted to
// the tables, columns and operation kinds that this monitor subscribed to.
func (m *monitor) filter2(update database.Update) ovsdb.TableUpdates2 {
	// remove updates for tables that we aren't watching
	tables := update.GetUpdatedTables()
	tus2 := make(ovsdb.TableUpdates2, len(tables))
	for _, table := range tables {
		if _, ok := m.request[table]; len(m.request) > 0 && !ok {
			// only remove updates for tables that were not requested if other
			// tables were requested, otherwise all tables are watched.
			continue
		}
		tu2 := ovsdb.TableUpdate2{}
		// always include the row uuid plus any explicitly requested columns
		cols := make(map[string]bool)
		cols["_uuid"] = true
		// NOTE(review): when m.request is empty, m.request[table] is a nil
		// *MonitorRequest and the accesses below would panic; confirm callers
		// always populate the request map.
		for _, c := range m.request[table].Columns {
			cols[c] = true
		}
		_ = update.ForEachRowUpdate(table, func(uuid string, ru2 ovsdb.RowUpdate2) error {
			switch {
			case ru2.Insert != nil && m.request[table].Select.Insert():
				fallthrough
			case ru2.Modify != nil && m.request[table].Select.Modify():
				fallthrough
			case ru2.Delete != nil && m.request[table].Select.Delete():
				// cols always contains "_uuid", so this guard never fires
				if len(cols) == 0 {
					return nil
				}
				ru2.Insert = filterColumns(ru2.Insert, cols)
				ru2.Modify = filterColumns(ru2.Modify, cols)
				ru2.Delete = filterColumns(ru2.Delete, cols)
				tu2[uuid] = &ru2
			}
			return nil
		})
		tus2[table] = tu2
	}
	return tus2
}
diff --git a/vendor/github.com/ovn-org/libovsdb/server/server.go b/vendor/github.com/ovn-org/libovsdb/server/server.go
new file mode 100644
index 0000000000..ec60ea5d20
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/server/server.go
@@ -0,0 +1,416 @@
+package server
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "sync"
+
+ "github.com/cenkalti/rpc2"
+ "github.com/cenkalti/rpc2/jsonrpc"
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+ "github.com/google/uuid"
+ "github.com/ovn-org/libovsdb/database"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
// OvsdbServer is an ovsdb server
type OvsdbServer struct {
	srv          *rpc2.Server
	listener     net.Listener
	done         chan struct{} // closed by Close to signal shutdown
	db           database.Database
	ready        bool // true while Serve is accepting connections; guarded by readyMutex
	doEcho       bool // when false, Echo returns an error; guarded by readyMutex
	readyMutex   sync.RWMutex
	models       map[string]model.DatabaseModel // registered models keyed by schema name; guarded by modelsMutex
	modelsMutex  sync.RWMutex
	monitors     map[*rpc2.Client]*connectionMonitors // per-connection monitor registries; guarded by monitorMutex
	monitorMutex sync.RWMutex
	logger       logr.Logger
	txnMutex     sync.Mutex // serializes Transact calls
}
+
// init raises the stdr verbosity so server debug logs are emitted; note this
// is package-global state shared with any other stdr user in the process.
func init() {
	stdr.SetVerbosity(5)
}
+
// NewOvsdbServer returns a new OvsdbServer
func NewOvsdbServer(db database.Database, models ...model.DatabaseModel) (*OvsdbServer, error) {
	l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("server")
	o := &OvsdbServer{
		done:         make(chan struct{}, 1),
		doEcho:       true,
		db:           db,
		models:       make(map[string]model.DatabaseModel),
		modelsMutex:  sync.RWMutex{},
		monitors:     make(map[*rpc2.Client]*connectionMonitors),
		monitorMutex: sync.RWMutex{},
		logger:       l,
	}
	// index the models by schema name
	// (note: the loop variables below shadow the imported model and database
	// packages)
	o.modelsMutex.Lock()
	for _, model := range models {
		o.models[model.Schema.Name] = model
	}
	o.modelsMutex.Unlock()
	// create the backing database for every registered model
	for database, model := range o.models {
		if err := o.db.CreateDatabase(database, model.Schema); err != nil {
			return nil, err
		}
	}
	// register the OVSDB RPC handlers
	o.srv = rpc2.NewServer()
	o.srv.Handle("list_dbs", o.ListDatabases)
	o.srv.Handle("get_schema", o.GetSchema)
	o.srv.Handle("transact", o.Transact)
	o.srv.Handle("cancel", o.Cancel)
	o.srv.Handle("monitor", o.Monitor)
	o.srv.Handle("monitor_cond", o.MonitorCond)
	o.srv.Handle("monitor_cond_since", o.MonitorCondSince)
	o.srv.Handle("monitor_cancel", o.MonitorCancel)
	// NOTE(review): the Lock method exists below but no "lock" handler is
	// registered here; confirm whether that is intentional.
	o.srv.Handle("steal", o.Steal)
	o.srv.Handle("unlock", o.Unlock)
	o.srv.Handle("echo", o.Echo)
	return o, nil
}
+
// OnConnect registers a function to run when a client connects.
// Thin delegate to the underlying rpc2 server.
func (o *OvsdbServer) OnConnect(f func(*rpc2.Client)) {
	o.srv.OnConnect(f)
}

// OnDisConnect registers a function to run when a client disconnects.
// Thin delegate to the underlying rpc2 server.
func (o *OvsdbServer) OnDisConnect(f func(*rpc2.Client)) {
	o.srv.OnDisconnect(f)
}
+
+func (o *OvsdbServer) DoEcho(ok bool) {
+ o.readyMutex.Lock()
+ o.doEcho = ok
+ o.readyMutex.Unlock()
+}
+
// Serve starts the OVSDB server on the given path and protocol
func (o *OvsdbServer) Serve(protocol string, path string) error {
	var err error
	o.listener, err = net.Listen(protocol, path)
	if err != nil {
		return err
	}
	// mark ready before entering the accept loop so Ready() reflects that
	// the listener is up
	o.readyMutex.Lock()
	o.ready = true
	o.readyMutex.Unlock()
	for {
		conn, err := o.listener.Accept()
		if err != nil {
			// Close() flips ready to false and closes the listener; treat
			// the resulting Accept error as a clean shutdown
			if !o.Ready() {
				return nil
			}
			return err
		}

		// TODO: Need to cleanup when connection is closed
		go o.srv.ServeCodec(jsonrpc.NewJSONCodec(conn))
	}
}
+
// isClosed reports, without blocking, whether ch has been closed.
func isClosed(ch <-chan struct{}) bool {
	select {
	case <-ch:
		return true
	default:
		return false
	}
}
+
// Close closes the OvsdbServer
func (o *OvsdbServer) Close() {
	o.readyMutex.Lock()
	o.ready = false
	o.readyMutex.Unlock()
	// Only close the listener if Serve() has been called
	if o.listener != nil {
		if err := o.listener.Close(); err != nil {
			o.logger.Error(err, "failed to close listener")
		}
	}
	// NOTE(review): the check-then-close below is not atomic; two concurrent
	// Close() calls could both pass isClosed and the second close(o.done)
	// would panic. Confirm Close is only invoked once.
	if !isClosed(o.done) {
		close(o.done)
	}
}
+
+// Ready returns true if a server is ready to handle connections
+func (o *OvsdbServer) Ready() bool {
+ o.readyMutex.RLock()
+ defer o.readyMutex.RUnlock()
+ return o.ready
+}
+
+// ListDatabases lists the databases in the current system
+func (o *OvsdbServer) ListDatabases(client *rpc2.Client, args []interface{}, reply *[]string) error {
+ dbs := []string{}
+ o.modelsMutex.RLock()
+ for _, db := range o.models {
+ dbs = append(dbs, db.Schema.Name)
+ }
+ o.modelsMutex.RUnlock()
+ *reply = dbs
+ return nil
+}
+
+func (o *OvsdbServer) GetSchema(client *rpc2.Client, args []interface{}, reply *ovsdb.DatabaseSchema,
+) error {
+ db, ok := args[0].(string)
+ if !ok {
+ return fmt.Errorf("database %v is not a string", args[0])
+ }
+ o.modelsMutex.RLock()
+ model, ok := o.models[db]
+ if !ok {
+ return fmt.Errorf("database %s does not exist", db)
+ }
+ o.modelsMutex.RUnlock()
+ *reply = model.Schema
+ return nil
+}
+
// Transact issues a new database transaction and returns the results
func (o *OvsdbServer) Transact(client *rpc2.Client, args []json.RawMessage, reply *[]*ovsdb.OperationResult) error {
	// While allowing other rpc handlers to run in parallel, this ovsdb server expects transactions
	// to be serialized. The following mutex ensures that.
	// Ref: https://github.com/cenkalti/rpc2/blob/c1acbc6ec984b7ae6830b6a36b62f008d5aefc4c/client.go#L187
	o.txnMutex.Lock()
	defer o.txnMutex.Unlock()

	// args[0] is the database name, args[1:] are the operations
	if len(args) < 2 {
		return fmt.Errorf("not enough args")
	}
	var db string
	err := json.Unmarshal(args[0], &db)
	if err != nil {
		return fmt.Errorf("database %v is not a string", args[0])
	}
	var ops []ovsdb.Operation
	for i := 1; i < len(args); i++ {
		var op ovsdb.Operation
		err = json.Unmarshal(args[i], &op)
		if err != nil {
			return err
		}
		ops = append(ops, op)
	}
	response, updates := o.transact(db, ops)
	*reply = response
	// if any operation failed, report the results to the caller but do not
	// commit the updates or notify monitors
	for _, operResult := range response {
		if operResult.Error != "" {
			o.logger.Error(errors.New("failed to process operation"), "Skipping transaction DB commit due to error", "operations", ops, "results", response, "operation error", operResult.Error)
			return nil
		}
	}
	// NOTE(review): monitors are notified before the commit; confirm the
	// ordering is intentional.
	transactionID := uuid.New()
	o.processMonitors(transactionID, updates)
	return o.db.Commit(db, transactionID, updates)
}
+
+func (o *OvsdbServer) transact(name string, operations []ovsdb.Operation) ([]*ovsdb.OperationResult, database.Update) {
+ transaction := o.db.NewTransaction(name)
+ return transaction.Transact(operations...)
+}
+
// Cancel cancels the last transaction
// NOTE: not implemented; always returns an error.
func (o *OvsdbServer) Cancel(client *rpc2.Client, args []interface{}, reply *[]interface{}) error {
	return fmt.Errorf("not implemented")
}
+
// Monitor monitors a given database table and provides updates to the client via an RPC callback
func (o *OvsdbServer) Monitor(client *rpc2.Client, args []json.RawMessage, reply *ovsdb.TableUpdates) error {
	// args: [db-name, monitor-id value, per-table monitor requests]
	var db string
	if err := json.Unmarshal(args[0], &db); err != nil {
		return fmt.Errorf("database %v is not a string", args[0])
	}
	if !o.db.Exists(db) {
		return fmt.Errorf("db does not exist")
	}
	// the raw JSON of the monitor id is used as the registry key and echoed
	// back in update notifications
	value := string(args[1])
	var request map[string]*ovsdb.MonitorRequest
	if err := json.Unmarshal(args[2], &request); err != nil {
		return err
	}
	o.monitorMutex.Lock()
	defer o.monitorMutex.Unlock()
	clientMonitors, ok := o.monitors[client]
	if !ok {
		o.monitors[client] = newConnectionMonitors()
	} else {
		if _, ok := clientMonitors.monitors[value]; ok {
			return fmt.Errorf("monitor with that value already exists")
		}
	}

	transaction := o.db.NewTransaction(db)

	// seed the reply with the current contents of each requested table so
	// the client starts from a full snapshot
	tableUpdates := make(ovsdb.TableUpdates)
	for t, request := range request {
		op := ovsdb.Operation{Op: ovsdb.OperationSelect, Table: t, Columns: request.Columns}
		result, _ := transaction.Transact(op)
		if len(result) == 0 || len(result[0].Rows) == 0 {
			continue
		}
		rows := result[0].Rows
		tableUpdates[t] = make(ovsdb.TableUpdate, len(rows))
		for i := range rows {
			uuid := rows[i]["_uuid"].(ovsdb.UUID).GoUUID
			tableUpdates[t][uuid] = &ovsdb.RowUpdate{New: &rows[i]}
		}
	}
	*reply = tableUpdates
	o.monitors[client].monitors[value] = newMonitor(value, request, client)
	return nil
}
+
// MonitorCond monitors a given database table and provides updates to the client via an RPC callback
func (o *OvsdbServer) MonitorCond(client *rpc2.Client, args []json.RawMessage, reply *ovsdb.TableUpdates2) error {
	// args: [db-name, monitor-id value, per-table monitor requests]
	var db string
	if err := json.Unmarshal(args[0], &db); err != nil {
		return fmt.Errorf("database %v is not a string", args[0])
	}
	if !o.db.Exists(db) {
		return fmt.Errorf("db does not exist")
	}
	// the raw JSON of the monitor id is used as the registry key and echoed
	// back in update notifications
	value := string(args[1])
	var request map[string]*ovsdb.MonitorRequest
	if err := json.Unmarshal(args[2], &request); err != nil {
		return err
	}
	o.monitorMutex.Lock()
	defer o.monitorMutex.Unlock()
	clientMonitors, ok := o.monitors[client]
	if !ok {
		o.monitors[client] = newConnectionMonitors()
	} else {
		if _, ok := clientMonitors.monitors[value]; ok {
			return fmt.Errorf("monitor with that value already exists")
		}
	}

	transaction := o.db.NewTransaction(db)

	// seed the reply with the current contents of each requested table, in
	// update2 notation (rows reported as Initial)
	tableUpdates := make(ovsdb.TableUpdates2)
	for t, request := range request {
		op := ovsdb.Operation{Op: ovsdb.OperationSelect, Table: t, Columns: request.Columns}
		result, _ := transaction.Transact(op)
		if len(result) == 0 || len(result[0].Rows) == 0 {
			continue
		}
		rows := result[0].Rows
		tableUpdates[t] = make(ovsdb.TableUpdate2, len(rows))
		for i := range rows {
			uuid := rows[i]["_uuid"].(ovsdb.UUID).GoUUID
			tableUpdates[t][uuid] = &ovsdb.RowUpdate2{Initial: &rows[i]}
		}
	}
	*reply = tableUpdates
	o.monitors[client].monitors[value] = newConditionalMonitor(value, request, client)
	return nil
}
+
+// MonitorCondSince monitors a given database table and provides updates to the client via an RPC callback
+func (o *OvsdbServer) MonitorCondSince(client *rpc2.Client, args []json.RawMessage, reply *ovsdb.MonitorCondSinceReply) error {
+ var db string
+ if err := json.Unmarshal(args[0], &db); err != nil {
+ return fmt.Errorf("database %v is not a string", args[0])
+ }
+ if !o.db.Exists(db) {
+ return fmt.Errorf("db does not exist")
+ }
+ value := string(args[1])
+ var request map[string]*ovsdb.MonitorRequest
+ if err := json.Unmarshal(args[2], &request); err != nil {
+ return err
+ }
+ o.monitorMutex.Lock()
+ defer o.monitorMutex.Unlock()
+ clientMonitors, ok := o.monitors[client]
+ if !ok {
+ o.monitors[client] = newConnectionMonitors()
+ } else {
+ if _, ok := clientMonitors.monitors[value]; ok {
+ return fmt.Errorf("monitor with that value already exists")
+ }
+ }
+
+ transaction := o.db.NewTransaction(db)
+
+ tableUpdates := make(ovsdb.TableUpdates2)
+ for t, request := range request {
+ op := ovsdb.Operation{Op: ovsdb.OperationSelect, Table: t, Columns: request.Columns}
+ result, _ := transaction.Transact(op)
+ if len(result) == 0 || len(result[0].Rows) == 0 {
+ continue
+ }
+ rows := result[0].Rows
+ tableUpdates[t] = make(ovsdb.TableUpdate2, len(rows))
+ for i := range rows {
+ uuid := rows[i]["_uuid"].(ovsdb.UUID).GoUUID
+ tableUpdates[t][uuid] = &ovsdb.RowUpdate2{Initial: &rows[i]}
+ }
+ }
+ *reply = ovsdb.MonitorCondSinceReply{Found: false, LastTransactionID: "00000000-0000-0000-000000000000", Updates: tableUpdates}
+ o.monitors[client].monitors[value] = newConditionalSinceMonitor(value, request, client)
+ return nil
+}
+
// MonitorCancel cancels a monitor on a given table
// NOTE: not implemented; always returns an error.
func (o *OvsdbServer) MonitorCancel(client *rpc2.Client, args []interface{}, reply *[]interface{}) error {
	return fmt.Errorf("not implemented")
}

// Lock acquires a lock on a table for the client
// NOTE: not implemented; also never registered as an RPC handler in
// NewOvsdbServer.
func (o *OvsdbServer) Lock(client *rpc2.Client, args []interface{}, reply *[]interface{}) error {
	return fmt.Errorf("not implemented")
}

// Steal steals a lock for a client
// NOTE: not implemented; always returns an error.
func (o *OvsdbServer) Steal(client *rpc2.Client, args []interface{}, reply *[]interface{}) error {
	return fmt.Errorf("not implemented")
}

// Unlock releases a lock for a client
// NOTE: not implemented; always returns an error.
func (o *OvsdbServer) Unlock(client *rpc2.Client, args []interface{}, reply *[]interface{}) error {
	return fmt.Errorf("not implemented")
}
+
+// Echo tests the liveness of the connection
+func (o *OvsdbServer) Echo(client *rpc2.Client, args []interface{}, reply *[]interface{}) error {
+ o.readyMutex.Lock()
+ defer o.readyMutex.Unlock()
+ if !o.doEcho {
+ return fmt.Errorf("no echo reply")
+ }
+ echoReply := make([]interface{}, len(args))
+ copy(echoReply, args)
+ *reply = echoReply
+ return nil
+}
+
+func (o *OvsdbServer) processMonitors(id uuid.UUID, update database.Update) {
+ o.monitorMutex.RLock()
+ for _, c := range o.monitors {
+ for _, m := range c.monitors {
+ switch m.kind {
+ case monitorKindOriginal:
+ m.Send(update)
+ case monitorKindConditional:
+ m.Send2(update)
+ case monitorKindConditionalSince:
+ m.Send3(id, update)
+ }
+ }
+ }
+ o.monitorMutex.RUnlock()
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/difference.go b/vendor/github.com/ovn-org/libovsdb/updates/difference.go
new file mode 100644
index 0000000000..7ebfe8bb51
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/difference.go
@@ -0,0 +1,209 @@
+package updates
+
+import "reflect"
+
// difference between value 'a' and value 'b'.
// This difference is calculated as described in
// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
// The result is calculated in 'a' in-place and returned unless the
// difference is 'b' in which case 'b' is returned unmodified. Also returns a
// boolean indicating if there is an actual difference.
func difference(a, b interface{}) (interface{}, bool) {
	// with no original value this degenerates to a plain two-way difference
	return mergeDifference(nil, a, b)
}
+
// applyDifference returns the result of applying difference 'd' to value 'v'
// along with a boolean indicating if 'v' was changed.
func applyDifference(v, d interface{}) (interface{}, bool) {
	if d == nil {
		return v, false
	}
	// difference can be applied with the same algorithm used to calculate it
	// f(x,f(x,y))=y
	result, changed := difference(v, d)
	dv := reflect.ValueOf(d)
	switch dv.Kind() {
	case reflect.Slice:
		fallthrough
	case reflect.Map:
		// but we need to tweak the interpretation of change for map and slices:
		// when there is no difference between the value and non-empty delta, it
		// actually means the value needs to be emptied so there is actually a
		// change
		if !changed && dv.Len() > 0 {
			return result, true
		}
		// there are no changes when delta is empty
		return result, changed && dv.Len() > 0
	}
	return result, changed
}
+
// mergeDifference, given an original value 'o' and two differences 'a' and 'b',
// returns a new equivalent difference that when applied on 'o' it would have
// the same result as applying 'a' and 'b' consecutively.
// If 'o' is nil, returns the difference between 'a' and 'b'.
// This difference is calculated as described in
// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
// The result is calculated in 'a' in-place and returned unless the result is
// 'b' in which case 'b' is returned unmodified. Also returns a boolean
// indicating if there is an actual difference.
func mergeDifference(o, a, b interface{}) (interface{}, bool) {
	// dispatch on the kind of 'b', falling back to the kind of 'a' when 'b'
	// is nil
	kind := reflect.ValueOf(b).Kind()
	if kind == reflect.Invalid {
		kind = reflect.ValueOf(a).Kind()
	}
	switch kind {
	case reflect.Invalid:
		// both values are nil
		return nil, false
	case reflect.Slice:
		// set differences are transitive
		return setDifference(a, b)
	case reflect.Map:
		return mergeMapDifference(o, a, b)
	case reflect.Array:
		panic("Not implemented")
	default:
		// atomic (scalar) values
		return mergeAtomicDifference(o, a, b)
	}
}
+
// setDifference calculates the difference between set 'a' and set 'b'.
// This difference is calculated as described in
// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
// The result is calculated in 'a' in-place and returned unless the difference
// is 'b' in which case 'b' is returned unmodified. Also returns a boolean
// indicating if there is an actual difference.
func setDifference(a, b interface{}) (interface{}, bool) {
	av := reflect.ValueOf(a)
	bv := reflect.ValueOf(b)

	// fast paths: when either side is missing or empty, the difference is
	// the other side
	if !av.IsValid() && !bv.IsValid() {
		return nil, false
	} else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() {
		return b, bv.Len() != 0
	} else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() {
		return a, av.Len() != 0
	}

	// From https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
	// The difference between two sets are all elements that only belong to one
	// of the sets.
	difference := make(map[interface{}]struct{}, bv.Len())
	for i := 0; i < bv.Len(); i++ {
		// supposedly we are working with comparable atomic types with no
		// pointers so we can use the values as map key
		difference[bv.Index(i).Interface()] = struct{}{}
	}
	j := av.Len()
	// compact 'a' in place: elements also found in 'b' cancel out
	for i := 0; i < j; {
		vv := av.Index(i)
		vi := vv.Interface()
		if _, ok := difference[vi]; ok {
			// this value of 'a' is in 'b', so remove it from 'a'; to do that,
			// overwrite it with the last value and re-evaluate
			vv.Set(av.Index(j - 1))
			// decrease where the last 'a' value is at
			j--
			// remove from 'b' values
			delete(difference, vi)
		} else {
			// this value of 'a' is not in 'b', evaluate the next value
			i++
		}
	}
	// trim the slice to the actual values held
	av = av.Slice(0, j)
	for item := range difference {
		// this value of 'b' is not in 'a', so add it
		av = reflect.Append(av, reflect.ValueOf(item))
	}

	if av.Len() == 0 {
		return reflect.Zero(av.Type()).Interface(), false
	}

	return av.Interface(), true
}
+
// mergeMapDifference, given an original map 'o' and two differences 'a' and
// 'b', returns a new equivalent difference that when applied on 'o' it would
// have the same result as applying 'a' and 'b' consecutively.
// If 'o' is nil, returns the difference between 'a' and 'b'.
// This difference is calculated as described in
// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
// The result is calculated in 'a' in-place and returned unless the result is
// 'b' in which case 'b' is returned unmodified.
// Returns a boolean indicating if there is an actual difference.
func mergeMapDifference(o, a, b interface{}) (interface{}, bool) {
	av := reflect.ValueOf(a)
	bv := reflect.ValueOf(b)

	// fast paths: when either difference is missing or empty, the merged
	// difference is the other one
	if !av.IsValid() && !bv.IsValid() {
		return nil, false
	} else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() {
		return b, bv.Len() != 0
	} else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() {
		return a, av.Len() != 0
	}

	ov := reflect.ValueOf(o)
	if !ov.IsValid() {
		ov = reflect.Zero(av.Type())
	}

	// From
	// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
	// The difference between two maps are all key-value pairs whose keys
	// appears in only one of the maps, plus the key-value pairs whose keys
	// appear in both maps but with different values. For the latter elements,
	// includes the value from the new column.

	// We can assume that difference is a transitive operation so we calculate
	// the difference between 'a' and 'b' but we need to handle exceptions when
	// the same key is present in all values.
	for i := bv.MapRange(); i.Next(); {
		kv := i.Key()
		bvv := i.Value()
		avv := av.MapIndex(kv)
		ovv := ov.MapIndex(kv)
		// supposedly we are working with comparable types with no pointers so
		// we can compare directly here
		switch {
		case ovv.IsValid() && avv.IsValid() && ovv.Interface() == bvv.Interface():
			// key is present in the three values
			// final result would restore key to the original value, delete from 'a'
			av.SetMapIndex(kv, reflect.Value{})
		case ovv.IsValid() && avv.IsValid() && avv.Interface() == bvv.Interface():
			// key is present in the three values
			// final result would remove key, set in 'a' with 'o' value
			av.SetMapIndex(kv, ovv)
		case avv.IsValid() && avv.Interface() == bvv.Interface():
			// key/value is in 'a' and 'b', delete from 'a'
			av.SetMapIndex(kv, reflect.Value{})
		default:
			// key/value in 'b' is not in 'a', set in 'a' with 'b' value
			av.SetMapIndex(kv, bvv)
		}
	}

	if av.Len() == 0 {
		return reflect.Zero(av.Type()).Interface(), false
	}

	return av.Interface(), true
}
+
// mergeAtomicDifference, given an original atomic value 'o' and two differences
// 'a' and 'b', returns a new equivalent difference that when applied on 'o' it
// would have the same result as applying 'a' and 'b' consecutively.
// If 'o' is nil, returns the difference between 'a' and 'b'.
// This difference is calculated as described in
// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
// Returns a boolean indicating if there is an actual difference.
func mergeAtomicDifference(o, a, b interface{}) (interface{}, bool) {
	// compare against the original value when we have one, otherwise against
	// the previous difference
	reference := a
	if o != nil {
		reference = o
	}
	return b, !reflect.DeepEqual(reference, b)
}
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/doc.go b/vendor/github.com/ovn-org/libovsdb/updates/doc.go
new file mode 100644
index 0000000000..3e6fe18a0b
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/doc.go
@@ -0,0 +1,15 @@
+/*
+Package updates provides a utility to perform and aggregate model updates.
+
+As input, it supports OVSDB Operations, RowUpdate or RowUpdate2 notations via
+the corresponding Add methods.
+
+As output, it supports both OVSDB RowUpdate2 as well as model notation via the
+corresponding ForEach iterative methods.
+
+Several updates can be added and will be merged with any previous updates even
+if they are for the same model. If several updates for the same model are
+aggregated, the user is responsible that the provided model to be updated
+matches the updated model of the previous update.
+*/
+package updates
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/merge.go b/vendor/github.com/ovn-org/libovsdb/updates/merge.go
new file mode 100644
index 0000000000..562f226232
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/merge.go
@@ -0,0 +1,160 @@
+package updates
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
// merge combines two successive updates 'a' and 'b' for the same model into a
// single equivalent update, reconciling both the model (old/new) view and the
// wire-format rowUpdate2 view. 'ts' is the schema of the affected table.
func merge(ts *ovsdb.TableSchema, a, b modelUpdate) (modelUpdate, error) {
	// handle model update
	switch {
	case b.old == nil && b.new == nil:
		// noop
	case a.old == nil && a.new == nil:
		// first op
		a.old = b.old
		a.new = b.new
	case a.new != nil && b.old != nil && b.new != nil:
		// update after an insert or an update
		a.new = b.new
	case b.old != nil && b.new == nil:
		// a final delete
		a.new = nil
	default:
		return modelUpdate{}, fmt.Errorf("sequence of updates not supported")
	}

	// handle row update
	ru2, err := mergeRowUpdate(ts, a.rowUpdate2, b.rowUpdate2)
	if err != nil {
		return modelUpdate{}, err
	}
	// the row updates cancelled each other out, so the merged result is an
	// empty update
	if ru2 == nil {
		return modelUpdate{}, nil
	}
	a.rowUpdate2 = ru2

	return a, nil
}
+
// mergeRowUpdate combines two successive wire-format updates 'a' and 'b' for
// the same row into one equivalent update. Returns nil when the updates
// cancel each other out (insert then delete, or modifications that restore
// the original row).
func mergeRowUpdate(ts *ovsdb.TableSchema, a, b *rowUpdate2) (*rowUpdate2, error) {
	switch {
	case b == nil:
		// noop
	case a == nil:
		// first op
		a = b
	case a.Insert != nil && b.Modify != nil:
		// update after an insert: squash the modification into the insert
		a.New = b.New
		a.Insert = b.New
	case a.Modify != nil && b.Modify != nil:
		// update after update
		a.New = b.New
		a.Modify = mergeModifyRow(ts, a.Old, a.Modify, b.Modify)
		if a.Modify == nil {
			// we merged two modifications that brought back the row to its
			// original value which is a no op
			a = nil
		}
	case a.Insert != nil && b.Delete != nil:
		// delete after insert
		a = nil
	case b.Delete != nil:
		// a final delete
		a.Initial = nil
		a.Insert = nil
		a.Modify = nil
		a.New = nil
		a.Delete = b.Delete
	default:
		return &rowUpdate2{}, fmt.Errorf("sequence of updates not supported")
	}
	return a, nil
}
+
// mergeModifyRow merges two modification rows 'a' and 'b' with respect an
// original row 'o'. Two modifications that restore the original value cancel
// each other and won't be included in the result. Returns nil if there are no
// resulting modifications.
func mergeModifyRow(ts *ovsdb.TableSchema, o, a, b *ovsdb.Row) *ovsdb.Row {
	original := *o
	aMod := *a
	bMod := *b
	// fold every column of 'b' into 'a'
	for k, v := range bMod {
		if _, ok := aMod[k]; !ok {
			// column only modified by 'b', take it as-is
			aMod[k] = v
			continue
		}

		var result interface{}
		var changed bool

		// handle maps or sets first
		switch v.(type) {
		// difference only supports set or map values that are comparable with
		// no pointers. This should be currently fine because the set or map
		// values should only be non pointer atomic types or the UUID struct.
		case ovsdb.OvsSet:
			aSet := aMod[k].(ovsdb.OvsSet)
			bSet := v.(ovsdb.OvsSet)
			// handle sets of multiple values, single value sets are handled as
			// atomic values
			if ts.Column(k).TypeObj.Max() != 1 {
				// set difference is a fully transitive operation so we dont
				// need to do anything special to merge two differences
				result, changed = setDifference(aSet.GoSet, bSet.GoSet)
				result = ovsdb.OvsSet{GoSet: result.([]interface{})}
			}
		case ovsdb.OvsMap:
			aMap := aMod[k].(ovsdb.OvsMap)
			bMap := v.(ovsdb.OvsMap)
			var originalMap ovsdb.OvsMap
			if v, ok := original[k]; ok {
				originalMap = v.(ovsdb.OvsMap)
			}
			// map difference is not transitive with respect to the original
			// value so we have to take the original value into account when
			// merging
			result, changed = mergeMapDifference(originalMap.GoMap, aMap.GoMap, bMap.GoMap)
			result = ovsdb.OvsMap{GoMap: result.(map[interface{}]interface{})}
		}

		// was neither a map nor a set (or a single-value set that falls
		// through to the atomic handling)
		if result == nil {
			// atomic difference is not transitive with respect to the original
			// value so we have to take the original value into account when
			// merging
			o := original[k]
			if o == nil {
				// assume zero value if original does not have the column
				o = reflect.Zero(reflect.TypeOf(v)).Interface()
			}
			if set, ok := o.(ovsdb.OvsSet); ok {
				// atomic optional values are cleared out with an empty set
				// if the original value was also cleared out, use an empty set
				// instead of a nil set so that mergeAtomicDifference notices
				// that we are returning to the original value
				if set.GoSet == nil {
					set.GoSet = []interface{}{}
				}
				o = set
			}
			result, changed = mergeAtomicDifference(o, aMod[k], v)
		}

		if !changed {
			// the two modifications restore the original value: drop the column
			delete(aMod, k)
			continue
		}
		aMod[k] = result
	}

	if len(aMod) == 0 {
		return nil
	}

	return a
}
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/mutate.go b/vendor/github.com/ovn-org/libovsdb/updates/mutate.go
new file mode 100644
index 0000000000..1d87737fcd
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/mutate.go
@@ -0,0 +1,297 @@
+package updates
+
+import (
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+func removeFromSlice(a, b reflect.Value) (reflect.Value, bool) {
+ for i := 0; i < a.Len(); i++ {
+ if a.Index(i).Interface() == b.Interface() {
+ v := reflect.AppendSlice(a.Slice(0, i), a.Slice(i+1, a.Len()))
+ return v, true
+ }
+ }
+ return a, false
+}
+
+func insertToSlice(a, b reflect.Value) (reflect.Value, bool) {
+ for i := 0; i < a.Len(); i++ {
+ if a.Index(i).Interface() == b.Interface() {
+ return a, false
+ }
+ }
+ return reflect.Append(a, b), true
+}
+
+func mutate(current interface{}, mutator ovsdb.Mutator, value interface{}) (interface{}, interface{}) {
+ switch current.(type) {
+ case bool, string:
+ return current, value
+ }
+ switch mutator {
+ case ovsdb.MutateOperationInsert:
+ // for insert, the delta will be the new value added
+ return mutateInsert(current, value)
+ case ovsdb.MutateOperationDelete:
+ return mutateDelete(current, value)
+ case ovsdb.MutateOperationAdd:
+ // for add, the delta is the new value
+ new := mutateAdd(current, value)
+ return new, new
+ case ovsdb.MutateOperationSubtract:
+ // for subtract, the delta is the new value
+ new := mutateSubtract(current, value)
+ return new, new
+ case ovsdb.MutateOperationMultiply:
+ new := mutateMultiply(current, value)
+ return new, new
+ case ovsdb.MutateOperationDivide:
+ new := mutateDivide(current, value)
+ return new, new
+ case ovsdb.MutateOperationModulo:
+ new := mutateModulo(current, value)
+ return new, new
+ }
+ return current, value
+}
+
+func mutateInsert(current, value interface{}) (interface{}, interface{}) {
+ switch current.(type) {
+ case int, float64:
+ return current, current
+ }
+ vc := reflect.ValueOf(current)
+ vv := reflect.ValueOf(value)
+ if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) {
+ v, ok := insertToSlice(vc, vv)
+ var diff interface{}
+ if ok {
+ diff = value
+ }
+ return v.Interface(), diff
+ }
+ if !vc.IsValid() {
+ if vv.IsValid() {
+ return vv.Interface(), vv.Interface()
+ }
+ return nil, nil
+ }
+ if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice {
+ v := vc
+ diff := reflect.Indirect(reflect.New(vv.Type()))
+ for i := 0; i < vv.Len(); i++ {
+ var ok bool
+ v, ok = insertToSlice(v, vv.Index(i))
+ if ok {
+ diff = reflect.Append(diff, vv.Index(i))
+ }
+ }
+ if diff.Len() > 0 {
+ return v.Interface(), diff.Interface()
+ }
+ return v.Interface(), nil
+ }
+ if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map {
+ if vc.IsNil() && vv.Len() > 0 {
+ return value, value
+ }
+ diff := reflect.MakeMap(vc.Type())
+ iter := vv.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ if !vc.MapIndex(k).IsValid() {
+ vc.SetMapIndex(k, iter.Value())
+ diff.SetMapIndex(k, iter.Value())
+ }
+ }
+ if diff.Len() > 0 {
+ return current, diff.Interface()
+ }
+ return current, nil
+ }
+ return current, nil
+}
+
+func mutateDelete(current, value interface{}) (interface{}, interface{}) {
+ switch current.(type) {
+ case int, float64:
+ return current, nil
+ }
+ vc := reflect.ValueOf(current)
+ vv := reflect.ValueOf(value)
+ if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) {
+ v, ok := removeFromSlice(vc, vv)
+ diff := value
+ if !ok {
+ diff = nil
+ }
+ return v.Interface(), diff
+ }
+ if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice {
+ v := vc
+ diff := reflect.Indirect(reflect.New(vv.Type()))
+ for i := 0; i < vv.Len(); i++ {
+ var ok bool
+ v, ok = removeFromSlice(v, vv.Index(i))
+ if ok {
+ diff = reflect.Append(diff, vv.Index(i))
+ }
+ }
+ if diff.Len() > 0 {
+ return v.Interface(), diff.Interface()
+ }
+ return v.Interface(), nil
+ }
+ if vc.Kind() == reflect.Map && vv.Type() == reflect.SliceOf(vc.Type().Key()) {
+ diff := reflect.MakeMap(vc.Type())
+ for i := 0; i < vv.Len(); i++ {
+ if vc.MapIndex(vv.Index(i)).IsValid() {
+ diff.SetMapIndex(vv.Index(i), vc.MapIndex(vv.Index(i)))
+ vc.SetMapIndex(vv.Index(i), reflect.Value{})
+ }
+ }
+ if diff.Len() > 0 {
+ return current, diff.Interface()
+ }
+ return current, nil
+ }
+ if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map {
+ diff := reflect.MakeMap(vc.Type())
+ iter := vv.MapRange()
+ for iter.Next() {
+ vvk := iter.Key()
+ vvv := iter.Value()
+ vcv := vc.MapIndex(vvk)
+ if vcv.IsValid() && reflect.DeepEqual(vcv.Interface(), vvv.Interface()) {
+ diff.SetMapIndex(vvk, vcv)
+ vc.SetMapIndex(vvk, reflect.Value{})
+ }
+ }
+ if diff.Len() > 0 {
+ return current, diff.Interface()
+ }
+ return current, nil
+ }
+ return current, nil
+}
+
+func mutateAdd(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i + v
+ }
+ if i, ok := current.(float64); ok {
+ v := value.(float64)
+ return i + v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j + v
+ }
+ return is
+ }
+ if is, ok := current.([]float64); ok {
+ v := value.(float64)
+ for i, j := range is {
+ is[i] = j + v
+ }
+ return is
+ }
+ return current
+}
+
+func mutateSubtract(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i - v
+ }
+ if i, ok := current.(float64); ok {
+ v := value.(float64)
+ return i - v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j - v
+ }
+ return is
+ }
+ if is, ok := current.([]float64); ok {
+ v := value.(float64)
+ for i, j := range is {
+ is[i] = j - v
+ }
+ return is
+ }
+ return current
+}
+
+func mutateMultiply(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i * v
+ }
+ if i, ok := current.(float64); ok {
+ v := value.(float64)
+ return i * v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j * v
+ }
+ return is
+ }
+ if is, ok := current.([]float64); ok {
+ v := value.(float64)
+ for i, j := range is {
+ is[i] = j * v
+ }
+ return is
+ }
+ return current
+}
+
+func mutateDivide(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i / v
+ }
+ if i, ok := current.(float64); ok {
+ v := value.(float64)
+ return i / v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j / v
+ }
+ return is
+ }
+ if is, ok := current.([]float64); ok {
+ v := value.(float64)
+ for i, j := range is {
+ is[i] = j / v
+ }
+ return is
+ }
+ return current
+}
+
+func mutateModulo(current, value interface{}) interface{} {
+ if i, ok := current.(int); ok {
+ v := value.(int)
+ return i % v
+ }
+ if is, ok := current.([]int); ok {
+ v := value.(int)
+ for i, j := range is {
+ is[i] = j % v
+ }
+ return is
+ }
+ return current
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/references.go b/vendor/github.com/ovn-org/libovsdb/updates/references.go
new file mode 100644
index 0000000000..938d02aae9
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/references.go
@@ -0,0 +1,797 @@
+package updates
+
+import (
+ "fmt"
+
+ "github.com/ovn-org/libovsdb/database"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// ReferenceProvider should be implemented by a database that tracks references
+type ReferenceProvider interface {
+ // GetReferences provides the references to the provided row
+ GetReferences(database, table, uuid string) (database.References, error)
+ // Get provides the corresponding model
+ Get(database, table string, uuid string) (model.Model, error)
+}
+
+// DatabaseUpdate bundles updates together with the updated
+// reference information
+type DatabaseUpdate struct {
+ ModelUpdates
+ referenceUpdates database.References
+}
+
+func (u DatabaseUpdate) ForReferenceUpdates(do func(references database.References) error) error {
+ refsCopy := database.References{}
+ // since refsCopy is empty, this will just copy everything
+ applyReferenceModifications(refsCopy, u.referenceUpdates)
+ return do(refsCopy)
+}
+
+func NewDatabaseUpdate(updates ModelUpdates, references database.References) DatabaseUpdate {
+ return DatabaseUpdate{
+ ModelUpdates: updates,
+ referenceUpdates: references,
+ }
+}
+
+// ProcessReferences tracks referential integrity for the provided set of
+// updates. It returns an updated set of updates which includes additional
+// updates and updated references as a result of the reference garbage
+// collection described in RFC7047. These additional updates resulting from the
+// reference garbage collection are also returned separately. Any constraint or
+// referential integrity violation is returned as an error.
+func ProcessReferences(dbModel model.DatabaseModel, provider ReferenceProvider, updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) {
+ referenceTracker := newReferenceTracker(dbModel, provider)
+ return referenceTracker.processReferences(updates)
+}
+
+type referenceTracker struct {
+ dbModel model.DatabaseModel
+ provider ReferenceProvider
+
+ // updates that are being processed
+ updates ModelUpdates
+
+ // references are the updated references by the set of updates processed
+ references database.References
+
+ // helper maps to track the rows that we are processing and their tables
+ tracked map[string]string
+ added map[string]string
+ deleted map[string]string
+}
+
+func newReferenceTracker(dbModel model.DatabaseModel, provider ReferenceProvider) *referenceTracker {
+ return &referenceTracker{
+ dbModel: dbModel,
+ provider: provider,
+ }
+}
+
+func (rt *referenceTracker) processReferences(updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) {
+ rt.updates = updates
+ rt.tracked = make(map[string]string)
+ rt.added = make(map[string]string)
+ rt.deleted = make(map[string]string)
+ rt.references = make(database.References)
+
+ referenceUpdates, err := rt.processReferencesLoop(updates)
+ if err != nil {
+ return ModelUpdates{}, ModelUpdates{}, nil, err
+ }
+
+ // merge the updates generated from reference tracking into the main updates
+ err = updates.Merge(rt.dbModel, referenceUpdates)
+ if err != nil {
+ return ModelUpdates{}, ModelUpdates{}, nil, err
+ }
+
+ return updates, referenceUpdates, rt.references, nil
+}
+
+func (rt *referenceTracker) processReferencesLoop(updates ModelUpdates) (ModelUpdates, error) {
+ referenceUpdates := ModelUpdates{}
+
+ // references can be transitive and deleting them can lead to further
+ // references having to be removed so loop until there are no updates to be
+ // made
+ for len(updates.updates) > 0 {
+ // update the references from the updates
+ err := rt.processModelUpdates(updates)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // process strong reference integrity
+ updates, err = rt.processStrongReferences()
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // process weak reference integrity
+ weakUpdates, err := rt.processWeakReferences()
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // merge strong and weak reference updates
+ err = updates.Merge(rt.dbModel, weakUpdates)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // merge updates from this iteration to the overall reference updates
+ err = referenceUpdates.Merge(rt.dbModel, updates)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ return referenceUpdates, nil
+}
+
+// processModelUpdates keeps track of the updated references by a set of updates
+func (rt *referenceTracker) processModelUpdates(updates ModelUpdates) error {
+ tables := updates.GetUpdatedTables()
+ for _, table := range tables {
+ err := updates.ForEachRowUpdate(table, func(uuid string, row ovsdb.RowUpdate2) error {
+ return rt.processRowUpdate(table, uuid, &row)
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// processRowUpdate keeps track of the updated references by a given row update
+func (rt *referenceTracker) processRowUpdate(table, uuid string, row *ovsdb.RowUpdate2) error {
+
+ // getReferencesFromRowModify extracts updated references from the
+ // modifications. Following the same strategy as the modify field of Update2
+ // notification, it will extract a difference, that is, both old removed
+ // references and new added references are extracted. This difference will
+ // then be applied to currently tracked references to come up with the
+ // updated references.
+
+ // For more info on the modify field of Update2 notification and the
+ // strategy used to apply differences, check
+ // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification
+
+ var updateRefs database.References
+ switch {
+ case row.Delete != nil:
+ rt.deleted[uuid] = table
+ updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Old, row.Old)
+ case row.Modify != nil:
+ updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Modify, row.Old)
+ case row.Insert != nil:
+ if !isRoot(&rt.dbModel, table) {
+ // track rows added that are not part of the root set, we might need
+ // to delete those later
+ rt.added[uuid] = table
+ rt.tracked[uuid] = table
+ }
+ updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Insert, nil)
+ }
+
+ // (lazy) initialize existing references to the same rows from the database
+ for spec, refs := range updateRefs {
+ for to := range refs {
+ err := rt.initReferences(spec.ToTable, to)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // apply the reference modifications to the initialized references
+ applyReferenceModifications(rt.references, updateRefs)
+
+ return nil
+}
+
+// processStrongReferences adds delete operations for rows that are not part of
+// the root set and are no longer strongly referenced. Returns a referential
+// integrity violation if a nonexistent row is strongly referenced or a strongly
+// referenced row has been deleted.
+func (rt *referenceTracker) processStrongReferences() (ModelUpdates, error) {
+ // make sure that we are tracking the references to the deleted rows
+ err := rt.initReferencesOfDeletedRows()
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // track if rows are referenced or not
+ isReferenced := map[string]bool{}
+
+ // go over the updated references
+ for spec, refs := range rt.references {
+
+ // we only care about strong references
+ if !isStrong(&rt.dbModel, spec) {
+ continue
+ }
+
+ for to, from := range refs {
+ // check if the referenced row exists
+ exists, err := rt.rowExists(spec.ToTable, to)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ if !exists {
+ for _, uuid := range from {
+ // strong reference to a row that does not exist
+ return ModelUpdates{}, ovsdb.NewReferentialIntegrityViolation(fmt.Sprintf(
+ "Table %s column %s row %s references nonexistent or deleted row %s in table %s",
+ spec.FromTable, spec.FromColumn, uuid, to, spec.ToTable))
+ }
+ // we deleted the row ourselves on a previous loop
+ continue
+ }
+
+ // track if this row is referenced from this location spec
+ isReferenced[to] = isReferenced[to] || len(from) > 0
+ }
+ }
+
+ // inserted rows that are unreferenced and not part of the root set will
+ // silently be dropped from the updates
+ for uuid := range rt.added {
+ if isReferenced[uuid] {
+ continue
+ }
+ isReferenced[uuid] = false
+ }
+
+ // delete rows that are not referenced
+ updates := ModelUpdates{}
+ for uuid, isReferenced := range isReferenced {
+ if isReferenced {
+ // row is still referenced, ignore
+ continue
+ }
+
+ if rt.deleted[uuid] != "" {
+ // already deleted, ignore
+ continue
+ }
+
+ table := rt.tracked[uuid]
+ if isRoot(&rt.dbModel, table) {
+ // table is part of the root set, ignore
+ continue
+ }
+
+ // delete row that is not part of the root set and is no longer
+ // referenced
+ update, err := rt.deleteRow(table, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ err = updates.Merge(rt.dbModel, update)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ return updates, nil
+}
+
+// processWeakReferences deletes weak references to rows that were deleted.
+// Returns a constraint violation if this results in invalid values
+func (rt *referenceTracker) processWeakReferences() (ModelUpdates, error) {
+ // make sure that we are tracking the references to rows that might have
+ // been deleted as a result of strong reference garbage collection
+ err := rt.initReferencesOfDeletedRows()
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ tables := map[string]string{}
+ originalRows := map[string]ovsdb.Row{}
+ updatedRows := map[string]ovsdb.Row{}
+
+ for spec, refs := range rt.references {
+ // fetch some reference information from the schema
+ extendedType, minLenAllowed, refType, _ := refInfo(&rt.dbModel, spec.FromTable, spec.FromColumn, spec.FromValue)
+ isEmptyAllowed := minLenAllowed == 0
+
+ if refType != ovsdb.Weak {
+ // we only care about weak references
+ continue
+ }
+
+ for to, from := range refs {
+ if len(from) == 0 {
+ // not referenced from anywhere, ignore
+ continue
+ }
+
+ // check if the referenced row exists
+ exists, err := rt.rowExists(spec.ToTable, to)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ if exists {
+ // we only care about rows that have been deleted or otherwise
+ // don't exist
+ continue
+ }
+
+ // generate the updates to remove the references to deleted rows
+ for _, uuid := range from {
+ if _, ok := updatedRows[uuid]; !ok {
+ updatedRows[uuid] = ovsdb.NewRow()
+ }
+
+ if rt.deleted[uuid] != "" {
+ // already deleted, ignore
+ continue
+ }
+
+ // fetch the original rows
+ if originalRows[uuid] == nil {
+ originalRow, err := rt.getRow(spec.FromTable, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ if originalRow == nil {
+ return ModelUpdates{}, fmt.Errorf("reference from non-existent model with uuid %s", uuid)
+ }
+ originalRows[uuid] = *originalRow
+ }
+
+ var becomesLen int
+ switch extendedType {
+ case ovsdb.TypeMap:
+ // a map referencing the row
+ // generate the mutation to remove the entry from the map
+ originalMap := originalRows[uuid][spec.FromColumn].(ovsdb.OvsMap).GoMap
+ var mutationMap map[interface{}]interface{}
+ value, ok := updatedRows[uuid][spec.FromColumn]
+ if !ok {
+ mutationMap = map[interface{}]interface{}{}
+ } else {
+ mutationMap = value.(ovsdb.OvsMap).GoMap
+ }
+ // copy the map entries referencing the row from the original map
+ mutationMap = copyMapKeyValues(originalMap, mutationMap, !spec.FromValue, ovsdb.UUID{GoUUID: to})
+
+ // track the new length of the map
+ if !isEmptyAllowed {
+ becomesLen = len(originalMap) - len(mutationMap)
+ }
+
+ updatedRows[uuid][spec.FromColumn] = ovsdb.OvsMap{GoMap: mutationMap}
+
+ case ovsdb.TypeSet:
+ // a set referencing the row
+ // generate the mutation to remove the entry from the set
+ var mutationSet []interface{}
+ value, ok := updatedRows[uuid][spec.FromColumn]
+ if !ok {
+ mutationSet = []interface{}{}
+ } else {
+ mutationSet = value.(ovsdb.OvsSet).GoSet
+ }
+ mutationSet = append(mutationSet, ovsdb.UUID{GoUUID: to})
+
+ // track the new length of the set
+ if !isEmptyAllowed {
+ originalSet := originalRows[uuid][spec.FromColumn].(ovsdb.OvsSet).GoSet
+ becomesLen = len(originalSet) - len(mutationSet)
+ }
+
+ updatedRows[uuid][spec.FromColumn] = ovsdb.OvsSet{GoSet: mutationSet}
+
+ case ovsdb.TypeUUID:
+ // this is an atomic UUID value that needs to be cleared
+ updatedRows[uuid][spec.FromColumn] = nil
+ becomesLen = 0
+ }
+
+ if becomesLen < minLenAllowed {
+ return ModelUpdates{}, ovsdb.NewConstraintViolation(fmt.Sprintf(
+ "Deletion of a weak reference to a deleted (or never-existing) row from column %s in table %s "+
+ "row %s caused this column to have an invalid length.",
+ spec.FromColumn, spec.FromTable, uuid))
+ }
+
+ // track the table of the row we are going to update
+ tables[uuid] = spec.FromTable
+ }
+ }
+ }
+
+ // process the updates
+ updates := ModelUpdates{}
+ for uuid, rowUpdate := range updatedRows {
+ update, err := rt.updateRow(tables[uuid], uuid, rowUpdate)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ err = updates.Merge(rt.dbModel, update)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ return updates, nil
+}
+
+func copyMapKeyValues(from, to map[interface{}]interface{}, isKey bool, keyValue ovsdb.UUID) map[interface{}]interface{} {
+ if isKey {
+ to[keyValue] = from[keyValue]
+ return to
+ }
+ for key, value := range from {
+ if value.(ovsdb.UUID) == keyValue {
+ to[key] = from[key]
+ }
+ }
+ return to
+}
+
+// initReferences initializes the references to the provided row from the
+// database
+func (rt *referenceTracker) initReferences(table, uuid string) error {
+ if _, ok := rt.tracked[uuid]; ok {
+ // already initialized
+ return nil
+ }
+ existingRefs, err := rt.provider.GetReferences(rt.dbModel.Client().Name(), table, uuid)
+ if err != nil {
+ return err
+ }
+ rt.references.UpdateReferences(existingRefs)
+ rt.tracked[uuid] = table
+ return nil
+}
+
+func (rt *referenceTracker) initReferencesOfDeletedRows() error {
+ for uuid, table := range rt.deleted {
+ err := rt.initReferences(table, uuid)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// deleteRow adds an update to delete the provided row.
+func (rt *referenceTracker) deleteRow(table, uuid string) (ModelUpdates, error) {
+ model, err := rt.getModel(table, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ row, err := rt.getRow(table, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ updates := ModelUpdates{}
+ update := ovsdb.RowUpdate2{Delete: &ovsdb.Row{}, Old: row}
+ err = updates.AddRowUpdate2(rt.dbModel, table, uuid, model, update)
+
+ rt.deleted[uuid] = table
+
+ return updates, err
+}
+
+// updateRow generates updates for the provided row
+func (rt *referenceTracker) updateRow(table, uuid string, row ovsdb.Row) (ModelUpdates, error) {
+ model, err := rt.getModel(table, uuid)
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+
+ // In agreement with processWeakReferences, columns with values are assumed
+ // to be values of sets or maps that need to be mutated for deletion.
+ // Columns with no values are assumed to be atomic optional values that need
+ // to be cleared with an update.
+
+ mutations := make([]ovsdb.Mutation, 0, len(row))
+ update := ovsdb.Row{}
+ for column, value := range row {
+ if value != nil {
+ mutations = append(mutations, *ovsdb.NewMutation(column, ovsdb.MutateOperationDelete, value))
+ continue
+ }
+ update[column] = ovsdb.OvsSet{GoSet: []interface{}{}}
+ }
+
+ updates := ModelUpdates{}
+
+ if len(mutations) > 0 {
+ err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{
+ Op: ovsdb.OperationMutate,
+ Table: table,
+ Mutations: mutations,
+ Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})},
+ })
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ if len(update) > 0 {
+ err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{
+ Op: ovsdb.OperationUpdate,
+ Table: table,
+ Row: update,
+ Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})},
+ })
+ if err != nil {
+ return ModelUpdates{}, err
+ }
+ }
+
+ return updates, nil
+}
+
+// getModel gets the model from the updates or the database
+func (rt *referenceTracker) getModel(table, uuid string) (model.Model, error) {
+ if _, deleted := rt.deleted[uuid]; deleted {
+ // model has been deleted
+ return nil, nil
+ }
+ // look for the model in the updates
+ model := rt.updates.GetModel(table, uuid)
+ if model != nil {
+ return model, nil
+ }
+ // look for the model in the database
+ model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid)
+ if err != nil {
+ return nil, err
+ }
+ return model, nil
+}
+
+// getRow gets the row from the updates or the database
+func (rt *referenceTracker) getRow(table, uuid string) (*ovsdb.Row, error) {
+ if _, deleted := rt.deleted[uuid]; deleted {
+ // row has been deleted
+ return nil, nil
+ }
+ // look for the row in the updates
+ row := rt.updates.GetRow(table, uuid)
+ if row != nil {
+ return row, nil
+ }
+ // look for the model in the database and build the row
+ model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid)
+ if err != nil {
+ return nil, err
+ }
+ info, err := rt.dbModel.NewModelInfo(model)
+ if err != nil {
+ return nil, err
+ }
+ newRow, err := rt.dbModel.Mapper.NewRow(info)
+ if err != nil {
+ return nil, err
+ }
+ return &newRow, nil
+}
+
+// rowExists returns whether the row exists either in the updates or the database
+func (rt *referenceTracker) rowExists(table, uuid string) (bool, error) {
+ model, err := rt.getModel(table, uuid)
+ return model != nil, err
+}
+
+func getReferenceModificationsFromRow(dbModel *model.DatabaseModel, table, uuid string, modify, old *ovsdb.Row) database.References {
+ refs := database.References{}
+ for column, value := range *modify {
+ var oldValue interface{}
+ if old != nil {
+ oldValue = (*old)[column]
+ }
+ crefs := getReferenceModificationsFromColumn(dbModel, table, uuid, column, value, oldValue)
+ refs.UpdateReferences(crefs)
+ }
+ return refs
+}
+
+func getReferenceModificationsFromColumn(dbModel *model.DatabaseModel, table, uuid, column string, modify, old interface{}) database.References {
+ switch v := modify.(type) {
+ case ovsdb.UUID:
+ var oldUUID ovsdb.UUID
+ if old != nil {
+ oldUUID = old.(ovsdb.UUID)
+ }
+ return getReferenceModificationsFromAtom(dbModel, table, uuid, column, v, oldUUID)
+ case ovsdb.OvsSet:
+ var oldSet ovsdb.OvsSet
+ if old != nil {
+ oldSet = old.(ovsdb.OvsSet)
+ }
+ return getReferenceModificationsFromSet(dbModel, table, uuid, column, v, oldSet)
+ case ovsdb.OvsMap:
+ return getReferenceModificationsFromMap(dbModel, table, uuid, column, v)
+ }
+ return nil
+}
+
+func getReferenceModificationsFromMap(dbModel *model.DatabaseModel, table, uuid, column string, value ovsdb.OvsMap) database.References {
+ if len(value.GoMap) == 0 {
+ return nil
+ }
+
+ // get the referenced table
+ keyRefTable := refTable(dbModel, table, column, false)
+ valueRefTable := refTable(dbModel, table, column, true)
+ if keyRefTable == "" && valueRefTable == "" {
+ return nil
+ }
+
+ from := uuid
+ keySpec := database.ReferenceSpec{ToTable: keyRefTable, FromTable: table, FromColumn: column, FromValue: false}
+ valueSpec := database.ReferenceSpec{ToTable: valueRefTable, FromTable: table, FromColumn: column, FromValue: true}
+
+ refs := database.References{}
+ for k, v := range value.GoMap {
+ if keyRefTable != "" {
+ switch to := k.(type) {
+ case ovsdb.UUID:
+ if _, ok := refs[keySpec]; !ok {
+ refs[keySpec] = database.Reference{to.GoUUID: []string{from}}
+ } else if _, ok := refs[keySpec][to.GoUUID]; !ok {
+ refs[keySpec][to.GoUUID] = append(refs[keySpec][to.GoUUID], from)
+ }
+ }
+ }
+ if valueRefTable != "" {
+ switch to := v.(type) {
+ case ovsdb.UUID:
+ if _, ok := refs[valueSpec]; !ok {
+ refs[valueSpec] = database.Reference{to.GoUUID: []string{from}}
+ } else if _, ok := refs[valueSpec][to.GoUUID]; !ok {
+ refs[valueSpec][to.GoUUID] = append(refs[valueSpec][to.GoUUID], from)
+ }
+ }
+ }
+ }
+
+ return refs
+}
+
+func getReferenceModificationsFromSet(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.OvsSet) database.References {
+ // if the modify set is empty, it means the op is clearing an atomic value
+ // so pick the old value instead
+ value := modify
+ if len(modify.GoSet) == 0 {
+ value = old
+ }
+
+ if len(value.GoSet) == 0 {
+ return nil
+ }
+
+ // get the referenced table
+ refTable := refTable(dbModel, table, column, false)
+ if refTable == "" {
+ return nil
+ }
+
+ spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column}
+ from := uuid
+ refs := database.References{spec: database.Reference{}}
+ for _, v := range value.GoSet {
+ switch to := v.(type) {
+ case ovsdb.UUID:
+ refs[spec][to.GoUUID] = append(refs[spec][to.GoUUID], from)
+ }
+ }
+ return refs
+}
+
+func getReferenceModificationsFromAtom(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.UUID) database.References {
+ // get the referenced table
+ refTable := refTable(dbModel, table, column, false)
+ if refTable == "" {
+ return nil
+ }
+ spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column}
+ from := uuid
+ to := modify.GoUUID
+ refs := database.References{spec: {to: {from}}}
+ if old.GoUUID != "" {
+ // extract the old value as well
+ refs[spec][old.GoUUID] = []string{from}
+ }
+ return refs
+}
+
+// applyReferenceModifications updates references in 'a' from those in 'b'
+func applyReferenceModifications(a, b database.References) {
+ for spec, bv := range b {
+ for to, bfrom := range bv {
+ if av, ok := a[spec]; ok {
+ if afrom, ok := av[to]; ok {
+ r, _ := applyDifference(afrom, bfrom)
+ av[to] = r.([]string)
+ } else {
+ // this reference is not in 'a', so add it
+ av[to] = bfrom
+ }
+ } else {
+ // this reference is not in 'a', so add it
+ a[spec] = database.Reference{to: bfrom}
+ }
+ }
+ }
+}
+
+func refInfo(dbModel *model.DatabaseModel, table, column string, mapValue bool) (ovsdb.ExtendedType, int, ovsdb.RefType, string) {
+ tSchema := dbModel.Schema.Table(table)
+ if tSchema == nil {
+ panic(fmt.Sprintf("unexpected schema error: no schema for table %s", table))
+ }
+
+ cSchema := tSchema.Column(column)
+ if cSchema == nil {
+ panic(fmt.Sprintf("unexpected schema error: no schema for column %s", column))
+ }
+
+ cType := cSchema.TypeObj
+ if cType == nil {
+ // this is not a reference
+ return "", 0, "", ""
+ }
+
+ var bType *ovsdb.BaseType
+ switch {
+ case !mapValue && cType.Key != nil:
+ bType = cType.Key
+ case mapValue && cType.Value != nil:
+ bType = cType.Value
+ default:
+ panic(fmt.Sprintf("unexpected schema error: no schema for map value on column %s", column))
+ }
+ if bType.Type != ovsdb.TypeUUID {
+ // this is not a reference
+ return "", 0, "", ""
+ }
+
+ // treat optional values represented with sets as atomic UUIDs
+ extendedType := cSchema.Type
+ if extendedType == ovsdb.TypeSet && cType.Min() == 0 && cType.Max() == 1 {
+ extendedType = ovsdb.TypeUUID
+ }
+
+ rType, err := bType.RefType()
+ if err != nil {
+ panic(fmt.Sprintf("unexpected schema error: %v", err))
+ }
+
+ rTable, err := bType.RefTable()
+ if err != nil {
+ panic(fmt.Sprintf("unexpected schema error: %v", err))
+ }
+
+ return extendedType, cType.Min(), rType, rTable
+}
+
+func refTable(dbModel *model.DatabaseModel, table, column string, mapValue bool) ovsdb.RefType {
+ _, _, _, refTable := refInfo(dbModel, table, column, mapValue)
+ return refTable
+}
+
+func isRoot(dbModel *model.DatabaseModel, table string) bool {
+ isRoot, err := dbModel.Schema.IsRoot(table)
+ if err != nil {
+ panic(fmt.Sprintf("unexpected schema error: %v", err))
+ }
+ return isRoot
+}
+
+func isStrong(dbModel *model.DatabaseModel, spec database.ReferenceSpec) bool {
+ _, _, refType, _ := refInfo(dbModel, spec.FromTable, spec.FromColumn, spec.FromValue)
+ return refType == ovsdb.Strong
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/updates/updates.go b/vendor/github.com/ovn-org/libovsdb/updates/updates.go
new file mode 100644
index 0000000000..4ff2363a05
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/updates/updates.go
@@ -0,0 +1,528 @@
+package updates
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/ovn-org/libovsdb/mapper"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+type rowUpdate2 = ovsdb.RowUpdate2
+
+// modelUpdate contains an update in model and OVSDB RowUpdate2 notation
+type modelUpdate struct {
+ rowUpdate2 *rowUpdate2
+ old model.Model
+ new model.Model
+}
+
+// isEmpty returns whether this update is empty
+func (mu modelUpdate) isEmpty() bool {
+ return mu == modelUpdate{}
+}
+
+// ModelUpdates contains updates indexed by table and uuid
+type ModelUpdates struct {
+ updates map[string]map[string]modelUpdate
+}
+
+// GetUpdatedTables returns the tables that have updates
+func (u ModelUpdates) GetUpdatedTables() []string {
+ tables := make([]string, 0, len(u.updates))
+ for table, updates := range u.updates {
+ if len(updates) > 0 {
+ tables = append(tables, table)
+ }
+ }
+ return tables
+}
+
+// ForEachModelUpdate processes each row update of a given table in model
+// notation
+func (u ModelUpdates) ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error {
+ models := u.updates[table]
+ for uuid, model := range models {
+ err := do(uuid, model.old, model.new)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ForEachRowUpdate processes each row update of a given table in OVSDB
+// RowUpdate2 notation
+func (u ModelUpdates) ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error {
+ rows := u.updates[table]
+ for uuid, row := range rows {
+ err := do(uuid, *row.rowUpdate2)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetModel returns the last known state of the requested model. If the model is
+// unknown or has been deleted, returns nil.
+func (u ModelUpdates) GetModel(table, uuid string) model.Model {
+ if u.updates == nil {
+ return nil
+ }
+ if t, found := u.updates[table]; found {
+ if update, found := t[uuid]; found {
+ return update.new
+ }
+ }
+ return nil
+}
+
+// GetRow returns the last known state of the requested row. If the row is
+// unknown or has been deleted, returns nil.
+func (u ModelUpdates) GetRow(table, uuid string) *ovsdb.Row {
+ if u.updates == nil {
+ return nil
+ }
+ if t, found := u.updates[table]; found {
+ if update, found := t[uuid]; found {
+ return update.rowUpdate2.New
+ }
+ }
+ return nil
+}
+
+// Merge a set of updates with an earlier set of updates
+func (u *ModelUpdates) Merge(dbModel model.DatabaseModel, new ModelUpdates) error {
+ for table, models := range new.updates {
+ for uuid, update := range models {
+ err := u.addUpdate(dbModel, table, uuid, update)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// AddOperation adds an update for a model from a OVSDB Operation. If several
+// updates for the same model are aggregated, the user is responsible that the
+// provided model to be updated matches the updated model of the previous
+// update.
+func (u *ModelUpdates) AddOperation(dbModel model.DatabaseModel, table, uuid string, current model.Model, op *ovsdb.Operation) error {
+ switch op.Op {
+ case ovsdb.OperationInsert:
+ return u.addInsertOperation(dbModel, table, uuid, op)
+ case ovsdb.OperationUpdate:
+ return u.addUpdateOperation(dbModel, table, uuid, current, op)
+ case ovsdb.OperationMutate:
+ return u.addMutateOperation(dbModel, table, uuid, current, op)
+ case ovsdb.OperationDelete:
+ return u.addDeleteOperation(dbModel, table, uuid, current, op)
+ default:
+ return fmt.Errorf("database update from operation %#v not supported", op.Op)
+ }
+}
+
+// AddRowUpdate adds an update for a model from a OVSDB RowUpdate. If several
+// updates for the same model are aggregated, the user is responsible that the
+// provided model to be updated matches the updated model of the previous
+// update.
+func (u *ModelUpdates) AddRowUpdate(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru ovsdb.RowUpdate) error {
+ switch {
+ case ru.Old == nil && ru.New != nil:
+ new, err := model.CreateModel(dbModel, table, ru.New, uuid)
+ if err != nil {
+ return err
+ }
+ err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &rowUpdate2{New: ru.New}})
+ if err != nil {
+ return err
+ }
+ case ru.Old != nil && ru.New != nil:
+ old := current
+ new := model.Clone(current)
+ info, err := dbModel.NewModelInfo(new)
+ if err != nil {
+ return err
+ }
+ changed, err := updateModel(dbModel, table, info, ru.New, nil)
+ if !changed || err != nil {
+ return err
+ }
+ err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &rowUpdate2{Old: ru.Old, New: ru.New}})
+ if err != nil {
+ return err
+ }
+ case ru.New == nil:
+ old := current
+ err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &rowUpdate2{Old: ru.Old}})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddRowUpdate2 adds an update for a model from a OVSDB RowUpdate2. If several
+// updates for the same model are aggregated, the user is responsible that the
+// provided model to be updated matches the updated model of the previous
+// update.
+func (u *ModelUpdates) AddRowUpdate2(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru2 ovsdb.RowUpdate2) error {
+ switch {
+ case ru2.Initial != nil:
+ ru2.Insert = ru2.Initial
+ fallthrough
+ case ru2.Insert != nil:
+ new, err := model.CreateModel(dbModel, table, ru2.Insert, uuid)
+ if err != nil {
+ return err
+ }
+ err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &ru2})
+ if err != nil {
+ return err
+ }
+ case ru2.Modify != nil:
+ old := current
+ new := model.Clone(current)
+ info, err := dbModel.NewModelInfo(new)
+ if err != nil {
+ return err
+ }
+ changed, err := modifyModel(dbModel, table, info, ru2.Modify)
+ if !changed || err != nil {
+ return err
+ }
+ err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &ru2})
+ if err != nil {
+ return err
+ }
+ default:
+ old := current
+ err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &ru2})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (u *ModelUpdates) addUpdate(dbModel model.DatabaseModel, table, uuid string, update modelUpdate) error {
+ if u.updates == nil {
+ u.updates = map[string]map[string]modelUpdate{}
+ }
+ if _, ok := u.updates[table]; !ok {
+ u.updates[table] = make(map[string]modelUpdate)
+ }
+
+ ts := dbModel.Schema.Table(table)
+ update, err := merge(ts, u.updates[table][uuid], update)
+ if err != nil {
+ return err
+ }
+
+ if !update.isEmpty() {
+ u.updates[table][uuid] = update
+ return nil
+ }
+
+ // If after the merge this amounts to no update, remove it from the list and
+ // clean up
+ delete(u.updates[table], uuid)
+ if len(u.updates[table]) == 0 {
+ delete(u.updates, table)
+ }
+ if len(u.updates) == 0 {
+ u.updates = nil
+ }
+
+ return nil
+}
+
+func (u *ModelUpdates) addInsertOperation(dbModel model.DatabaseModel, table, uuid string, op *ovsdb.Operation) error {
+ m := dbModel.Mapper
+
+ model, err := dbModel.NewModel(table)
+ if err != nil {
+ return err
+ }
+
+ mapperInfo, err := dbModel.NewModelInfo(model)
+ if err != nil {
+ return err
+ }
+
+ err = m.GetRowData(&op.Row, mapperInfo)
+ if err != nil {
+ return err
+ }
+
+ err = mapperInfo.SetField("_uuid", uuid)
+ if err != nil {
+ return err
+ }
+
+ resultRow, err := m.NewRow(mapperInfo)
+ if err != nil {
+ return err
+ }
+
+ err = u.addUpdate(dbModel, table, uuid,
+ modelUpdate{
+ old: nil,
+ new: model,
+ rowUpdate2: &rowUpdate2{
+ Insert: &resultRow,
+ New: &resultRow,
+ Old: nil,
+ },
+ },
+ )
+
+ return err
+}
+
+func (u *ModelUpdates) addUpdateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error {
+ m := dbModel.Mapper
+
+ oldInfo, err := dbModel.NewModelInfo(old)
+ if err != nil {
+ return err
+ }
+
+ oldRow, err := m.NewRow(oldInfo)
+ if err != nil {
+ return err
+ }
+
+ new := model.Clone(old)
+ newInfo, err := dbModel.NewModelInfo(new)
+ if err != nil {
+ return err
+ }
+
+ delta := ovsdb.NewRow()
+ changed, err := updateModel(dbModel, table, newInfo, &op.Row, &delta)
+ if err != nil {
+ return err
+ }
+ if !changed {
+ return nil
+ }
+
+ newRow, err := m.NewRow(newInfo)
+ if err != nil {
+ return err
+ }
+
+ err = u.addUpdate(dbModel, table, uuid,
+ modelUpdate{
+ old: old,
+ new: new,
+ rowUpdate2: &rowUpdate2{
+ Modify: &delta,
+ Old: &oldRow,
+ New: &newRow,
+ },
+ },
+ )
+
+ return err
+}
+
+func (u *ModelUpdates) addMutateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error {
+ m := dbModel.Mapper
+ schema := dbModel.Schema.Table(table)
+
+ oldInfo, err := dbModel.NewModelInfo(old)
+ if err != nil {
+ return err
+ }
+
+ oldRow, err := m.NewRow(oldInfo)
+ if err != nil {
+ return err
+ }
+
+ new := model.Clone(old)
+ newInfo, err := dbModel.NewModelInfo(new)
+ if err != nil {
+ return err
+ }
+
+ differences := make(map[string]interface{})
+ for _, mutation := range op.Mutations {
+ column := schema.Column(mutation.Column)
+ if column == nil {
+ continue
+ }
+
+ var nativeValue interface{}
+ // Usually a mutation value is of the same type of the value being mutated
+ // except for delete mutation of maps where it can also be a list of same type of
+ // keys (rfc7047 5.1). Handle this special case here.
+ if mutation.Mutator == "delete" && column.Type == ovsdb.TypeMap && reflect.TypeOf(mutation.Value) != reflect.TypeOf(ovsdb.OvsMap{}) {
+ nativeValue, err = ovsdb.OvsToNativeSlice(column.TypeObj.Key.Type, mutation.Value)
+ if err != nil {
+ return err
+ }
+ } else {
+ nativeValue, err = ovsdb.OvsToNative(column, mutation.Value)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := ovsdb.ValidateMutation(column, mutation.Mutator, nativeValue); err != nil {
+ return err
+ }
+
+ current, err := newInfo.FieldByColumn(mutation.Column)
+ if err != nil {
+ return err
+ }
+
+ newValue, diff := mutate(current, mutation.Mutator, nativeValue)
+ if err := newInfo.SetField(mutation.Column, newValue); err != nil {
+ return err
+ }
+
+ old, err := oldInfo.FieldByColumn(mutation.Column)
+ if err != nil {
+ return err
+ }
+ diff, changed := mergeDifference(old, differences[mutation.Column], diff)
+ if changed {
+ differences[mutation.Column] = diff
+ } else {
+ delete(differences, mutation.Column)
+ }
+ }
+
+ if len(differences) == 0 {
+ return nil
+ }
+
+ delta := ovsdb.NewRow()
+ for column, diff := range differences {
+ colSchema := schema.Column(column)
+ diffOvs, err := ovsdb.NativeToOvs(colSchema, diff)
+ if err != nil {
+ return err
+ }
+ delta[column] = diffOvs
+ }
+
+ newRow, err := m.NewRow(newInfo)
+ if err != nil {
+ return err
+ }
+
+ err = u.addUpdate(dbModel, table, uuid,
+ modelUpdate{
+ old: old,
+ new: new,
+ rowUpdate2: &rowUpdate2{
+ Modify: &delta,
+ Old: &oldRow,
+ New: &newRow,
+ },
+ },
+ )
+
+ return err
+}
+
+func (u *ModelUpdates) addDeleteOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error {
+ m := dbModel.Mapper
+
+ info, err := dbModel.NewModelInfo(old)
+ if err != nil {
+ return err
+ }
+
+ oldRow, err := m.NewRow(info)
+ if err != nil {
+ return err
+ }
+
+ err = u.addUpdate(dbModel, table, uuid,
+ modelUpdate{
+ old: old,
+ new: nil,
+ rowUpdate2: &rowUpdate2{
+ Delete: &ovsdb.Row{},
+ Old: &oldRow,
+ },
+ },
+ )
+
+ return err
+}
+
+func updateModel(dbModel model.DatabaseModel, table string, info *mapper.Info, update, modify *ovsdb.Row) (bool, error) {
+ return updateOrModifyModel(dbModel, table, info, update, modify, false)
+}
+
+func modifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, modify *ovsdb.Row) (bool, error) {
+ return updateOrModifyModel(dbModel, table, info, modify, nil, true)
+}
+
+// updateOrModifyModel updates info about a model with a given row containing
+// the change. The change row itself can be interpreted as an update or a
+// modify. If the change is an update and a modify row is provided, it will be
+// filled with the modify data.
+func updateOrModifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, changeRow, modifyRow *ovsdb.Row, isModify bool) (bool, error) {
+ schema := dbModel.Schema.Table(table)
+ var changed bool
+
+ for column, updateOvs := range *changeRow {
+ colSchema := schema.Column(column)
+ if colSchema == nil {
+ // ignore columns we don't know about in our schema
+ continue
+ }
+
+ currentNative, err := info.FieldByColumn(column)
+ if err != nil {
+ return false, err
+ }
+
+ updateNative, err := ovsdb.OvsToNative(colSchema, updateOvs)
+ if err != nil {
+ return false, err
+ }
+
+ if isModify {
+ differenceNative, isDifferent := applyDifference(currentNative, updateNative)
+ if isDifferent && !colSchema.Mutable() {
+ return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table))
+ }
+ changed = changed || isDifferent
+ err = info.SetField(column, differenceNative)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ differenceNative, isDifferent := difference(currentNative, updateNative)
+ if isDifferent && !colSchema.Mutable() {
+ return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table))
+ }
+ changed = changed || isDifferent
+ if isDifferent && modifyRow != nil {
+ deltaOvs, err := ovsdb.NativeToOvs(colSchema, differenceNative)
+ if err != nil {
+ return false, err
+ }
+ (*modifyRow)[column] = deltaOvs
+ }
+ err = info.SetField(column, updateNative)
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+
+ return changed, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 54e3581163..3b9dd12741 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -40,6 +40,16 @@ github.com/blang/semver/v4
# github.com/cenkalti/backoff v2.2.1+incompatible
## explicit
github.com/cenkalti/backoff
+# github.com/cenkalti/backoff/v4 v4.2.1
+## explicit; go 1.18
+github.com/cenkalti/backoff/v4
+# github.com/cenkalti/hub v1.0.1
+## explicit
+github.com/cenkalti/hub
+# github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984
+## explicit
+github.com/cenkalti/rpc2
+github.com/cenkalti/rpc2/jsonrpc
# github.com/cespare/xxhash/v2 v2.2.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
@@ -157,6 +167,9 @@ github.com/go-errors/errors
## explicit; go 1.16
github.com/go-logr/logr
github.com/go-logr/logr/funcr
+# github.com/go-logr/stdr v1.2.2
+## explicit; go 1.16
+github.com/go-logr/stdr
# github.com/go-logr/zapr v1.2.4
## explicit; go 1.16
github.com/go-logr/zapr
@@ -226,6 +239,9 @@ github.com/google/gofuzz/bytesource
# github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1
## explicit; go 1.14
github.com/google/pprof/profile
+# github.com/google/renameio/v2 v2.0.0
+## explicit; go 1.13
+github.com/google/renameio/v2
# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
## explicit; go 1.13
github.com/google/shlex
@@ -521,6 +537,19 @@ github.com/openshift/machine-config-operator/pkg/generated/informers/externalver
github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/machineconfiguration.openshift.io/v1
github.com/openshift/machine-config-operator/pkg/generated/listers/machineconfiguration.openshift.io/v1
github.com/openshift/machine-config-operator/pkg/version
+# github.com/ovn-org/libovsdb v0.6.1-0.20240125124854-03f787b1a892
+## explicit; go 1.18
+github.com/ovn-org/libovsdb/cache
+github.com/ovn-org/libovsdb/client
+github.com/ovn-org/libovsdb/database
+github.com/ovn-org/libovsdb/database/inmemory
+github.com/ovn-org/libovsdb/database/transaction
+github.com/ovn-org/libovsdb/mapper
+github.com/ovn-org/libovsdb/model
+github.com/ovn-org/libovsdb/ovsdb
+github.com/ovn-org/libovsdb/ovsdb/serverdb
+github.com/ovn-org/libovsdb/server
+github.com/ovn-org/libovsdb/updates
# github.com/peterbourgon/diskv v2.0.1+incompatible
## explicit
github.com/peterbourgon/diskv