diff --git a/.github/workflows/binary.yml b/.github/workflows/binary.yml
index 2f64664..c6f3ece 100644
--- a/.github/workflows/binary.yml
+++ b/.github/workflows/binary.yml
@@ -20,10 +20,10 @@ jobs:
 
       - name: "Build binary"
         run: |
           sudo apt-get update
-          sudo apt-get install -y build-essential libvirt-dev make libguestfs-dev
+          sudo apt-get install -y build-essential libvirt-dev make genisoimage libguestfs-dev libcephfs-dev librbd-dev librados-dev
           make
       - uses: actions/upload-artifact@v3
         with:
           name: yavirt-ubuntu
-          path: bin
\ No newline at end of file
+          path: bin
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
index 4a93af3..a1f2622 100644
--- a/.github/workflows/golangci-lint.yml
+++ b/.github/workflows/golangci-lint.yml
@@ -10,14 +10,27 @@ jobs:
   lint:
     runs-on: ubuntu-latest
-    container: projecteru2/footstone:yavirt-prebuild
     steps:
-      - uses: actions/checkout@v3
+      - name: checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
 
-      - uses: actions/setup-go@v3
+      - name: "Setup go"
+        uses: actions/setup-go@v5
         with:
           go-version-file: 'go.mod'
+
+      - name: "Install dependencies"
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y build-essential libvirt-dev make genisoimage libguestfs-dev libcephfs-dev librbd-dev librados-dev
+
+      - name: "Install dependencies"
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y build-essential libvirt-dev make genisoimage libguestfs-dev libcephfs-dev librbd-dev librados-dev
 
-      - uses: golangci/golangci-lint-action@v3
+      - uses: golangci/golangci-lint-action@v6
         with:
           args: --timeout=8m
 
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index e64159b..3e3b319 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -11,14 +11,25 @@ jobs:
   unittests:
     runs-on: ubuntu-latest
-    container: projecteru2/footstone:yavirt-prebuild
-
     steps:
-      - uses: actions/checkout@v3
+      - name: checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
 
-      - uses: actions/setup-go@v3
+      - name: "Setup go"
+        uses: actions/setup-go@v4
         with:
           go-version-file: 'go.mod'
+
+      - name: "Install dependencies"
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y build-essential libvirt-dev make genisoimage libguestfs-dev libcephfs-dev librbd-dev librados-dev
+      - name: "Install dependencies"
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y build-essential libvirt-dev make genisoimage libguestfs-dev libcephfs-dev librbd-dev librados-dev
 
       - name: unit tests
         run: make test
 
diff --git a/.gitignore b/.gitignore
index 02e03c6..44d7609 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,5 +3,6 @@ cscope.*
 vendor/
 dist/
 yavirt
-
-.vscode
\ No newline at end of file
+.idea/
+.vscode
+/tmp
diff --git a/.golangci.yml b/.golangci.yml
index d4c89f4..f46b35f 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,12 +1,17 @@
 run:
   timeout: 5m
   tests: false
-  skip-dirs:
+  modules-download-mode: readonly
+
+issues:
+  exclude-dirs:
     - vendor
    - tools
    - 3rdmocks
-  modules-download-mode: readonly
-
+    - thirdpart
+    - tmp
+    - mocks
+
 linters-settings:
   nakedret:
     max-func-lines: 59
@@ -27,7 +32,6 @@ linters-settings:
     for-loops: true
   errcheck:
     check-type-assertions: true
-    check-blank: true
   gocritic:
     disabled-checks:
       - captLocal
diff --git a/Dockerfile b/Dockerfile
index a166787..709de07 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,17 +1,30 @@
-FROM projecteru2/footstone:yavirt-prebuild-go1.20 AS BUILD
+FROM ubuntu:jammy AS BUILD
 
 # make binary
 # RUN git clone https://github.com/projecteru2/yavirt.git /go/src/github.com/projecteru2/yavirt
 COPY . /go/src/github.com/projecteru2/yavirt
 WORKDIR /go/src/github.com/projecteru2/yavirt
 ARG KEEP_SYMBOL
-RUN make deps && make && ./bin/yavirtd --version
+RUN sed -i 's@//.*archive.ubuntu.com@//mirrors.ustc.edu.cn@g' /etc/apt/sources.list
+RUN apt update
+RUN apt install -y golang-1.20 build-essential libvirt-dev make genisoimage libguestfs-dev libcephfs-dev librbd-dev librados-dev
+RUN apt install -y git
+# RUN snap install go --classic
+ENV PATH="$PATH:/usr/lib/go-1.20/bin/"
 
-FROM alpine:latest
+RUN go version
+RUN make deps CN=1
+RUN make && ./bin/yavirtd --version
+
+FROM ubuntu:jammy
+
+RUN mkdir /etc/yavirt/ && \
+    sed -i 's@//.*archive.ubuntu.com@//mirrors.ustc.edu.cn@g' /etc/apt/sources.list && \
+    apt update && \
+    apt install -y libvirt-dev libguestfs-dev genisoimage libcephfs-dev librbd-dev librados-dev
 
-RUN mkdir /etc/yavirt/
 LABEL ERU=1
 COPY --from=BUILD /go/src/github.com/projecteru2/yavirt/bin/yavirtd /usr/bin/yavirtd
 COPY --from=BUILD /go/src/github.com/projecteru2/yavirt/bin/yavirtctl /usr/bin/yavirtctl
-COPY --from=BUILD /go/src/github.com/projecteru2/yavirt/internal/virt/template/disk.xml /etc/yavirt/disk.xml
-COPY --from=BUILD /go/src/github.com/projecteru2/yavirt/internal/virt/template/guest.xml /etc/yavirt/guest.xml
+COPY --from=BUILD /go/src/github.com/projecteru2/yavirt/internal/virt/domain/templates/disk.xml /etc/yavirt/disk.xml
+COPY --from=BUILD /go/src/github.com/projecteru2/yavirt/internal/virt/domain/templates/guest.xml /etc/yavirt/guest.xml
diff --git a/Makefile b/Makefile
index b3aa8b5..b47891c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,12 +1,20 @@
+ifeq ($(CN), 1)
+ENV := GOPROXY=https://goproxy.cn,direct
+endif
+
 NS := github.com/projecteru2/yavirt
 BUILD := go build -race
-TEST := go test -count=1 -race -cover
+TEST := go test -count=1 -race -cover -gcflags=all=-l
 
-LDFLAGS += -X "$(NS)/internal/ver.Git=$(shell git rev-parse HEAD)"
-LDFLAGS += -X "$(NS)/internal/ver.Compile=$(shell go version)"
-LDFLAGS += -X "$(NS)/internal/ver.Date=$(shell date +'%F %T %z')"
+REVISION := $(shell git rev-parse HEAD || unknown)
+BUILTAT := $(shell date +%Y-%m-%dT%H:%M:%S)
+VERSION := $(shell git describe --tags $(shell git rev-list --tags --max-count=1))
 
-PKGS := $$(go list ./... | grep -v -P '$(NS)/third_party|vendor/')
+LDFLAGS += -X "$(NS)/internal/ver.REVISION=$(REVISION)"
+LDFLAGS += -X "$(NS)/internal/ver.BUILTAT=$(BUILTAT)"
+LDFLAGS += -X "$(NS)/internal/ver.VERSION=$(VERSION)"
+
+PKGS := $$(go list ./... | grep -v -P '$(NS)/third_party|vendor/|mocks|ovn')
 
 .PHONY: all test build setup
 
@@ -20,12 +28,15 @@ build-srv:
 build-ctl:
 	$(BUILD) -ldflags '$(LDFLAGS)' -o bin/yavirtctl cmd/cmd.go
 
-setup:
-	go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
-	go install github.com/vektra/mockery/v2@latest
+setup: setup-lint
+	$(ENV) go install github.com/vektra/mockery/v2@latest
+
+setup-lint:
+	$(ENV) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
 
 lint: format
-	golangci-lint run --skip-dirs-use-default --skip-dirs=thirdparty
+	golangci-lint --version
+	golangci-lint run
 
 format: vet
 	gofmt -s -w $$(find . -iname '*.go' | grep -v -P '\./third_party|\./vendor/')
@@ -34,7 +45,8 @@ vet:
 	go vet $(PKGS)
 
 deps:
-	go mod tidy
+	$(ENV) go mod tidy
+	$(ENV) go mod vendor
 
 mock: deps
 	mockery --dir pkg/libvirt --output pkg/libvirt/mocks --all
@@ -43,10 +55,12 @@ mock: deps
 	mockery --dir pkg/utils --output pkg/utils/mocks --name Locker
 	mockery --dir internal/virt/agent --output internal/virt/agent/mocks --all
 	mockery --dir internal/virt/domain --output internal/virt/domain/mocks --name Domain
-	mockery --dir internal/virt/guest/manager --output internal/virt/guest/manager/mocks --name Manageable
 	mockery --dir internal/virt/guest --output internal/virt/guest/mocks --name Bot
 	mockery --dir internal/virt/guestfs --output internal/virt/guestfs/mocks --name Guestfs
-	mockery --dir internal/virt/volume --output internal/virt/volume/mocks --name Bot
+	mockery --dir internal/volume --output internal/volume/mocks --name Volume
+	mockery --dir internal/volume/base --output internal/volume/base/mocks --name SnapshotAPI
+	mockery --dir internal/eru/store --output internal/eru/store/mocks --name Store
+	mockery --dir internal/service --output internal/service/mocks --name Service
 
 clean:
 	rm -fr bin/*
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 545effd..e3185cb 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -1,21 +1,24 @@
 package main
 
 import (
+	"encoding/json"
 	"fmt"
 	"os"
 
 	"github.com/urfave/cli/v2"
 
+	"github.com/cockroachdb/errors"
 	"github.com/projecteru2/yavirt/cmd/guest"
 	"github.com/projecteru2/yavirt/cmd/image"
-	"github.com/projecteru2/yavirt/cmd/maint"
 	"github.com/projecteru2/yavirt/cmd/network"
+	"github.com/projecteru2/yavirt/cmd/run"
+	"github.com/projecteru2/yavirt/configs"
+	"github.com/projecteru2/yavirt/internal/service/boar"
 	"github.com/projecteru2/yavirt/internal/ver"
-	"github.com/projecteru2/yavirt/pkg/errors"
 )
 
 func main() {
-	cli.VersionPrinter = func(c *cli.Context) {
+	cli.VersionPrinter = func(_ *cli.Context) {
 		fmt.Println(ver.Version())
 	}
 
@@ -29,7 +32,7 @@ func main() {
 			},
 			&cli.StringFlag{
 				Name:    "log-level",
-				Value:   "INFO",
+				Value:   "",
 				Usage:   "set log level",
 				EnvVars: []string{"ERU_YAVIRT_LOG_LEVEL"},
 			},
@@ -57,22 +60,62 @@ func main() {
 				Usage:   "change hostname",
 				EnvVars: []string{"ERU_HOSTNAME", "HOSTNAME"},
 			},
-			&cli.BoolFlag{
-				Name:  "skip-setup-host",
-				Value: false,
+			&cli.IntFlag{
+				Name:    "timeout",
+				Value:   300,
+				Usage:   "command timeout",
+				EnvVars: []string{"ERU_YAVIRT_CMD_TIMEOUT"},
 			},
 		},
 		Commands: []*cli.Command{
+			{
+				Name:   "info",
+				Action: run.Run(info),
+			},
 			guest.Command(),
 			image.Command(),
 			network.Command(),
-			maint.Command(),
 		},
 		Version: "v",
 	}
 
 	if err := app.Run(os.Args); err != nil {
-		fmt.Println(errors.Stack(err))
+		fmt.Println(errors.GetReportableStackTrace(err))
+	}
+}
+
+func info(c *cli.Context, _ run.Runtime) (err error) {
+	cfg := &configs.Conf
+
+	if err := cfg.Load(c.String("config")); err != nil {
+		return errors.Wrap(err, "")
+	}
+	if err := cfg.Prepare(c); err != nil {
+		return err
+	}
+	// disable eru-related features
+	cfg.Eru.Enable = false
+
+	svc, err := boar.New(c.Context, cfg, nil)
+	if err != nil {
+		return err
+	}
+	info, err := svc.Info()
+	if err != nil {
+		return err
+	}
+	ans := map[string]string{
+		"addr":     cfg.Host.Addr,
+		"hostname": cfg.Host.Name,
+	}
+	for name, res := range info.Resources {
+		ans[name] = string(res)
+	}
+	b, err := json.MarshalIndent(ans, "", "\t")
+	if err != nil {
+		return err
 	}
+	fmt.Printf("%s\n", string(b))
+	return nil
 }
 
diff --git a/cmd/guest/attach.go b/cmd/guest/attach.go
new file mode 100644
index 0000000..659aa5e
---
/dev/null +++ b/cmd/guest/attach.go @@ -0,0 +1,157 @@ +package guest + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "sync" + "time" + + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/cmd/run" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/pkg/utils" + "github.com/urfave/cli/v2" + "golang.org/x/term" +) + +type buffer struct { + sync.Mutex + fromQ *utils.BytesQueue + + to chan []byte + quit chan struct{} +} + +func (b *buffer) Close() error { + close(b.to) + close(b.quit) + return nil +} + +func (b *buffer) Read(p []byte) (int, error) { + return b.fromQ.Read(p) +} + +func (b *buffer) Write(p []byte) (int, error) { + b.to <- bytes.Clone(p) + return len(p), nil +} + +func (b *buffer) UserRead() ([]byte, error) { + bs, ok := <-b.to + if !ok { + return nil, io.EOF + } + return bs, nil +} + +func (b *buffer) UserWrite(bs []byte) error { + _, err := b.fromQ.Write(bs) + return err +} + +func attachGuest(c *cli.Context, runtime run.Runtime) error { //nolint + id := c.Args().First() + cmds := c.Args().Tail() + timeout := c.Int("timeout") + force := c.Bool("force") + safe := c.Bool("safe") + devname := c.String("devname") + + log.Debugf(c.Context, "attaching guest %s timeout %d", id, timeout) + + flags := intertypes.NewOpenConsoleFlags(force, safe, cmds) + flags.Devname = devname + stream := &buffer{ + fromQ: utils.NewBytesQueue(), + to: make(chan []byte, 10), + } + + ctx, cancel := context.WithCancel(context.TODO()) + var lck sync.Mutex + lastActive := time.Now() + needExit := func() bool { + lck.Lock() + defer lck.Unlock() + + now := time.Now() + elapse := now.Sub(lastActive) + if elapse.Seconds() > float64(timeout) { + cancel() + return true + } + lastActive = now + return false + } + go func() { + err := runtime.Svc.AttachGuest(ctx, id, stream, flags) + if err != nil { + log.Errorf(c.Context, err, "attach guest error") + } + }() + + log.Debugf(c.Context, "start terminal...") + + oldState, err := term.MakeRaw(int(os.Stdin.Fd())) + if err != nil { + panic(err) + } + defer term.Restore(int(os.Stdin.Fd()), oldState) //nolint + + done1, done2 := make(chan struct{}), make(chan struct{}) + go func() { + defer close(done1) + defer log.Debugf(c.Context, "stdin done\n") + + buf := make([]byte, 100*1024) + for { + if needExit() { + return + } + n, err := os.Stdin.Read(buf) + if err != nil { + fmt.Printf("Stdin: %s\n", err) + return + } + bs := bytes.Clone(buf[:n]) + // find ^] + if bytes.Contains(bs, []byte{uint8(29)}) { + return + } + err = stream.UserWrite(bs) + if err != nil { + fmt.Printf("Stdin(Stream): %s\n", err) + return + } + } + }() + go func() { + defer close(done2) + defer fmt.Printf("stdout done\n") + for { + if needExit() { + return + } + bs, err := stream.UserRead() + if err != nil { + fmt.Printf("Stdout(Stream): %s\n", err) + return + } + log.Debugf(c.Context, "[Exec:Stdout] got from stream: %v\r\n", bs) + _, err = os.Stdout.Write(bs) + if err != nil { + fmt.Printf("Stdout: %s\n", err) + return + } + } + }() + + select { + case <-done1: + case <-done2: + } + return nil +} diff --git a/cmd/guest/capture.go b/cmd/guest/capture.go index 0821ff1..d040aba 100644 --- a/cmd/guest/capture.go +++ b/cmd/guest/capture.go @@ -5,16 +5,12 @@ import ( "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/pkg/errors" ) func captureFlags() []cli.Flag { return []cli.Flag{ - &cli.StringFlag{ - Name: "user", - Required: true, - }, &cli.StringFlag{ Name: 
"name", Required: true, @@ -31,12 +27,11 @@ func capture(c *cli.Context, runtime run.Runtime) error { return errors.New("Guest ID is required") } - user := c.String("user") name := c.String("name") overridden := c.Bool("overridden") - _, err := runtime.Guest.Capture(runtime.VirtContext(), id, user, name, overridden) + _, err := runtime.Svc.CaptureGuest(runtime.Ctx, id, name, overridden) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } fmt.Printf("%s captured\n", name) diff --git a/cmd/guest/create.go b/cmd/guest/create.go index 1c1d6a2..6483845 100644 --- a/cmd/guest/create.go +++ b/cmd/guest/create.go @@ -1,17 +1,20 @@ package guest import ( + "encoding/json" "fmt" "strings" "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/internal/models" - "github.com/projecteru2/yavirt/internal/virt/types" - "github.com/projecteru2/yavirt/internal/vnet" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/types" "github.com/projecteru2/yavirt/pkg/utils" + + stotypes "github.com/projecteru2/resource-storage/storage/types" + rbdtypes "github.com/yuyang0/resource-rbd/rbd/types" ) func createFlags() []cli.Flag { @@ -45,24 +48,27 @@ func createFlags() []cli.Flag { } func create(c *cli.Context, runtime run.Runtime) error { - vols, err := getVols(c.String("storage")) + res, err := generateResources(c) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } + cnt := c.Int("count") + networkMode := c.String("network") + + if networkMode == "" { + return errors.New("network can't be empty") + } opts := types.GuestCreateOption{ CPU: c.Int("cpu"), Mem: c.Int64("memory"), ImageName: c.Args().First(), ImageUser: c.String("image-user"), DmiUUID: c.String("dmi"), - } - - cnt := c.Int("count") - network := c.String("network") - - if len(network) < 1 { - network = runtime.Host.NetworkMode + Labels: map[string]string{ + network.ModeLabelKey: networkMode, + }, + Resources: res, } switch { @@ -74,14 +80,12 @@ func create(c *cli.Context, runtime run.Runtime) error { return fmt.Errorf("--memory is required") case cnt < 1: return fmt.Errorf("--count must be greater than 0") - case network != vnet.NetworkCalico && network != vnet.NetworkVlan: - return fmt.Errorf("--network is invalid: %s", network) + case networkMode != network.CalicoMode && networkMode != network.VlanMode: + return fmt.Errorf("--network is invalid: %s", networkMode) } - runtime.Host.NetworkMode = network - for i := 0; i < cnt; i++ { - g, err := runtime.Guest.Create(runtime.VirtContext(), opts, runtime.Host, vols) + g, err := runtime.Svc.CreateGuest(runtime.Ctx, opts) if err != nil { return err } @@ -92,28 +96,38 @@ func create(c *cli.Context, runtime run.Runtime) error { return nil } -func getVols(mounts string) ([]*models.Volume, error) { - if len(mounts) < 1 { - return nil, nil - } - - var vols = []*models.Volume{} - - for _, raw := range strings.Split(mounts, ",") { - mnt, rawCap := utils.PartRight(raw, ":") - - volCap, err := utils.Atoi64(rawCap) +func generateResources(c *cli.Context) (ans map[string][]byte, err error) { + ans = map[string][]byte{} + // for storage resources + { + mounts := c.String("storage") + if len(mounts) < 1 { + return + } + eParmas := stotypes.EngineParams{ + Volumes: strings.Split(mounts, ","), + } + bs, err := json.Marshal(eParmas) if err != nil { - return nil, errors.Trace(err) + return nil, err } + ans["storage"] = 
bs + } - vol, err := models.NewDataVolume(mnt, volCap) + // for rbd resources + { + mounts := c.String("rbd") + if len(mounts) < 1 { + return + } + eParmas := rbdtypes.EngineParams{ + Volumes: strings.Split(mounts, ","), + } + bs, err := json.Marshal(eParmas) if err != nil { - return nil, errors.Trace(err) + return nil, err } - - vols = append(vols, vol) + ans["rbd"] = bs } - - return vols, nil + return } diff --git a/cmd/guest/exec.go b/cmd/guest/exec.go new file mode 100644 index 0000000..8b0c15e --- /dev/null +++ b/cmd/guest/exec.go @@ -0,0 +1,59 @@ +package guest + +import ( + "os" + + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/cmd/run" + "github.com/urfave/cli/v2" +) + +func execFlags() []cli.Flag { + return []cli.Flag{ + &cli.BoolFlag{ + Name: "i", + Value: false, + }, + &cli.StringFlag{ + Name: "devname", + Value: "", + }, + &cli.BoolFlag{ + Name: "force", + Value: false, + }, + &cli.BoolFlag{ + Name: "safe", + Value: false, + }, + } +} + +func exec(c *cli.Context, runtime run.Runtime) (err error) { + // defer runtime.CancelFn() + + if c.Bool("i") { + return attachGuest(c, runtime) + } else { //nolint + return execGuest(c, runtime) + } +} + +func execGuest(c *cli.Context, runtime run.Runtime) error { + id := c.Args().First() + cmds := c.Args().Tail() + + log.Debugf(c.Context, "exec guest %s, cmd: %v", id, cmds) + msg, err := runtime.Svc.ExecuteGuest(runtime.Ctx, id, cmds) + if err != nil { + log.Errorf(c.Context, err, "exec guest error") + return err + } + if msg.ExitCode == 0 { + os.Stdout.Write(msg.Data) + } else { + os.Stderr.Write(msg.Data) + } + log.Debugf(c.Context, "+_+_+_ %s", string(msg.Data)) + return err +} diff --git a/cmd/guest/forward.go b/cmd/guest/forward.go deleted file mode 100644 index 9a0ac30..0000000 --- a/cmd/guest/forward.go +++ /dev/null @@ -1,54 +0,0 @@ -package guest - -import ( - "fmt" - - "github.com/urfave/cli/v2" - - "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/internal/models" - "github.com/projecteru2/yavirt/pkg/errors" -) - -func forwardFlags() []cli.Flag { - return []cli.Flag{ - &cli.StringFlag{ - Name: "status", - Required: true, - }, - } -} - -func forward(c *cli.Context, runtime run.Runtime) error { - validStatus := func(st string) error { - for _, status := range models.AllStatuses { - if st == status { - return nil - } - } - return errors.Errorf("invalid dest. 
status: %s", st) - } - - st := c.String("status") - if err := validStatus(st); err != nil { - return err - } - - id := c.Args().First() - if len(id) < 1 { - return errors.New("Guest ID is required") - } - - g, err := runtime.Guest.Load(runtime.VirtContext(), id) - if err != nil { - return err - } - - if err := g.ForwardStatus(st, false); err != nil { - return errors.Trace(err) - } - - fmt.Printf("%s forward to %s\n", id, st) - - return nil -} diff --git a/cmd/guest/get.go b/cmd/guest/get.go index aa9d61b..82009d9 100644 --- a/cmd/guest/get.go +++ b/cmd/guest/get.go @@ -5,8 +5,8 @@ import ( "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/pkg/errors" ) func get(c *cli.Context, runtime run.Runtime) error { @@ -15,34 +15,36 @@ func get(c *cli.Context, runtime run.Runtime) error { return errors.New("Guest ID is required") } - g, err := runtime.Guest.Load(runtime.VirtContext(), id) + g, err := runtime.Svc.GetGuest(runtime.Ctx, id) if err != nil { return err } fmt.Printf("guest: %s\n", g.ID) + fmt.Printf("Status: %s\n", g.Status) fmt.Printf("CPU: %d\n", g.CPU) - fmt.Printf("Memory: %d\n", g.Memory) + fmt.Printf("Memory: %d\n", g.Mem) - fmt.Println("volume:") - for _, vol := range g.Vols { - fmt.Printf(" %s\n", vol) - } + // TODO: add more information to guest + // fmt.Println("volume:") + // for _, vol := range g.Vols { + // fmt.Printf(" %s\n", vol) + // } fmt.Println("IP:") for _, ip := range g.IPs { - fmt.Printf(" %s, gw: %s\n", ip, ip.GatewayAddr()) + fmt.Printf(" %s\n", ip) } - hc, err := g.HealthCheck() - if err != nil { - if errors.Contain(err, errors.ErrKeyNotExists) { - return nil - } - return err - } - fmt.Println("HealthCheck:") - fmt.Printf(" %v\n", hc.TCPEndpoints()) - fmt.Printf(" %v\n", hc.HTTPEndpoints()) + // hc, err := g.HealthCheck() + // if err != nil { + // if errors.Contain(err, errors.ErrKeyNotExists) { + // return nil + // } + // return err + // } + // fmt.Println("HealthCheck:") + // fmt.Printf(" %v\n", hc.TCPEndpoints()) + // fmt.Printf(" %v\n", hc.HTTPEndpoints()) return nil } diff --git a/cmd/guest/guest.go b/cmd/guest/guest.go index 313ff9f..0ca8adb 100644 --- a/cmd/guest/guest.go +++ b/cmd/guest/guest.go @@ -4,13 +4,10 @@ import ( "github.com/urfave/cli/v2" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/internal/models" ) // Command . 
func Command() *cli.Command { - models.Setup() - return &cli.Command{ Name: "guest", Subcommands: []*cli.Command{ @@ -21,7 +18,7 @@ func Command() *cli.Command { { Name: "list", Flags: listFlags(), - Action: run.Run(list), + Action: run.Run(listCmd), }, { Name: "create", @@ -30,6 +27,7 @@ func Command() *cli.Command { }, { Name: "start", + Flags: controlFlags(), Action: run.Run(start), }, { @@ -42,18 +40,18 @@ func Command() *cli.Command { }, { Name: "stop", - Flags: stopFlags(), + Flags: controlFlags(), Action: run.Run(stop), }, { Name: "destroy", - Flags: destroyFlags(), + Flags: controlFlags(), Action: run.Run(destroy), }, { - Name: "forward", - Flags: forwardFlags(), - Action: run.Run(forward), + Name: "exec", + Flags: execFlags(), + Action: run.Run(exec), }, { Name: "resize", diff --git a/cmd/guest/list.go b/cmd/guest/list.go index 92cb62e..3195678 100644 --- a/cmd/guest/list.go +++ b/cmd/guest/list.go @@ -5,9 +5,11 @@ import ( "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/cmd/run" "github.com/projecteru2/yavirt/configs" "github.com/projecteru2/yavirt/internal/models" + "github.com/projecteru2/yavirt/pkg/terrors" ) func listFlags() []cli.Flag { @@ -21,7 +23,7 @@ func listFlags() []cli.Flag { } } -func list(c *cli.Context, _ run.Runtime) error { +func listCmd(c *cli.Context, _ run.Runtime) error { all := c.Bool("all") var err error @@ -35,7 +37,7 @@ func list(c *cli.Context, _ run.Runtime) error { } guests, err = models.GetNodeGuests(nodename) } - if err != nil { + if err != nil && !errors.Is(err, terrors.ErrKeyNotExists) { return err } diff --git a/cmd/guest/network.go b/cmd/guest/network.go index f8faa84..cb88582 100644 --- a/cmd/guest/network.go +++ b/cmd/guest/network.go @@ -5,8 +5,8 @@ import ( "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/pkg/errors" ) func connectExtraNetworkFlags() []cli.Flag { @@ -38,8 +38,8 @@ func disconnectExtraNetwork(c *cli.Context, runtime run.Runtime) error { network := c.String("network") - if err := runtime.Guest.DisconnectExtraNetwork(runtime.VirtContext(), id, network); err != nil { - return errors.Trace(err) + if err := runtime.Svc.DisconnectNetwork(runtime.Ctx, id, network); err != nil { + return errors.Wrap(err, "") } fmt.Printf("guest %s had been disconnected from network %s\n", id, network) @@ -56,9 +56,9 @@ func connectExtraNetwork(c *cli.Context, runtime run.Runtime) error { network := c.String("network") ipv4 := c.String("ipv4") - dest, err := runtime.Guest.ConnectExtraNetwork(runtime.VirtContext(), id, network, ipv4) + dest, err := runtime.Svc.ConnectNetwork(runtime.Ctx, id, network, ipv4) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } if len(ipv4) < 1 { diff --git a/cmd/guest/op.go b/cmd/guest/op.go index 1f901e7..92ea5cc 100644 --- a/cmd/guest/op.go +++ b/cmd/guest/op.go @@ -2,25 +2,16 @@ package guest import ( "fmt" - "time" "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/libyavirt/types" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/internal/virt" - "github.com/projecteru2/yavirt/pkg/errors" ) -func destroyFlags() []cli.Flag { - return []cli.Flag{ - &cli.BoolFlag{ - Name: "force", - Value: false, - }, - } -} - -func stopFlags() []cli.Flag { +func controlFlags() []cli.Flag { return []cli.Flag{ &cli.BoolFlag{ Name: "force", @@ -30,9 +21,13 @@ func stopFlags() []cli.Flag { } func 
start(c *cli.Context, runtime run.Runtime) error { - id, err := op(c, runtime, runtime.Guest.Start) - if err != nil { - return errors.Trace(err) + defer runtime.CancelFn() + + id := c.Args().First() + log.Debugf(c.Context, "Starting guest %s", id) + + if err := runtime.Svc.ControlGuest(runtime.Ctx, id, types.OpStart, c.Bool("force")); err != nil { + return errors.Wrap(err, "") } fmt.Printf("%s started\n", id) @@ -41,9 +36,12 @@ func start(c *cli.Context, runtime run.Runtime) error { } func suspend(c *cli.Context, runtime run.Runtime) error { - id, err := op(c, runtime, runtime.Guest.Suspend) - if err != nil { - return errors.Trace(err) + defer runtime.CancelFn() + + id := c.Args().First() + log.Debugf(c.Context, "Suspending guest %s", id) + if err := runtime.Svc.ControlGuest(runtime.Ctx, id, types.OpSuspend, false); err != nil { + return errors.Wrap(err, "") } fmt.Printf("%s suspended\n", id) @@ -52,9 +50,12 @@ func suspend(c *cli.Context, runtime run.Runtime) error { } func resume(c *cli.Context, runtime run.Runtime) error { - id, err := op(c, runtime, runtime.Guest.Resume) - if err != nil { - return errors.Trace(err) + defer runtime.CancelFn() + + id := c.Args().First() + log.Debugf(c.Context, "Resuming guest %s", id) + if err := runtime.Svc.ControlGuest(runtime.Ctx, id, types.OpResume, false); err != nil { + return errors.Wrap(err, "") } fmt.Printf("%s resumed\n", id) @@ -63,13 +64,12 @@ func resume(c *cli.Context, runtime run.Runtime) error { } func stop(c *cli.Context, runtime run.Runtime) error { - shut := func(ctx virt.Context, id string) error { - return runtime.Guest.Stop(ctx, id, c.Bool("force")) - } + defer runtime.CancelFn() - id, err := op(c, runtime, shut) - if err != nil { - return errors.Trace(err) + id := c.Args().First() + log.Debugf(c.Context, "Stopping guest %s", id) + if err := runtime.Svc.ControlGuest(runtime.Ctx, id, types.OpStop, c.Bool("force")); err != nil { + return errors.Wrap(err, "") } fmt.Printf("%s stopped\n", id) @@ -77,33 +77,18 @@ func stop(c *cli.Context, runtime run.Runtime) error { return nil } -func destroy(c *cli.Context, runtime run.Runtime) error { - destroy := func(ctx virt.Context, id string) error { - done, err := runtime.Guest.Destroy(ctx, id, c.Bool("force")) - if err != nil { - return errors.Trace(err) - } - - select { - case err := <-done: - return err - case <-time.After(time.Minute): - return errors.ErrTimeout - } - } +func destroy(c *cli.Context, runtime run.Runtime) (err error) { + defer runtime.CancelFn() + + id := c.Args().First() + log.Debugf(c.Context, "Destroying guest %s", id) - id, err := op(c, runtime, destroy) + err = runtime.Svc.ControlGuest(runtime.Ctx, id, types.OpDestroy, c.Bool("force")) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } fmt.Printf("%s destroyed\n", id) return nil } - -func op(c *cli.Context, runtime run.Runtime, fn func(virt.Context, string) error) (id string, err error) { - id = c.Args().First() - err = fn(runtime.VirtContext(), id) - return -} diff --git a/cmd/guest/resize.go b/cmd/guest/resize.go index 4d6615d..3a04879 100644 --- a/cmd/guest/resize.go +++ b/cmd/guest/resize.go @@ -5,9 +5,11 @@ import ( "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/utils" + "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/volume" + "github.com/projecteru2/yavirt/internal/volume/local" ) func resizeFlags() []cli.Flag { @@ 
-25,12 +27,13 @@ func resizeFlags() []cli.Flag { } func resize(c *cli.Context, runtime run.Runtime) (err error) { - vs := map[string]int64{} + vs := map[string]volume.Volume{} for _, raw := range c.StringSlice("volumes") { - mnt, cap := utils.PartRight(raw, ":") //nolint - if vs[mnt], err = utils.Atoi64(cap); err != nil { - return errors.Trace(err) + vol, err := local.NewVolumeFromStr(raw) + if err != nil { + return errors.Wrap(err, "") } + vs[vol.GetMountDir()] = vol } id := c.Args().First() @@ -40,8 +43,14 @@ func resize(c *cli.Context, runtime run.Runtime) (err error) { cpu := c.Int("cpu") mem := c.Int64("memory") - if err = runtime.Guest.Resize(runtime.VirtContext(), id, cpu, mem, vs); err != nil { - return errors.Trace(err) + req := &types.GuestResizeOption{ + ID: id, + CPU: cpu, + Mem: mem, + //TODO: add resources + } + if err = runtime.Svc.ResizeGuest(runtime.Ctx, id, req); err != nil { + return errors.Wrap(err, "") } fmt.Printf("%s resized\n", id) diff --git a/cmd/guest/snapshot.go b/cmd/guest/snapshot.go index 1285a31..f878441 100644 --- a/cmd/guest/snapshot.go +++ b/cmd/guest/snapshot.go @@ -5,8 +5,9 @@ import ( "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" + "github.com/projecteru2/libyavirt/types" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/pkg/errors" ) func listSnapshotFlags() []cli.Flag { @@ -68,18 +69,18 @@ func listSnapshot(c *cli.Context, runtime run.Runtime) error { } } - volSnap, err := runtime.Guest.ListSnapshot(runtime.VirtContext(), id, volID) + req := types.ListSnapshotReq{ + ID: id, + VolID: volID, + } + volSnap, err := runtime.Svc.ListSnapshot(runtime.Ctx, req) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - for vol, snaps := range volSnap { - fmt.Printf("Vol: %s\n", vol) - fmt.Printf("Total: %d snapshot(s)\n", len(snaps)) - for _, s := range snaps { - fmt.Printf("%s\n", s) - } - fmt.Println() + fmt.Printf("Total: %d snapshot(s)\n", len(volSnap)) + for _, snap := range volSnap { + fmt.Printf("%v\n", snap) } return nil @@ -97,7 +98,11 @@ func createSnapshot(c *cli.Context, runtime run.Runtime) error { return errors.New("Volume ID is required") } - return runtime.Guest.CreateSnapshot(runtime.VirtContext(), id, volID) + req := types.CreateSnapshotReq{ + ID: id, + VolID: volID, + } + return runtime.Svc.CreateSnapshot(runtime.Ctx, req) } func commitSnapshot(c *cli.Context, runtime run.Runtime) error { @@ -121,9 +126,14 @@ func commitSnapshot(c *cli.Context, runtime run.Runtime) error { } if len(snapID) > 0 { - return runtime.Guest.CommitSnapshot(runtime.VirtContext(), id, volID, snapID) + req := types.CommitSnapshotReq{ + ID: id, + VolID: volID, + SnapID: snapID, + } + return runtime.Svc.CommitSnapshot(runtime.Ctx, req) } - return runtime.Guest.CommitSnapshotByDay(runtime.VirtContext(), id, volID, day) + return runtime.Svc.CommitSnapshotByDay(runtime.Ctx, id, volID, day) } @@ -144,5 +154,10 @@ func restoreSnapshot(c *cli.Context, runtime run.Runtime) error { return errors.New("Snapshot ID is required") } - return runtime.Guest.RestoreSnapshot(runtime.VirtContext(), id, volID, snapID) + req := types.RestoreSnapshotReq{ + ID: id, + VolID: volID, + SnapID: snapID, + } + return runtime.Svc.RestoreSnapshot(runtime.Ctx, req) } diff --git a/cmd/image/image.go b/cmd/image/image.go index a042125..cd211a7 100644 --- a/cmd/image/image.go +++ b/cmd/image/image.go @@ -1,13 +1,21 @@ package image import ( + "context" "fmt" + "io" + "os" + "time" + "github.com/ceph/go-ceph/rados" + "github.com/ceph/go-ceph/rbd" 
"github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/internal/models" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/utils" + vmiFact "github.com/yuyang0/vmimage/factory" ) // Command . @@ -22,12 +30,10 @@ func Command() *cli.Command { }, { Name: "get", - Flags: getFlags(), Action: run.Run(get), }, { Name: "rm", - Flags: rmFlags(), Action: run.Run(rm), }, { @@ -41,6 +47,12 @@ func Command() *cli.Command { Flags: digestFlags(), Action: run.Run(digest), }, + { + Name: "rbd", + Usage: "", + Flags: rbdFlags(), + Action: run.Run(rbdAction), + }, }, } } @@ -54,23 +66,6 @@ func listFlags() []cli.Flag { } } -func rmFlags() []cli.Flag { - return []cli.Flag{ - &cli.StringFlag{ - Name: "user", - Usage: "the owner of an image", - }, - } -} - -func getFlags() []cli.Flag { - return []cli.Flag{ - &cli.StringFlag{ - Name: "user", - }, - } -} - func addFlags() []cli.Flag { return []cli.Flag{ &cli.Int64Flag{ @@ -95,14 +90,24 @@ func digestFlags() []cli.Flag { } } +func rbdFlags() []cli.Flag { + return []cli.Flag{ + &cli.BoolFlag{ + Name: "update", + Usage: "update rbd for image", + Value: false, + }, + } +} + func list(c *cli.Context, _ run.Runtime) error { - imgs, err := models.ListImages(c.String("user")) + imgs, err := vmiFact.ListLocalImages(c.Context, c.String("user")) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } for _, img := range imgs { - fmt.Printf("%s\n", img) + fmt.Printf("%s\n", img.Fullname()) } return nil @@ -113,13 +118,12 @@ func get(c *cli.Context, _ run.Runtime) error { if len(name) < 1 { return errors.New("image name is required") } - - img, err := models.LoadImage(name, c.String("user")) + img, err := vmiFact.LoadImage(c.Context, name) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - fmt.Printf("image: %s, user: %s, filepath: %s\n", img.GetName(), img.GetUser(), img.Filepath()) + fmt.Printf("image: %s, filepath: %s\n", img.Fullname(), img.Filepath()) return nil } @@ -135,24 +139,31 @@ func add(c *cli.Context, _ run.Runtime) error { case size < 1: return errors.New("--size is required") } - - img := models.NewSysImage() - img.Name = name - img.Size = size - - if err := img.Create(); err != nil { - return errors.Trace(err) - + img, err := vmiFact.NewImage(name) + if err != nil { + return err + } + fmt.Printf("*** Prepare image\n") + if rc, err := vmiFact.Prepare(filePath, img); err != nil { + return errors.Wrap(err, "") + } else { //nolint + defer rc.Close() + if _, err := io.Copy(os.Stdout, rc); err != nil { + return errors.Wrap(err, "") + } } - fmt.Printf("image %s created\n", img.Name) - - if len(filePath) > 0 { - // TODO: add image with file to check hash - // TODO: or download hash from image-hub - return nil + fmt.Printf("*** Push image\n") + if rc, err := vmiFact.Push(c.Context, img, false); err != nil { + return errors.Wrap(err, "") + } else { //nolint + defer rc.Close() + if _, err = io.Copy(os.Stdout, rc); err != nil { + return errors.Wrap(err, "") + } } + fmt.Printf("image %s created\n", img.Fullname()) return nil } @@ -162,17 +173,16 @@ func rm(c *cli.Context, _ run.Runtime) error { return errors.New("image name is required") } - user := c.String("user") - img, err := models.LoadImage(name, user) + img, err := vmiFact.LoadImage(c.Context, name) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - if err := img.Delete(); err != nil { - 
return errors.Trace(err) + if err := vmiFact.RemoveLocal(c.Context, img); err != nil { + return errors.Wrap(err, "") } - fmt.Printf("%s has been deleted\n", img) + fmt.Printf("%s has been deleted\n", img.Fullname()) return nil } @@ -188,22 +198,78 @@ func digest(c *cli.Context, _ run.Runtime) error { return nil } - img, err := models.LoadSysImage(name) + img, err := vmiFact.LoadImage(c.Context, name) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - if len(img.Hash) > 0 { - fmt.Printf("hash of %s: %s\n", img.Name, img.Hash) - return nil + fmt.Printf("hash of %s: %s\n", img.Fullname(), img.GetDigest()) + + return nil +} + +func createAndProtectSnapshot(pool, imgName, snapName string, update bool) error { + conn, err := rados.NewConnWithUser(configs.Conf.Storage.Ceph.Username) + if err != nil { + return err + } + if err := conn.ReadDefaultConfigFile(); err != nil { + return err + } + if err := conn.Connect(); err != nil { + return err + } + defer conn.Shutdown() + ctx, err := conn.OpenIOContext(pool) + if err != nil { + return err } + defer ctx.Destroy() - hash, err := img.UpdateHash() + rbdImage, err := rbd.OpenImage(ctx, imgName, rbd.NoSnapshot) if err != nil { - return errors.Trace(err) + return err + } + if update { + // rename snapshot + oldSnap := rbdImage.GetSnapshot(snapName) + oldName := fmt.Sprintf("%s_%d", snapName, time.Now().UnixNano()) + if err := oldSnap.Rename(oldName); err != nil { + return err + } + } + snapshot, err := rbdImage.CreateSnapshot(snapName) + if err != nil { + return err + } + return snapshot.Protect() +} + +func rbdAction(c *cli.Context, _ run.Runtime) error { + name := c.Args().First() + if len(name) < 1 { + return errors.New("image name is required") } - fmt.Printf("hash of %s: %s\n", img.Name, hash) + img, err := vmiFact.LoadImage(c.Context, name) + if err != nil { + return errors.Wrap(err, "") + } + + rbdDisk := fmt.Sprintf("rbd:eru/%s:id=%s", img.RBDName(), configs.Conf.Storage.Ceph.Username) + if c.Bool("update") { + if err := utils.ForceWriteBLK(context.TODO(), img.Filepath(), rbdDisk); err != nil { + return errors.Wrap(err, "") + } + } else { + if err := utils.WriteBLK(context.TODO(), img.Filepath(), rbdDisk, true); err != nil { + return errors.Wrap(err, "") + } + } + if err = createAndProtectSnapshot("eru", img.RBDName(), "latest", c.Bool("update")); err != nil { + return errors.Wrap(err, "") + } + fmt.Printf("write %s to %s successfully", name, rbdDisk) return nil } diff --git a/cmd/maint/fasten.go b/cmd/maint/fasten.go deleted file mode 100644 index 9cae147..0000000 --- a/cmd/maint/fasten.go +++ /dev/null @@ -1,250 +0,0 @@ -package maint - -import ( - "context" - "encoding/xml" - "fmt" - "path/filepath" - "strconv" - "strings" - - "github.com/urfave/cli/v2" - - "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/internal/models" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/libvirt" - "github.com/projecteru2/yavirt/pkg/netx" - "github.com/projecteru2/yavirt/pkg/store" -) - -var intIPSubnets = map[int64]int64{} - -// fasten all local guests that are dangling. 
-func fasten(_ *cli.Context, _ run.Runtime) error { - virt, err := libvirt.Connect("qemu:///system") - if err != nil { - return errors.Trace(err) - } - defer virt.Close() - - ids, err := virt.ListDomainsNames() - if err != nil { - return errors.Trace(err) - } - - prefix := filepath.Join(configs.Conf.EtcdPrefix, "ips", "/") - data, _, err := store.GetPrefix(context.Background(), prefix, (1<<32)-1) //nolint:gomnd // max value of int32 - if err != nil { - return errors.Trace(err) - } - for key := range data { - if !strings.Contains(key, "occupied") { - continue - } - key = strings.TrimPrefix(key, configs.Conf.EtcdPrefix) - key = strings.TrimLeft(key, "/") - parts := strings.Split(key, "/") - intSubnet, err := strconv.ParseInt(parts[1], 10, 64) - if err != nil { - return errors.Annotatef(err, "parse subnet %s failed", parts[1]) - } - intIP, err := strconv.ParseInt(parts[3], 10, 64) - if err != nil { - return errors.Annotatef(err, "parse subnet %d ip %s failed", intSubnet, parts[3]) - } - intIPSubnets[intIP] = intSubnet - } - - for _, id := range ids { - switch _, err := models.LoadGuest(id); { - case err == nil: - fmt.Printf("valid guest: %s\n", id) - continue - - case errors.IsKeyNotExistsErr(err): - if err := fastenDangling(id, virt); err != nil { - return errors.Trace(err) - } - - default: - return errors.Trace(err) - } - } - - return nil -} - -var ips = map[string]string{ - "guest-000104": "10.129.144.1", - "guest-000160": "10.129.144.24", - "guest-000172": "10.129.144.28", - "guest-000145": "10.129.144.15", - "guest-000189": "10.129.144.39", - "guest-000128": "10.129.144.11", - "guest-000156": "10.129.144.21", - "guest-000175": "10.129.144.30", - "guest-000157": "10.129.144.22", - "guest-000174": "10.129.144.32", - "guest-000155": "10.129.144.20", - "guest-000173": "10.129.144.31", - "guest-000144": "10.129.144.16", - "guest-000188": "10.129.144.38", - "guest-000164": "10.129.144.26", - "guest-000184": "10.129.144.36", - "guest-000150": "10.129.144.18", - "guest-000152": "10.129.144.19", - "guest-000183": "10.129.144.34", - "guest-000168": "10.129.144.27", - "guest-000177": "10.129.144.35", - "guest-000140": "10.129.144.14", - "guest-000186": "10.129.144.37", - "guest-000158": "10.129.144.23", - "guest-000171": "10.129.144.29", - "guest-000114": "10.129.144.4", - "guest-000162": "10.129.144.25", - "guest-000180": "10.129.144.33", - "guest-000138": "10.129.140.16", - "guest-000190": "10.129.140.36", - "guest-000170": "10.129.140.28", - "guest-000130": "10.129.140.13", - "guest-000154": "10.129.140.20", - "guest-000181": "10.129.140.32", - "guest-000167": "10.129.140.26", - "guest-000169": "10.129.140.27", - "guest-000166": "10.129.140.25", - "guest-000179": "10.129.140.31", - "guest-000117": "10.129.140.7", - "guest-000187": "10.129.140.35", - "guest-000165": "10.129.140.24", - "guest-000182": "10.129.140.33", - "guest-000163": "10.129.140.23", - "guest-000195": "10.129.140.37", - "guest-000129": "10.129.140.12", - "guest-000159": "10.129.140.21", - "guest-000185": "10.129.140.34", - "guest-000151": "10.129.140.18", - "guest-000176": "10.129.140.30", - "guest-000141": "10.129.140.17", - "guest-000153": "10.129.140.19", - "guest-000178": "10.129.140.29", - "guest-000142": "10.129.152.13", - "guest-000107": "10.129.152.2", - "guest-000136": "10.129.152.6", - "guest-000192": "10.129.152.15", - "guest-000143": "10.129.132.10", - "guest-000194": "10.129.132.13", - "guest-000137": "10.129.132.8", - "guest-000191": "10.129.132.11", - "guest-000139": "10.129.132.9", - "guest-000193": 
"10.129.132.12", -} - -func fastenDangling(id string, virt *libvirt.Libvirtee) error { - dom, err := virt.LookupDomain(id) - if err != nil { - return errors.Trace(err) - } - defer dom.Free() - - guest, err := models.NewGuest(nil, nil) - if err != nil { - return errors.Trace(err) - } - guest.HostName = configs.Hostname() - guest.ID = id - guest.ImageName = "ubuntu1604-sto" - - state, err := dom.GetState() - if err != nil { - return errors.Trace(err) - } - switch state { - case libvirt.DomainRunning: - guest.Status = models.StatusRunning - case libvirt.DomainShutoff: - guest.Status = models.StatusStopped - default: - return errors.Errorf("doesn't support %s", state) - } - - info, err := dom.GetInfo() - if err != nil { - return errors.Trace(err) - } - guest.CPU = int(info.NrVirtCpu) - guest.Memory = int64(info.MaxMem) * 1024 - - var flags libvirt.DomainXMLFlags - txt, err := dom.GetXMLDesc(flags) - if err != nil { - return errors.Trace(err) - } - dx := domainXML{} - if err = xml.Unmarshal([]byte(txt), &dx); err != nil { - return errors.Trace(err) - } - - for _, disk := range dx.Devices.Disks { - fn := filepath.Base(disk.Source.File) - if strings.HasPrefix(fn, "sys-") { - fn = fn[:len(fn)-4] // to remove '.vol' ext. - id = fn[4:] // to remove 'sys-' prefix. - if id = strings.TrimLeft(id, "0"); len(id) <= 3 { - id = fmt.Sprintf("%06s", id) - } else { - id = fmt.Sprintf("%32s", id) - } - guest.VolIDs = []string{id} - } - } - if len(guest.VolIDs) < 1 { - return errors.Errorf("guest %s can't find sys volume", guest.ID) - } - - ip := ips[guest.ID] - if len(ip) < 1 { - fmt.Printf("guest %s hasn't IP, skip it\n", guest.ID) - return nil - } - intIP, err := netx.IPv4ToInt(ip) - if err != nil { - return errors.Errorf("guest %s has invalid IP: %s", guest.ID, ip) - } - intSubnet, ok := intIPSubnets[intIP] - if !ok { - return errors.Errorf("guest %s IP %s hasn't subnet", guest.ID, ip) - } - guest.IPNets = meta.IPNets{ - &meta.IPNet{ - IntIP: intIP, - IntSubnet: intSubnet, - }, - } - - res := meta.Resources{guest} - data, err := res.Encode() - if err != nil { - return errors.Trace(err) - } - - if err := store.Create(context.Background(), data); err != nil { - return errors.Annotatef(err, "create %s failed", data) - } - - fmt.Printf("created %s\n", data) - - return nil -} - -type domainXML struct { - Devices struct { - Disks []struct { - Source struct { - File string `xml:"file,attr"` - } `xml:"source"` - } `xml:"disk"` - } `xml:"devices"` -} diff --git a/cmd/maint/maint.go b/cmd/maint/maint.go deleted file mode 100644 index 5cf7862..0000000 --- a/cmd/maint/maint.go +++ /dev/null @@ -1,20 +0,0 @@ -package maint - -import ( - "github.com/urfave/cli/v2" - - "github.com/projecteru2/yavirt/cmd/run" -) - -// Command . 
-func Command() *cli.Command { - return &cli.Command{ - Name: "maint", - Subcommands: []*cli.Command{ - { - Name: "fasten", - Action: run.Run(fasten), - }, - }, - } -} diff --git a/cmd/network/calico/align.go b/cmd/network/calico/align.go index b8a9fb7..5d9c79e 100644 --- a/cmd/network/calico/align.go +++ b/cmd/network/calico/align.go @@ -9,8 +9,11 @@ import ( libcaliopt "github.com/projectcalico/calico/libcalico-go/lib/options" "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/cmd/run" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/network/drivers/calico" + networkFactory "github.com/projecteru2/yavirt/internal/network/factory" ) func alignFlags() []cli.Flag { @@ -22,23 +25,28 @@ func alignFlags() []cli.Flag { } } -func align(c *cli.Context, runtime run.Runtime) error { - bound, err := getGatewayBoundIPs(runtime) +func align(c *cli.Context, _ run.Runtime) error { + bound, err := getGatewayBoundIPs() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - return alignGatewayIPs(runtime, bound, c.Bool("dry-run")) + return alignGatewayIPs(bound, c.Bool("dry-run")) } -func getGatewayBoundIPs(runtime run.Runtime) ([]net.IP, error) { - if err := runtime.CalicoHandler.InitGateway("yavirt-cali-gw"); err != nil { - return nil, errors.Trace(err) +func getGatewayBoundIPs() ([]net.IP, error) { + drv := networkFactory.GetDriver(network.CalicoMode) + if drv == nil { + return nil, errors.New("calico driver is not intialized") + } + cali, _ := drv.(*calico.Driver) + if err := cali.InitGateway("yavirt-cali-gw"); err != nil { + return nil, errors.Wrap(err, "") } - gw := runtime.CalicoHandler.Gateway() + gw := cali.Gateway() addrs, err := gw.ListAddr() if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } ips := make([]net.IP, addrs.Len()) @@ -49,8 +57,13 @@ func getGatewayBoundIPs(runtime run.Runtime) ([]net.IP, error) { return ips, nil } -func alignGatewayIPs(runtime run.Runtime, bound []net.IP, dryRun bool) error { - wep := runtime.CalicoHandler.GatewayWorkloadEndpoint() +func alignGatewayIPs(bound []net.IP, dryRun bool) error { + drv := networkFactory.GetDriver(network.CalicoMode) + if drv == nil { + return errors.New("calico driver is not intialized") + } + cali, _ := drv.(*calico.Driver) + wep := cali.GatewayWorkloadEndpoint() for _, bip := range bound { ipn := libcalinet.IPNet{ @@ -81,7 +94,12 @@ func alignGatewayIPs(runtime run.Runtime, bound []net.IP, dryRun bool) error { continue } - _, err := runtime.CalicoDriver.WorkloadEndpoint().WorkloadEndpoints().Update(context.Background(), wep, libcaliopt.SetOptions{}) + drv := networkFactory.GetDriver(network.CalicoMode) + if drv == nil { + return errors.New("calico driver is not intialized") + } + cali, _ := drv.(*calico.Driver) + _, err := cali.WorkloadEndpoints().Update(context.Background(), wep, libcaliopt.SetOptions{}) if err != nil { return err } diff --git a/cmd/run/run.go b/cmd/run/run.go index 4b8ec87..fee81e2 100644 --- a/cmd/run/run.go +++ b/cmd/run/run.go @@ -2,25 +2,17 @@ package run import ( "context" - "os" - "strings" "time" "github.com/urfave/cli/v2" + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + coretypes "github.com/projecteru2/core/types" "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/internal/models" - "github.com/projecteru2/yavirt/internal/virt" - 
"github.com/projecteru2/yavirt/internal/virt/guest/manager" - "github.com/projecteru2/yavirt/internal/vnet" - "github.com/projecteru2/yavirt/internal/vnet/calico" - calinet "github.com/projecteru2/yavirt/internal/vnet/calico" - "github.com/projecteru2/yavirt/internal/vnet/device" - calihandler "github.com/projecteru2/yavirt/internal/vnet/handler/calico" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/idgen" + "github.com/projecteru2/yavirt/internal/service" + "github.com/projecteru2/yavirt/internal/service/boar" "github.com/projecteru2/yavirt/pkg/netx" - "github.com/projecteru2/yavirt/pkg/store" ) var runtime Runtime @@ -30,17 +22,9 @@ type Runner func(*cli.Context, Runtime) error // Runtime . type Runtime struct { - SkipSetupHost bool - Host *models.Host - Device *device.Driver - CalicoDriver *calinet.Driver - CalicoHandler *calihandler.Handler - Guest manager.Manager -} - -// VirtContext . -func (r Runtime) VirtContext() virt.Context { - return virt.NewContext(context.Background(), r.CalicoHandler) + Ctx context.Context + CancelFn context.CancelFunc + Svc service.Service } // ConvDecimal . @@ -59,83 +43,35 @@ func (r Runtime) ConvDecimal(ipv4 string) int64 { // Run . func Run(fn Runner) cli.ActionFunc { - return func(c *cli.Context) error { + return func(c *cli.Context) (err error) { cfg := &configs.Conf - if err := cfg.Load([]string{c.String("config")}); err != nil { - return errors.Trace(err) + if err := cfg.Load(c.String("config")); err != nil { + return errors.Wrap(err, "") } if err := cfg.Prepare(c); err != nil { return err } - runtime.SkipSetupHost = c.Bool("skip-setup-host") - runtime.Guest = manager.New() - // when add host, we need skip host setup - if c.Command.FullName() == "host add" { - runtime.SkipSetupHost = true - } - if err := setup(); err != nil { - return errors.Trace(err) - } - - return fn(c, runtime) - } -} -func setup() error { - if err := store.Setup("etcd"); err != nil { - return errors.Trace(err) - } - - if runtime.SkipSetupHost { - return nil - } - - if err := setupHost(); err != nil { - return errors.Trace(err) - } - - idgen.Setup(runtime.Host.ID, time.Now()) - - if runtime.Host.NetworkMode == vnet.NetworkCalico { - if err := setupCalico(); err != nil { - return errors.Trace(err) + // always send log to stdout + svcLog := &coretypes.ServerLogConfig{ + Level: configs.Conf.Log.Level, + UseJSON: configs.Conf.Log.UseJSON, + Filename: configs.Conf.Log.Filename, + MaxSize: configs.Conf.Log.MaxSize, + MaxAge: configs.Conf.Log.MaxAge, + MaxBackups: configs.Conf.Log.MaxBackups, } - } - - return nil -} - -func setupHost() (err error) { - if runtime.Host, err = models.LoadHost(); err != nil { - return errors.Trace(err) - } - - return nil -} - -func setupCalico() (err error) { - if endps := os.Getenv("ETCD_ENDPOINTS"); len(endps) < 1 { - if err = os.Setenv("ETCD_ENDPOINTS", strings.Join(configs.Conf.EtcdEndpoints, ",")); err != nil { - return + if err := log.SetupLog(c.Context, svcLog, configs.Conf.Log.SentryDSN); err != nil { + return err } - } - - if runtime.Device, err = device.New(); err != nil { - return - } - if runtime.CalicoDriver, err = calico.NewDriver(configs.Conf.CalicoConfigFile, configs.Conf.CalicoPoolNames); err != nil { - return - } + runtime.Ctx, runtime.CancelFn = context.WithTimeout(context.Background(), time.Duration(c.Int("timeout"))*time.Second) + runtime.Svc, err = boar.New(c.Context, &configs.Conf, nil) + if err != nil { + return errors.Wrap(err, "") + } - var outboundIP string - if outboundIP, err = 
netx.GetOutboundIP(configs.Conf.Core.Addrs[0]); err != nil {
-		return
+		return fn(c, runtime)
 	}
-
-	runtime.CalicoHandler = calihandler.New(runtime.Device, runtime.CalicoDriver, configs.Conf.CalicoPoolNames, outboundIP)
-	err = runtime.CalicoHandler.InitGateway(configs.Conf.CalicoGatewayName)
-
-	return
 }
diff --git a/config.example.toml b/config.example.toml
new file mode 100644
index 0000000..c306c0b
--- /dev/null
+++ b/config.example.toml
@@ -0,0 +1,110 @@
+env = "dev"
+graceful_timeout = "20s"
+virt_timeout = "1h"
+health_check_timeout = "2s"
+qmp_connect_timeout = "8s"
+
+resize_volume_min_ratio = 0.05
+resize_volume_min_size = 10737418240
+
+max_concurrency = 100000 # optional, default 100000 for pool size
+max_snapshots_count = 30
+snapshot_restorable_days = 7
+
+meta_timeout = "1m"
+meta_type = "etcd"
+
+virt_dir = "/opt/yavirtd"
+virt_bridge = "yavirbr0"
+virt_cpu_cache_passthrough = true
+
+ga_disk_timeout = "16m"
+ga_boot_timeout = "30m"
+
+recovery_on = false
+recovery_max_retries = 2
+recovery_retry_interval = "3m"
+recovery_interval = "10m"
+
+cert_path = "/etc/eru/tls" # optional, if you need connect to daemon without https
+
+
+[log]
+level = "info"
+use_json = false
+filename = ""
+
+[resource]
+min_cpu = 1
+max_cpu = 112
+min_memory = 536870912 # 0.5GB
+max_memory = 549755813888 # 512GB
+
+[host]
+id = "unique id for host"
+addr = "{{ inventory_hostname }}"
+name = "{{ node_yavirt_name }}"
+subnet = "127.0.0.1"
+cpu = 0
+memory = ""
+storage = ""
+network = "calico"
+
+[eru]
+addrs = ["127.0.0.1:5001"]
+username = "{{ core_username }}"
+password = "{{ core_password }}"
+status_check_interval = "64s"
+
+[etcd]
+prefix = "/yavirt/v1"
+endpoints = ["127.0.0.1:2379"]
+
+[network]
+modes = ["calico"]
+default_mode = "calico"
+
+[network.calico]
+pools = ["{{ calico_ippool_name }}"]
+
+[network.cni]
+plugin_path = "/usr/bin/yavirt-cni"
+config_path = "/etc/cni/net.d/yavirt-cni.conf"
+
+[network.ovn]
+nb_addr = "{{ ovn_nb_addr }}"
+ovsdb_addr = "{{ ovsdb_addr }}"
+
+[image_hub]
+type = "docker"
+prefix = "{{ image_prefix }}"
+username = "{{ image_hub_username }}"
+password = "{{ image_hub_password }}"
+pull_policy = "{{ image_pull_policy }}"
+
+[auth]
+username = "{{ yavirt_username }}"
+password = "{{ yavirt_password }}"
+
+[storage]
+init_guest_volume = false
+[storage.ceph]
+monitor_addrs = ["127.0.0.1:6789"]
+username = "{{ ceph_username}}"
+secret_uuid = "{{ ceph_secret_uuid }}"
+
+[notify]
+type = "all"
+
+[notify.all]
+types = ["dingding", "mail"]
+
+[notify.dingding]
+token = "{{ dingtalk_token }}"
+
+[notify.mail]
+smtp_host = "smtp.qiye.aliyun.com"
+smtp_port = 465
+sender = "{{ email_sender }}"
+password = "{{ email_password }}"
+receivers = ["user1@xxx.com"]
diff --git a/configs/batch.go b/configs/batch.go
index f67aa19..9217fa7 100644
--- a/configs/batch.go
+++ b/configs/batch.go
@@ -2,18 +2,20 @@ package configs
 
 import (
 	"strings"
+	"time"
 
-	"github.com/projecteru2/yavirt/pkg/errors"
+	"github.com/cockroachdb/errors"
+	"github.com/projecteru2/yavirt/pkg/terrors"
 )
 
 // Batch .
 type Batch struct {
-	Bins     []string `toml:"bins"`
-	FlagFile string   `toml:"flag_file"`
-	ForceOK  bool     `toml:"force_ok"`
-	Timeout  Duration `toml:"timeout"`
-	Retry    bool     `toml:"retry"`
-	Interval Duration `toml:"interval"`
+	Bins     []string      `toml:"bins"`
+	FlagFile string        `toml:"flag_file"`
+	ForceOK  bool          `toml:"force_ok"`
+	Timeout  time.Duration `toml:"timeout"`
+	Retry    bool          `toml:"retry"`
+	Interval time.Duration `toml:"interval"`
 }
 
 // IsRunOnce .
@@ -28,7 +30,7 @@ func (b Batch) GetCommands() (map[string][]string, error) { for _, bin := range b.Bins { switch parts := strings.Split(bin, " "); len(parts) { case 0: - return nil, errors.Annotatef(errors.ErrInvalidValue, "invalid command: %s", bin) + return nil, errors.Wrapf(terrors.ErrInvalidValue, "invalid command: %s", bin) case 1: cmds[parts[0]] = nil default: diff --git a/configs/check.go b/configs/check.go index c997cf7..be561c2 100644 --- a/configs/check.go +++ b/configs/check.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" ) type checker struct { //nolint @@ -29,15 +29,15 @@ func (c *checker) check() (err error) { //nolint } if c.fieldObj, c.val, err = c.getFieldValue(reflect.ValueOf(c.conf), c.field); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } if err := c.checkEnum(); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } if err := c.checkRange(); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } return diff --git a/configs/codec.go b/configs/codec.go index 6d40dff..2f7a0f8 100644 --- a/configs/codec.go +++ b/configs/codec.go @@ -5,13 +5,13 @@ import ( "github.com/BurntSushi/toml" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" ) // Decode . func Decode(raw string, conf *Config) error { if _, err := toml.Decode(raw, conf); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } return nil } @@ -26,7 +26,7 @@ func Encode(conf *Config, noIndents ...bool) (string, error) { } if err := enc.Encode(conf); err != nil { - return "", errors.Trace(err) + return "", errors.Wrap(err, "") } return buf.String(), nil diff --git a/configs/config.go b/configs/config.go index 48eed27..445c282 100644 --- a/configs/config.go +++ b/configs/config.go @@ -1,43 +1,34 @@ package configs import ( - "crypto/tls" "os" "path/filepath" "strings" "time" - _ "embed" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/transport" - + "github.com/cockroachdb/errors" "github.com/dustin/go-humanize" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/log" + "github.com/mcuadros/go-defaults" "github.com/projecteru2/yavirt/pkg/netx" + "github.com/projecteru2/yavirt/pkg/notify/bison" + "github.com/projecteru2/yavirt/pkg/utils" "github.com/urfave/cli/v2" + + coretypes "github.com/projecteru2/core/types" + vmitypes "github.com/yuyang0/vmimage/types" ) var ( - //go:embed default-config.toml - DefaultTemplate string - Conf = newDefault() + Conf = newDefault() ) type sizeType int64 type subnetType int64 -type CoreConfig struct { - Addrs []string `toml:"addrs"` - Username string `toml:"username"` - Password string `toml:"password"` - StatusCheckInterval Duration `toml:"status_check_interval"` - NodeStatusTTL Duration `toml:"nodestatus_ttl"` - Nodename string `toml:"nodename"` -} - func (a *sizeType) UnmarshalText(text []byte) error { + if len(text) == 0 { + return nil + } var err error i, err := humanize.ParseBytes(string(text)) if err != nil { @@ -60,90 +51,225 @@ func (a *subnetType) UnmarshalText(text []byte) error { return nil } +// HealthCheckConfig contain healthcheck config +type HealthCheckConfig struct { + Interval int `toml:"interval" default:"60"` + Timeout int `toml:"timeout" default:"10"` + CacheTTL int64 `toml:"cache_ttl" default:"300"` + EnableDefaultChecker bool `toml:"enable_default_checker" default:"true"` +} + +// Config contain all configs +type EruConfig struct { + 
Enable bool `toml:"enable" default:"true"` + Addrs []string `toml:"addrs"` + Username string `toml:"username"` + Password string `toml:"password"` + Podname string `toml:"podname" default:"virt"` + + Hostname string `toml:"-"` + HeartbeatInterval int `toml:"heartbeat_interval" default:"60"` + Labels []string `toml:"labels"` // node labels + + CheckOnlyMine bool `toml:"check_only_mine" default:"false"` + + HealthCheck HealthCheckConfig `toml:"healthcheck"` + + GlobalConnectionTimeout time.Duration `toml:"global_connection_timeout" default:"5s"` +} + +// GetHealthCheckStatusTTL returns the TTL for health check status. +// Because selfmon is integrated in eru-core, so there returns 0. +func (config *EruConfig) GetHealthCheckStatusTTL() int64 { + return 0 +} + type HostConfig struct { - Name string `json:"name" toml:"name"` - Addr string `json:"addr" toml:"addr"` - Type string `json:"type" toml:"type"` - Subnet subnetType `json:"subnet" toml:"subnet"` - CPU int `json:"cpu" toml:"cpu"` - Memory sizeType `json:"memory" toml:"memory"` - Storage sizeType `json:"storage" toml:"storage"` - NetworkMode string `json:"network,omitempty" toml:"network"` + ID uint32 `json:"id" toml:"id"` + Name string `json:"name" toml:"name"` + Addr string `json:"addr" toml:"addr"` + Type string `json:"type" toml:"type"` + Subnet subnetType `json:"subnet" toml:"subnet"` + CPU int `json:"cpu" toml:"cpu"` + Memory sizeType `json:"memory" toml:"memory"` + Storage sizeType `json:"storage" toml:"storage"` +} + +type ETCDConfig struct { + Prefix string `toml:"prefix" default:"/yavirt/v1"` + Endpoints []string `toml:"endpoints" default:"[http://127.0.0.1:2379]"` + Username string `toml:"username"` + Password string `toml:"password"` + CA string `toml:"ca"` + Key string `toml:"key"` + Cert string `toml:"cert"` +} + +type CalicoConfig struct { + ConfigFile string `toml:"config_file" default:"/etc/calico/calicoctl.cfg"` + Nodename string `toml:"nodename"` + PoolNames []string `toml:"pools" default:"[clouddev]"` + GatewayName string `toml:"gateway" default:"yavirt-cali-gw"` + ETCDEnv string `toml:"etcd_env" default:"ETCD_ENDPOINTS"` + IFNamePrefix string `toml:"ifname_prefix" default:"cali"` +} + +func (c *CalicoConfig) Check() error { + if len(c.ConfigFile) == 0 && len(os.Getenv(c.ETCDEnv)) == 0 { + return errors.New("either config_file or etcd_env must be set") + } + return nil +} + +type CNIConfig struct { + PluginPath string `toml:"plugin_path" default:"/usr/bin/yavirt-cni"` + ConfigPath string `toml:"config_path" default:"/etc/cni/net.d/yavirt-cni.conf"` + IFNamePrefix string `toml:"ifname_prefix" default:"yap"` +} + +func (c *CNIConfig) Check() error { + if c.PluginPath == "" || c.ConfigPath == "" { + return errors.New("cni config must be set") + } + return nil +} + +type VlanConfig struct { + Subnet subnetType `json:"subnet" toml:"subnet"` + IFNamePrefix string `toml:"ifname_prefix" default:"yap"` +} + +func (c *VlanConfig) Check() error { + return nil +} + +type OVNConfig struct { + NBAddrs []string `toml:"nb_addrs" default:"[tcp:127.0.0.1:6641]"` + OVSDBAddr string `toml:"ovsdb_addr" default:"unix:/var/run/openvswitch/db.sock"` + IFNamePrefix string `toml:"ifname_prefix" default:"yap"` +} + +func (c *OVNConfig) Check() error { + if len(c.NBAddrs) == 0 || c.OVSDBAddr == "" { + return errors.New("ovn config must be set") + } + return nil +} + +type NetworkConfig struct { + Modes []string `toml:"modes" default:"[calico]"` // supported network modes + DefaultMode string `toml:"default_mode" default:"calico"` + Calico 
CalicoConfig `toml:"calico"` + CNI CNIConfig `toml:"cni"` + Vlan VlanConfig `toml:"vlan"` + OVN OVNConfig `toml:"ovn"` +} + +type CephConfig struct { + MonitorAddrs []string `toml:"monitor_addrs"` + Username string `toml:"username" default:"eru"` + SecretUUID string `toml:"secret_uuid"` +} + +type LocalConfig struct { + Dir string `toml:"dir"` +} + +type StorageConfig struct { + InitGuestVolume bool `toml:"init_guest_volume"` + Ceph CephConfig `toml:"ceph"` + Local LocalConfig `toml:"local"` +} + +type ResourceConfig struct { + MinCPU int `toml:"min_cpu" default:"1"` + MaxCPU int `toml:"max_cpu" default:"112"` + MinMemory int64 `toml:"min_memory" default:"536870912"` // default: 512M + MaxMemory int64 `toml:"max_memory" default:"549755813888"` // default: 512G + ReservedMemory int64 `toml:"reserved_memory" default:"10737418240"` // default: 10GB + MinVolumeCap int64 `toml:"min_volume" default:"1073741824"` + MaxVolumeCap int64 `toml:"max_volume" default:"1099511627776"` + MaxVolumesCount int `toml:"max_volumes_count" default:"8"` + Bandwidth int64 `toml:"bandwidth" default:"50000000000"` + ExcludePCIs []string `toml:"exclude_pcis"` + + GPUProductMap map[string]string `toml:"gpu_product_map"` +} + +type VMAuthConfig struct { + Username string `toml:"username" default:"root"` + Password string `toml:"password" default:"root"` +} + +type LogConfig struct { + Level string `toml:"level" default:"info"` + UseJSON bool `toml:"use_json"` + SentryDSN string `toml:"sentry_dsn"` + Verbose bool `toml:"verbose"` + // for file log + Filename string `toml:"filename"` + MaxSize int `toml:"maxsize" default:"500"` + MaxAge int `toml:"max_age" default:"28"` + MaxBackups int `toml:"max_backups" default:"3"` } // Config . type Config struct { - Env string `toml:"env"` - // host-related config - Host HostConfig `toml:"host"` - Core CoreConfig `toml:"core"` + Env string `toml:"env" default:"dev"` + CertPath string `toml:"cert_path" default:"/etc/eru/tls"` - ProfHTTPPort int `toml:"prof_http_port"` - BindHTTPAddr string `toml:"bind_http_addr"` - BindGRPCAddr string `toml:"bind_grpc_addr"` + MaxConcurrency int `toml:"max_concurrency" default:"100000"` + ProfHTTPPort int `toml:"prof_http_port" default:"9999"` + BindHTTPAddr string `toml:"bind_http_addr" default:"0.0.0.0:9696"` + BindGRPCAddr string `toml:"bind_grpc_addr" default:"0.0.0.0:9697"` SkipGuestReportRegexps []string `toml:"skip_guest_report_regexps"` - EnabledCalicoCNI bool `toml:"enabled_calico_cni"` - CNIPluginPath string `toml:"cni_plugin_path"` - CNIConfigPath string `toml:"cni_config_path"` - - VirtTimeout Duration `toml:"virt_timeout"` - GracefulTimeout Duration `toml:"graceful_timeout"` - HealthCheckTimeout Duration `toml:"health_check_timeout"` - QMPConnectTimeout Duration `toml:"qmp_connect_timeout"` - - ImageHubDomain string `toml:"image_hub_domain"` - ImageHubNamespace string `toml:"image_hub_namespace"` - - GADiskTimeout Duration `toml:"ga_disk_timeout"` - GABootTimeout Duration `toml:"ga_boot_timeout"` - - ResizeVolumeMinRatio float64 `toml:"resize_volume_min_ratio"` - ResizeVolumeMinSize int64 `toml:"resize_volume_min_size"` - - MinCPU int `toml:"min_cpu"` - MaxCPU int `toml:"max_cpu"` - MinMemory int64 `toml:"min_memory"` - MaxMemory int64 `toml:"max_memory"` - MinVolumeCap int64 `toml:"min_volume"` - MaxVolumeCap int64 `toml:"max_volume"` - MaxVolumesCount int `toml:"max_volumes_count"` - MaxSnapshotsCount int `toml:"max_snapshots_count"` - SnapshotRestorableDay int `toml:"snapshot_restorable_days"` - - CalicoConfigFile string 
`toml:"calico_config_file"` - CalicoPoolNames []string `toml:"calico_pools"` - CalicoGatewayName string `toml:"calico_gateway"` - CalicoETCDEnv string `toml:"calico_etcd_env"` - - MetaTimeout Duration `toml:"meta_timeout"` - MetaType string `toml:"meta_type"` - - VirtDir string `toml:"virt_dir"` + EnableLibvirtMetrics bool `toml:"enable_libvirt_metrics"` + + VirtTimeout time.Duration `toml:"virt_timeout" default:"60m"` + GracefulTimeout time.Duration `toml:"graceful_timeout" default:"20s"` + HealthCheckTimeout time.Duration `toml:"health_check_timeout" default:"2s"` + QMPConnectTimeout time.Duration `toml:"qmp_connect_timeout" default:"8s"` + MemStatsPeriod int `toml:"mem_stats_period" default:"10"` // in seconds + + GADiskTimeout time.Duration `toml:"ga_disk_timeout" default:"16m"` + GABootTimeout time.Duration `toml:"ga_boot_timeout" default:"30m"` + + ResizeVolumeMinRatio float64 `toml:"resize_volume_min_ratio" default:"0.001"` + ResizeVolumeMinSize int64 `toml:"resize_volume_min_size" default:"1073741824"` // default 1GB + + MaxSnapshotsCount int `toml:"max_snapshots_count" default:"30"` + SnapshotRestorableDay int `toml:"snapshot_restorable_days" default:"7"` + + MetaTimeout time.Duration `toml:"meta_timeout" default:"1m"` + MetaType string `toml:"meta_type" default:"etcd"` + + VirtDir string `toml:"virt_dir" default:"/opt/yavirtd"` VirtFlockDir string `toml:"virt_flock_dir"` VirtTmplDir string `toml:"virt_temp_dir"` - VirtSockDir string `toml:"virt_sock_dir"` - VirtBridge string `toml:"virt_bridge"` - VirtCPUCachePassthrough bool `toml:"virt_cpu_cache_passthrough"` - - LogLevel string `toml:"log_level"` - LogFile string `toml:"log_file"` - LogSentry string `toml:"log_sentry"` - - EtcdPrefix string `toml:"etcd_prefix"` - EtcdEndpoints []string `toml:"etcd_endpoints"` - EtcdUsername string `toml:"etcd_username"` - EtcdPassword string `toml:"etcd_password"` - EtcdCA string `toml:"etcd_ca"` - EtcdKey string `toml:"etcd_key"` - EtcdCert string `toml:"etcd_cert"` + VirtCloudInitDir string `toml:"virt_cloud_init_dir"` + VirtBridge string `toml:"virt_bridge" default:"yavirbr0"` + VirtCPUCachePassthrough bool `toml:"virt_cpu_cache_passthrough" default:"true"` Batches []*Batch `toml:"batches"` // system recovery - RecoveryOn bool `toml:"recovery_on"` - RecoveryMaxRetries int `toml:"recovery_max_retries"` - RecoveryRetryInterval Duration `toml:"recovery_retry_interval"` - RecoveryInterval Duration `toml:"recovery_interval"` + RecoveryOn bool `toml:"recovery_on"` + RecoveryMaxRetries int `toml:"recovery_max_retries" default:"2"` + RecoveryRetryInterval time.Duration `toml:"recovery_retry_interval" default:"3m"` + RecoveryInterval time.Duration `toml:"recovery_interval" default:"10m"` + + // host-related config + Host HostConfig `toml:"host"` + Eru EruConfig `toml:"eru"` + Etcd ETCDConfig `toml:"etcd"` + Network NetworkConfig `toml:"network"` + Storage StorageConfig `toml:"storage"` + Resource ResourceConfig `toml:"resource"` + ImageHub vmitypes.Config `toml:"image_hub"` + Auth coretypes.AuthConfig `toml:"auth"` // grpc auth + VMAuth VMAuthConfig `toml:"vm_auth"` + Log LogConfig `toml:"log"` + Notify bison.Config `toml:"notify"` } func Hostname() string { @@ -151,12 +277,9 @@ func Hostname() string { } func newDefault() Config { - var conf Config - if err := Decode(DefaultTemplate, &conf); err != nil { - log.FatalStack(err) - } - - return conf + conf := new(Config) + defaults.SetDefaults(conf) + return *conf } // Dump . @@ -165,10 +288,10 @@ func (cfg *Config) Dump() (string, error) { } // Load . 
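newDefault above now builds the default Config by filling the `default:"..."` struct tags with github.com/mcuadros/go-defaults, replacing the embedded default-config.toml that is deleted later in this diff. The TestDefault added below also depends on the pinned library handling duration tags ("5s") and bracketed slice tags ("[http://127.0.0.1:2379]"). A small illustrative sketch with a stand-in struct, not the project's types:

    package main

    import (
        "fmt"

        "github.com/mcuadros/go-defaults"
    )

    // Stand-in for configs.Config: zero-valued fields are filled from the tags.
    type Config struct {
        Env          string `default:"dev"`
        ProfHTTPPort int    `default:"9999"`
        BindGRPCAddr string `default:"0.0.0.0:9697"`
    }

    func main() {
        cfg := new(Config)
        defaults.SetDefaults(cfg)
        fmt.Println(cfg.Env, cfg.ProfHTTPPort, cfg.BindGRPCAddr) // dev 9999 0.0.0.0:9697
    }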
-func (cfg *Config) Load(files []string) error { +func (cfg *Config) Load(files ...string) error { for _, path := range files { if err := DecodeFile(path, cfg); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } } return nil @@ -193,24 +316,41 @@ func (cfg *Config) Prepare(c *cli.Context) (err error) { } if c.String("log-level") != "" { - cfg.LogLevel = c.String("log-level") + cfg.Log.Level = c.String("log-level") } if len(c.StringSlice("core-addrs")) > 0 { - cfg.Core.Addrs = c.StringSlice("core-addrs") + cfg.Eru.Addrs = c.StringSlice("core-addrs") } if c.String("core-username") != "" { - cfg.Core.Username = c.String("core-username") + cfg.Eru.Username = c.String("core-username") } if c.String("core-password") != "" { - cfg.Core.Password = c.String("core-password") + cfg.Eru.Password = c.String("core-password") } // prepare ETCD_ENDPOINTS(Calico needs this environment variable) - if len(cfg.EtcdEndpoints) > 0 { - if err = os.Setenv("ETCD_ENDPOINTS", strings.Join(cfg.EtcdEndpoints, ",")); err != nil { + if !utils.FileExists(cfg.Network.Calico.ConfigFile) { + cfg.Network.Calico.ConfigFile = "" + } + if cfg.Network.Calico.Nodename == "" { + cfg.Network.Calico.Nodename = cfg.Host.Name + } + if len(cfg.Etcd.Endpoints) > 0 { + if err = os.Setenv(cfg.Network.Calico.ETCDEnv, strings.Join(cfg.Etcd.Endpoints, ",")); err != nil { return err } } + if cfg.CertPath != "" { + if cfg.Etcd.CA == "" { + cfg.Etcd.CA = filepath.Join(cfg.CertPath, "etcd", "ca.pem") + } + if cfg.Etcd.Cert == "" { + cfg.Etcd.Cert = filepath.Join(cfg.CertPath, "etcd", "cert.pem") + } + if cfg.Etcd.Key == "" { + cfg.Etcd.Key = filepath.Join(cfg.CertPath, "etcd", "key.pem") + } + } if cfg.Host.Addr == "" { return errors.New("Address must be provided") @@ -219,19 +359,71 @@ func (cfg *Config) Prepare(c *cli.Context) (err error) { if cfg.Host.Name == "" { return errors.New("Hostname must be provided") } - if len(cfg.Core.Addrs) == 0 { + // Network + if err := cfg.checkNetwork(); err != nil { + return err + } + // eru + if len(cfg.Eru.Addrs) == 0 { return errors.New("Core addresses are needed") } - + cfg.Eru.Hostname = cfg.Host.Name + if err := cfg.ImageHub.CheckAndRefine(); err != nil { + return err + } return cfg.loadVirtDirs() } +func (cfg *Config) checkNetwork() error { + if len(cfg.Network.Modes) == 0 { + return errors.New("Network modes must be provided") + } + if cfg.Network.DefaultMode == "" { + cfg.Network.DefaultMode = cfg.Network.Modes[0] + } + var found bool + for _, mode := range cfg.Network.Modes { + if mode == cfg.Network.DefaultMode { + found = true + break + } + switch mode { + case "cni": + if err := cfg.Network.CNI.Check(); err != nil { + return err + } + case "calico": + if err := cfg.Network.Calico.Check(); err != nil { + return err + } + case "ovn": + if err := cfg.Network.OVN.Check(); err != nil { + return err + } + case "vlan": + if err := cfg.Network.Vlan.Check(); err != nil { + return err + } + default: + return errors.New("Invalid network mode") + } + } + if !found { + return errors.New("Invalid default network mode") + } + return nil +} + func (cfg *Config) loadVirtDirs() error { cfg.VirtFlockDir = filepath.Join(cfg.VirtDir, "flock") cfg.VirtTmplDir = filepath.Join(cfg.VirtDir, "template") - cfg.VirtSockDir = filepath.Join(cfg.VirtDir, "sock") + cfg.VirtCloudInitDir = filepath.Join(cfg.VirtDir, "cloud-init") + // ensure directories - for _, d := range []string{cfg.VirtFlockDir, cfg.VirtTmplDir, cfg.VirtSockDir} { + for _, d := range []string{cfg.VirtFlockDir, cfg.VirtTmplDir, 
cfg.VirtCloudInitDir} { + if err := os.MkdirAll(d, 0755); err != nil && !os.IsExist(err) { + return err + } err := os.MkdirAll(d, 0755) if err != nil && !os.IsExist(err) { return err @@ -239,49 +431,3 @@ func (cfg *Config) loadVirtDirs() error { } return nil } - -// NewEtcdConfig . -func (cfg *Config) NewEtcdConfig() (etcdcnf clientv3.Config, err error) { - etcdcnf.Endpoints = cfg.EtcdEndpoints - etcdcnf.Username = cfg.EtcdUsername - etcdcnf.Password = cfg.EtcdPassword - etcdcnf.TLS, err = cfg.newEtcdTLSConfig() - return -} - -func (cfg *Config) newEtcdTLSConfig() (*tls.Config, error) { - if len(cfg.EtcdCA) < 1 || len(cfg.EtcdKey) < 1 || len(cfg.EtcdCert) < 1 { - return nil, nil //nolint - } - - return transport.TLSInfo{ - TrustedCAFile: cfg.EtcdCA, - KeyFile: cfg.EtcdKey, - CertFile: cfg.EtcdCert, - }.ClientConfig() -} - -// CoreGuestStatusTTL . -func (cfg *Config) CoreGuestStatusTTL() time.Duration { - return 3 * cfg.Core.StatusCheckInterval.Duration() //nolint:gomnd // TTL is 3 times the interval -} - -// CoreGuestStatusCheckInterval . -func (cfg *Config) CoreGuestStatusCheckInterval() time.Duration { - return cfg.Core.StatusCheckInterval.Duration() -} - -// CoreGRPCTimeout . -func (cfg *Config) CoreGRPCTimeout() time.Duration { - return cfg.CoreStatusReportInterval() / 3 //nolint:gomnd // report timeout 3 times per interval -} - -// CoreStatusReportInterval . -func (cfg *Config) CoreStatusReportInterval() time.Duration { - return cfg.Core.StatusCheckInterval.Duration() / 3 //nolint:gomnd // report 3 times every check -} - -// HasImageHub indicates whether the config has ImageHub configurations. -func (cfg *Config) HasImageHub() bool { - return len(cfg.ImageHubDomain) > 0 && len(cfg.ImageHubNamespace) > 0 -} diff --git a/configs/config_test.go b/configs/config_test.go index 282abbc..2437597 100644 --- a/configs/config_test.go +++ b/configs/config_test.go @@ -2,6 +2,7 @@ package configs import ( "testing" + "time" "github.com/BurntSushi/toml" "github.com/projecteru2/yavirt/pkg/test/assert" @@ -9,6 +10,10 @@ import ( func TestHostConfig(t *testing.T) { ss := ` +meta_timeout = "3m" +ga_disk_timeout = "6m" +ga_boot_timeout = "10m" + [host] name = "host1" subnet = "127.0.0.1" @@ -16,10 +21,91 @@ cpu = 4 memory = "1gib" storage = "40gi" network = "calico" + +[resource.gpu_product_map] +"Nvidia 3070" = "nvidia-3070" +"Nvidia 4090" = "nvidia-4090" ` cfg := Config{} _, err := toml.Decode(ss, &cfg) assert.Nil(t, err) + assert.Equal(t, cfg.Host.Subnet, subnetType(2130706433)) assert.Equal(t, cfg.Host.Memory, sizeType(1*1024*1024*1024)) assert.Equal(t, cfg.Host.Storage, sizeType(40*1024*1024*1024)) + assert.Equal(t, cfg.MetaTimeout, 3*time.Minute) + assert.Equal(t, cfg.GADiskTimeout, 6*time.Minute) + assert.Equal(t, cfg.GABootTimeout, 10*time.Minute) + assert.Equal(t, cfg.Resource.GPUProductMap, map[string]string{ + "Nvidia 3070": "nvidia-3070", + "Nvidia 4090": "nvidia-4090", + }) + + ss = ` +subnet = "" +memory = "" +storage = 0 + ` + host := HostConfig{} + _, err = toml.Decode(ss, &host) + assert.Nil(t, err) + assert.Equal(t, host.Memory, sizeType(0)) + assert.Equal(t, host.Storage, sizeType(0)) + assert.Equal(t, host.Subnet, subnetType(0)) + + ss = ` +memory = 1234 + ` + host = HostConfig{} + _, err = toml.Decode(ss, &host) + assert.Nil(t, err) + assert.Equal(t, host.Memory, sizeType(1234)) +} + +func TestDefault(t *testing.T) { + cfg := newDefault() + assert.Equal(t, cfg.BindHTTPAddr, "0.0.0.0:9696") + assert.Equal(t, cfg.BindGRPCAddr, "0.0.0.0:9697") + assert.Equal(t, cfg.ProfHTTPPort, 9999) 
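The host.memory and host.storage assertions in TestHostConfig above go through sizeType.UnmarshalText (shown earlier in configs/config.go), which leaves an empty string at zero and otherwise delegates to humanize.ParseBytes. A quick sketch outside the test, reusing the same values:

    package main

    import (
        "fmt"

        "github.com/dustin/go-humanize"
    )

    func main() {
        // Same parser sizeType.UnmarshalText delegates to for host.memory and host.storage.
        for _, s := range []string{"1gib", "40gi", "512 MiB"} {
            n, err := humanize.ParseBytes(s)
            fmt.Println(s, n, err) // e.g. 1gib 1073741824 <nil>
        }
    }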
+ assert.Equal(t, cfg.Resource.MinCPU, 1) + assert.Equal(t, cfg.Resource.MaxCPU, 112) + assert.Equal(t, cfg.MemStatsPeriod, 10) + assert.Equal(t, cfg.ImageHub.Type, "docker") + assert.Equal(t, cfg.ImageHub.Docker.Endpoint, "unix:///var/run/docker.sock") + // assert.Equal(t, cfg.ImageHub.PullPolicy, "always") + + assert.Equal(t, cfg.VirtDir, "/opt/yavirtd") + assert.Equal(t, cfg.VirtBridge, "yavirbr0") + // eru + assert.Equal(t, cfg.Eru.Enable, true) + assert.Equal(t, cfg.Eru.GlobalConnectionTimeout, 5*time.Second) + assert.Equal(t, cfg.Eru.HeartbeatInterval, 60) + assert.Equal(t, cfg.Eru.HealthCheck.Interval, 60) + assert.Equal(t, cfg.Eru.HealthCheck.Timeout, 10) + assert.Equal(t, cfg.Eru.HealthCheck.CacheTTL, int64(300)) + // etcd + assert.Equal(t, cfg.Etcd.Endpoints, []string{"http://127.0.0.1:2379"}) + assert.Equal(t, cfg.Etcd.Prefix, "/yavirt/v1") + + assert.Equal(t, cfg.MetaTimeout, time.Minute) + assert.Equal(t, cfg.MetaType, "etcd") + + assert.Equal(t, cfg.GADiskTimeout, 16*time.Minute) + assert.Equal(t, cfg.GABootTimeout, 30*time.Minute) + + assert.Equal(t, cfg.GracefulTimeout, 20*time.Second) + assert.Equal(t, cfg.VirtTimeout, time.Hour) + assert.Equal(t, cfg.HealthCheckTimeout, 2*time.Second) + assert.Equal(t, cfg.QMPConnectTimeout, 8*time.Second) + + assert.Equal(t, cfg.ResizeVolumeMinRatio, 0.001) + assert.Equal(t, cfg.ResizeVolumeMinSize, int64(1073741824)) + + assert.Equal(t, cfg.MaxConcurrency, 100000) + assert.Equal(t, cfg.MaxSnapshotsCount, 30) + assert.Equal(t, cfg.SnapshotRestorableDay, 7) + + assert.False(t, cfg.RecoveryOn) + assert.Equal(t, cfg.RecoveryMaxRetries, 2) + assert.Equal(t, cfg.RecoveryRetryInterval, 3*time.Minute) + assert.Equal(t, cfg.Network.OVN.NBAddrs, []string{"tcp:127.0.0.1:6641"}) } diff --git a/configs/default-config.toml b/configs/default-config.toml deleted file mode 100644 index aa64ef4..0000000 --- a/configs/default-config.toml +++ /dev/null @@ -1,62 +0,0 @@ -env = "dev" -prof_http_port = 9999 -bind_http_addr = "0.0.0.0:9696" -bind_grpc_addr = "0.0.0.0:9697" -graceful_timeout = "20s" -virt_timeout = "1h" -health_check_timeout = "2s" -qmp_connect_timeout = "8s" -cni_plugin_path = "/usr/bin/yavirt-cni" -cni_config_path = "/etc/cni/net.d/yavirt-cni.conf" - -resize_volume_min_ratio = 0.05 -resize_volume_min_size = 10737418240 - -min_cpu = 1 -max_cpu = 64 -min_memory = 1073741824 -max_memory = 68719476736 -min_volume = 1073741824 -max_volume = 1099511627776 -max_volumes_count = 8 -max_snapshots_count = 30 -snapshot_restorable_days = 7 - -meta_timeout = "1m" -meta_type = "etcd" - -virt_dir = "/tmp/virt" -virt_bridge = "virbr0" -virt_cpu_cache_passthrough = true - -calico_gateway = "yavirt-cali-gw" -calico_pools = ["clouddev"] -calico_etcd_env = "ETCD_ENDPOINTS" - -log_level = "info" - -etcd_prefix = "/yavirt-dev/v1" -etcd_endpoints = ["http://127.0.0.1:2379"] - -ga_disk_timeout = "16m" -ga_boot_timeout = "30m" - -recovery_on = false -recovery_max_retries = 2 -recovery_retry_interval = "3m" -recovery_interval = "10m" - -[host] -name = "host1" -subnet = "127.0.0.1" -cpu = 4 -memory = "1g" -storage = "40g" -network = "calico" - -[core] -addrs = ["127.0.0.1:5001"] -username = "admin" -password = "password" -status_check_interval = "64s" -nodestatus_ttl = "16m" diff --git a/configs/time.go b/configs/time.go deleted file mode 100644 index 9f293da..0000000 --- a/configs/time.go +++ /dev/null @@ -1,49 +0,0 @@ -package configs - -import ( - "fmt" - "time" -) - -// Duration . -type Duration time.Duration - -// Duration . 
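Across these config files the old pkg/errors helpers (Trace, Annotatef) are replaced by cockroachdb/errors (Wrap, Wrapf) plus sentinels from pkg/terrors, as in GetCommands and Load above. A hedged sketch of why errors.Is still matches after the switch; ErrInvalidValue here is a local stand-in for terrors.ErrInvalidValue:

    package main

    import (
        "fmt"

        "github.com/cockroachdb/errors"
    )

    // Local stand-in for terrors.ErrInvalidValue.
    var ErrInvalidValue = errors.New("invalid value")

    func parse(bin string) error {
        // Same pattern as GetCommands above: wrap the sentinel with context.
        return errors.Wrapf(ErrInvalidValue, "invalid command: %s", bin)
    }

    func main() {
        err := parse("")
        fmt.Println(errors.Is(err, ErrInvalidValue)) // true: the sentinel survives wrapping
        fmt.Println(err)                             // prints the added context and the sentinel's message
    }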
-func (d Duration) Duration() time.Duration { - return time.Duration(d) -} - -// UnmarshalText . -func (d *Duration) UnmarshalText(text []byte) error { - var dur, err = time.ParseDuration(string(text)) - if err != nil { - return err - } - *d = Duration(dur) - return nil -} - -// MarshalText . -func (d Duration) MarshalText() ([]byte, error) { - if d == 0 { - return []byte("0"), nil - } - - var dur = time.Duration(d) - if dur < 0 { - dur = -dur - } - - switch { - case dur%time.Hour == 0: - return []byte(fmt.Sprintf("%dh", dur/time.Hour)), nil - case dur%time.Minute == 0: - return []byte(fmt.Sprintf("%dm", dur/time.Minute)), nil - case dur%time.Second == 0: - return []byte(fmt.Sprintf("%ds", dur/time.Second)), nil - case dur%time.Millisecond == 0: - return []byte(fmt.Sprintf("%dms", dur/time.Millisecond)), nil - default: - return []byte(dur.String()), nil - } -} diff --git a/go.mod b/go.mod index 12eaa08..fda6566 100644 --- a/go.mod +++ b/go.mod @@ -1,125 +1,212 @@ module github.com/projecteru2/yavirt -go 1.20 +go 1.22 + +toolchain go1.22.3 require ( - github.com/BurntSushi/toml v1.2.1 + github.com/BurntSushi/toml v1.3.2 + github.com/Masterminds/sprig/v3 v3.2.3 + github.com/agiledragon/gomonkey/v2 v2.11.0 + github.com/alphadose/haxmap v1.3.1 + github.com/antchfx/xmlquery v1.3.17 + github.com/blinkbean/dingtalk v0.0.0-20230927120905-796332ac4ba1 + github.com/cenkalti/backoff/v4 v4.2.1 + github.com/ceph/go-ceph v0.26.0 + github.com/cockroachdb/errors v1.11.1 github.com/containernetworking/cni v1.1.2 + github.com/deckarep/golang-set/v2 v2.3.1 + github.com/digitalocean/go-libvirt v0.0.0-20221205150000-2939327a8519 github.com/dustin/go-humanize v1.0.1 - github.com/getsentry/sentry-go v0.20.0 - github.com/gin-gonic/gin v1.9.0 - github.com/google/uuid v1.3.0 - github.com/juju/errors v1.0.0 - github.com/libvirt/libvirt-go v7.4.0+incompatible + github.com/emirpasic/gods v1.18.1 + github.com/florianl/go-tc v0.4.2 + github.com/go-logr/logr v1.4.1 + github.com/google/uuid v1.6.0 + github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 + github.com/jaypipes/ghw v0.10.0 + github.com/jordan-wright/email v4.0.1-0.20210109023952-943e75fe5223+incompatible + github.com/kdomanski/iso9660 v0.4.0 + github.com/kr/pretty v0.3.1 + github.com/mcuadros/go-defaults v1.2.0 + github.com/mitchellh/mapstructure v1.5.0 + github.com/ovn-org/libovsdb v0.6.1-0.20230912124059-239822fe891a + github.com/panjf2000/ants/v2 v2.9.0 + github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/pkg/errors v0.9.1 github.com/projectcalico/api v0.0.0-20230222223746-44aa60c2201f github.com/projectcalico/calico v1.11.0-cni-plugin.0.20230510161715-15d193738928 - github.com/projecteru2/core v0.0.0-20230512041401-f4113e25d62c - github.com/projecteru2/libyavirt v0.0.0-20230524090109-0faf050e0f3b - github.com/prometheus/client_golang v1.15.0 + github.com/projecteru2/core v0.0.0-20240613084815-dff459401ad7 + github.com/projecteru2/libyavirt v0.0.0-20231128023216-96fef06a6ca4 + github.com/projecteru2/resource-storage v0.0.0-20230206062354-d828802f6b96 + github.com/prometheus-community/pro-bing v0.4.0 + github.com/prometheus/client_golang v1.16.0 github.com/robfig/cron/v3 v3.0.1 - github.com/sirupsen/logrus v1.9.0 - github.com/stretchr/testify v1.8.2 - github.com/urfave/cli/v2 v2.25.1 + github.com/rs/zerolog v1.30.0 + github.com/russross/blackfriday/v2 v2.1.0 + github.com/samber/lo v1.39.0 + github.com/samber/slog-zerolog/v2 v2.3.0 + github.com/shirou/gopsutil v2.21.11+incompatible + github.com/stretchr/testify v1.9.0 + 
github.com/urfave/cli/v2 v2.27.1 github.com/vishvananda/netlink v1.2.1-beta.2.0.20230206183746-70ca0345eede + github.com/yuyang0/resource-bandwidth v0.0.0-20231102113253-8e47795c92e5 + github.com/yuyang0/resource-gpu v0.0.0-20231026065700-1577d804efa8 + github.com/yuyang0/resource-rbd v0.0.2-0.20230701090628-cb86da0f60b9 + github.com/yuyang0/vmimage v0.0.0-20240628091041-9f45a357a3ae go.etcd.io/etcd v3.3.27+incompatible - go.etcd.io/etcd/client/v3 v3.5.8 - golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 - golang.org/x/sys v0.8.0 - google.golang.org/grpc v1.54.1 + go.etcd.io/etcd/client/v3 v3.5.12 + go.etcd.io/etcd/tests/v3 v3.5.12 + golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df + golang.org/x/sys v0.20.0 + golang.org/x/term v0.20.0 + golang.org/x/tools v0.21.0 + google.golang.org/grpc v1.60.1 k8s.io/apimachinery v0.26.3 + libvirt.org/go/libvirtxml v1.9004.0 ) +replace github.com/ovn-org/libovsdb => github.com/yuyang0/libovsdb v0.0.0-20231222065958-6f259a987799 + require ( - github.com/alphadose/haxmap v1.2.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/antchfx/xpath v1.2.4 // indirect + github.com/benbjohnson/clock v1.3.3 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bytedance/sonic v1.8.7 // indirect + github.com/cenkalti/hub v1.0.1 // indirect + github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect - github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/containerd/containerd v1.7.11 // indirect github.com/coreos/etcd v3.3.27+incompatible // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/pkg v0.0.0-20230327231512-ba87abf18a23 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v24.0.9+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.10.2 // indirect - github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/getsentry/sentry-go v0.23.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-ping/ping v1.1.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - 
github.com/go-playground/validator/v10 v10.12.0 // indirect - github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.2 // indirect github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.15 // indirect + github.com/jaypipes/pcidb v1.0.0 // indirect github.com/jinzhu/configor v1.2.1 // indirect + github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/josharian/native v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect - github.com/kr/pretty v0.3.1 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/kr/text v0.2.0 // indirect - github.com/leodido/go-urn v1.2.3 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/panjf2000/ants/v2 v2.7.3 // indirect - github.com/patrickmn/go-cache v2.1.0+incompatible // indirect - github.com/pelletier/go-toml/v2 v2.0.7 // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/opencontainers/runc v1.1.12 // indirect + github.com/pierrec/lz4/v4 v4.1.14 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/projectcalico/go-json v0.0.0-20161128004156-6219dc7339ba // indirect github.com/projectcalico/go-yaml-wrapper v0.0.0-20191112210931-090425220c54 // indirect + github.com/projecteru2/vmihub v0.0.0-20240628073228-3417154bf02a // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect - github.com/rs/zerolog v1.29.1 // indirect - github.com/russross/blackfriday/v2 
v2.1.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/samber/slog-common v0.16.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/soheilhy/cmux v0.1.5 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/ugorji/go/codec v1.2.11 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/tklauser/go-sysconf v0.3.13 // indirect + github.com/tklauser/numcpus v0.7.0 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect + github.com/u-root/uio v0.0.0-20230220225925-ffce2a382923 // indirect github.com/vishvananda/netns v0.0.4 // indirect - github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - go.etcd.io/etcd/api/v3 v3.5.8 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.8 // indirect + github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 // indirect + github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.etcd.io/bbolt v1.3.8 // indirect + go.etcd.io/etcd/api/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/v2 v2.305.12 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.12 // indirect + go.etcd.io/etcd/raft/v3 v3.5.12 // indirect + go.etcd.io/etcd/server/v3 v3.5.12 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect + go.opentelemetry.io/otel v1.20.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect + go.opentelemetry.io/otel/metric v1.20.0 // indirect + go.opentelemetry.io/otel/sdk v1.20.0 // indirect + go.opentelemetry.io/otel/trace v1.20.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/goleak v1.2.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.8.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/oauth2 v0.7.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/term v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/time v0.3.0 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230215201556-9c5414ab4bde // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/go-playground/validator.v9 v9.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + 
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + howett.net/plist v1.0.0 // indirect k8s.io/api v0.26.3 // indirect k8s.io/client-go v0.26.3 // indirect k8s.io/klog/v2 v2.90.1 // indirect diff --git a/go.sum b/go.sum index 7e0b8f4..e32823d 100644 --- a/go.sum +++ b/go.sum @@ -1,57 +1,89 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= -github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= -github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= -github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/alphadose/haxmap v1.2.0 h1:noGrAmCE+gNheZ4KpW+sYj9W5uMcO1UAjbAq9XBOAfM= -github.com/alphadose/haxmap v1.2.0/go.mod h1:rjHw1IAqbxm0S3U5tD16GoKsiAd8FWx5BJ2IYqXwgmM= +github.com/StackExchange/wmi v1.2.1 
h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/agiledragon/gomonkey/v2 v2.11.0 h1:5oxSgA+tC1xuGsrIorR+sYiziYltmJyEZ9qA25b6l5U= +github.com/agiledragon/gomonkey/v2 v2.11.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= +github.com/alphadose/haxmap v1.3.1 h1:KmZh75duO1tC8pt3LmUwoTYiZ9sh4K52FX8p7/yrlqU= +github.com/alphadose/haxmap v1.3.1/go.mod h1:rjHw1IAqbxm0S3U5tD16GoKsiAd8FWx5BJ2IYqXwgmM= +github.com/antchfx/xmlquery v1.3.17 h1:d0qWjPp/D+vtRw7ivCwT5ApH/3CkQU8JOeo3245PpTk= +github.com/antchfx/xmlquery v1.3.17/go.mod h1:Afkq4JIeXut75taLSuI31ISJ/zeq+3jG7TunF7noreA= +github.com/antchfx/xpath v1.2.4 h1:dW1HB/JxKvGtJ9WyVGJ0sIoEcqftV3SqIstujI+B9XY= +github.com/antchfx/xpath v1.2.4/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.3 h1:g+rSsSaAzhHJYcIQE78hJ3AhyjjtQvleKDjlhdBnIhc= +github.com/benbjohnson/clock v1.3.3/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blinkbean/dingtalk v0.0.0-20230927120905-796332ac4ba1 h1:G14RkaB3RRW099aQbfyHm4RFgNxGUOTeHSoN+CZy2YI= +github.com/blinkbean/dingtalk v0.0.0-20230927120905-796332ac4ba1/go.mod h1:9BaLuGSBqY3vT5hstValh48DbsKO7vaHaJnG9pXwbto= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.8.7 h1:d3sry5vGgVq/OpgozRUNP6xBsSo0mtNdwliApw+SAMQ= -github.com/bytedance/sonic v1.8.7/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA= +github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg= +github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/ceph/go-ceph v0.26.0 h1:LZoATo25ZH5aeL5t85BwIbrNLKCDfcDM+e0qV0cmwHY= +github.com/ceph/go-ceph v0.26.0/go.mod h1:ISxb295GszZwtLPkeWi+L2uLYBVsqbsh0M104jZMOX4= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chenzhuoyu/base64x 
v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk= +github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= +github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= -github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= -github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= -github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= +github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= -github.com/coreos/etcd v3.3.10+incompatible/go.mod 
h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/bbolt v1.3.7 h1:jwixv2lofx/nLylxXRUufR6Jte5YRzsAgmQfsm64qcs= +github.com/coreos/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= github.com/coreos/etcd v3.3.27+incompatible h1:QIudLb9KeBsE5zyYxd1mjzRSkzLg9Wf9QlRwFgd6oTA= github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= @@ -60,90 +92,101 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20230327231512-ba87abf18a23 h1:SrdboTJZnOqc2r4cT4wQCzQJjGYwkclLwx2sPrDsx7g= github.com/coreos/pkg v0.0.0-20230327231512-ba87abf18a23/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A= +github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/digitalocean/go-libvirt v0.0.0-20221205150000-2939327a8519 h1:OpkN/n40cmKenDQS+IOAeW9DLhYy4DADSeZnouCEV/E= +github.com/digitalocean/go-libvirt v0.0.0-20221205150000-2939327a8519/go.mod h1:WyJJyfmJ0gWJvjV+ZH4DOgtOYZc1KOvYyBXWCLKxsUU= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units 
v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/florianl/go-tc v0.4.2 h1:jan5zcOWCLhA9SRBHZhQ0SSAq7cmDUagiRPngAi5AOQ= +github.com/florianl/go-tc v0.4.2/go.mod h1:2W1jSMFryiYlpQigr4ZpSSpE9XNze+bW7cTsCXWbMwo= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/gavv/httpexpect v2.0.0+incompatible/go.mod 
h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= -github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= -github.com/getsentry/sentry-go v0.20.0 h1:bwXW98iMRIWxn+4FgPW7vMrjmbym6HblXALmhjHmQaQ= -github.com/getsentry/sentry-go v0.20.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= +github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= -github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8= -github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod 
h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-ping/ping v1.1.0 h1:3MCGhVX4fyEUuhsfwPrsEdQw6xspHkv5zHsiSoDFZYw= +github.com/go-ping/ping v1.1.0/go.mod h1:xIFjORFzTxqIV/tDVGO4eDy/bLuSyawEeojSm3GfRGk= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= -github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.2 
h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= @@ -157,9 +200,10 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -167,61 +211,86 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= -github.com/iris-contrib/go.uuid 
v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= -github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= -github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= -github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= +github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= +github.com/jaypipes/ghw v0.10.0 h1:UHu9UX08Py315iPojADFPOkmjTsNzHj4g4adsNKKteY= +github.com/jaypipes/ghw v0.10.0/go.mod h1:jeJGbkRB2lL3/gxYzNYzEDETV1ZJ56OKr+CSeSEym+g= +github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8= +github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/configor v1.2.1 h1:OKk9dsR8i6HPOCZR8BcMtcEImAFjIhbJFZNyn5GCZko= github.com/jinzhu/configor v1.2.1/go.mod h1:nX89/MOmDba7ZX7GCyU/VIaQ2Ar2aizBl2d3JLF/rDc= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/jordan-wright/email v4.0.1-0.20210109023952-943e75fe5223+incompatible h1:jdpOPRN1zP63Td1hDQbZW73xKmzDvZHzVdNYxhnTMDA= +github.com/jordan-wright/email v4.0.1-0.20210109023952-943e75fe5223+incompatible/go.mod h1:1c7szIrayyPPB/987hsnvNzLushdWf4o/79s3P08L8A= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= +github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= +github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= +github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= +github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= +github.com/jsimonetti/rtnetlink 
v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs= +github.com/jsimonetti/rtnetlink v1.3.5 h1:hVlNQNRlLDGZz31gBPicsG7Q53rnlsz1l1Ix/9XlpVA= +github.com/jsimonetti/rtnetlink v1.3.5/go.mod h1:0LFedyiTkebnd43tE4YAkWGIq9jQphow4CcwxaT2Y00= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/errors v1.0.0 h1:yiq7kjCLll1BiaRuNY53MGI0+EQ3rF6GB+wvboZDefM= -github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= -github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= -github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= -github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= -github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= +github.com/kdomanski/iso9660 v0.4.0 h1:BPKKdcINz3m0MdjIMwS0wx1nofsOjxOq8TOr45WGHFg= +github.com/kdomanski/iso9660 v0.4.0/go.mod h1:OxUSupHsO9ceI8lBLPJKWBTphLemjrCQY8LPXM7qSzU= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -232,73 +301,95 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= -github.com/leodido/go-urn 
v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= -github.com/libvirt/libvirt-go v7.4.0+incompatible h1:crnSLkwPqCdXtg6jib/FxBG/hweAc/3Wxth1AehCXL4= -github.com/libvirt/libvirt-go v7.4.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= -github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mcuadros/go-defaults v1.2.0 h1:FODb8WSf0uGaY8elWJAkoLL0Ri6AlZ1bFlenk56oZtc= +github.com/mcuadros/go-defaults v1.2.0/go.mod h1:WEZtHEVIGYVDqkKSWBdWKUVdRyKlMfulPaGDWIVeCWY= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= 
+github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= +github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= +github.com/mdlayher/netlink v1.6.0/go.mod h1:0o3PlBmGst1xve7wQ7j/hwpNaFaH4qCRyWCdcZk8/vA= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= +github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= +github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/mdlayher/socket v0.1.1/go.mod h1:mYV5YIZAfHh4dzDVzI8x8tWLWCliuX8Mon5Awbj+qDs= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moul/http2curl v1.0.0/go.mod 
h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.7.1 h1:YgLPk+gpqDtAPeRCWEmfO8oxE6ru3xcVSXAM7wn8w9I= +github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= +github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= -github.com/panjf2000/ants/v2 v2.7.3 h1:rHQ0hH0DQvuNUqqlWIMJtkMcDuL1uQAfpX2mIhQ5/s0= -github.com/panjf2000/ants/v2 v2.7.3/go.mod h1:KIBmYG9QQX5U2qzFP/yQJaq/nSb6rahS9iEHkrCMgM8= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo= +github.com/panjf2000/ants/v2 
v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= -github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -315,55 +406,71 @@ github.com/projectcalico/go-json v0.0.0-20161128004156-6219dc7339ba h1:aaF2byUCZ github.com/projectcalico/go-json v0.0.0-20161128004156-6219dc7339ba/go.mod h1:q8EdCgBdMQzgiX/uk4GXLWLk+gIHd1a7mWUAamJKDb4= github.com/projectcalico/go-yaml-wrapper v0.0.0-20191112210931-090425220c54 h1:Jt2Pic9dxgJisekm8q2WV9FaWxUJhhRfwHSP640drww= github.com/projectcalico/go-yaml-wrapper v0.0.0-20191112210931-090425220c54/go.mod h1:UgC0aTQ2KMDxlX3lU/stndk7DMUBJqzN40yFiILHgxc= -github.com/projecteru2/core v0.0.0-20230512041401-f4113e25d62c h1:ovxsd205M4RZfAtPBHiUh34sDmz/ARSktYrgXXdRYRQ= -github.com/projecteru2/core v0.0.0-20230512041401-f4113e25d62c/go.mod h1:zpMzjZ0hohaoB2jLsbhBsJ0KQlL10WVORpj5/BKqTtw= -github.com/projecteru2/libyavirt v0.0.0-20230524090109-0faf050e0f3b h1:mXvbNYdr2uh2mhk5HdiBBSc9DhaR2RuulURaXhJaP2I= -github.com/projecteru2/libyavirt v0.0.0-20230524090109-0faf050e0f3b/go.mod h1:N41KaKmqbailweGs4x/mt2H0O0Y7MizObZQ+igLdzpw= -github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= -github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/projectcalico/hcsshim v0.8.9-calico h1:aRrOWouDTzKwaIoRGMV/I1QikR+ikwj1G9T9h3wD090= +github.com/projectcalico/hcsshim v0.8.9-calico/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/projecteru2/core v0.0.0-20240613084815-dff459401ad7 h1:VTQ/+U5Qhu7KpqZ3Z3naX7NlOpuMSwBG/uNMlSzprq0= +github.com/projecteru2/core v0.0.0-20240613084815-dff459401ad7/go.mod h1:JDOLwVw4EdLTk+bqI/LdU4Ix/Wl6BaaHMzaOO5vpU8U= +github.com/projecteru2/libyavirt v0.0.0-20231128023216-96fef06a6ca4 h1:3a0IhsOtH9J+iSn/DV7v9gpW2lBDGhkp+XEvTaSiK4g= +github.com/projecteru2/libyavirt v0.0.0-20231128023216-96fef06a6ca4/go.mod h1:+EcdWF8KyTf2u8Zxu3397nSmalCSmpuxvGwcX1g3RL0= +github.com/projecteru2/resource-storage v0.0.0-20230206062354-d828802f6b96 h1:mt8llWHpuOhPMIe7sOxr5jqYrVk4wE6eZ3cyAkj0+7o= +github.com/projecteru2/resource-storage v0.0.0-20230206062354-d828802f6b96/go.mod h1:sdXwl7dPfO3UH8PPr7ELvEutJSAjOQOGlmkenfL6DfU= +github.com/projecteru2/vmihub v0.0.0-20240628073228-3417154bf02a h1:DE3fhCM/OKJs2Z9bkeNKDJCpnU0wubUXNYs4Jhl93AM= +github.com/projecteru2/vmihub v0.0.0-20240628073228-3417154bf02a/go.mod h1:h8beeiTyKvxMccTOXVcrJkYTrQer5DGi1Ve4p53bX24= +github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4= +github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4= +github.com/prometheus/client_golang v1.16.0 
h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc= -github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= +github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= +github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/samber/slog-common v0.16.0 h1:2/t1EcFd1Ru77mh2ab+8B6NBHnEXsBBHtOJc7PSH0aI= +github.com/samber/slog-common 
v0.16.0/go.mod h1:Qjrfhwk79XiCIhBj8+jTq1Cr0u9rlWbjawh3dWXzaHk= +github.com/samber/slog-zerolog/v2 v2.3.0 h1:s1OmSGQaAjQ/9C7lgEG2cro+jqw8Dl+ML05I3dFkJ50= +github.com/samber/slog-zerolog/v2 v2.3.0/go.mod h1:wppqUvvyAa3OFW+Snn6x4v3Wf+aT1gSwh19T2CraIuE= +github.com/shirou/gopsutil v2.21.11+incompatible h1:lOGOyCG67a5dv2hq5Z1BLDUqqKp3HkbjPcz5j6XMS0U= +github.com/shirou/gopsutil v2.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -372,24 +479,19 @@ github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/urfave/cli/v2 v2.25.1 h1:zw8dSP7ghX0Gmm8vugrs6q9Ku0wzweqPyshy+syu9Gw= -github.com/urfave/cli/v2 v2.25.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= +github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= +github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4= +github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/u-root/uio v0.0.0-20230220225925-ffce2a382923 h1:tHNk7XK9GkmKUR6Gh8gVBKXc2MVSZ4G/NnWLtzw4gNA= +github.com/u-root/uio v0.0.0-20230220225925-ffce2a382923/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= +github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/vishvananda/netlink v1.2.1-beta.2.0.20230206183746-70ca0345eede h1:S+/0qI1RT0iW4moxdCoopW5JvXWU9pbJXM96IElhDF0= github.com/vishvananda/netlink v1.2.1-beta.2.0.20230206183746-70ca0345eede/go.mod h1:cAAsePK2e15YDAMJNyOpGYEWNe4sIghTY7gpz4cX/Ik= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -398,159 +500,225 @@ 
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZla github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= -github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= +github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yuyang0/libovsdb v0.0.0-20231222065958-6f259a987799 h1:pcGwcgtagBxYMRjkae/hJpofMQkMe58yCLWefqxFYsM= +github.com/yuyang0/libovsdb v0.0.0-20231222065958-6f259a987799/go.mod h1:LC5DOvcY58jOG3HTvDyCVidoMJDurPeu+xlxv5Krd9Q= +github.com/yuyang0/resource-bandwidth v0.0.0-20231102113253-8e47795c92e5 h1:qLiO9f8EufOBFDRtFfAn6zYndXlhmBJgOijfc9ipViU= +github.com/yuyang0/resource-bandwidth v0.0.0-20231102113253-8e47795c92e5/go.mod h1:qq6SbQf88tieRqLkMzUgAoANLszQWdcH8H4Ji7xo2sE= +github.com/yuyang0/resource-gpu v0.0.0-20231026065700-1577d804efa8 h1:U1GBBWRCG0kmo3XG3sI5pz0i4nMwjDsM92uxfOOc/1A= +github.com/yuyang0/resource-gpu v0.0.0-20231026065700-1577d804efa8/go.mod h1:oggnae33QHkm9k2Xd0J4BFjdIV1VhPdpm4VUujYUvo0= +github.com/yuyang0/resource-rbd v0.0.2-0.20230701090628-cb86da0f60b9 h1:2La8T7mqVy98jyAkwxIN9gB+Akx3qbLGmVEtleaxND4= +github.com/yuyang0/resource-rbd v0.0.2-0.20230701090628-cb86da0f60b9/go.mod h1:ANjyr7r+YfKtpWiIsZPzF7+krI55Uf84R9AvbNr5WAg= +github.com/yuyang0/vmimage v0.0.0-20240628091041-9f45a357a3ae h1:qsuhmk0vb2uNRdWsI+23DaOODto0/fG8tmEnqwHmjCA= +github.com/yuyang0/vmimage v0.0.0-20240628091041-9f45a357a3ae/go.mod h1:sx0f5ijzfuwsxQnDlU8CpRbEzAoQu6TxpEKN6gozBAw= go.etcd.io/etcd v3.3.27+incompatible h1:5hMrpf6REqTHV2LW2OclNpRtxI0k9ZplMemJsMSWju0= go.etcd.io/etcd v3.3.27+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= 
-go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4= -go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= -go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M= -go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= -go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4= -go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc= +go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c= +go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= +go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A= +go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/client/v2 v2.305.12 h1:0m4ovXYo1CHaA/Mp3X/Fak5sRNIWf01wk/X1/G3sGKI= +go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E= +go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= +go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= +go.etcd.io/etcd/pkg/v3 v3.5.12 h1:OK2fZKI5hX/+BTK76gXSTyZMrbnARyX9S643GenNGb8= +go.etcd.io/etcd/pkg/v3 v3.5.12/go.mod h1:UVwg/QIMoJncyeb/YxvJBJCE/NEwtHWashqc8A1nj/M= +go.etcd.io/etcd/raft/v3 v3.5.12 h1:7r22RufdDsq2z3STjoR7Msz6fYH8tmbkdheGfwJNRmU= +go.etcd.io/etcd/raft/v3 v3.5.12/go.mod h1:ERQuZVe79PI6vcC3DlKBukDCLja/L7YMu29B74Iwj4U= +go.etcd.io/etcd/server/v3 v3.5.12 h1:EtMjsbfyfkwZuA2JlKOiBfuGkFCekv5H178qjXypbG8= +go.etcd.io/etcd/server/v3 v3.5.12/go.mod h1:axB0oCjMy+cemo5290/CutIjoxlfA6KVYKD1w0uue10= +go.etcd.io/etcd/tests/v3 v3.5.12 h1:k1fG7+F87Z7zKp57EcjXu9XgOsW0sfp5USqfzmMTIwM= +go.etcd.io/etcd/tests/v3 v3.5.12/go.mod h1:CLWdnlr8bWNa8tjkmKFybPz5Ldjh9GuHbYhq1g9vpIo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= +go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= +go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0= +go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= +go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= +go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= +go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= +go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= +go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= 
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220804214406-8e32c043e418/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term 
v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.21.0 
h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -559,28 +727,30 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230215201556-9c5414ab4bde h1:ybF7AMzI golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230215201556-9c5414ab4bde/go.mod h1:mQqgjkW8GQQcJQsbBvK890TKqUK1DfKWkuBGbOkuMHQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.54.1 h1:zQZQNqQZU9cHv2vLdDhB2mFeDZ2hGpgYM1A0PKjFsSM= -google.golang.org/grpc v1.54.1/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -593,26 +763,27 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M= gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 
h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -620,14 +791,17 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= @@ -640,7 +814,8 @@ k8s.io/kube-openapi v0.0.0-20230224204131-30e856af5c3e h1:Zs3X1UrbS0jrEPVOM8Y3xn k8s.io/kube-openapi v0.0.0-20230224204131-30e856af5c3e/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +libvirt.org/go/libvirtxml v1.9004.0 h1:h+nhEZCABCnK4go0GLRN2WZhIhRrLAqsz84t553oiM4= +libvirt.org/go/libvirtxml v1.9004.0/go.mod h1:7Oq2BLDstLr/XtoQD8Fr3mfDNrzlI3utYKySXF2xkng= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/internal/debug/debug.go b/internal/debug/debug.go new file mode 100644 index 0000000..e70c760 --- /dev/null +++ b/internal/debug/debug.go @@ -0,0 +1,24 @@ +package debug + +import ( + "encoding/json" + "net/http" + + 
"github.com/projecteru2/yavirt/internal/eru/resources" + "github.com/projecteru2/yavirt/internal/vmcache" +) + +func Handler(w http.ResponseWriter, _ *http.Request) { + infos := vmcache.FetchDomainsInfo() + resp := map[string]any{ + "infos": infos, + "gpu": map[string]any{ + "capacity": resources.GetManager().FetchGPU(), + }, + "cpumem": resources.GetManager().FetchCPUMem(), + } + bs, _ := json.Marshal(resp) + w.Header().Set("Content-Type", "application/json") + + _, _ = w.Write(bs) +} diff --git a/internal/eru/agent/agent.go b/internal/eru/agent/agent.go new file mode 100644 index 0000000..79c2984 --- /dev/null +++ b/internal/eru/agent/agent.go @@ -0,0 +1,156 @@ +package agent + +import ( + "context" + "strings" + "sync" + "testing" + "time" + + "github.com/alphadose/haxmap" + "github.com/cockroachdb/errors" + "github.com/patrickmn/go-cache" + "github.com/projecteru2/core/log" + corerpc "github.com/projecteru2/core/rpc" + "github.com/projecteru2/yavirt/internal/eru/common" + "github.com/projecteru2/yavirt/internal/eru/store" + corestore "github.com/projecteru2/yavirt/internal/eru/store/core" + storemocks "github.com/projecteru2/yavirt/internal/eru/store/mocks" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/service" + "github.com/projecteru2/yavirt/internal/utils" + "google.golang.org/grpc/status" +) + +type Manager struct { + svc service.Service + store store.Store + config *types.Config + + checkWorkloadMutex *sync.Mutex + startingWorkloads *haxmap.Map[string, *utils.RetryTask] + + // storeIdentifier indicates which eru this agent belongs to + // it can be used to identify the corresponding core + // and all containers that belong to this core + storeIdentifier string + cas *utils.GroupCAS + wrkStatusCache *cache.Cache + + mCol *MetricsCollector +} + +func NewManager( + ctx context.Context, svc service.Service, + config *types.Config, endpoint string, + t *testing.T, +) (*Manager, error) { + logger := log.WithFunc("agent.NewManager") + interval := time.Duration(2*config.HealthCheck.Interval) * time.Second + m := &Manager{ + config: config, + svc: svc, + cas: utils.NewGroupCAS(), + checkWorkloadMutex: &sync.Mutex{}, + startingWorkloads: haxmap.New[string, *utils.RetryTask](), + wrkStatusCache: cache.New(interval, interval), + } + m.mCol = &MetricsCollector{ + wrkStatusCache: m.wrkStatusCache, + } + + if t == nil { + corestore.Init(ctx, config) + if m.store = corestore.Get(); m.store == nil { + return nil, common.ErrGetStoreFailed + } + } else { + m.store = storemocks.NewFakeStore() + } + labels := map[string]string{} + for _, label := range config.Labels { + parts := strings.Split(label, "=") + if len(parts) != 2 { + return nil, errors.Newf("invalid label %s", label) + } + labels[parts[0]] = parts[1] + } + go func() { + // Core need to connect to the local grpc server, so sleep 30s here to wait local grpc server up + time.Sleep(30 * time.Second) + // try to register current node to eru core + if _, err := m.store.AddNode(ctx, &types.AddNodeOpts{ + Nodename: config.Hostname, + Endpoint: endpoint, + Podname: config.Podname, + Labels: labels, + }); err != nil { + e, ok := status.FromError(err) + if !ok { + logger.Error(ctx, err, "failed to add node") + return + } + if e.Code() == corerpc.AddNode && strings.Contains(e.Message(), "node already exists") { + logger.Infof(ctx, "node %s already exists", config.Hostname) + } else { + logger.Errorf(ctx, err, "failed to add node %s", config.Hostname) + } + } + // update node's labels if necessary + if 
len(labels) > 0 { + if _, err := m.store.SetNode(ctx, &types.SetNodeOpts{ + Nodename: config.Hostname, + Labels: labels, + }); err != nil { + logger.Errorf(ctx, err, "failed to update node labels") + } + } + }() + m.storeIdentifier = m.store.GetIdentifier(ctx) + return m, nil +} + +func (m *Manager) startNodeManager(ctx context.Context) { + log.WithFunc("startNodeManager").Info(ctx, "starting node status heartbeat") + _ = utils.Pool.Submit(func() { m.heartbeat(ctx) }) +} + +func (m *Manager) startWorkloadManager(ctx context.Context) { + log.WithFunc("startWorkloadManager").Info(ctx, "starting workload manager") + // start status watcher + _ = utils.Pool.Submit(func() { m.monitor(ctx) }) + + // start health check + _ = utils.Pool.Submit(func() { m.healthCheck(ctx) }) +} + +// Run runs a node manager +func (m *Manager) Run(ctx context.Context) error { + logger := log.WithFunc("Run") + + m.startNodeManager(ctx) + m.startWorkloadManager(ctx) + + <-ctx.Done() + logger.Info(ctx, "exiting") + return nil +} + +// Exit . +func (m *Manager) Exit() error { + ctx := context.TODO() + logger := log.WithFunc("Exit").WithField("hostname", m.config.Hostname) + logger.Info(ctx, "remove node status") + + // ctx is now canceled. use a new context. + var err error + utils.WithTimeout(ctx, m.config.GlobalConnectionTimeout, func(ctx context.Context) { + // remove node status + err = m.store.SetNodeStatus(ctx, -1) + }) + if err != nil { + logger.Error(ctx, err, "failed to remove node status") + return err + } + return nil +} diff --git a/internal/eru/agent/agent_test.go b/internal/eru/agent/agent_test.go new file mode 100644 index 0000000..4e33add --- /dev/null +++ b/internal/eru/agent/agent_test.go @@ -0,0 +1,89 @@ +package agent + +import ( + "context" + "sync" + "testing" + "time" + + storemocks "github.com/projecteru2/yavirt/internal/eru/store/mocks" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/service/mocks" + interutils "github.com/projecteru2/yavirt/internal/utils" + "github.com/projecteru2/yavirt/pkg/test/mock" + + "github.com/stretchr/testify/assert" +) + +func newMockManager(t *testing.T) *Manager { + config := &types.Config{ + Hostname: "fake", + HeartbeatInterval: 2, + CheckOnlyMine: false, + HealthCheck: types.HealthCheckConfig{ + Interval: 10, + Timeout: 5, + CacheTTL: 300, + }, + GlobalConnectionTimeout: 5 * time.Second, + } + svc := &mocks.Service{} + + m, err := NewManager(context.Background(), svc, config, "", t) + assert.Nil(t, err) + return m +} + +func TestRunNodeManager(t *testing.T) { + manager := newMockManager(t) + store := manager.store.(*storemocks.MockStore) + svc := manager.svc.(*mocks.Service) + svc.On("IsHealthy", mock.Anything).Return(true) + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(manager.config.HeartbeatInterval*3)*time.Second) + defer cancel() + + status, err := store.GetNodeStatus(ctx, "fake") + assert.Nil(t, err) + assert.Equal(t, status.Alive, false) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + time.Sleep(time.Duration(manager.config.HeartbeatInterval*2) * time.Second) + status, err := store.GetNodeStatus(ctx, "fake") + assert.Nil(t, err) + assert.Equal(t, status.Alive, true) + }() + + manager.startNodeManager(ctx) + + info, err := store.GetNode(ctx, "fake") + assert.Nil(t, err) + assert.Equal(t, info.Available, false) + wg.Wait() +} + +func TestRunWorklaodManager(t *testing.T) { + manager := newMockManager(t) + + watchers := interutils.NewWatchers() + wch, err := 
watchers.Get() + assert.Nil(t, err) + go watchers.Run(context.Background()) + defer watchers.Stop() + + store := manager.store.(*storemocks.MockStore) + svc := manager.svc.(*mocks.Service) + initSVC(svc) + // svc.On("VirtContext", mock.Anything).Return(nil) + svc.On("WatchGuestEvents", mock.Anything).Return(wch, nil) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + manager.startWorkloadManager(ctx) + + time.Sleep(2 * time.Second) + + assertInitStatus(t, store) +} diff --git a/internal/eru/agent/guest.go b/internal/eru/agent/guest.go new file mode 100644 index 0000000..acd52a7 --- /dev/null +++ b/internal/eru/agent/guest.go @@ -0,0 +1,129 @@ +package agent + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/projecteru2/yavirt/internal/service" + "github.com/projecteru2/yavirt/internal/utils" + + "github.com/projecteru2/core/log" + coreutils "github.com/projecteru2/core/utils" +) + +// LabelMeta . +const LabelMeta = "ERU_META" + +// HealthCheck . +type HealthCheck struct { + TCPPorts []string + HTTPPort string + HTTPURL string + HTTPCode int + Cmds []string +} + +type healthCheckMeta struct { + Publish []string + HealthCheck *HealthCheck +} + +// Guest yavirt virtual machine +type Guest struct { + ID string + Status string + TransitStatus string + CreateTime int64 + TransitTime int64 + UpdateTime int64 + CPU int + Mem int64 + Storage int64 + ImageID int64 + ImageName string + ImageUser string + Networks map[string]string + Labels map[string]string + IPs []string + Hostname string + Running bool + HealthCheck *HealthCheck + + once sync.Once +} + +// CheckHealth returns if the guest is healthy +func (g *Guest) CheckHealth(ctx context.Context, svc service.Service, timeout time.Duration, enableDefaultChecker bool) bool { + logger := log.WithFunc("CheckHealth").WithField("ID", g.ID) + // init health check bridge + g.once.Do(func() { + if meta, ok := g.Labels[LabelMeta]; ok { + hcm := &healthCheckMeta{} + err := json.Unmarshal([]byte(meta), hcm) + if err != nil { + logger.Error(ctx, err, "invalid json format, guest %v, meta %v", g.ID, meta) + return + } + g.HealthCheck = hcm.HealthCheck + } + if enableDefaultChecker && g.HealthCheck == nil { + // add a default checker if not exist + g.HealthCheck = &HealthCheck{ + Cmds: []string{"whoami"}, + } + } + }) + + logger.Debugf(ctx, "[eru agent] guest %v\n health check: %v", g, g.HealthCheck) + if g.HealthCheck == nil { + return true + } + + var tcpCheckers []string + var httpCheckers []string + + healthCheck := g.HealthCheck + + for _, port := range healthCheck.TCPPorts { + for _, ip := range g.IPs { + tcpCheckers = append(tcpCheckers, fmt.Sprintf("%s:%s", ip, port)) + } + } + if healthCheck.HTTPPort != "" { + for _, ip := range g.IPs { + httpCheckers = append(httpCheckers, fmt.Sprintf("http://%s:%s%s", ip, healthCheck.HTTPPort, healthCheck.HTTPURL)) //nolint + } + } + + f1 := utils.CheckHTTP(ctx, g.ID, httpCheckers, healthCheck.HTTPCode, timeout) + f2 := utils.CheckTCP(ctx, g.ID, tcpCheckers, timeout) + f3 := CheckCMD(ctx, svc, g.ID, healthCheck.Cmds, timeout) + return f1 && f2 && f3 +} + +func CheckCMD(ctx context.Context, svc service.Service, ID string, cmdList []string, timeout time.Duration) bool { + logger := log.WithFunc("CheckCMD").WithField("ID", ID) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + for _, cmdStr := range cmdList { + cmd := coreutils.MakeCommandLineArgs(cmdStr) + ans := true + utils.WithTimeout(ctx, timeout, func(ctx1 
context.Context) { + msg, err := svc.ExecuteGuest(ctx1, ID, cmd) + if err != nil || msg.ExitCode != 0 { + log.Warnf(ctx, "[checkHealth] guest %s execute cmd %s failed (err: %s, msg: %v)", ID, cmdStr, err, msg) + ans = false + return + } + logger.Debugf(ctx, "[checkHealth] guest %s execute cmd %s success, output: %v", ID, cmdStr, string(msg.Data)) + }) + if !ans { + return ans + } + } + return true +} diff --git a/internal/eru/agent/metrics.go b/internal/eru/agent/metrics.go new file mode 100644 index 0000000..ff816b3 --- /dev/null +++ b/internal/eru/agent/metrics.go @@ -0,0 +1,83 @@ +package agent + +import ( + "context" + "sync/atomic" + + "github.com/patrickmn/go-cache" + "github.com/projecteru2/core/log" + "github.com/projecteru2/core/utils" + virttypes "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/vmcache" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + vmHealthyDesc = prometheus.NewDesc( + prometheus.BuildFQName("vm", "", "healthy"), + "VM healthy status.", + []string{"EruID", "node", "app_id", "app_sid", "appname", "ip"}, + nil) + coreHealthyDesc = prometheus.NewDesc( + prometheus.BuildFQName("node", "core", "healthy"), + "core healthy status.", + []string{"node"}, + nil) +) + +type MetricsCollector struct { + wrkStatusCache *cache.Cache + coreHealthy atomic.Bool +} + +func (mgr *Manager) GetMetricsCollector() *MetricsCollector { + return mgr.mCol +} + +func (e *MetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- vmHealthyDesc + ch <- coreHealthyDesc +} + +func (e *MetricsCollector) Collect(ch chan<- prometheus.Metric) { + logger := log.WithFunc("agent.MetricsCollector.Collect") + for _, v := range e.wrkStatusCache.Items() { + wrkStatus, _ := v.Object.(*types.WorkloadStatus) + if wrkStatus == nil { + logger.Warnf(context.TODO(), "[BUG] wrkStatus can't be nil here") + continue + } + if !wrkStatus.Running { + continue + } + de := vmcache.FetchDomainEntry(wrkStatus.ID) + if de == nil { + logger.Warnf(context.TODO(), "[eru agent] failed to get domain entry %s", wrkStatus.ID) + continue + } + healthy := 0 + if wrkStatus.Healthy { + healthy = 1 + } + ch <- prometheus.MustNewConstMetric( + vmHealthyDesc, + prometheus.GaugeValue, + float64(healthy), + virttypes.EruID(wrkStatus.ID), + wrkStatus.Nodename, + de.AppID, + de.AppSID, + de.AppName, + de.IP, + ) + } + + ch <- prometheus.MustNewConstMetric( + coreHealthyDesc, + prometheus.GaugeValue, + float64(utils.Bool2Int(e.coreHealthy.Load())), + configs.Hostname(), + ) +} diff --git a/internal/eru/agent/monitor.go b/internal/eru/agent/monitor.go new file mode 100644 index 0000000..cbdba7d --- /dev/null +++ b/internal/eru/agent/monitor.go @@ -0,0 +1,121 @@ +package agent + +import ( + "context" + "time" + + "github.com/projecteru2/yavirt/internal/eru/common" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/utils" + + "github.com/projecteru2/core/log" +) + +func (m *Manager) initMonitor(ctx context.Context) (err error) { + watcher, err := m.svc.WatchGuestEvents(ctx) + if err != nil { + return err + } + logger := log.WithFunc("initMonitor") + defer logger.Infof(ctx, "events goroutine has done") + defer watcher.Stop() + + for { + select { + case event := <-watcher.Events(): + // don't block here + _ = utils.Pool.Submit(func() { + switch event.Op { + case intertypes.StartOp: + m.handleWorkloadStart(ctx, event.ID) + case intertypes.DieOp: + 
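+ // a die event means the guest has stopped; handleWorkloadDie below refreshes its status and reports it to core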
m.handleWorkloadDie(ctx, event.ID) + } + }) + case <-watcher.Done(): + // The watcher has already been stopped. + logger.Infof(ctx, "watcher has done") + return nil + + case <-ctx.Done(): + logger.Infof(ctx, "ctx done") + return nil + } + } +} + +// monitor with retry +func (m *Manager) monitor(ctx context.Context) { + logger := log.WithFunc("monitor") + for { + select { + case <-ctx.Done(): + logger.Info(ctx, "context canceled, stop monitoring") + return + default: + if err := m.initMonitor(ctx); err != nil { + logger.Error(ctx, err, "received an error, will retry") + } + time.Sleep(m.config.GlobalConnectionTimeout) + } + } +} + +// check a single workload, with retries allowed +func (m *Manager) checkOneWorkloadWithBackoffRetry(ctx context.Context, ID string) { + logger := log.WithFunc("checkOneWorkloadWithBackoffRetry").WithField("ID", ID) + logger.Debug(ctx, "check workload") + + m.checkWorkloadMutex.Lock() + defer m.checkWorkloadMutex.Unlock() + + if retryTask, ok := m.startingWorkloads.Get(ID); ok { + retryTask.Stop(ctx) + } + + retryTask := utils.NewRetryTask(ctx, utils.GetMaxAttemptsByTTL(m.config.GetHealthCheckStatusTTL()), func() error { + if !m.checkOneWorkload(ctx, ID) { + // this error is only used to decide whether to keep retrying, no need to log it + return common.ErrWorkloadUnhealthy + } + return nil + }) + m.startingWorkloads.Set(ID, retryTask) + _ = utils.Pool.Submit(func() { + if err := retryTask.Run(ctx); err != nil { + logger.Debug(ctx, "workload still not healthy") + } + }) +} + +func (m *Manager) handleWorkloadStart(ctx context.Context, ID string) { + logger := log.WithFunc("handleWorkloadStart").WithField("ID", ID) + logger.Debug(ctx, "workload start") + workloadStatus, err := m.GetStatus(ctx, ID, true) + if err != nil { + logger.Warnf(ctx, "failed to get workload status: %s", err) + return + } + + if workloadStatus.Healthy { + if err := m.store.SetWorkloadStatus(ctx, workloadStatus, m.config.GetHealthCheckStatusTTL()); err != nil { + logger.Warnf(ctx, "failed to update deploy status: %s", err) + } + } else { + m.checkOneWorkloadWithBackoffRetry(ctx, ID) + } +} + +func (m *Manager) handleWorkloadDie(ctx context.Context, ID string) { + logger := log.WithFunc("handleWorkloadDie").WithField("ID", ID) + logger.Debug(ctx, "workload die") + workloadStatus, err := m.GetStatus(ctx, ID, true) + if err != nil { + logger.Warnf(ctx, "failed to get workload status: %s", err) + return + } + + if err := m.store.SetWorkloadStatus(ctx, workloadStatus, m.config.GetHealthCheckStatusTTL()); err != nil { + logger.Warnf(ctx, "failed to update deploy status: %s", err) + } +} diff --git a/internal/eru/agent/monitor_test.go b/internal/eru/agent/monitor_test.go new file mode 100644 index 0000000..2b65696 --- /dev/null +++ b/internal/eru/agent/monitor_test.go @@ -0,0 +1,101 @@ +package agent + +import ( + "context" + "testing" + "time" + + virttypes "github.com/projecteru2/libyavirt/types" + storemocks "github.com/projecteru2/yavirt/internal/eru/store/mocks" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/service/mocks" + intertypes "github.com/projecteru2/yavirt/internal/types" + interutils "github.com/projecteru2/yavirt/internal/utils" + "github.com/projecteru2/yavirt/pkg/test/mock" + "github.com/stretchr/testify/assert" +) + +func TestMonitor(t *testing.T) { + manager := newMockManager(t) + + watchers := interutils.NewWatchers() + wch, err := watchers.Get() + assert.Nil(t, err) + go watchers.Run(context.Background()) + defer watchers.Stop() + + store := manager.store.(*storemocks.MockStore)
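+ // the mock store keeps workload statuses in memory, so the assertions below can read them back via GetMockWorkloadStatus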
+ svc := manager.svc.(*mocks.Service) + + svc.On("VirtContext", mock.Anything).Return(nil) + svc.On("WatchGuestEvents", mock.Anything).Return(wch, nil).Once() + + assert.Nil(t, store.GetMockWorkloadStatus("00033017009174384208170000000001")) + assert.Nil(t, store.GetMockWorkloadStatus("00033017009174384208170000000002")) + + // stop "00033017009174384208170000000001" + svc.On("GetGuest", mock.Anything, "00033017009174384208170000000001").Return(&virttypes.Guest{ + Resource: virttypes.Resource{ + ID: "00033017009174384208170000000001", + }, + Labels: map[string]string{"ERU": "1"}, + Running: false, + }, nil) + + // svc.On("ExecuteGuest", mock.Anything, mock.Anything, mock.Anything).Return(&virttypes.ExecuteGuestMessage{}, nil) + watchers.Watched(intertypes.Event{ + ID: "00033017009174384208170000000001", + Op: "die", + }) + + // start monitor and wait for a while, then exit + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + go manager.initMonitor(ctx) + time.Sleep(2 * time.Second) + cancel() + + assert.Equal(t, store.GetMockWorkloadStatus("00033017009174384208170000000001"), &types.WorkloadStatus{ + ID: "00033017009174384208170000000001", + Nodename: "fake", + Extension: []byte(`{"ERU":"1"}`), + Running: false, + Healthy: false, + }) + + // start "00033017009174384208170000000002" + wch2, err := watchers.Get() + assert.Nil(t, err) + svc.On("WatchGuestEvents", mock.Anything).Return(wch2, nil).Once() + + svc.On("GetGuest", mock.Anything, "00033017009174384208170000000002").Return(&virttypes.Guest{ + Resource: virttypes.Resource{ + ID: "00033017009174384208170000000002", + }, + Labels: map[string]string{"ERU": "1"}, + Running: true, + }, nil) + watchers.Watched(intertypes.Event{ + ID: "00033017009174384208170000000002", + Op: "start", + }) + + ctx, cancel = context.WithTimeout(context.Background(), time.Second*30) + go manager.initMonitor(ctx) + time.Sleep(2 * time.Second) + cancel() + + assert.Equal(t, store.GetMockWorkloadStatus("00033017009174384208170000000001"), &types.WorkloadStatus{ + ID: "00033017009174384208170000000001", + Nodename: "fake", + Extension: []byte(`{"ERU":"1"}`), + Running: false, + Healthy: false, + }) + time.Sleep(1 * time.Second) + + // initMonitor has already stopped its watcher; send an empty event + // so that watchers get a chance to remove the stopped watcher + watchers.Watched(intertypes.Event{}) + time.Sleep(1 * time.Second) + assert.Zero(t, watchers.Len()) +} diff --git a/internal/eru/agent/node.go b/internal/eru/agent/node.go new file mode 100644 index 0000000..0e3aae5 --- /dev/null +++ b/internal/eru/agent/node.go @@ -0,0 +1,62 @@ +package agent + +import ( + "context" + "time" + + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/internal/utils" +) + +// heartbeat creates a new goroutine to report status every HeartbeatInterval seconds +// By default HeartbeatInterval is 0, in which case no heartbeat is performed. +func (m *Manager) heartbeat(ctx context.Context) { + if m.config.HeartbeatInterval <= 0 { + return + } + _ = utils.Pool.Submit(func() { m.nodeStatusReport(ctx) }) + + tick := time.NewTicker(time.Duration(m.config.HeartbeatInterval) * time.Second) + defer tick.Stop() + + for { + select { + case <-tick.C: + _ = utils.Pool.Submit(func() { m.nodeStatusReport(ctx) }) + case <-ctx.Done(): + return + } + } +} + +// nodeStatusReport does heartbeat, tells core this node is alive. +// The TTL is set to three times the HeartbeatInterval, typically 360s, +// which means if a node becomes unavailable, subscribers may not notice it for up to 360s. 
+// HealthCheck.Timeout is used as timeout of requesting core Profile +func (m *Manager) nodeStatusReport(ctx context.Context) { + logger := log.WithFunc("nodeStatusReport").WithField("hostname", m.config.Hostname) + logger.Debug(ctx, "report begins") + defer logger.Debug(ctx, "report ends") + if !m.svc.IsHealthy(ctx) { + logger.Warn(ctx, "service is not healthy") + return + } + if err := m.store.CheckHealth(ctx); err != nil { + logger.Error(ctx, err, "failed to check health of core") + m.mCol.coreHealthy.Store(false) + } else { + m.mCol.coreHealthy.Store(true) + } + ttl := int64(m.config.HeartbeatInterval * 3) + + if err := utils.BackoffRetry(ctx, 3, func() (err error) { + utils.WithTimeout(ctx, m.config.GlobalConnectionTimeout, func(ctx context.Context) { + if err = m.store.SetNodeStatus(ctx, ttl); err != nil { + logger.Error(ctx, err, "failed to set node status") + } + }) + return err + }); err != nil { + logger.Error(ctx, err, "failed to set node status for 3 times") + } +} diff --git a/internal/eru/agent/node_test.go b/internal/eru/agent/node_test.go new file mode 100644 index 0000000..7d153fc --- /dev/null +++ b/internal/eru/agent/node_test.go @@ -0,0 +1,51 @@ +package agent + +import ( + "context" + "testing" + "time" + + storemocks "github.com/projecteru2/yavirt/internal/eru/store/mocks" + "github.com/projecteru2/yavirt/internal/service/mocks" + "github.com/projecteru2/yavirt/pkg/test/mock" + + "github.com/stretchr/testify/assert" +) + +func TestNodeStatusReport(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + manager := newMockManager(t) + store := manager.store.(*storemocks.MockStore) + svc := manager.svc.(*mocks.Service) + svc.On("IsHealthy", mock.Anything).Return(true) + + status, err := store.GetNodeStatus(ctx, "fake") + assert.Nil(t, err) + assert.Equal(t, status.Alive, false) + + manager.nodeStatusReport(ctx) + status, err = store.GetNodeStatus(ctx, "fake") + assert.Nil(t, err) + assert.Equal(t, status.Alive, true) +} + +func TestHeartbeat(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + manager := newMockManager(t) + store := manager.store.(*storemocks.MockStore) + svc := manager.svc.(*mocks.Service) + svc.On("IsHealthy", mock.Anything).Return(true) + + status, err := store.GetNodeStatus(ctx, "fake") + assert.Nil(t, err) + assert.Equal(t, status.Alive, false) + + go manager.heartbeat(ctx) + + time.Sleep(time.Duration(manager.config.HeartbeatInterval+2) * time.Second) + status, err = store.GetNodeStatus(ctx, "fake") + assert.Nil(t, err) + assert.Equal(t, status.Alive, true) +} diff --git a/internal/eru/agent/workload.go b/internal/eru/agent/workload.go new file mode 100644 index 0000000..a57b26b --- /dev/null +++ b/internal/eru/agent/workload.go @@ -0,0 +1,171 @@ +package agent + +import ( + "context" + "encoding/json" + "strings" + "sync" + "time" + + "github.com/projecteru2/yavirt/internal/eru/common" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/utils" + + "github.com/projecteru2/core/cluster" + "github.com/projecteru2/core/log" + yavirttypes "github.com/projecteru2/libyavirt/types" +) + +func (m *Manager) ListWorkloadIDs(ctx context.Context) (ids []string, err error) { + utils.WithTimeout(ctx, m.config.GlobalConnectionTimeout, func(ctx context.Context) { + ids, err = m.svc.GetGuestIDList(ctx) + }) + if err != nil && !strings.Contains(err.Error(), "key not exists") { + log.WithFunc("ListWorkloadIDs").Error(ctx, err, "failed to get 
workload ids") + return nil, err + } + return ids, nil +} + +func (m *Manager) detectWorkload(ctx context.Context, ID string) (*Guest, error) { + logger := log.WithFunc("detectWorkload").WithField("ID", ID) + + var guest *yavirttypes.Guest + var err error + + utils.WithTimeout(ctx, m.config.GlobalConnectionTimeout, func(ctx context.Context) { + guest, err = m.svc.GetGuest(ctx, ID) + }) + + if err != nil { + logger.Error(ctx, err, "failed to detect workload") + return nil, err + } + + if _, ok := guest.Labels[cluster.ERUMark]; !ok { + return nil, common.ErrInvaildVM + } + + if m.config.CheckOnlyMine && m.config.Hostname != guest.Hostname { + logger.Debugf(ctx, "guest's hostname is %s instead of %s", guest.Hostname, m.config.Hostname) + return nil, common.ErrInvaildVM + } + + return &Guest{ + ID: guest.ID, + Status: guest.Status, + TransitStatus: guest.TransitStatus, + CreateTime: guest.CreateTime, + TransitTime: guest.TransitTime, + UpdateTime: guest.UpdateTime, + CPU: guest.CPU, + Mem: guest.Mem, + Storage: guest.Storage, + ImageID: guest.ImageID, + ImageName: guest.ImageName, + ImageUser: guest.ImageUser, + Networks: guest.Networks, + Labels: guest.Labels, + IPs: guest.IPs, + Hostname: guest.Hostname, + Running: guest.Running, + once: sync.Once{}, + }, nil +} + +// GetStatus checks workload's status first, then returns workload status +func (m *Manager) GetStatus(ctx context.Context, ID string, checkHealth bool) (*types.WorkloadStatus, error) { + logger := log.WithFunc("GetStatus").WithField("ID", ID) + guest, err := m.detectWorkload(ctx, ID) + if err != nil { + logger.Error(ctx, err, "failed to get guest status") + return nil, err + } + + bytes, err := json.Marshal(guest.Labels) + if err != nil { + logger.Error(ctx, err, "failed to marshal labels") + return nil, err + } + + status := &types.WorkloadStatus{ + ID: guest.ID, + Running: guest.Running, + Healthy: guest.Running && guest.HealthCheck == nil, + Networks: guest.Networks, + Extension: bytes, + Nodename: m.config.Hostname, + } + + if checkHealth && guest.Running { + free, acquired := m.cas.Acquire(guest.ID) + if !acquired { + return nil, common.ErrGetLockFailed + } + defer free() + timeout := time.Duration(m.config.HealthCheck.Timeout) * time.Second + status.Healthy = guest.CheckHealth(ctx, m.svc, timeout, m.config.HealthCheck.EnableDefaultChecker) + } + + return status, nil +} + +func (m *Manager) healthCheck(ctx context.Context) { + tick := time.NewTicker(time.Duration(m.config.HealthCheck.Interval) * time.Second) + defer tick.Stop() + + _ = utils.Pool.Submit(func() { m.checkAllWorkloads(ctx) }) + + for { + select { + case <-tick.C: + _ = utils.Pool.Submit(func() { m.checkAllWorkloads(ctx) }) + case <-ctx.Done(): + return + } + } +} + +// 检查全部 label 为ERU=1的workload +// 这里需要 list all,原因是 monitor 检测到 die 的时候已经标记为 false 了 +// 但是这时候 health check 刚返回 true 回来并写入 core +// 为了保证最终数据一致性这里也要检测 +func (m *Manager) checkAllWorkloads(ctx context.Context) { + logger := log.WithFunc("checkAllWorkloads") + logger.Debug(ctx, "health check begin") + workloadIDs, err := m.ListWorkloadIDs(ctx) + if err != nil { + logger.Error(ctx, err, "error when list all workloads with label \"ERU=1\"") + return + } + + for idx := range workloadIDs { + wrkID := workloadIDs[idx] + _ = utils.Pool.Submit(func() { m.checkOneWorkload(ctx, wrkID) }) + } +} + +// 检查并保存一个workload的状态,最后返回workload是否healthy。 +// 返回healthy是为了重试用的,没啥别的意义。 +func (m *Manager) checkOneWorkload(ctx context.Context, ID string) bool { + logger := log.WithFunc("checkOneWorkload").WithField("ID", ID) + 
+ workloadStatus, err := m.GetStatus(ctx, ID, true) + if err != nil { + logger.Error(ctx, err, "failed to get status of workload") + return false + } + + m.wrkStatusCache.Set(workloadStatus.ID, workloadStatus, 0) + + if err := m.setWorkloadStatus(ctx, workloadStatus); err != nil { + logger.Error(ctx, err, "update workload status failed") + } + return workloadStatus.Healthy +} + +// set the workload status, retries allowed, with timeout control +func (m *Manager) setWorkloadStatus(ctx context.Context, status *types.WorkloadStatus) error { + return utils.BackoffRetry(ctx, 3, func() error { + return m.store.SetWorkloadStatus(ctx, status, m.config.GetHealthCheckStatusTTL()) + }) +} diff --git a/internal/eru/agent/workload_test.go b/internal/eru/agent/workload_test.go new file mode 100644 index 0000000..f9fc511 --- /dev/null +++ b/internal/eru/agent/workload_test.go @@ -0,0 +1,64 @@ +package agent + +import ( + "context" + "testing" + "time" + + storemocks "github.com/projecteru2/yavirt/internal/eru/store/mocks" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/service/mocks" + "github.com/projecteru2/yavirt/pkg/test/mock" + "github.com/stretchr/testify/assert" + + virttypes "github.com/projecteru2/libyavirt/types" +) + +func assertInitStatus(t *testing.T, store *storemocks.MockStore) { + assert.Equal(t, store.GetMockWorkloadStatus("00033017009174384208170000000001"), &types.WorkloadStatus{ + ID: "00033017009174384208170000000001", + Nodename: "fake", + Extension: []byte(`{"ERU":"1"}`), + Running: true, + Healthy: true, + }) + + assert.Equal(t, store.GetMockWorkloadStatus("00033017009174384208170000000002"), &types.WorkloadStatus{ + ID: "00033017009174384208170000000002", + Nodename: "fake", + Extension: []byte(`{"ERU":"1"}`), + Running: false, + Healthy: false, + }) +} + +func initSVC(svc *mocks.Service) { + svc.On("VirtContext", mock.Anything).Return(nil) + svc.On("GetGuestIDList", mock.Anything).Return([]string{"00033017009174384208170000000001", "00033017009174384208170000000002"}, nil) + svc.On("GetGuest", mock.Anything, "00033017009174384208170000000001").Return(&virttypes.Guest{ + Resource: virttypes.Resource{ + ID: "00033017009174384208170000000001", + }, + Labels: map[string]string{"ERU": "1"}, + Running: true, + }, nil) + svc.On("GetGuest", mock.Anything, "00033017009174384208170000000002").Return(&virttypes.Guest{ + Resource: virttypes.Resource{ + ID: "00033017009174384208170000000002", + }, + Labels: map[string]string{"ERU": "1"}, + Running: false, + }, nil) +} +func TestHealthCheck(t *testing.T) { + manager := newMockManager(t) + svc := manager.svc.(*mocks.Service) + initSVC(svc) + + ctx := context.Background() + manager.checkAllWorkloads(ctx) + store := manager.store.(*storemocks.MockStore) + time.Sleep(2 * time.Second) + + assertInitStatus(t, store) +} diff --git a/internal/eru/common/error.go b/internal/eru/common/error.go new file mode 100644 index 0000000..1374971 --- /dev/null +++ b/internal/eru/common/error.go @@ -0,0 +1,36 @@ +package common + +import "github.com/pkg/errors" + +// ErrNotImplemented . +var ( + ErrNotImplemented = errors.New("not implemented") + // ErrConnecting means writer is in connecting status, waiting to be connected + ErrConnecting = errors.New("connecting") + // ErrInvalidScheme . + ErrInvalidScheme = errors.New("invalid scheme") + // ErrGetRuntimeFailed . + ErrGetRuntimeFailed = errors.New("failed to get runtime client") + // ErrInvalidRuntimeType . + ErrInvalidRuntimeType = errors.New("unknown runtime type") + // ErrGetStoreFailed . 
+ ErrGetStoreFailed = errors.New("failed to get store client") + // ErrInvalidStoreType . + ErrInvalidStoreType = errors.New("unknown store type") + // ErrWorkloadUnhealthy . + ErrWorkloadUnhealthy = errors.New("not healthy") + // ErrClosedSteam . + ErrClosedSteam = errors.New("closed") + // ErrSyscallFailed . + ErrSyscallFailed = errors.New("syscall fail, Not a syscall.Stat_t") + // ErrDevNotFound . + ErrDevNotFound = errors.New("device not found") + // ErrJournalDisable . + ErrJournalDisable = errors.New("journal disabled") + // ErrInvaildContainer . + ErrInvaildContainer = errors.New("invaild container") + // ErrGetLockFailed . + ErrGetLockFailed = errors.New("get lock failed") + // ErrInvaildVM . + ErrInvaildVM = errors.New("invaild vm") +) diff --git a/internal/eru/recycle/recycle.go b/internal/eru/recycle/recycle.go new file mode 100644 index 0000000..14a448b --- /dev/null +++ b/internal/eru/recycle/recycle.go @@ -0,0 +1,155 @@ +package recycle + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/projecteru2/core/log" + corerpc "github.com/projecteru2/core/rpc" + virttypes "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/eru/common" + "github.com/projecteru2/yavirt/internal/eru/store" + corestore "github.com/projecteru2/yavirt/internal/eru/store/core" + storemocks "github.com/projecteru2/yavirt/internal/eru/store/mocks" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/service" + "github.com/projecteru2/yavirt/internal/utils" + "github.com/projecteru2/yavirt/pkg/notify/bison" + "github.com/samber/lo" + "google.golang.org/grpc/status" +) + +var ( + interval = 1 * time.Minute + deleteWait = 15 * time.Second + stor store.Store +) + +func fetchWorkloads() ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + wrks, err := stor.ListNodeWorkloads(ctx, configs.Hostname()) + if err != nil { + return nil, err + } + ids := lo.Map(wrks, func(w *types.Workload, _ int) string { + return w.ID + }) + return ids, nil +} + +func deleteGuest(svc service.Service, eruID string) error { + logger := log.WithFunc("deleteGuest") + // when core delete a workload, it will delete the record in etcd first and then delete the workload + // so there is a time window in which the guest is a dangling guest, so we wait for a while and wait the deletion finished + // TODO better way to detect if a guest is in deletion + time.Sleep(deleteWait) + + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + logger.Infof(ctx, "[recycle] start to remove dangling guest %s", eruID) + // since guest deletion is a dangerous operation here, + // so we check eru again + wrk, err := stor.GetWorkload(ctx, eruID) + logger.Infof(ctx, "[recycle] guest %s, wrk: %v, err: %v", eruID, wrk, err) + if err == nil { + logger.Errorf(ctx, err, "[recycle] BUG: dangling guest %s is still in eru", eruID) + return errors.Errorf("BUG: dangling guest %s is still in eru", eruID) + } + + e, ok := status.FromError(err) + if !ok { + return err + } + if e.Code() == corerpc.GetWorkload && strings.Contains(e.Message(), "entity count invalid") { //nolint + logger.Infof(ctx, "[recycle] start to remove local guest %s", eruID) + // When creating a guest, the core first creates the workload and then creates a record in ETCD. 
+ // Therefore, within the time window between these two operations, we may incorrectly detect dangling guests. + // To prevent this situation, we create a creation session locker when creating a guest and check this locker here. + flck := utils.NewCreateSessionFlock(utils.VirtID(eruID)) + if flck.FileExists() { + // creation session locker file exists + // it means this guest is in creation + logger.Warnf(ctx, "[recycle] guest %s in creation", eruID) + return fmt.Errorf("guest %s is in creation", eruID) + } + + if err := svc.ControlGuest(ctx, utils.VirtID(eruID), virttypes.OpDestroy, true); err != nil { + logger.Errorf(ctx, err, "[recycle] failed to remove dangling guest %s", eruID) + return err + } + notifier := bison.GetService() + log.Debugf(ctx, "[recycle] notifier: %v", notifier) + if notifier != nil { + text := fmt.Sprintf(` +delete dangling guest successfully +--- + +- **node:** %s +- **id:** %s + `, configs.Hostname(), eruID) + if err := notifier.SendMarkdown(context.TODO(), "delete dangling guest", text); err != nil { + logger.Warnf(ctx, "[recycle] failed to send dingtalk message: %v", err) + } + } + return nil + } + + return err +} + +func startLoop(ctx context.Context, svc service.Service) { + logger := log.WithFunc("startLoop") + logger.Info(ctx, "[recycle] starting recycle loop") + defer logger.Info(ctx, "[recycle] recycle loop stopped") + + for { + select { + case <-ctx.Done(): + return + case <-time.After(interval): + } + + coreIDs, err := fetchWorkloads() + if err != nil { + logger.Error(ctx, err, "failed to fetch workloads") + continue + } + localIDs, err := svc.GetGuestIDList(context.Background()) + if err != nil { + continue + } + coreMap := map[string]struct{}{} + for _, id := range coreIDs { + coreMap[id] = struct{}{} + } + for _, id := range localIDs { + eruID := virttypes.EruID(id) + if _, ok := coreMap[eruID]; ok { + continue + } + go deleteGuest(svc, eruID) //nolint + } + } +} + +func Setup(ctx context.Context, cfg *configs.Config, t *testing.T) (err error) { + if t == nil { + corestore.Init(ctx, &cfg.Eru) + if stor = corestore.Get(); stor == nil { + return common.ErrGetStoreFailed + } + } else { + stor = storemocks.NewFakeStore() + } + return nil +} + +func Run(ctx context.Context, svc service.Service) { + go startLoop(ctx, svc) +} diff --git a/internal/eru/recycle/recycle_test.go b/internal/eru/recycle/recycle_test.go new file mode 100644 index 0000000..0581ee5 --- /dev/null +++ b/internal/eru/recycle/recycle_test.go @@ -0,0 +1,76 @@ +package recycle + +import ( + "context" + "strings" + "testing" + "time" + + virttypes "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/internal/service/mocks" + "github.com/projecteru2/yavirt/pkg/notify/bison" + "github.com/projecteru2/yavirt/pkg/test/assert" + "github.com/projecteru2/yavirt/pkg/test/mock" + + coretypes "github.com/projecteru2/core/types" + storemocks "github.com/projecteru2/yavirt/internal/eru/store/mocks" + "github.com/projecteru2/yavirt/internal/eru/types" + grpcstatus "google.golang.org/grpc/status" +) + +func TestDeleteGuest(t *testing.T) { + bison.Setup(nil, t) + deleteWait = 0 + ctx, cancle := context.WithCancel(context.Background()) + defer cancle() + err := Setup(ctx, nil, t) + assert.Nil(t, err) + + svc := &mocks.Service{} + mockSto := stor.(*storemocks.MockStore) + + // still in eru + mockSto.On("GetWorkload", mock.Anything, mock.Anything).Return( + &types.Workload{ + ID: virttypes.EruID("00033017009174384208170000000001"), + }, nil).Once() + err = deleteGuest(svc, 
virttypes.EruID("00033017009174384208170000000001")) + assert.Err(t, err) + assert.True(t, strings.Contains(err.Error(), "still in eru")) + + // get an other error from eru + mockSto.On("GetWorkload", mock.Anything, mock.Anything).Return(nil, grpcstatus.Error(1111, coretypes.ErrInvaildCount.Error())).Once() + err = deleteGuest(svc, virttypes.EruID("00033017009174384208170000000001")) + assert.Err(t, err) + + mockSto.On("GetWorkload", mock.Anything, mock.Anything).Return(nil, grpcstatus.Error(1051, coretypes.ErrInvaildCount.Error())).Once() + svc.On("VirtContext", mock.Anything).Return(nil).Once() + svc.On("ControlGuest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + err = deleteGuest(svc, virttypes.EruID("00033017009174384208170000000001")) + assert.Nil(t, err) + +} + +func TestNormal(t *testing.T) { + interval = 2 * time.Second + + ctx, cancle := context.WithCancel(context.Background()) + defer cancle() + err := Setup(ctx, nil, t) + assert.Nil(t, err) + + svc := &mocks.Service{} + svc.On("GetGuestIDList", mock.Anything).Return([]string{"00033017009174384208170000000001", "00033017009174384208170000000002"}, nil) + mockSto := stor.(*storemocks.MockStore) + mockSto.On("ListNodeWorkloads", mock.Anything, mock.Anything).Return( + []*types.Workload{ + { + ID: virttypes.EruID("00033017009174384208170000000001"), + }, + { + ID: virttypes.EruID("00033017009174384208170000000002"), + }, + }, nil) + Run(ctx, svc) + time.Sleep(7 * time.Second) +} diff --git a/internal/eru/resources/core.go b/internal/eru/resources/core.go new file mode 100644 index 0000000..a3d3d41 --- /dev/null +++ b/internal/eru/resources/core.go @@ -0,0 +1,234 @@ +package resources + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/projecteru2/core/log" + cpumemtypes "github.com/projecteru2/core/resource/plugins/cpumem/types" + resourcetypes "github.com/projecteru2/core/resource/types" + stotypes "github.com/projecteru2/resource-storage/storage/types" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/eru/types" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/pkg/notify/bison" + gputypes "github.com/yuyang0/resource-gpu/gpu/types" +) + +// CoreResourcesManager used to cache the resources on core +type CoreResourcesManager struct { + mu sync.Mutex + cpumem *cpumemtypes.NodeResource + gpu *gputypes.NodeResource + sto *stotypes.NodeResource +} + +func NewCoreResourcesManager() *CoreResourcesManager { + return &CoreResourcesManager{} +} + +func (cm *CoreResourcesManager) fetchResourcesWithLock() { + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + logger := log.WithFunc("fetchResources") + + resp, err := cli.GetNodeResource(ctx, configs.Hostname()) + if err != nil { + logger.Errorf(ctx, err, "failed to fetch resource from core") + return + } + capacity := resp.Capacity + cm.mu.Lock() + defer cm.mu.Unlock() + for resName, rawParams := range capacity { + switch resName { + case intertypes.PluginNameCPUMem: + cpumem := cpumemtypes.NodeResource{} + if err = mapstructure.Decode(rawParams, &cpumem); err != nil { + logger.Errorf(ctx, err, "failed to unmarshal resource cpumem") + } else { + logger.Debugf(ctx, "[core fetchResources] cpumem: %v", cpumem) + cm.cpumem = &cpumem + } + case intertypes.PluginNameStorage: + sto := stotypes.NodeResource{} + if err = mapstructure.Decode(rawParams, 
&sto); err != nil { + logger.Errorf(ctx, err, "failed to unmarshal resource storage") + } else { + logger.Debugf(ctx, "[core fetchResources] storage: %v", sto) + cm.sto = &sto + } + case intertypes.PluginNameGPU: + gpu := gputypes.NodeResource{} + if err = mapstructure.Decode(rawParams, &gpu); err != nil { + logger.Errorf(ctx, err, "failed to unmarshal resource gpu") + } else { + logger.Debugf(ctx, "[core fetchResources] gpu: %v", gpu) + cm.gpu = &gpu + } + } + } +} + +func (cm *CoreResourcesManager) GetCpumem() (ans *cpumemtypes.NodeResource) { + cm.mu.Lock() + ans = cm.cpumem + cm.mu.Unlock() + if ans != nil { + return + } + cm.fetchResourcesWithLock() + cm.mu.Lock() + defer cm.mu.Unlock() + return cm.cpumem +} + +func (cm *CoreResourcesManager) GetGPU() (ans *gputypes.NodeResource) { + cm.mu.Lock() + ans = cm.gpu + cm.mu.Unlock() + if ans != nil { + return + } + cm.fetchResourcesWithLock() + cm.mu.Lock() + defer cm.mu.Unlock() + return cm.gpu +} + +func (cm *CoreResourcesManager) UpdateGPU(nr *gputypes.NodeResource) { + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + logger := log.WithFunc("UpdateGPU") + + remoteNR := cm.GetGPU() + if remoteNR == nil { + return + } + if remoteNR.Count() == nr.Count() { + remoteNR1 := remoteNR.DeepCopy() + remoteNR1.Sub(nr) + if remoteNR1.Count() == 0 { + logger.Debug(ctx, "remote gpu config is consistent") + return + } + } + logger.Infof(ctx, "start to update gpu resource: ", nr, remoteNR) + resBS, _ := json.Marshal(nr) + opts := &types.SetNodeOpts{ + Nodename: configs.Hostname(), + Delta: false, + Resources: map[string][]byte{ + "gpu": resBS, + }, + } + _, err := cli.SetNode(ctx, opts) + if err != nil { + logger.Errorf(ctx, err, "failed to update core resource") + return + } + + notifier := bison.GetService() + if notifier != nil { + text := fmt.Sprintf(` +update core gpu resource successfully + +--- + +- **node:** %s +- **gpu:** %v + `, configs.Hostname(), nr) + _ = notifier.SendMarkdown(ctx, "update core gpu resource successfully", text) + } + + cm.mu.Lock() + defer cm.mu.Unlock() + + cm.gpu = nr +} + +func (cm *CoreResourcesManager) UpdateCPUMem(nr *cpumemtypes.NodeResource) (err error) { + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + logger := log.WithFunc("UpdateCPUMem") + + localNR := nr.DeepCopy() + remoteNR := cm.GetCpumem() + if remoteNR == nil { + return err + } + if remoteNR.CPU == localNR.CPU && remoteNR.Memory <= localNR.Memory && remoteNR.Memory >= (localNR.Memory*75/100) { + logger.Info(ctx, "remote cpumem config is consistent") + return err + } + logger.Infof(ctx, "start to update cpumem resource: ", localNR, remoteNR) + // prepare data for SetNode + cb, _ := convCpumemBytes(localNR) + opts := &types.SetNodeOpts{ + Nodename: configs.Hostname(), + Delta: false, + Resources: map[string][]byte{ + "cpumem": cb, + }, + } + if _, err = cli.SetNode(ctx, opts); err != nil { + logger.Errorf(ctx, err, "failed to update core resource") + return err + } + + notifier := bison.GetService() + if notifier != nil { + text := fmt.Sprintf(` +update core cpumem resource successfully +--- + +- **node:** %s +- **cpumem:** %+v + `, configs.Hostname(), localNR) + _ = notifier.SendMarkdown(ctx, "update core cpumem resource successfully", text) + } + + cm.mu.Lock() + defer cm.mu.Unlock() + + cm.cpumem = localNR + return nil +} + +func convCpumemBytes(localNR *cpumemtypes.NodeResource) ([]byte, error) { + cpumem := resourcetypes.RawParams{ + "cpu": localNR.CPU, + "memory": 
localNR.Memory * 80 / 100, // use 80% of memory + } + // nodeID => cpuID list + numaCPUMap := map[string][]string{} + for cpuID, numID := range localNR.NUMA { + numaCPUMap[numID] = append(numaCPUMap[numID], cpuID) + } + numaCPUList := make([]string, 0, len(numaCPUMap)) + for idx := 0; idx < len(numaCPUMap); idx++ { + cpuIDList := numaCPUMap[strconv.Itoa(idx)] + // sort here, so we can write UT easily + sort.Strings(cpuIDList) + numaCPUList = append(numaCPUList, strings.Join(cpuIDList, ",")) + } + if len(numaCPUList) > 0 { + cpumem["numa-cpu"] = numaCPUList + } + numaMemList := make([]string, 0, len(localNR.NUMAMemory)) + for idx := 0; idx < len(localNR.NUMAMemory); idx++ { + nodeMem := localNR.NUMAMemory[strconv.Itoa(idx)] * 80 / 100 // use 80% of memory + numaMemList = append(numaMemList, strconv.FormatInt(nodeMem, 10)) + } + if len(numaMemList) > 0 { + cpumem["numa-memory"] = numaMemList + } + return json.Marshal(cpumem) +} diff --git a/internal/eru/resources/core_test.go b/internal/eru/resources/core_test.go new file mode 100644 index 0000000..c619f71 --- /dev/null +++ b/internal/eru/resources/core_test.go @@ -0,0 +1,42 @@ +package resources + +import ( + "testing" + + cpumemtypes "github.com/projecteru2/core/resource/plugins/cpumem/types" + "github.com/stretchr/testify/assert" +) + +func TestConvCpumemBytes(t *testing.T) { + testCases := []struct { + name string + localNR *cpumemtypes.NodeResource + expected []byte + expectFail bool + }{ + { + name: "Valid case", + localNR: &cpumemtypes.NodeResource{ + CPU: 4, + Memory: 8192, + NUMA: map[string]string{"0": "0", "1": "0", "2": "1", "3": "1"}, + NUMAMemory: map[string]int64{"0": 1000, "1": 10000}, + }, + expected: []byte(`{"cpu":4,"memory":6553,"numa-cpu":["0,1","2,3"],"numa-memory":["800","8000"]}`), + expectFail: false, + }, + // Add more test cases as needed + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := convCpumemBytes(tc.localNR) + if tc.expectFail { + assert.Error(t, err, "Expected an error but got nil") + } else { + assert.NoError(t, err, "Expected no error but got %v", err) + assert.Equal(t, tc.expected, result, "Result does not match expected") + } + }) + } +} diff --git a/internal/eru/resources/cpumem.go b/internal/eru/resources/cpumem.go new file mode 100644 index 0000000..20b47a1 --- /dev/null +++ b/internal/eru/resources/cpumem.go @@ -0,0 +1,79 @@ +package resources + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/jaypipes/ghw" + "github.com/projecteru2/core/log" + cpumemtypes "github.com/projecteru2/core/resource/plugins/cpumem/types" + "github.com/projecteru2/yavirt/configs" +) + +type CPUMemManager struct { + cpumem *cpumemtypes.NodeResource + coreMgr *CoreResourcesManager +} + +func NewCPUMemManager(coreMgr *CoreResourcesManager, cfg *configs.Config) (*CPUMemManager, error) { + logger := log.WithFunc("NewCPUMemManager") + cpumem, err := fetchCPUMemFromHardware(cfg) + if err != nil { + return nil, err + } + logger.Infof(context.TODO(), "hardware cpumem info: %+v", cpumem) + go func() { + // Core need to connect to the local grpc server, so sleep 30s here to wait local grpc server up + time.Sleep(45 * time.Second) + if err := coreMgr.UpdateCPUMem(cpumem); err != nil { + logger.Errorf(context.TODO(), err, "failed to update cpumem") + } + }() + return &CPUMemManager{ + cpumem: cpumem, + coreMgr: coreMgr, + }, err +} + +func fetchCPUMemFromHardware(cfg *configs.Config) (*cpumemtypes.NodeResource, error) { + numa := cpumemtypes.NUMA{} + numaMem := 
cpumemtypes.NUMAMemory{} + + cpu, err := ghw.CPU() + if err != nil { + return nil, err + } + mem, err := ghw.Memory() + if err != nil { + return nil, err + } + // 因为core会对memory取0.8,而在我们这个场景中,当机器的内存很大比如500G的时候,取0.8浪费太多 + // 所以这里默认只保留配置的ReservedMemory给yavirt,os使用。 + reservedMem := cfg.Resource.ReservedMemory + if reservedMem > mem.TotalUsableBytes*20/100 { + reservedMem = mem.TotalUsableBytes * 20 / 100 + } + infoMem := (mem.TotalUsableBytes - reservedMem) * 100 / 80 + + topology, err := ghw.Topology() + if err != nil { + return nil, err + } + numaReservedMem := reservedMem / int64(len(topology.Nodes)) + for _, node := range topology.Nodes { + numaMem[strconv.Itoa(node.ID)] = (node.Memory.TotalUsableBytes - numaReservedMem) * 100 / 80 + for _, core := range node.Cores { + for _, id := range core.LogicalProcessors { + numa[strconv.Itoa(id)] = fmt.Sprintf("%d", node.ID) + } + } + } + return &cpumemtypes.NodeResource{ + CPU: float64(cpu.TotalThreads), + Memory: infoMem, + NUMAMemory: numaMem, + NUMA: numa, + }, nil +} diff --git a/internal/eru/resources/cpumem_test.go b/internal/eru/resources/cpumem_test.go new file mode 100644 index 0000000..a98e1fb --- /dev/null +++ b/internal/eru/resources/cpumem_test.go @@ -0,0 +1,82 @@ +package resources + +import ( + "testing" + + . "github.com/agiledragon/gomonkey/v2" + "github.com/jaypipes/ghw" + "github.com/jaypipes/ghw/pkg/cpu" + "github.com/jaypipes/ghw/pkg/memory" + "github.com/jaypipes/ghw/pkg/topology" + "github.com/mcuadros/go-defaults" + "github.com/projecteru2/yavirt/configs" + "github.com/stretchr/testify/assert" +) + +func TestFetchCPUMem(t *testing.T) { + patches := ApplyFuncReturn(ghw.Topology, &topology.Info{ + Nodes: []*topology.Node{ + { + ID: 0, + Cores: []*cpu.ProcessorCore{ + { + ID: 0, + LogicalProcessors: []int{ + 0, 2, + }, + }, + }, + Memory: &memory.Area{ + TotalUsableBytes: 1024, + }, + }, + { + ID: 1, + Cores: []*cpu.ProcessorCore{ + { + ID: 1, + LogicalProcessors: []int{ + 1, 3, + }, + }, + }, + Memory: &memory.Area{ + TotalUsableBytes: 1024, + }, + }, + }, + }, nil) + defer patches.Reset() + + patches = ApplyFuncReturn(ghw.CPU, &cpu.Info{ + TotalThreads: 4, + }, nil) + defer patches.Reset() + + patches = ApplyFuncReturn(ghw.Memory, &memory.Info{ + Area: memory.Area{ + TotalUsableBytes: 2048, + }, + }, nil) + defer patches.Reset() + cfg := &configs.Config{} + defaults.SetDefaults(cfg) + + res, err := fetchCPUMemFromHardware(cfg) + assert.Nil(t, err) + assert.Equal(t, res.CPU, float64(4)) + assert.Equal(t, res.Memory, int64(2048)) + for core, node := range res.NUMA { + switch node { + case "0": + assert.Truef(t, core == "0" || core == "2", "+++ %v", core) + case "1": + assert.Truef(t, core == "1" || core == "3", "++++ %v", core) + default: + assert.False(t, true) + } + } + for _, node := range res.NUMAMemory { + assert.Equal(t, node, int64(1025)) + } +} diff --git a/internal/eru/resources/gpu.go b/internal/eru/resources/gpu.go new file mode 100644 index 0000000..58c0e46 --- /dev/null +++ b/internal/eru/resources/gpu.go @@ -0,0 +1,245 @@ +package resources + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os/exec" + "strings" + "sync" + "time" + + _ "embed" + + "github.com/kr/pretty" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/patrickmn/go-cache" + + "github.com/jaypipes/ghw" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/vmcache" + gputypes 
"github.com/yuyang0/resource-gpu/gpu/types" +) + +var execCommand = exec.Command + +var ( + //go:embed gpu_name.json + gpuNameJSON string + gpuNameMap = map[string]string{} +) + +type SingleTypeGPUs struct { + gpuMap map[string]types.GPUInfo + addrSet mapset.Set[string] +} + +type GPUManager struct { + mu sync.Mutex + gpuTypeMap map[string]*SingleTypeGPUs + coreMgr *CoreResourcesManager + lostGPUCache *cache.Cache +} + +func initGPUNameMap(gpuProdMapCfg map[string]string) error { + if len(gpuNameMap) > 0 { + return nil + } + if err := json.Unmarshal([]byte(gpuNameJSON), &gpuNameMap); err != nil { + return err + } + // merge name map from config to gpuNameMap + for k, v := range gpuProdMapCfg { + gpuNameMap[k] = v + } + return nil +} + +func NewGPUManager(ctx context.Context, cfg *configs.Config, coreMgr *CoreResourcesManager) (*GPUManager, error) { + if err := initGPUNameMap(cfg.Resource.GPUProductMap); err != nil { + return nil, err + } + gpuMap, err := fetchGPUInfoFromHardware() + if err != nil { + return nil, err + } + log.WithFunc("NewGPUManager").Infof(ctx, "hardware gpu info: %# v", pretty.Formatter(gpuMap)) + + mgr := &GPUManager{ + gpuTypeMap: gpuMap, + coreMgr: coreMgr, + lostGPUCache: cache.New(3*time.Minute, 1*time.Minute), + } + go mgr.monitor(ctx) + return mgr, nil +} + +func (g *GPUManager) GetResource() *gputypes.NodeResource { + g.mu.Lock() + defer g.mu.Unlock() + + res := &gputypes.NodeResource{ + ProdCountMap: make(gputypes.ProdCountMap), + } + for prod, gpus := range g.gpuTypeMap { + res.ProdCountMap[prod] = len(gpus.gpuMap) + } + return res +} + +func (g *GPUManager) alloc(req *gputypes.EngineParams, usedAddrsMap mapset.Set[string]) (ans []types.GPUInfo, err error) { + totalCount := 0 + for reqProd, reqCount := range req.ProdCountMap { + if reqCount <= 0 { + continue + } + totalCount += reqCount + singleTypeGPUs := g.gpuTypeMap[reqProd] + available := singleTypeGPUs.addrSet.Difference(usedAddrsMap) + if available.Cardinality() < reqCount { + return nil, errors.New("no enough GPU") + } + for addr := range available.Iter() { + if reqCount <= 0 { + break + } + info := singleTypeGPUs.gpuMap[addr] + ans = append(ans, info) + reqCount-- + } + } + return ans, nil +} + +func (g *GPUManager) Alloc(req *gputypes.EngineParams) (ans []types.GPUInfo, err error) { + g.mu.Lock() + defer g.mu.Unlock() + usedAddrs := vmcache.FetchGPUAddrs() + usedAddrsSet := mapset.NewSet[string](usedAddrs...) + return g.alloc(req, usedAddrsSet) +} + +func (g *GPUManager) monitor(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case <-time.After(1 * time.Minute): + } + gpuMap, err := fetchGPUInfoFromHardware() + if err != nil { + log.WithFunc("GPUManager.monitor").Errorf(ctx, err, "failed to fetch gpu info from hardware") + continue + } + + totalGPUAddrSet := mapset.NewSet[string]() + for _, gpus := range gpuMap { + totalGPUAddrSet = totalGPUAddrSet.Union(gpus.addrSet) + } + // check if used gpu are lost + usedAddrs := vmcache.FetchDomainGPUAddrs() + + for domain, addrs := range usedAddrs { + addrsSet := mapset.NewSet[string](addrs...) 
+ diff := addrsSet.Difference(totalGPUAddrSet) + if diff.Cardinality() > 0 { + de := vmcache.FetchDomainEntry(domain) + if de == nil { + continue + } + lostGPUInfo := map[string]string{ + "node": configs.Hostname(), + "domain": domain, + "pci_addrs": strings.Join(diff.ToSlice(), ","), + "app_id": de.AppID, + "app_sid": de.AppSID, + "appname": de.AppName, + "ip": de.IP, + } + g.lostGPUCache.Set(domain, lostGPUInfo, cache.DefaultExpiration) + } + } + + g.mu.Lock() + g.gpuTypeMap = gpuMap + // don't use defer here,because the following GetRsource also need acquire locker + g.mu.Unlock() + + g.coreMgr.UpdateGPU(g.GetResource()) + } +} + +func fetchGPUInfoFromHardware() (map[string]*SingleTypeGPUs, error) { + pci, err := ghw.PCI() + if err != nil { + return nil, err + } + + cmdOut, err := execCommand("lshw", "-quiet", "-json", "-C", "display").Output() + if err != nil { + return nil, err + } + params := []map[string]any{} + if err = json.Unmarshal(cmdOut, ¶ms); err != nil { + return nil, err + } + // map format: + // + // product: + // pciaddr: gpu info + gpuMap := make(map[string]*SingleTypeGPUs) + excludePCIs := map[string]struct{}{} + for _, addr := range configs.Conf.Resource.ExcludePCIs { + excludePCIs[addr] = struct{}{} + } + for _, param := range params { + var addr string + if businfoRaw, ok := param["businfo"]; ok && businfoRaw != nil { + addr = strings.Split(businfoRaw.(string), "@")[1] + } else if handleRaw, ok := param["handle"]; ok && handleRaw != nil { + handle := handleRaw.(string) //nolint + idx := strings.Index(handle, ":") + addr = handle[idx+1:] + } + if addr == "" { + log.Warnf(context.TODO(), "Can't fetch PCI address from %v", param) + continue + } + if _, ok := excludePCIs[addr]; ok { + log.Warnf(context.TODO(), "Exclude PCI address %s", addr) + continue + } + deviceInfo := pci.GetDevice(addr) + var numa string + if deviceInfo != nil && deviceInfo.Node != nil { + numa = fmt.Sprintf("%d", deviceInfo.Node.ID) + } + info := types.GPUInfo{ + Address: addr, + Product: param["product"].(string), + Vendor: param["vendor"].(string), + NumaID: numa, + } + if strings.Contains(info.Vendor, "NVIDIA") || strings.Contains(info.Vendor, "AMD") { + prod := gpuNameMap[info.Product] + if prod == "" { + return nil, fmt.Errorf("unknown GPU product: %s", info.Product) + } + singleTypeGPUs := gpuMap[prod] + if singleTypeGPUs == nil { + singleTypeGPUs = &SingleTypeGPUs{ + gpuMap: make(map[string]types.GPUInfo), + addrSet: mapset.NewSet[string](), + } + } + singleTypeGPUs.gpuMap[info.Address] = info + singleTypeGPUs.addrSet.Add(info.Address) + gpuMap[prod] = singleTypeGPUs + } + } + return gpuMap, nil +} diff --git a/internal/eru/resources/gpu_name.json b/internal/eru/resources/gpu_name.json new file mode 100644 index 0000000..0873ed8 --- /dev/null +++ b/internal/eru/resources/gpu_name.json @@ -0,0 +1,8 @@ +{ + "GA104 [GeForce RTX 3070]": "nvidia-3070", + "GA104 [GeForce RTX 3070 Lite Hash Rate]": "nvidia-3070-lhr", + "GA102 [GeForce RTX 3090]": "nvidia-3090", + "NVIDIA Corporation": "nvidia-4090", + "GA104M [GeForce RTX 3070 Mobile / Max-Q]": "nvidia-3070m", + "Aldebaran": "amd-mi210" +} \ No newline at end of file diff --git a/internal/eru/resources/gpu_test.go b/internal/eru/resources/gpu_test.go new file mode 100644 index 0000000..136c4ab --- /dev/null +++ b/internal/eru/resources/gpu_test.go @@ -0,0 +1,274 @@ +package resources + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "testing" + + "github.com/samber/lo" + gputypes "github.com/yuyang0/resource-gpu/gpu/types" + + mapset 
"github.com/deckarep/golang-set/v2" + "github.com/projecteru2/yavirt/internal/types" + "github.com/stretchr/testify/assert" +) + +func TestGPUAlloc(t *testing.T) { + addrSet1 := mapset.NewSet[string]("0.0.0.0", "0.0.0.1", "0.0.0.2", "0.0.0.3") + addrSet2 := mapset.NewSet[string]("0.0.0.4", "0.0.0.5", "0.0.0.6", "0.0.0.7") + mgr := &GPUManager{ + gpuTypeMap: map[string]*SingleTypeGPUs{ + "nvidia-3090": { + gpuMap: map[string]types.GPUInfo{ + "0.0.0.0": { + Address: "0.0.0.0", + }, + "0.0.0.1": { + Address: "0.0.0.1", + }, + "0.0.0.2": { + Address: "0.0.0.2", + }, + "0.0.0.3": { + Address: "0.0.0.3", + }, + }, + addrSet: addrSet1, + }, + "nvidia-3070": { + gpuMap: map[string]types.GPUInfo{ + "0.0.0.4": { + Address: "0.0.0.4", + }, + "0.0.0.5": { + Address: "0.0.0.5", + }, + + "0.0.0.6": { + Address: "0.0.0.6", + }, + + "0.0.0.7": { + Address: "0.0.0.7", + }, + }, + addrSet: addrSet2, + }, + }, + } + // no used addrs + req := &gputypes.EngineParams{ + ProdCountMap: map[string]int{ + "nvidia-3090": 1, + "nvidia-3070": 2, + }, + } + usedAddrsSet := mapset.NewSet[string]() + ans, err := mgr.alloc(req, usedAddrsSet) + assert.Nil(t, err) + assert.Len(t, ans, 3) + + allocedAddrs := lo.Map(ans, func(info types.GPUInfo, _ int) string { + return info.Address + }) + addrSet := mapset.NewSet[string](allocedAddrs...) + diff1 := addrSet.Difference(addrSet2) + diff2 := addrSet.Difference(addrSet1) + assert.Equal(t, diff1.Cardinality(), 1) + assert.Equal(t, diff2.Cardinality(), 2) + + // have used addrs + usedAddrsSet = mapset.NewSet[string]("0.0.0.0", "0.0.0.3", "0.0.0.4", "0.0.0.5") + ans, err = mgr.alloc(req, usedAddrsSet) + assert.Nil(t, err) + assert.Len(t, ans, 3) + allocedAddrs = lo.Map(ans, func(info types.GPUInfo, _ int) string { + return info.Address + }) + addrSet = mapset.NewSet[string](allocedAddrs...) 
+ diff1 = addrSet.Difference(addrSet2) + diff2 = addrSet.Difference(addrSet1) + assert.Equal(t, diff1.Cardinality(), 1) + assert.Equal(t, diff2.Cardinality(), 2) + + assert.False(t, diff1.Contains("0.0.0.0")) + assert.False(t, diff1.Contains("0.0.0.3")) + assert.False(t, diff2.Contains("0.0.0.4")) + assert.False(t, diff2.Contains("0.0.0.5")) + + // no enough resource + req = &gputypes.EngineParams{ + ProdCountMap: map[string]int{ + "nvidia-3090": 1, + "nvidia-3070": 3, + }, + } + usedAddrsSet = mapset.NewSet[string]("0.0.0.0", "0.0.0.3", "0.0.0.4", "0.0.0.5") + ans, err = mgr.alloc(req, usedAddrsSet) + assert.Error(t, err) + req = &gputypes.EngineParams{ + ProdCountMap: map[string]int{ + "nvidia-3090": 2, + "nvidia-3070": 3, + }, + } + ans, err = mgr.alloc(req, usedAddrsSet) + assert.Error(t, err) + req = &gputypes.EngineParams{ + ProdCountMap: map[string]int{ + "nvidia-3090": 3, + "nvidia-3070": 3, + }, + } + ans, err = mgr.alloc(req, usedAddrsSet) + assert.Error(t, err) + req = &gputypes.EngineParams{ + ProdCountMap: map[string]int{ + "nvidia-3090": 3, + "nvidia-3070": 2, + }, + } + ans, err = mgr.alloc(req, usedAddrsSet) + assert.Error(t, err) + req = &gputypes.EngineParams{ + ProdCountMap: map[string]int{ + "nvidia-3090": 2, + "nvidia-3070": 2, + }, + } + ans, err = mgr.alloc(req, usedAddrsSet) + assert.Nil(t, err) +} + +func TestFetchGPU(t *testing.T) { + err := initGPUNameMap(nil) + assert.Nil(t, err) + execCommand = fakeExecCommand + defer func() { execCommand = exec.Command }() + + gpuMap, err := fetchGPUInfoFromHardware() + assert.Nil(t, err) + assert.Len(t, gpuMap, 3) + gpus3090, ok := gpuMap["nvidia-3090"] + assert.True(t, ok) + assert.Equal(t, gpus3090.addrSet.Cardinality(), 1) + assert.True(t, gpus3090.addrSet.Contains("0000:3d:00.0")) + + gpus3070, ok := gpuMap["nvidia-3070"] + assert.True(t, ok) + assert.Equal(t, gpus3070.addrSet.Cardinality(), 1) + assert.True(t, gpus3070.addrSet.Contains("0000:3c:00.0")) + gpuMapJSON, _ := json.Marshal(gpuMap) + t.Logf(string(gpuMapJSON)) + + mi210, ok := gpuMap["amd-mi210"] + assert.True(t, ok) + assert.Equal(t, mi210.addrSet.Cardinality(), 1) + assert.True(t, mi210.addrSet.Contains("0000:e3:00.0")) +} + +var fakeExecResult = `[ + { + "id" : "display", + "class" : "display", + "claimed" : true, + "handle" : "PCI:0000:3d:00.0", + "description" : "VGA compatible controller", + "product" : "GA102 [GeForce RTX 3090]", + "vendor" : "NVIDIA Corporation", + "physid" : "0", + "businfo" : "pci@0000:3d:00.0", + "version" : "a1", + "width" : 64, + "clock" : 33000000, + "configuration" : { + "driver" : "vfio-pci", + "latency" : "0" + }, + "capabilities" : { + "pm" : "Power Management", + "msi" : "Message Signalled Interrupts", + "pciexpress" : "PCI Express", + "vga_controller" : true, + "bus_master" : "bus mastering", + "cap_list" : "PCI capabilities listing", + "rom" : "extension ROM" + } + }, + { + "id" : "display", + "class" : "display", + "claimed" : true, + "handle" : "PCI:0000:3c:00.0", + "description" : "VGA compatible controller", + "product" : "GA104 [GeForce RTX 3070]", + "vendor" : "NVIDIA Corporation", + "physid" : "0", + "businfo" : "pci@0000:3c:00.0", + "version" : "a1", + "width" : 64, + "clock" : 33000000, + "configuration" : { + "driver" : "vfio-pci", + "latency" : "0" + }, + "capabilities" : { + "pm" : "Power Management", + "msi" : "Message Signalled Interrupts", + "pciexpress" : "PCI Express", + "vga_controller" : true, + "bus_master" : "bus mastering", + "cap_list" : "PCI capabilities listing", + "rom" : "extension ROM" + } + }, + 
{ + "id" : "display", + "class" : "display", + "claimed" : true, + "handle" : "PCI:0000:e3:00.0", + "description" : "Display controller", + "product" : "Aldebaran", + "vendor" : "Advanced Micro Devices, Inc. [AMD/ATI]", + "physid" : "0", + "businfo" : "pci@0000:e3:00.0", + "version" : "02", + "width" : 64, + "clock" : 33000000, + "configuration" : { + "driver" : "amdgpu", + "latency" : "0" + }, + "capabilities" : { + "pm" : "Power Management", + "pciexpress" : "PCI Express", + "msi" : "Message Signalled Interrupts", + "msix" : "MSI-X", + "bus_master" : "bus mastering", + "cap_list" : "PCI capabilities listing", + "rom" : "extension ROM" + } + } +]` + +func fakeExecCommand(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{ + "GO_WANT_HELPER_PROCESS=1", + fmt.Sprintf("GOCOVERDIR=%s", os.TempDir()), + } + return cmd +} + +func TestHelperProcess(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + // some code here to check arguments perhaps? + fmt.Fprintf(os.Stdout, fakeExecResult) + os.Exit(0) +} diff --git a/internal/eru/resources/manager.go b/internal/eru/resources/manager.go new file mode 100644 index 0000000..f920842 --- /dev/null +++ b/internal/eru/resources/manager.go @@ -0,0 +1,121 @@ +package resources + +import ( + "context" + "encoding/json" + "sync" + "testing" + + cpumemtypes "github.com/projecteru2/core/resource/plugins/cpumem/types" + stotypes "github.com/projecteru2/resource-storage/storage/types" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/eru/common" + "github.com/projecteru2/yavirt/internal/eru/store" + corestore "github.com/projecteru2/yavirt/internal/eru/store/core" + storemocks "github.com/projecteru2/yavirt/internal/eru/store/mocks" + intertypes "github.com/projecteru2/yavirt/internal/types" + bdtypes "github.com/yuyang0/resource-bandwidth/bandwidth/types" + gputypes "github.com/yuyang0/resource-gpu/gpu/types" +) + +var ( + mgr *Manager + cli store.Store +) + +type Manager struct { + cfg *configs.Config + coreMgr *CoreResourcesManager + + gpu *GPUManager + cpumem *CPUMemManager + sto *StorageManager + gpuLock sync.Mutex +} + +func (mgr *Manager) AllocGPU(req *gputypes.EngineParams) (ans []intertypes.GPUInfo, err error) { + return mgr.gpu.Alloc(req) +} + +func (mgr *Manager) LockGPU() { + mgr.gpuLock.Lock() +} + +func (mgr *Manager) UnlockGPU() { + mgr.gpuLock.Unlock() +} + +func (mgr *Manager) FetchResources() (map[string][]byte, error) { + cpumemBytes, err := json.Marshal(mgr.cpumem.cpumem) + if err != nil { + return nil, err + } + stoBytes, err := json.Marshal(mgr.sto.sto) + if err != nil { + return nil, err + } + gpusBytes, err := json.Marshal(mgr.gpu.GetResource()) + if err != nil { + return nil, err + } + bd := bdtypes.NodeResource{ + Bandwidth: mgr.cfg.Resource.Bandwidth, + } + bdBytes, _ := json.Marshal(bd) + + ans := map[string][]byte{ + intertypes.PluginNameCPUMem: cpumemBytes, + intertypes.PluginNameStorage: stoBytes, + intertypes.PluginNameGPU: gpusBytes, + intertypes.PluginNameBandwidth: bdBytes, + } + return ans, nil +} + +func (mgr *Manager) FetchCPUMem() *cpumemtypes.NodeResource { + return mgr.cpumem.cpumem +} + +func (mgr *Manager) FetchStorage() *stotypes.NodeResource { + return mgr.sto.sto +} + +func (mgr *Manager) FetchGPU() *gputypes.NodeResource { + return mgr.gpu.GetResource() +} + +func GetManager() *Manager { + return mgr +} + +func 
Setup(ctx context.Context, cfg *configs.Config, t *testing.T) (*Manager, error) { + if t == nil { + corestore.Init(ctx, &cfg.Eru) + if cli = corestore.Get(); cli == nil { + return nil, common.ErrGetStoreFailed + } + } else { + cli = storemocks.NewFakeStore() + } + coreMgr := NewCoreResourcesManager() + gpuMgr, err := NewGPUManager(ctx, cfg, coreMgr) + if err != nil { + return nil, err + } + cpumemMgr, err := NewCPUMemManager(coreMgr, cfg) + if err != nil { + return nil, err + } + stoMgr, err := newStorageManager() + if err != nil { + return nil, err + } + mgr = &Manager{ + cfg: cfg, + gpu: gpuMgr, + cpumem: cpumemMgr, + sto: stoMgr, + coreMgr: coreMgr, + } + return mgr, nil +} diff --git a/internal/eru/resources/metrics.go b/internal/eru/resources/metrics.go new file mode 100644 index 0000000..a1e0737 --- /dev/null +++ b/internal/eru/resources/metrics.go @@ -0,0 +1,44 @@ +package resources + +import "github.com/prometheus/client_golang/prometheus" + +var ( + lostGPUDesc = prometheus.NewDesc( + prometheus.BuildFQName("vm", "gpu", "lost"), + "Lost GPUs.", + []string{"node", "domain", "pci_addrs", "app_id", "app_sid", "appname", "ip"}, + nil) +) + +type MetricsCollector struct { + mgr *Manager +} + +func (mgr *Manager) GetMetricsCollector() *MetricsCollector { + return &MetricsCollector{ + mgr: mgr, + } +} + +func (e *MetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- lostGPUDesc +} + +func (e *MetricsCollector) Collect(ch chan<- prometheus.Metric) { + for _, v := range e.mgr.gpu.lostGPUCache.Items() { + gpuInfo, _ := v.Object.(map[string]string) + + ch <- prometheus.MustNewConstMetric( + lostGPUDesc, + prometheus.GaugeValue, + 1.0, + gpuInfo["node"], + gpuInfo["domain"], + gpuInfo["pci_addrs"], + gpuInfo["app_id"], + gpuInfo["app_sid"], + gpuInfo["appname"], + gpuInfo["ip"], + ) + } +} diff --git a/internal/eru/resources/storage.go b/internal/eru/resources/storage.go new file mode 100644 index 0000000..a6f2818 --- /dev/null +++ b/internal/eru/resources/storage.go @@ -0,0 +1,56 @@ +package resources + +import ( + "os/exec" + "path" + "strings" + + "github.com/dustin/go-humanize" + stotypes "github.com/projecteru2/resource-storage/storage/types" +) + +type StorageManager struct { + sto *stotypes.NodeResource +} + +func newStorageManager() (*StorageManager, error) { + sto, err := FetchStorage() + if err != nil { + return nil, err + } + return &StorageManager{ + sto: sto, + }, nil +} + +func FetchStorage() (*stotypes.NodeResource, error) { + ans := &stotypes.NodeResource{} + total := int64(0) + vols := stotypes.Volumes{} + // use df to fetch volume information + + cmdOut, err := exec.Command("df", "-h").Output() + if err != nil { + return nil, err + } + lines := strings.Split(string(cmdOut), "\n") + for _, line := range lines { + parts := strings.Fields(line) + if len(parts) != 6 { + continue + } + var size uint64 + size, err = humanize.ParseBytes(parts[1]) + if err != nil { + return nil, err + } + mountPoint := parts[len(parts)-1] + if path.Base(mountPoint) == "eru" { + vols[mountPoint] = int64(size) + total += int64(size) + } + } + ans.Volumes = vols + ans.Storage = total + return ans, nil +} diff --git a/internal/eru/store/core/client.go b/internal/eru/store/core/client.go new file mode 100644 index 0000000..0047323 --- /dev/null +++ b/internal/eru/store/core/client.go @@ -0,0 +1,71 @@ +package core + +import ( + "context" + "sync" + "time" + + "github.com/projecteru2/core/client" + pb "github.com/projecteru2/core/rpc/gen" + coretypes "github.com/projecteru2/core/types" + 
"github.com/projecteru2/yavirt/internal/eru/types" + + "github.com/patrickmn/go-cache" + "github.com/projecteru2/core/log" +) + +// Store use core to store meta +type Store struct { + clientPool *client.Pool + config *types.Config + cache *cache.Cache +} + +var coreStore *Store +var once sync.Once + +// New new a Store +func New(ctx context.Context, config *types.Config) (*Store, error) { + auth := coretypes.AuthConfig{ + Username: config.Username, + Password: config.Password, + } + clientPoolConfig := &client.PoolConfig{ + EruAddrs: config.Addrs, + Auth: auth, + ConnectionTimeout: config.GlobalConnectionTimeout, + } + clientPool, err := client.NewCoreRPCClientPool(ctx, clientPoolConfig) + if err != nil { + return nil, err + } + cache := cache.New(time.Duration(config.HealthCheck.CacheTTL)*time.Second, 24*time.Hour) + return &Store{clientPool, config, cache}, nil +} + +// GetClient returns a gRPC client +func (c *Store) GetClient() pb.CoreRPCClient { + return c.clientPool.GetClient() +} + +// Init inits the core store only once +func Init(ctx context.Context, config *types.Config) { + once.Do(func() { + var err error + coreStore, err = New(ctx, config) + if err != nil { + log.WithFunc("core.client").Error(ctx, err, "failed to create core store") + return + } + }) +} + +// Get returns the core store instance +func Get() *Store { + return coreStore +} + +func (c *Store) CheckHealth(ctx context.Context) error { + _, err := c.GetClient().Info(ctx, &pb.Empty{}) + return err +} diff --git a/internal/eru/store/core/identifier.go b/internal/eru/store/core/identifier.go new file mode 100644 index 0000000..7879114 --- /dev/null +++ b/internal/eru/store/core/identifier.go @@ -0,0 +1,21 @@ +package core + +import ( + "context" + + pb "github.com/projecteru2/core/rpc/gen" + "github.com/projecteru2/yavirt/internal/utils" +) + +// GetIdentifier returns the identifier of core +func (c *Store) GetIdentifier(ctx context.Context) string { + var resp *pb.CoreInfo + var err error + utils.WithTimeout(ctx, c.config.GlobalConnectionTimeout, func(ctx context.Context) { + resp, err = c.GetClient().Info(ctx, &pb.Empty{}) + }) + if err != nil { + return "" + } + return resp.Identifier +} diff --git a/internal/eru/store/core/node.go b/internal/eru/store/core/node.go new file mode 100644 index 0000000..16e1709 --- /dev/null +++ b/internal/eru/store/core/node.go @@ -0,0 +1,245 @@ +package core + +import ( + "context" + "encoding/json" + "errors" + "io" + + "github.com/projecteru2/core/log" + resourcetypes "github.com/projecteru2/core/resource/types" + pb "github.com/projecteru2/core/rpc/gen" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/utils" + "github.com/samber/lo" +) + +// GetNode return a node by core +func (c *Store) GetNode(ctx context.Context, nodename string) (*types.Node, error) { + var resp *pb.Node + var err error + + utils.WithTimeout(ctx, c.config.GlobalConnectionTimeout, func(ctx context.Context) { + resp, err = c.GetClient().GetNode(ctx, &pb.GetNodeOptions{Nodename: nodename}) + }) + + if err != nil { + return nil, err + } + + node := &types.Node{ + Name: resp.Name, + Podname: resp.Podname, + Endpoint: resp.Endpoint, + Available: resp.Available, + } + return node, nil +} + +// SetNodeStatus reports the status of node +// SetNodeStatus always reports alive status, +// when not alive, TTL will cause expiration of node +func (c *Store) SetNodeStatus(ctx context.Context, ttl int64) error { + opts := &pb.SetNodeStatusOptions{ + Nodename: c.config.Hostname, + 
Ttl: ttl, + } + var err error + utils.WithTimeout(ctx, c.config.GlobalConnectionTimeout, func(ctx context.Context) { + _, err = c.GetClient().SetNodeStatus(ctx, opts) + }) + + return err +} + +// GetNodeStatus gets the status of node +func (c *Store) GetNodeStatus(ctx context.Context, nodename string) (*types.NodeStatus, error) { + var resp *pb.NodeStatusStreamMessage + var err error + + utils.WithTimeout(ctx, c.config.GlobalConnectionTimeout, func(ctx context.Context) { + resp, err = c.GetClient().GetNodeStatus(ctx, &pb.GetNodeStatusOptions{Nodename: nodename}) + }) + + if err != nil { + return nil, err + } + + if resp.Error != "" { + err = errors.New(resp.Error) + } + + status := &types.NodeStatus{ + Nodename: resp.Nodename, + Podname: resp.Podname, + Alive: resp.Alive, + Error: err, + } + return status, nil +} + +// NodeStatusStream watches the changes of node status +func (c *Store) NodeStatusStream(ctx context.Context) (<-chan *types.NodeStatus, <-chan error) { + msgChan := make(chan *types.NodeStatus) + errChan := make(chan error) + + _ = utils.Pool.Submit(func() { + defer close(msgChan) + defer close(errChan) + + client, err := c.GetClient().NodeStatusStream(ctx, &pb.Empty{}) + if err != nil { + errChan <- err + return + } + + for { + message, err := client.Recv() + if err != nil { + errChan <- err + return + } + nodeStatus := &types.NodeStatus{ + Nodename: message.Nodename, + Podname: message.Podname, + Alive: message.Alive, + Error: nil, + } + if message.Error != "" { + nodeStatus.Error = errors.New(message.Error) + } + msgChan <- nodeStatus + } + }) + + return msgChan, errChan +} + +// ListPodNodes list nodes by given conditions, note that not all the fields are filled. +func (c *Store) ListPodNodes(ctx context.Context, all bool, podname string, labels map[string]string) ([]*types.Node, error) { + ch, err := c.listPodeNodes(ctx, &pb.ListNodesOptions{ + Podname: podname, + All: all, + Labels: labels, + }) + if err != nil { + return nil, err + } + + nodes := []*types.Node{} + for n := range ch { + nodes = append(nodes, &types.Node{ + Name: n.Name, + Endpoint: n.Endpoint, + Podname: n.Podname, + Labels: n.Labels, + }) + } + return nodes, nil +} + +func (c *Store) listPodeNodes(ctx context.Context, opt *pb.ListNodesOptions) (ch chan *pb.Node, err error) { + ch = make(chan *pb.Node) + + utils.WithTimeout(ctx, c.config.GlobalConnectionTimeout, func(ctx context.Context) { + var stream pb.CoreRPC_ListPodNodesClient + if stream, err = c.GetClient().ListPodNodes(ctx, opt); err != nil { + return + } + + _ = utils.Pool.Submit(func() { + defer close(ch) + for { + node, err := stream.Recv() + if err != nil { + if err != io.EOF { //nolint:nolintlint + log.WithFunc("listPodeNodes").Error(ctx, err, "get node stream failed") + } + return + } + ch <- node + } + }) + }) + + return ch, nil +} + +func (c *Store) ListNodeWorkloads(ctx context.Context, nodename string) ([]*types.Workload, error) { + opts := &pb.GetNodeOptions{ + Nodename: nodename, + } + wrks, err := c.GetClient().ListNodeWorkloads(ctx, opts) + if err != nil { + return nil, err + } + ans := lo.Map(wrks.Workloads, func(w *pb.Workload, _ int) *types.Workload { + return &types.Workload{ + ID: w.Id, + } + }) + return ans, nil +} + +func (c *Store) GetNodeResource(ctx context.Context, nodename string) (*types.NodeResource, error) { + resp, err := c.GetClient().GetNodeResource(ctx, &pb.GetNodeResourceOptions{ + Opts: &pb.GetNodeOptions{ + Nodename: nodename, + }, + }) + if err != nil { + return nil, err + } + capacity := 
resourcetypes.Resources{} + if err = json.Unmarshal([]byte(resp.ResourceCapacity), &capacity); err != nil { + return nil, err + } + return &types.NodeResource{ + Capacity: capacity, + }, nil +} + +func (c *Store) SetNode(ctx context.Context, opts *types.SetNodeOpts) (*types.Node, error) { + resp, err := c.GetClient().SetNode(ctx, &pb.SetNodeOptions{ + Nodename: opts.Nodename, + Endpoint: opts.Endpoint, + Ca: opts.Ca, + Cert: opts.Cert, + Key: opts.Key, + Labels: opts.Labels, + Resources: opts.Resources, + Delta: opts.Delta, + WorkloadsDown: opts.WorkloadsDown, + }) + if err != nil { + return nil, err + } + return &types.Node{ + Name: resp.Name, + Podname: resp.Podname, + Endpoint: resp.Endpoint, + Available: resp.Available, + }, nil +} + +func (c *Store) AddNode(ctx context.Context, opts *types.AddNodeOpts) (*types.Node, error) { + resp, err := c.GetClient().AddNode(ctx, &pb.AddNodeOptions{ + Nodename: opts.Nodename, + Endpoint: opts.Endpoint, + Podname: opts.Podname, + Ca: opts.Ca, + Cert: opts.Cert, + Key: opts.Key, + Labels: opts.Labels, + Resources: opts.Resources, + }) + if err != nil { + return nil, err + } + return &types.Node{ + Name: resp.Name, + Podname: resp.Podname, + Endpoint: resp.Endpoint, + Available: resp.Available, + }, nil +} diff --git a/internal/eru/store/core/workload.go b/internal/eru/store/core/workload.go new file mode 100644 index 0000000..a089afd --- /dev/null +++ b/internal/eru/store/core/workload.go @@ -0,0 +1,81 @@ +package core + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + "time" + + pb "github.com/projecteru2/core/rpc/gen" + virttypes "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/internal/eru/types" + "github.com/projecteru2/yavirt/internal/utils" +) + +func getCacheTTL(ttl int64) time.Duration { + n, _ := rand.Int(rand.Reader, big.NewInt(ttl)) + delta := n.Int64() / 4 + ttl = ttl - ttl/8 + delta + return time.Duration(ttl) * time.Second +} + +// SetWorkloadStatus deploy containers +func (c *Store) SetWorkloadStatus(ctx context.Context, status *types.WorkloadStatus, ttl int64) error { + workloadStatus := fmt.Sprintf("%+v", status) + if ttl == 0 { + cached, ok := c.cache.Get(status.ID) + if ok { + str := cached.(string) //nolint + if str == workloadStatus { + return nil + } + } + } + + statusPb := &pb.WorkloadStatus{ + Id: virttypes.EruID(status.ID), + Running: status.Running, + Healthy: status.Healthy, + Networks: status.Networks, + Extension: status.Extension, + Ttl: ttl, + + Appname: status.Appname, + Entrypoint: status.Entrypoint, + Nodename: c.config.Hostname, + } + + opts := &pb.SetWorkloadsStatusOptions{ + Status: []*pb.WorkloadStatus{statusPb}, + } + + var err error + utils.WithTimeout(ctx, c.config.GlobalConnectionTimeout, func(ctx context.Context) { + _, err = c.GetClient().SetWorkloadsStatus(ctx, opts) + }) + + if ttl == 0 { + if err != nil { + c.cache.Delete(status.ID) + } else { + c.cache.Set(status.ID, workloadStatus, getCacheTTL(c.config.HealthCheck.CacheTTL)) + } + } + + return err +} + +func (c *Store) GetWorkload(ctx context.Context, id string) (*types.Workload, error) { + opts := &pb.WorkloadID{ + Id: virttypes.EruID(id), + } + wrk, err := c.GetClient().GetWorkload(ctx, opts) + if err != nil { + return nil, err + } + ans := &types.Workload{ + ID: wrk.Id, + } + return ans, nil +} diff --git a/internal/eru/store/mocks/Store.go b/internal/eru/store/mocks/Store.go new file mode 100644 index 0000000..1b5345e --- /dev/null +++ b/internal/eru/store/mocks/Store.go @@ -0,0 +1,374 @@ +// Code generated by 
mockery v2.42.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/projecteru2/yavirt/internal/eru/types" +) + +// Store is an autogenerated mock type for the Store type +type Store struct { + mock.Mock +} + +// AddNode provides a mock function with given fields: ctx, opts +func (_m *Store) AddNode(ctx context.Context, opts *types.AddNodeOpts) (*types.Node, error) { + ret := _m.Called(ctx, opts) + + if len(ret) == 0 { + panic("no return value specified for AddNode") + } + + var r0 *types.Node + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.AddNodeOpts) (*types.Node, error)); ok { + return rf(ctx, opts) + } + if rf, ok := ret.Get(0).(func(context.Context, *types.AddNodeOpts) *types.Node); ok { + r0 = rf(ctx, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Node) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.AddNodeOpts) error); ok { + r1 = rf(ctx, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckHealth provides a mock function with given fields: ctx +func (_m *Store) CheckHealth(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for CheckHealth") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetIdentifier provides a mock function with given fields: ctx +func (_m *Store) GetIdentifier(ctx context.Context) string { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetIdentifier") + } + + var r0 string + if rf, ok := ret.Get(0).(func(context.Context) string); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// GetNode provides a mock function with given fields: ctx, nodename +func (_m *Store) GetNode(ctx context.Context, nodename string) (*types.Node, error) { + ret := _m.Called(ctx, nodename) + + if len(ret) == 0 { + panic("no return value specified for GetNode") + } + + var r0 *types.Node + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*types.Node, error)); ok { + return rf(ctx, nodename) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *types.Node); ok { + r0 = rf(ctx, nodename) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Node) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, nodename) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNodeResource provides a mock function with given fields: ctx, nodename +func (_m *Store) GetNodeResource(ctx context.Context, nodename string) (*types.NodeResource, error) { + ret := _m.Called(ctx, nodename) + + if len(ret) == 0 { + panic("no return value specified for GetNodeResource") + } + + var r0 *types.NodeResource + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*types.NodeResource, error)); ok { + return rf(ctx, nodename) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *types.NodeResource); ok { + r0 = rf(ctx, nodename) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.NodeResource) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, nodename) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNodeStatus provides a mock function with given fields: ctx, nodename +func (_m *Store) GetNodeStatus(ctx context.Context, nodename string) 
(*types.NodeStatus, error) { + ret := _m.Called(ctx, nodename) + + if len(ret) == 0 { + panic("no return value specified for GetNodeStatus") + } + + var r0 *types.NodeStatus + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*types.NodeStatus, error)); ok { + return rf(ctx, nodename) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *types.NodeStatus); ok { + r0 = rf(ctx, nodename) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.NodeStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, nodename) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetWorkload provides a mock function with given fields: ctx, id +func (_m *Store) GetWorkload(ctx context.Context, id string) (*types.Workload, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetWorkload") + } + + var r0 *types.Workload + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*types.Workload, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *types.Workload); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Workload) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListNodeWorkloads provides a mock function with given fields: ctx, nodename +func (_m *Store) ListNodeWorkloads(ctx context.Context, nodename string) ([]*types.Workload, error) { + ret := _m.Called(ctx, nodename) + + if len(ret) == 0 { + panic("no return value specified for ListNodeWorkloads") + } + + var r0 []*types.Workload + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]*types.Workload, error)); ok { + return rf(ctx, nodename) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []*types.Workload); ok { + r0 = rf(ctx, nodename) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Workload) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, nodename) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListPodNodes provides a mock function with given fields: ctx, all, podname, labels +func (_m *Store) ListPodNodes(ctx context.Context, all bool, podname string, labels map[string]string) ([]*types.Node, error) { + ret := _m.Called(ctx, all, podname, labels) + + if len(ret) == 0 { + panic("no return value specified for ListPodNodes") + } + + var r0 []*types.Node + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, bool, string, map[string]string) ([]*types.Node, error)); ok { + return rf(ctx, all, podname, labels) + } + if rf, ok := ret.Get(0).(func(context.Context, bool, string, map[string]string) []*types.Node); ok { + r0 = rf(ctx, all, podname, labels) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Node) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, bool, string, map[string]string) error); ok { + r1 = rf(ctx, all, podname, labels) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeStatusStream provides a mock function with given fields: ctx +func (_m *Store) NodeStatusStream(ctx context.Context) (<-chan *types.NodeStatus, <-chan error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for NodeStatusStream") + } + + var r0 <-chan *types.NodeStatus + var r1 <-chan error + if rf, ok := ret.Get(0).(func(context.Context) 
(<-chan *types.NodeStatus, <-chan error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.NodeStatus); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *types.NodeStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) <-chan error); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + return r0, r1 +} + +// SetNode provides a mock function with given fields: ctx, opts +func (_m *Store) SetNode(ctx context.Context, opts *types.SetNodeOpts) (*types.Node, error) { + ret := _m.Called(ctx, opts) + + if len(ret) == 0 { + panic("no return value specified for SetNode") + } + + var r0 *types.Node + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.SetNodeOpts) (*types.Node, error)); ok { + return rf(ctx, opts) + } + if rf, ok := ret.Get(0).(func(context.Context, *types.SetNodeOpts) *types.Node); ok { + r0 = rf(ctx, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Node) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.SetNodeOpts) error); ok { + r1 = rf(ctx, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetNodeStatus provides a mock function with given fields: ctx, ttl +func (_m *Store) SetNodeStatus(ctx context.Context, ttl int64) error { + ret := _m.Called(ctx, ttl) + + if len(ret) == 0 { + panic("no return value specified for SetNodeStatus") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, ttl) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetWorkloadStatus provides a mock function with given fields: ctx, status, ttl +func (_m *Store) SetWorkloadStatus(ctx context.Context, status *types.WorkloadStatus, ttl int64) error { + ret := _m.Called(ctx, status, ttl) + + if len(ret) == 0 { + panic("no return value specified for SetWorkloadStatus") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.WorkloadStatus, int64) error); ok { + r0 = rf(ctx, status, ttl) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStore(t interface { + mock.TestingT + Cleanup(func()) +}) *Store { + mock := &Store{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/eru/store/mocks/fake.go b/internal/eru/store/mocks/fake.go new file mode 100644 index 0000000..fdfbd95 --- /dev/null +++ b/internal/eru/store/mocks/fake.go @@ -0,0 +1,191 @@ +package mocks + +import ( + "context" + "sync" + + "github.com/alphadose/haxmap" + "github.com/projecteru2/core/log" + "github.com/stretchr/testify/mock" + + resourcetypes "github.com/projecteru2/core/resource/types" + virttypes "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/internal/eru/common" + "github.com/projecteru2/yavirt/internal/eru/store" + "github.com/projecteru2/yavirt/internal/eru/types" +) + +// MockStore . 
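+// It embeds the generated Store mock and keeps workloads, workload status, node status and node info in haxmap instances, so tests can inspect what the fake store has recorded.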
+type MockStore struct { + Store + sync.Mutex + workloads *haxmap.Map[string, *types.Workload] + workloadStatus *haxmap.Map[string, *types.WorkloadStatus] // map[string]*types.WorkloadStatus + nodeStatus *haxmap.Map[string, *types.NodeStatus] // map[string]*types.NodeStatus + nodeInfo *haxmap.Map[string, *types.Node] // map[string]*types.Node + msgChan chan *types.NodeStatus + errChan chan error +} + +func (m *MockStore) init() { + m.workloads = haxmap.New[string, *types.Workload]() + m.workloadStatus = haxmap.New[string, *types.WorkloadStatus]() + m.nodeStatus = haxmap.New[string, *types.NodeStatus]() + m.nodeInfo = haxmap.New[string, *types.Node]() + m.msgChan = make(chan *types.NodeStatus) + m.errChan = make(chan error) + + m.nodeInfo.Set("fake", &types.Node{ + Name: "fake", + Endpoint: "eva://127.0.0.1:6666", + }) + m.nodeInfo.Set("faker", &types.Node{ + Name: "faker", + Endpoint: "eva://127.0.0.1:6667", + }) +} + +// NewFakeStore returns a mock store instance created from mock +func NewFakeStore() store.Store { + logger := log.WithFunc("fakestore") + m := &MockStore{} + m.init() + m.On("CheckHealth", mock.Anything).Return(nil) + m.On("GetNode", mock.Anything, mock.Anything).Return(func(ctx context.Context, nodename string) *types.Node { + m.Lock() + defer m.Unlock() + node, ok := m.nodeInfo.Get(nodename) + if !ok { + return nil + } + return &types.Node{ + Name: node.Name, + Available: node.Available, + } + }, nil) + m.On("SetNodeStatus", mock.Anything, mock.Anything).Return(func(ctx context.Context, ttl int64) error { + logger.Info(ctx, "set node status") + nodename := "fake" + m.Lock() + defer m.Unlock() + if status, ok := m.nodeStatus.Get(nodename); ok { + status.Alive = true + } else { + m.nodeStatus.Set(nodename, &types.NodeStatus{ + Nodename: nodename, + Alive: true, + }) + } + return nil + }) + m.On("GetNodeStatus", mock.Anything, mock.Anything).Return(func(ctx context.Context, nodename string) *types.NodeStatus { + m.Lock() + defer m.Unlock() + if status, ok := m.nodeStatus.Get(nodename); ok { + return &types.NodeStatus{ + Nodename: status.Nodename, + Alive: status.Alive, + } + } + return &types.NodeStatus{ + Nodename: nodename, + Alive: false, + } + }, nil) + m.On("AddNode", mock.Anything, mock.Anything).Return(func(ctx context.Context, opts *types.AddNodeOpts) (*types.Node, error) { + m.Lock() + defer m.Unlock() + m.nodeInfo.Set(opts.Nodename, &types.Node{ + Name: opts.Nodename, + Endpoint: opts.Endpoint, + }) + return &types.Node{ + Name: opts.Nodename, + Endpoint: opts.Endpoint, + }, nil + }) + m.On("SetNode", mock.Anything, mock.Anything).Return(func(ctx context.Context, opts *types.SetNodeOpts) (*types.Node, error) { + m.Lock() + defer m.Unlock() + m.nodeInfo.Set(opts.Nodename, &types.Node{ + Name: opts.Nodename, + Endpoint: opts.Endpoint, + Labels: opts.Labels, + }) + return &types.Node{ + Name: opts.Nodename, + Endpoint: opts.Endpoint, + Labels: opts.Labels, + }, nil + }) + m.On("SetWorkloadStatus", mock.Anything, mock.Anything, mock.Anything).Return(func(ctx context.Context, status *types.WorkloadStatus, ttl int64) error { + logger.Infof(ctx, "set workload status: %+v\n", status) + m.workloadStatus.Set(status.ID, status) + return nil + }) + m.On("GetIdentifier", mock.Anything).Return("fake-identifier") + m.On("NodeStatusStream", mock.Anything).Return(func(ctx context.Context) <-chan *types.NodeStatus { + return m.msgChan + }, func(ctx context.Context) <-chan error { + return m.errChan + }) + m.On("ListPodNodes", mock.Anything, mock.Anything, mock.Anything, 
mock.Anything).Return([]*types.Node{ + { + Name: "fake", + }, + { + Name: "faker", + }, + }, nil) + + m.On("ListNodeWorkloads", mock.Anything, mock.Anything).Return([]*types.Workload{ + { + ID: virttypes.EruID("00033017009174384208170000000001"), + }, + { + ID: virttypes.EruID("00033017009174384208170000000002"), + }, + }, nil) + // TODO + m.On("GetNodeResource", mock.Anything, mock.Anything).Return(&types.NodeResource{ + Capacity: resourcetypes.Resources{ + "cpumem": nil, + }, + }, nil) + // m.On("GetWorkload", mock.Anything, mock.Anything).Return(func(ctx context.Context, ID string) (*types.Workload, error) { + // m.Lock() + // defer m.Unlock() + // wrk, ok := m.workloads.Get(ID) + // if !ok { + // return nil, status.Error(1051, "entity count invalid") + // } + // return wrk, nil + // }) + return m +} + +// GetMockWorkloadStatus returns the mock workload status by ID +func (m *MockStore) GetMockWorkloadStatus(ID string) *types.WorkloadStatus { + status, ok := m.workloadStatus.Get(ID) + if !ok { + return nil + } + return status +} + +// StartNodeStatusStream "faker" up, "fake" down. +func (m *MockStore) StartNodeStatusStream() { + m.msgChan <- &types.NodeStatus{ + Nodename: "faker", + Alive: true, + } + m.msgChan <- &types.NodeStatus{ + Nodename: "fake", + Alive: false, + } +} + +// StopNodeStatusStream send an err to errChan. +func (m *MockStore) StopNodeStatusStream() { + m.errChan <- common.ErrClosedSteam +} diff --git a/internal/eru/store/store.go b/internal/eru/store/store.go new file mode 100644 index 0000000..8da416c --- /dev/null +++ b/internal/eru/store/store.go @@ -0,0 +1,24 @@ +package store + +import ( + "context" + + "github.com/projecteru2/yavirt/internal/eru/types" +) + +// Store wrapper of remote calls +type Store interface { //nolint + CheckHealth(ctx context.Context) error + GetNode(ctx context.Context, nodename string) (*types.Node, error) + SetNodeStatus(ctx context.Context, ttl int64) error + GetNodeStatus(ctx context.Context, nodename string) (*types.NodeStatus, error) + AddNode(ctx context.Context, opts *types.AddNodeOpts) (*types.Node, error) + SetNode(ctx context.Context, opts *types.SetNodeOpts) (*types.Node, error) + SetWorkloadStatus(ctx context.Context, status *types.WorkloadStatus, ttl int64) error + GetIdentifier(ctx context.Context) string + NodeStatusStream(ctx context.Context) (<-chan *types.NodeStatus, <-chan error) + ListPodNodes(ctx context.Context, all bool, podname string, labels map[string]string) ([]*types.Node, error) + ListNodeWorkloads(ctx context.Context, nodename string) ([]*types.Workload, error) + GetNodeResource(ctx context.Context, nodename string) (*types.NodeResource, error) + GetWorkload(ctx context.Context, id string) (*types.Workload, error) +} diff --git a/internal/eru/types/config.go b/internal/eru/types/config.go new file mode 100644 index 0000000..008316b --- /dev/null +++ b/internal/eru/types/config.go @@ -0,0 +1,8 @@ +package types + +import ( + "github.com/projecteru2/yavirt/configs" +) + +type Config = configs.EruConfig +type HealthCheckConfig = configs.HealthCheckConfig diff --git a/internal/eru/types/log.go b/internal/eru/types/log.go new file mode 100644 index 0000000..a2a11ce --- /dev/null +++ b/internal/eru/types/log.go @@ -0,0 +1,26 @@ +package types + +import ( + "bufio" + "net" +) + +// Log for log +type Log struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + EntryPoint string `json:"entrypoint"` + Ident string `json:"ident"` + Data string `json:"data"` + Datetime string 
`json:"datetime"` + Extra map[string]string `json:"extra"` +} + +// LogConsumer for log consumer +type LogConsumer struct { + ID string + App string + Conn net.Conn + Buf *bufio.ReadWriter +} diff --git a/internal/eru/types/message.go b/internal/eru/types/message.go new file mode 100644 index 0000000..e52b09d --- /dev/null +++ b/internal/eru/types/message.go @@ -0,0 +1,9 @@ +package types + +// WorkloadEventMessage . +type WorkloadEventMessage struct { + ID string + Type string + Action string + TimeNano int64 +} diff --git a/internal/eru/types/node.go b/internal/eru/types/node.go new file mode 100644 index 0000000..1d7fae7 --- /dev/null +++ b/internal/eru/types/node.go @@ -0,0 +1,53 @@ +package types + +import ( + resourcetypes "github.com/projecteru2/core/resource/types" +) + +// Node . +type Node struct { + Name string + Endpoint string + Podname string + Labels map[string]string + Available bool +} + +// NodeStatus . +type NodeStatus struct { + Nodename string + Podname string + Alive bool + Error error +} + +type NodeResource struct { + Capacity resourcetypes.Resources +} + +type Workload struct { + ID string +} + +type SetNodeOpts struct { + Nodename string + Endpoint string + Ca string + Cert string + Key string + Labels map[string]string + Resources map[string][]byte + Delta bool + WorkloadsDown bool +} + +type AddNodeOpts struct { + Nodename string + Endpoint string + Podname string + Ca string + Cert string + Key string + Labels map[string]string + Resources map[string][]byte +} diff --git a/internal/eru/types/workload.go b/internal/eru/types/workload.go new file mode 100644 index 0000000..a16ff67 --- /dev/null +++ b/internal/eru/types/workload.go @@ -0,0 +1,13 @@ +package types + +// WorkloadStatus . +type WorkloadStatus struct { + ID string + Running bool + Healthy bool + Networks map[string]string + Extension []byte + Appname string + Nodename string + Entrypoint string +} diff --git a/internal/meta/const.go b/internal/meta/const.go new file mode 100644 index 0000000..98edb8b --- /dev/null +++ b/internal/meta/const.go @@ -0,0 +1,54 @@ +package meta + +const ( + // StatusPending . + StatusPending = "pending" + // StatusCreating . + StatusCreating = "creating" + // StatusStarting . + StatusStarting = "starting" + // StatusRunning . + StatusRunning = "running" + // StatusStopping . + StatusStopping = "stopping" + // StatusStopped . + StatusStopped = "stopped" + // StatusMigrating . + StatusMigrating = "migrating" + // StatusResizing . + StatusResizing = "resizing" + // StatusCapturing . + StatusCapturing = "capturing" + // StatusCaptured . + StatusCaptured = "captured" + // StatusDestroying . + StatusDestroying = "destroying" + // StatusDestroyed . + StatusDestroyed = "destroyed" + // StatusPausing . + StatusPausing = "pausing" + // StatusPaused . + StatusPaused = "paused" + // StatusResuming . + StatusResuming = "resuming" + // StatusFreeze . + StatusFreeze = "frozen" + // StatusThaw . + StatusThaw = "thawed" +) + +// AllStatuses . 
+var AllStatuses = []string{ + StatusPending, + StatusCreating, + StatusStarting, + StatusRunning, + StatusStopping, + StatusStopped, + StatusCapturing, + StatusCaptured, + StatusMigrating, + StatusResizing, + StatusDestroying, + StatusDestroyed, +} diff --git a/internal/models/generic.go b/internal/meta/generic.go similarity index 59% rename from internal/models/generic.go rename to internal/meta/generic.go index 47046f4..838faa0 100644 --- a/internal/models/generic.go +++ b/internal/meta/generic.go @@ -1,46 +1,71 @@ -package models +package meta import ( "path/filepath" "time" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/pkg/terrors" ) // Generic . type Generic struct { - ID string `json:"id,omitempty"` - Status string `json:"status"` - CreatedTime int64 `json:"create_time"` - UpdatedTime int64 `json:"update_time,omitempty"` - MigratedTime int64 `json:"migrate_time,omitempty"` + ID string `json:"id,omitempty" mapstructure:"id"` + Status string `json:"status" mapstructure:"status"` + CreatedTime int64 `json:"create_time" mapstructure:"create_time"` + UpdatedTime int64 `json:"update_time,omitempty" mapstructure:"update_time"` + MigratedTime int64 `json:"migrate_time,omitempty" mapstructure:"migrate_time"` - *meta.Ver + *Ver } -func newGeneric() *Generic { +type GenericInterface interface { + Resource + GetID() string + GetCreatedTime() int64 + SetStatus(st string, force bool) error + GetStatus() string +} + +func NewGeneric() *Generic { return &Generic{ Status: StatusPending, CreatedTime: time.Now().Unix(), - Ver: meta.NewVer(), + Ver: NewVer(), } } +func (g *Generic) GetID() string { + return g.ID +} + +func (g *Generic) GetCreatedTime() int64 { + return g.CreatedTime +} + +// MetaKey . +func (g *Generic) MetaKey() string { + return VolumeKey(g.ID) +} + // JoinVirtPath . func (g *Generic) JoinVirtPath(elem string) string { return filepath.Join(configs.Conf.VirtDir, elem) } -func (g *Generic) setStatus(st string, force bool) error { +func (g *Generic) SetStatus(st string, force bool) error { if !(force || g.checkStatus(st)) { - return errors.Annotatef(errors.ErrForwardStatus, "%s => %s", g.Status, st) + return errors.Wrapf(terrors.ErrForwardStatus, "%s => %s", g.Status, st) } g.Status = st return nil } +func (g *Generic) GetStatus() string { + return g.Status +} + // CheckForwardStatus . func (g *Generic) CheckForwardStatus(next string) bool { return g.checkStatus(next) @@ -76,7 +101,7 @@ func (g *Generic) checkStatus(next string) bool { return now == StatusStopped || now == StatusRunning case StatusRunning: - return now == StatusStarting || now == StatusResuming + return now == StatusStarting || now == StatusResuming || now == StatusRunning case StatusPaused: return now == StatusPausing diff --git a/internal/meta/ip.go b/internal/meta/ip.go index 18a7e34..e933fee 100644 --- a/internal/meta/ip.go +++ b/internal/meta/ip.go @@ -4,7 +4,7 @@ import ( "fmt" "net" - "github.com/projecteru2/yavirt/internal/vnet/device" + "github.com/projecteru2/yavirt/internal/network/utils/device" "github.com/projecteru2/yavirt/pkg/netx" ) @@ -66,6 +66,10 @@ func (ipn IPNet) GatewayIPNet() (*net.IPNet, error) { return netx.ParseCIDR2(ipn.GatewayCIDR()) } +func (ipn IPNet) GatewayAddr() string { + return netx.IntToIPv4(ipn.IntGateway) +} + // GatewayCIDR . 
func (ipn IPNet) GatewayCIDR() string { return fmt.Sprintf("%s/%d", netx.IntToIPv4(ipn.IntGateway), ipn.GatewayPrefix) diff --git a/internal/meta/keys.go b/internal/meta/keys.go index d2d0f5a..567aa78 100644 --- a/internal/meta/keys.go +++ b/internal/meta/keys.go @@ -22,7 +22,7 @@ const ( // HostCounterKey //hosts:counter func HostCounterKey() string { - return filepath.Join(configs.Conf.EtcdPrefix, fmt.Sprintf("%s:counter", hostPrefix)) + return filepath.Join(configs.Conf.Etcd.Prefix, fmt.Sprintf("%s:counter", hostPrefix)) } // HostGuestKey //hosts// @@ -37,7 +37,7 @@ func HostGuestsPrefix(name string) string { // HostKey //hosts/ func HostKey(name string) string { - return filepath.Join(configs.Conf.EtcdPrefix, hostPrefix, name) + return filepath.Join(configs.Conf.Etcd.Prefix, hostPrefix, name) } // GuestKey //guests/ @@ -47,16 +47,16 @@ func GuestKey(id string) string { // GuestsPrefix //guests/ func GuestsPrefix() string { - return fmt.Sprintf("%s/", filepath.Join(configs.Conf.EtcdPrefix, guestPrefix)) + return fmt.Sprintf("%s/", filepath.Join(configs.Conf.Etcd.Prefix, guestPrefix)) } // VolumeKey //vols/ func VolumeKey(id string) string { - return filepath.Join(configs.Conf.EtcdPrefix, volPrefix, id) + return filepath.Join(configs.Conf.Etcd.Prefix, volPrefix, id) } func SnapshotKey(id string) string { - return filepath.Join(configs.Conf.EtcdPrefix, snapshotPrefix, id) + return filepath.Join(configs.Conf.Etcd.Prefix, snapshotPrefix, id) } // UserImageKey //uimgs// @@ -66,7 +66,7 @@ func UserImageKey(user, name string) string { // UserImagePrefix // func UserImagePrefix(user string) string { - return filepath.Join(configs.Conf.EtcdPrefix, uimgPrefix, user) + return filepath.Join(configs.Conf.Etcd.Prefix, uimgPrefix, user) } // ImageKey //imgs/ @@ -76,7 +76,7 @@ func SysImageKey(name string) string { // SysImagePrefix //imgs/ func SysImagePrefix() string { - return filepath.Join(configs.Conf.EtcdPrefix, imgPrefix) + return filepath.Join(configs.Conf.Etcd.Prefix, imgPrefix) } // OccupiedIPKey //ips//occupied/ @@ -104,7 +104,7 @@ func IPALocKey(subnet int64) string { // SubnetKey //ips/ func SubnetKey(subnet int64) string { var v = strconv.FormatInt(subnet, 10) - return filepath.Join(configs.Conf.EtcdPrefix, ipPrefix, v) + return filepath.Join(configs.Conf.Etcd.Prefix, ipPrefix, v) } // IPBlockKey //ippool//blocks/ @@ -129,5 +129,5 @@ func IPPoolKey(name string) string { // IPPoolsPrefix //ippools/ func IPPoolsPrefix() string { - return filepath.Join(configs.Conf.EtcdPrefix, ippPrefix) + return filepath.Join(configs.Conf.Etcd.Prefix, ippPrefix) } diff --git a/internal/meta/meta.go b/internal/meta/meta.go index 783be4d..c823ebf 100644 --- a/internal/meta/meta.go +++ b/internal/meta/meta.go @@ -3,8 +3,8 @@ package meta import ( "context" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/pkg/errors" "github.com/projecteru2/yavirt/pkg/store" ) @@ -12,14 +12,14 @@ import ( func Create(res Resources) error { var data, err = res.Encode() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "failed to encode resources") } var ctx, cancel = Context(context.Background()) defer cancel() if err := store.Create(ctx, data); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "failed to create resources") } res.IncrVer() @@ -34,7 +34,7 @@ func Load(res Resource) error { var ver, err = store.Get(ctx, res.MetaKey(), res) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "failed to load resource") } 
res.SetVer(ver) @@ -42,18 +42,30 @@ func Load(res Resource) error { return nil } +func LoadRaw(key string) (map[string]any, int64, error) { + var ctx, cancel = Context(context.Background()) + defer cancel() + val := map[string]any{} + var ver, err = store.Get(ctx, key, &val) + if err != nil { + return nil, ver, errors.Wrap(err, "failed to load resource") + } + + return val, ver, nil +} + // Save . func Save(res Resources) error { var data, err = res.Encode() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "failed to encode resources") } var ctx, cancel = Context(context.Background()) defer cancel() if err := store.Update(ctx, data, res.Vers()); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "failed to update resources") } res.IncrVer() @@ -63,5 +75,5 @@ func Save(res Resources) error { // Context . func Context(ctx context.Context) (context.Context, context.CancelFunc) { - return context.WithTimeout(ctx, configs.Conf.MetaTimeout.Duration()) + return context.WithTimeout(ctx, configs.Conf.MetaTimeout) } diff --git a/internal/meta/res.go b/internal/meta/res.go index 3184610..51c4dc2 100644 --- a/internal/meta/res.go +++ b/internal/meta/res.go @@ -1,7 +1,7 @@ package meta import ( - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/pkg/utils" ) @@ -28,7 +28,7 @@ func (res Resources) Encode() (map[string]string, error) { for _, r := range res { var enc, err = utils.JSONEncode(r, "\t") if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrapf(err, "encode resource %v failed", r) } data[r.MetaKey()] = string(enc) diff --git a/internal/models/status_test.go b/internal/meta/status_test.go similarity index 97% rename from internal/models/status_test.go rename to internal/meta/status_test.go index ec3a741..57faa5e 100644 --- a/internal/models/status_test.go +++ b/internal/meta/status_test.go @@ -1,4 +1,4 @@ -package models +package meta import ( "testing" @@ -61,7 +61,7 @@ func TestStatusForward(t *testing.T) { }, } - var g = newGeneric() + var g = NewGeneric() for _, c := range cases { var next = c.forward diff --git a/internal/metrics/libvirt.go b/internal/metrics/libvirt.go new file mode 100644 index 0000000..d3b4642 --- /dev/null +++ b/internal/metrics/libvirt.go @@ -0,0 +1,451 @@ +package metrics + +import ( + "context" + "fmt" + "strings" + + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/internal/vmcache" + "github.com/prometheus/client_golang/prometheus" + + "github.com/digitalocean/go-libvirt" +) + +var ( + libvirtUpDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "", "up"), + "Whether scraping libvirt's metrics was successful.", + []string{"host"}, + nil) + + libvirtDomainNumbers = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "", "domains_number"), + "Number of the domain", + []string{"host"}, + nil) + + libvirtDomainState = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "", "domain_state_code"), + "Code of the domain state", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "stateDesc"}, + nil) + + libvirtDomainInfoNrVirtCPUDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_info", "virtual_cpus"), + "Number of virtual CPUs for the domain.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + + libvirtDomainInfoMaxMemDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_info", "maximum_memory_bytes"), + "Maximum allowed 
memory of the domain, in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + libvirtDomainInfoMemoryDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_info", "memory_usage_bytes"), + "Memory usage of the domain, in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + + libvirtDomainInfoNrGPUDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_info", "gpus"), + "Number of GPUs for the domain.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + + // CPU stats + libvirtDomainStatCPUTimeDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_cpu_stats", "time_seconds_total"), + "Amount of CPU time used by the domain, in seconds.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + + // memory stats + libvirtDomainStatMemorySwapInBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_mem_stats", "swap_in_bytes"), + "Memory swap in of domain(the total amount of data read from swap space), in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + libvirtDomainStatMemorySwapOutBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_mem_stats", "swap_out_bytes"), + "Memory swap out of the domain(the total amount of memory written out to swap space), in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + libvirtDomainStatMemoryUnusedBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_mem_stats", "unused_bytes"), + "Memory unused of the domain, in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + libvirtDomainStatMemoryAvailableInBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_mem_stats", "available_bytes"), + "Memory available of the domain, in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + libvirtDomainStatMemoryUsableBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_mem_stats", "usable_bytes"), + "Memory usable of the domain(corresponds to 'Available' in /proc/meminfo), in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + libvirtDomainStatMemoryRssBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_mem_stats", "rss_bytes"), + "Resident Set Size of the process running the domain, in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host"}, + nil) + + libvirtDomainBlockRdBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_block_stats", "read_bytes_total"), + "Number of bytes read from a block device, in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "source_file", "target_device"}, + nil) + libvirtDomainBlockRdReqDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_block_stats", "read_requests_total"), + "Number of read requests from a block device.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "source_file", "target_device"}, + nil) + libvirtDomainBlockWrBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_block_stats", "write_bytes_total"), + "Number of bytes written from a block device, in bytes.", + []string{"name", "ip", 
"eruName", "instanceId", "userName", "userId", "host", "source_file", "target_device"}, + nil) + libvirtDomainBlockWrReqDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_block_stats", "write_requests_total"), + "Number of write requests from a block device.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "source_file", "target_device"}, + nil) + + // DomainInterface + libvirtDomainInterfaceRxBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_interface_stats", "rx_bytes_total"), + "Number of bytes received on a network interface, in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "target_device"}, + nil) + libvirtDomainInterfaceRxPacketsDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_interface_stats", "rx_packets_total"), + "Number of packets received on a network interface.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "target_device"}, + nil) + libvirtDomainInterfaceRxErrsDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_interface_stats", "rx_errors_total"), + "Number of packet receive errors on a network interface.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "target_device"}, + nil) + libvirtDomainInterfaceRxDropDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_interface_stats", "rx_drops_total"), + "Number of packet receive drops on a network interface.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "target_device"}, + nil) + libvirtDomainInterfaceTxBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_interface_stats", "tx_bytes_total"), + "Number of bytes transmitted on a network interface, in bytes.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "target_device"}, + nil) + libvirtDomainInterfaceTxPacketsDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_interface_stats", "tx_packets_total"), + "Number of packets transmitted on a network interface.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "target_device"}, + nil) + libvirtDomainInterfaceTxErrsDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_interface_stats", "tx_errors_total"), + "Number of packet transmit errors on a network interface.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "target_device"}, + nil) + libvirtDomainInterfaceTxDropDesc = prometheus.NewDesc( + prometheus.BuildFQName("libvirt", "domain_interface_stats", "tx_drops_total"), + "Number of packet transmit drops on a network interface.", + []string{"name", "ip", "eruName", "instanceId", "userName", "userId", "host", "target_device"}, + nil) + + domainState = map[libvirt.DomainState]string{ + libvirt.DomainNostate: "no state", + libvirt.DomainRunning: "the domain is running", + libvirt.DomainBlocked: "the domain is blocked on resource", + libvirt.DomainPaused: "the domain is paused by user", + libvirt.DomainShutdown: "the domain is being shut down", + libvirt.DomainShutoff: "the domain is shut off", + libvirt.DomainCrashed: "the domain is crashed", + libvirt.DomainPmsuspended: "the domain is suspended by guest power management", + // libvirtgo.DOMAIN_LAST: "this enum value will increase over time as new events are added to the libvirt API", + } +) + +type collectFunc func(ch chan<- prometheus.Metric, stats 
*vmcache.DomainStatsResp, promLabels []string) (err error) + +// LibvirtExporter implements a Prometheus exporter for libvirt state. +type LibvirtExporter struct { + host string +} + +// NewLibvirtExporter creates a new Prometheus exporter for libvirt. +func NewLibvirtExporter(hn string) *LibvirtExporter { + return &LibvirtExporter{hn} +} + +// Collect scrapes Prometheus metrics from libvirt. +func (e *LibvirtExporter) Collect(ch chan<- prometheus.Metric) { + ch <- prometheus.MustNewConstMetric( + libvirtUpDesc, + prometheus.GaugeValue, + 1.0, + e.host) + + domainMap := vmcache.FetchStats() + domainNumber := len(domainMap) + ch <- prometheus.MustNewConstMetric( + libvirtDomainNumbers, + prometheus.GaugeValue, + float64(domainNumber), + e.host, + ) + + for name := range domainMap { + dom := domainMap[name] + if err := e.CollectDomain(ch, &dom); err != nil { + log.WithFunc("metrics.Collect").Errorf(context.TODO(), err, "failed to collect domain %s", name) + return + } + } +} + +// CollectDomain extracts Prometheus metrics from a libvirt domain. +func (e *LibvirtExporter) CollectDomain(ch chan<- prometheus.Metric, stats *vmcache.DomainStatsResp) (err error) { + // if stats.State == nil || stats.Balloon == nil || stats.Cpu == nil { + // return nil + // } + var ( + rState = stats.State + rvirCPU, _ = vmcache.ToUint64(stats.Stats["vcpu.maximum"]) + nrGPUs = len(stats.GPUAddrs) + rmaxmem, _ = vmcache.ToUint64(stats.Stats["balloon.maximum"]) + rmemory, _ = vmcache.ToUint64(stats.Stats["balloon.current"]) + rcputime, _ = vmcache.ToUint64(stats.Stats["cpu.time"]) + ) + + promLabels := []string{ + stats.AppName, + stats.IP, + stats.EruName, + stats.UUID, + stats.UserName, + stats.UserID, + e.host} + + ch <- prometheus.MustNewConstMetric(libvirtDomainState, prometheus.GaugeValue, float64(rState), append(promLabels, domainState[rState])...) + + ch <- prometheus.MustNewConstMetric(libvirtDomainInfoMaxMemDesc, prometheus.GaugeValue, float64(rmaxmem)*1024, promLabels...) + ch <- prometheus.MustNewConstMetric(libvirtDomainInfoMemoryDesc, prometheus.GaugeValue, float64(rmemory)*1024, promLabels...) + ch <- prometheus.MustNewConstMetric(libvirtDomainInfoNrVirtCPUDesc, prometheus.GaugeValue, float64(rvirCPU), promLabels...) + ch <- prometheus.MustNewConstMetric(libvirtDomainStatCPUTimeDesc, prometheus.CounterValue, float64(rcputime)/1e9, promLabels...) + ch <- prometheus.MustNewConstMetric(libvirtDomainInfoNrGPUDesc, prometheus.GaugeValue, float64(nrGPUs), promLabels...) + // var isActive int32 + // if isActive, err = l.DomainIsActive(domain.libvirtDomain); err != nil { + // logger.Error("failed to get active status of domain", zap.String("name", "ip", domain.domainName), zap.Error(err)) + // return err + // } + // if isActive != 1 { + // logger.Info("domain is not active", zap.String("name", "ip", domain.domainName)) + // return nil + // } + + for _, collectFunc := range []collectFunc{CollectDomainBlockDeviceInfo, CollectDomainNetworkInfo, CollectDomainMemoryStatInfo} { + if err = collectFunc(ch, stats, promLabels); err != nil { + log.WithFunc("metrics.CollectDomain").Errorf(context.TODO(), err, "failed to collect some domain info") + } + } + + return nil +} + +func CollectDomainBlockDeviceInfo(ch chan<- prometheus.Metric, stats *vmcache.DomainStatsResp, promLabels []string) (err error) { + // Report block device statistics. 
+ count, _ := vmcache.ToUint64(stats.Stats["block.count"]) + + for i := uint64(0); i < count; i++ { + diskName, _ := vmcache.ToString(stats.Stats[fmt.Sprintf("block.%d.name", i)]) + if !strings.HasPrefix(diskName, "vd") { + continue + } + diskPath, _ := vmcache.ToString(stats.Stats[fmt.Sprintf("block.%d.path", i)]) + rRdReq, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("block.%d.rd.reqs", i)]) + rRdBytes, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("block.%d.rd.bytes", i)]) + rWrReq, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("block.%d.wr.reqs", i)]) + rWrBytes, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("block.%d.wr.bytes", i)]) + + promDiskLabels := append(promLabels, diskPath, diskName) //nolint + ch <- prometheus.MustNewConstMetric( + libvirtDomainBlockRdBytesDesc, + prometheus.CounterValue, + float64(rRdBytes), + promDiskLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainBlockRdReqDesc, + prometheus.CounterValue, + float64(rRdReq), + promDiskLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainBlockWrBytesDesc, + prometheus.CounterValue, + float64(rWrBytes), + promDiskLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainBlockWrReqDesc, + prometheus.CounterValue, + float64(rWrReq), + promDiskLabels...) + + } + return +} + +func CollectDomainNetworkInfo(ch chan<- prometheus.Metric, stats *vmcache.DomainStatsResp, promLabels []string) (err error) { + // Report network interface statistics. + count, _ := vmcache.ToUint64(stats.Stats["net.count"]) + for idx := uint64(0); idx < count; idx++ { + ifaceName, _ := vmcache.ToString(stats.Stats[fmt.Sprintf("net.%d.name", idx)]) + if ifaceName == "" { + continue + } + rRxBytes, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("net.%d.rx.bytes", idx)]) + rRxPackets, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("net.%d.rx.pkts", idx)]) + rRxErrs, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("net.%d.rx.errs", idx)]) + rRxDrop, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("net.%d.rx.drop", idx)]) + rTxBytes, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("net.%d.tx.bytes", idx)]) + rTxPackets, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("net.%d.tx.pkts", idx)]) + rTxErrs, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("net.%d.tx.errs", idx)]) + rTxDrop, _ := vmcache.ToUint64(stats.Stats[fmt.Sprintf("net.%d.tx.drop", idx)]) + + promInterfaceLabels := append(promLabels, ifaceName) //nolint + ch <- prometheus.MustNewConstMetric( + libvirtDomainInterfaceRxBytesDesc, + prometheus.CounterValue, + float64(rRxBytes), + promInterfaceLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainInterfaceRxPacketsDesc, + prometheus.CounterValue, + float64(rRxPackets), + promInterfaceLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainInterfaceRxErrsDesc, + prometheus.CounterValue, + float64(rRxErrs), + promInterfaceLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainInterfaceRxDropDesc, + prometheus.CounterValue, + float64(rRxDrop), + promInterfaceLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainInterfaceTxBytesDesc, + prometheus.CounterValue, + float64(rTxBytes), + promInterfaceLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainInterfaceTxPacketsDesc, + prometheus.CounterValue, + float64(rTxPackets), + promInterfaceLabels...) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainInterfaceTxErrsDesc, + prometheus.CounterValue, + float64(rTxErrs), + promInterfaceLabels...) 
+ + ch <- prometheus.MustNewConstMetric( + libvirtDomainInterfaceTxDropDesc, + prometheus.CounterValue, + float64(rTxDrop), + promInterfaceLabels...) + } + return err +} + +func CollectDomainMemoryStatInfo(ch chan<- prometheus.Metric, stats *vmcache.DomainStatsResp, promLabels []string) (err error) { + var ( + swapIn, _ = vmcache.ToUint64(stats.Stats["balloon.swap_in"]) + swapOut, _ = vmcache.ToUint64(stats.Stats["balloon.swap_out"]) + unused, _ = vmcache.ToUint64(stats.Stats["balloon.unused"]) + available, _ = vmcache.ToUint64(stats.Stats["balloon.available"]) + usable, _ = vmcache.ToUint64(stats.Stats["balloon.usable"]) + rss, _ = vmcache.ToUint64(stats.Stats["balloon.rss"]) + ) + + ch <- prometheus.MustNewConstMetric( + libvirtDomainStatMemorySwapInBytesDesc, + prometheus.GaugeValue, + float64(swapIn)*1024, + promLabels...) + ch <- prometheus.MustNewConstMetric( + libvirtDomainStatMemorySwapOutBytesDesc, + prometheus.GaugeValue, + float64(swapOut)*1024, + promLabels...) + ch <- prometheus.MustNewConstMetric( + libvirtDomainStatMemoryUnusedBytesDesc, + prometheus.GaugeValue, + float64(unused*1024), + promLabels...) + ch <- prometheus.MustNewConstMetric( + libvirtDomainStatMemoryAvailableInBytesDesc, + prometheus.GaugeValue, + float64(available*1024), + promLabels...) + ch <- prometheus.MustNewConstMetric( + libvirtDomainStatMemoryUsableBytesDesc, + prometheus.GaugeValue, + float64(usable*1024), + promLabels...) + ch <- prometheus.MustNewConstMetric( + libvirtDomainStatMemoryRssBytesDesc, + prometheus.GaugeValue, + float64(rss*1024), + promLabels...) + return +} + +// Describe returns metadata for all Prometheus metrics that may be exported. +func (e *LibvirtExporter) Describe(ch chan<- *prometheus.Desc) { + ch <- libvirtUpDesc + ch <- libvirtDomainNumbers + + // domain info + ch <- libvirtDomainState + ch <- libvirtDomainInfoMaxMemDesc + ch <- libvirtDomainInfoMemoryDesc + ch <- libvirtDomainInfoNrVirtCPUDesc + ch <- libvirtDomainStatCPUTimeDesc + ch <- libvirtDomainInfoNrGPUDesc + + // domain block + ch <- libvirtDomainBlockRdBytesDesc + ch <- libvirtDomainBlockRdReqDesc + ch <- libvirtDomainBlockWrBytesDesc + ch <- libvirtDomainBlockWrReqDesc + + // domain interface + ch <- libvirtDomainInterfaceRxBytesDesc + ch <- libvirtDomainInterfaceRxPacketsDesc + ch <- libvirtDomainInterfaceRxErrsDesc + ch <- libvirtDomainInterfaceRxDropDesc + ch <- libvirtDomainInterfaceTxBytesDesc + ch <- libvirtDomainInterfaceTxPacketsDesc + ch <- libvirtDomainInterfaceTxErrsDesc + ch <- libvirtDomainInterfaceTxDropDesc + + // domain mem stat + ch <- libvirtDomainStatMemorySwapInBytesDesc + ch <- libvirtDomainStatMemorySwapOutBytesDesc + ch <- libvirtDomainStatMemoryUnusedBytesDesc + ch <- libvirtDomainStatMemoryAvailableInBytesDesc + ch <- libvirtDomainStatMemoryUsableBytesDesc + ch <- libvirtDomainStatMemoryRssBytesDesc +} diff --git a/internal/metrics/metric.go b/internal/metrics/metric.go index 192cf29..b389937 100644 --- a/internal/metrics/metric.go +++ b/internal/metrics/metric.go @@ -6,8 +6,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/pkg/utils" ) @@ -19,31 +18,34 @@ var ( MetricHeartbeatCount = "yavirt_heartbeat_total" // MetricErrorCount . 
MetricErrorCount = "yavirt_error_total" + MetricSvcTasks = "yavirt_svc_task_count" metr *Metrics ) -func init() { - hn := configs.Hostname() - +func Setup(hn string, cols ...prometheus.Collector) { metr = New(hn) metr.RegisterCounter(MetricErrorCount, "yavirt errors", nil) //nolint metr.RegisterCounter(MetricHeartbeatCount, "yavirt heartbeats", nil) //nolint + metr.RegisterGauge(MetricSvcTasks, "yavirt service tasks", nil) //nolint + e := NewLibvirtExporter(hn) + prometheus.MustRegister(e) + if len(cols) > 0 { + prometheus.MustRegister(cols...) + } } // Metrics . type Metrics struct { - host string - counters map[string]*prometheus.CounterVec - gauges map[string]*prometheus.GaugeVec + host string + collectors map[string]prometheus.Collector } // New . func New(host string) *Metrics { return &Metrics{ - host: host, - counters: map[string]*prometheus.CounterVec{}, - gauges: map[string]*prometheus.GaugeVec{}, + host: host, + collectors: map[string]prometheus.Collector{}, } } @@ -58,10 +60,9 @@ func (m *Metrics) RegisterCounter(name, desc string, labels []string) error { ) if err := prometheus.Register(col); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - - m.counters[name] = col + m.collectors[name] = col return nil } @@ -77,38 +78,66 @@ func (m *Metrics) RegisterGauge(name, desc string, labels []string) error { ) if err := prometheus.Register(col); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - m.gauges[name] = col + m.collectors[name] = col return nil } // Incr . func (m *Metrics) Incr(name string, labels map[string]string) error { - var col, exists = m.counters[name] + var collector, exists = m.collectors[name] if !exists { return errors.Errorf("collector %s not found", name) } labels = m.appendLabel(labels, "host", m.host) + switch col := collector.(type) { + case *prometheus.GaugeVec: + col.With(labels).Inc() + case *prometheus.CounterVec: + col.With(labels).Inc() + default: + return errors.Errorf("collector %s is not counter or gauge", name) + } - col.With(labels).Inc() + return nil +} + +// Decr . +func (m *Metrics) Decr(name string, labels map[string]string) error { + var collector, exists = m.collectors[name] + if !exists { + return errors.Errorf("collector %s not found", name) + } + + labels = m.appendLabel(labels, "host", m.host) + switch col := collector.(type) { + case *prometheus.GaugeVec: + col.With(labels).Dec() + default: + return errors.Errorf("collector %s is not gauge", name) + } return nil } // Store . func (m *Metrics) Store(name string, value float64, labels map[string]string) error { - var col, exists = m.gauges[name] + var collector, exists = m.collectors[name] if !exists { return errors.Errorf("collector %s not found", name) } labels = m.appendLabel(labels, "host", m.host) - - col.With(labels).Set(value) + switch col := collector.(type) { + case *prometheus.GaugeVec: + col.With(labels).Set(value) + default: + return errors.Errorf("collector %s is not gauge", name) + } return nil } @@ -142,6 +171,10 @@ func Incr(name string, labels map[string]string) error { return metr.Incr(name, labels) } +func Decr(name string, labels map[string]string) error { + return metr.Decr(name, labels) +} + // Store . 
func Store(name string, value float64, labels map[string]string) error { return metr.Store(name, value, labels) diff --git a/internal/models/const.go b/internal/models/const.go index 2b875e8..29ff0ed 100644 --- a/internal/models/const.go +++ b/internal/models/const.go @@ -1,66 +1,11 @@ package models const ( - // StatusPending . - StatusPending = "pending" - // StatusCreating . - StatusCreating = "creating" - // StatusStarting . - StatusStarting = "starting" - // StatusRunning . - StatusRunning = "running" - // StatusStopping . - StatusStopping = "stopping" - // StatusStopped . - StatusStopped = "stopped" - // StatusMigrating . - StatusMigrating = "migrating" - // StatusResizing . - StatusResizing = "resizing" - // StatusCapturing . - StatusCapturing = "capturing" - // StatusCaptured . - StatusCaptured = "captured" - // StatusDestroying . - StatusDestroying = "destroying" - // StatusDestroyed . - StatusDestroyed = "destroyed" - // StatusPausing . - StatusPausing = "pausing" - // StatusPaused . - StatusPaused = "paused" - // StatusResuming . - StatusResuming = "resuming" - - // VolDataType . - VolDataType = "dat" - // VolSysType . - VolSysType = "sys" - // VolQcow2Format . - VolQcow2Format = "qcow2" - - // SnapshotFullType . - SnapshotFullType = "full" - // SnapshotIncrementalType . - SnapshotIncrementalType = "incremental" - // HostVirtType . HostVirtType = "virt" // HostMetaType . HostMetaType = "meta" - // DistroUbuntu . - DistroUbuntu = "ubuntu" - // DistroCentOS . - DistroCentOS = "centos" - - // UserImagePrefix . - UserImagePrefix = "uimg" - // ImageSys indicates the image is a system version. - ImageSys = "sys" - // ImageUser indicates the image was captured by user. - ImageUser = "user" - // LabelPublish . LabelPublish = "Publish" // LabelHealthCheck . @@ -83,19 +28,3 @@ const ( // MaxBlockIPCount . MaxBlockIPCount = 256 ) - -// AllStatuses . 
-var AllStatuses = []string{ - StatusPending, - StatusCreating, - StatusStarting, - StatusRunning, - StatusStopping, - StatusStopped, - StatusCapturing, - StatusCaptured, - StatusMigrating, - StatusResizing, - StatusDestroying, - StatusDestroyed, -} diff --git a/internal/models/guest.go b/internal/models/guest.go index 83a1a9e..8bd4428 100644 --- a/internal/models/guest.go +++ b/internal/models/guest.go @@ -2,70 +2,80 @@ package models import ( "context" - "fmt" - "path/filepath" + "encoding/json" "strings" erucluster "github.com/projecteru2/core/cluster" + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" "github.com/projecteru2/yavirt/configs" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/internal/virt/types" - "github.com/projecteru2/yavirt/internal/vnet" - "github.com/projecteru2/yavirt/internal/vnet/handler" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/types" + interutils "github.com/projecteru2/yavirt/internal/utils" + "github.com/projecteru2/yavirt/internal/volume" + volFact "github.com/projecteru2/yavirt/internal/volume/factory" + "github.com/projecteru2/yavirt/internal/volume/local" "github.com/projecteru2/yavirt/pkg/idgen" + "github.com/projecteru2/yavirt/pkg/netx" "github.com/projecteru2/yavirt/pkg/store" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" + bdtypes "github.com/yuyang0/resource-bandwidth/bandwidth/types" + gputypes "github.com/yuyang0/resource-gpu/gpu/types" + vmiFact "github.com/yuyang0/vmimage/factory" + vmitypes "github.com/yuyang0/vmimage/types" ) // Guest indicates a virtual machine. type Guest struct { - *Generic - - ImageName string `json:"img"` - ImageUser string `json:"img_user,omitempty"` - HostName string `json:"host"` - CPU int `json:"cpu"` - Memory int64 `json:"mem"` - VolIDs []string `json:"vols"` - IPNets meta.IPNets `json:"ips"` - ExtraNetworks Networks `json:"extra_networks,omitempty"` - NetworkMode string `json:"network,omitempty"` - EnabledCalicoCNI bool `json:"enabled_calico_cni,omitempty"` - NetworkPair string `json:"network_pair,omitempty"` - EndpointID string `json:"endpoint,omitempty"` - MAC string `json:"mac"` - JSONLabels map[string]string `json:"labels"` - - LambdaOption *LambdaOptions `json:"lambda_option,omitempty"` - LambdaStdin bool `json:"lambda_stdin,omitempty"` - Host *Host `json:"-"` - Img Image `json:"-"` - Vols Volumes `json:"-"` - IPs IPs `json:"-"` + *meta.Generic + + ImageName string `json:"img"` + HostName string `json:"host"` + CPU int `json:"cpu"` + Memory int64 `json:"mem"` + VolIDs []string `json:"vols"` + GPUEngineParams *gputypes.EngineParams `json:"gpu_engine_params"` + BDEngineParams *bdtypes.EngineParams `json:"bandwidth_engine_params"` + IPNets meta.IPNets `json:"ips"` + ExtraNetworks Networks `json:"extra_networks,omitempty"` + NetworkMode string `json:"network,omitempty"` + NetworkPair string `json:"network_pair,omitempty"` + EndpointID string `json:"endpoint,omitempty"` + MAC string `json:"mac"` + MTU int `json:"mtu"` + JSONLabels map[string]string `json:"labels"` + + LambdaOption *LambdaOptions `json:"lambda_option,omitempty"` + LambdaStdin bool `json:"lambda_stdin,omitempty"` + Host *Host `json:"-"` + Img *vmitypes.Image `json:"-"` + Vols volFact.Volumes `json:"-"` + IPs IPs `json:"-"` DmiUUID string `json:"-"` } // Check . 
func (g *Guest) Check() error { - if g.CPU < configs.Conf.MinCPU || g.CPU > configs.Conf.MaxCPU { - return errors.Annotatef(errors.ErrInvalidValue, + if g.CPU < configs.Conf.Resource.MinCPU || g.CPU > configs.Conf.Resource.MaxCPU { + return errors.Wrapf(terrors.ErrInvalidValue, "invalid CPU num: %d, it should be [%d, %d]", - g.CPU, configs.Conf.MinCPU, configs.Conf.MaxCPU) + g.CPU, configs.Conf.Resource.MinCPU, configs.Conf.Resource.MaxCPU) } - if g.Memory < configs.Conf.MinMemory || g.Memory > configs.Conf.MaxMemory { - return errors.Annotatef(errors.ErrInvalidValue, + if g.Memory < configs.Conf.Resource.MinMemory || g.Memory > configs.Conf.Resource.MaxMemory { + return errors.Wrapf(terrors.ErrInvalidValue, "invalie memory: %d, it shoule be [%d, %d]", - g.Memory, configs.Conf.MinMemory, configs.Conf.MaxMemory) + g.Memory, configs.Conf.Resource.MinMemory, configs.Conf.Resource.MaxMemory) } if lab, exists := g.JSONLabels[erucluster.LabelMeta]; exists { obj := map[string]any{} if err := utils.JSONDecode([]byte(lab), &obj); err != nil { - return errors.Annotatef(errors.ErrInvalidValue, "'%s' should be JSON format", lab) + return errors.Wrapf(terrors.ErrInvalidValue, "'%s' should be JSON format", lab) } } @@ -79,14 +89,15 @@ func (g *Guest) MetaKey() string { // Create . func (g *Guest) Create() error { - g.Vols.genID() - g.Vols.setGuestID(g.ID) - g.VolIDs = g.Vols.ids() + g.Vols.GenID() + g.Vols.SetDevice() + g.Vols.SetGuestID(g.ID) + g.VolIDs = g.Vols.IDs() g.IPs.setGuestID(g.ID) var res = meta.Resources{g} - res.Concate(g.Vols.resources()) + res.Concate(g.Vols.Resources()) res.Concate(meta.Resources{newHostGuest(g.HostName, g.ID)}) return meta.Create(res) @@ -109,7 +120,7 @@ func (g *Guest) RemoveVol(volID string) { n := g.Vols.Len() for i := n - 1; i >= 0; i-- { - if g.Vols[i].ID != volID { + if g.Vols[i].GetID() != volID { continue } @@ -125,53 +136,58 @@ func (g *Guest) RemoveVol(volID string) { } // AppendVols . -func (g *Guest) AppendVols(vols ...*Volume) error { - if g.Vols.Len()+len(vols) > configs.Conf.MaxVolumesCount { - return errors.Annotatef(errors.ErrTooManyVolumes, "at most %d", configs.Conf.MaxVolumesCount) +func (g *Guest) AppendVols(vols ...volume.Volume) error { + if g.Vols.Len()+len(vols) > configs.Conf.Resource.MaxVolumesCount { + return errors.Wrapf(terrors.ErrTooManyVolumes, "at most %d", configs.Conf.Resource.MaxVolumesCount) } - var res = Volumes(vols) - res.setHostName(g.HostName) + var res = volFact.Volumes(vols) + res.SetHostName(g.HostName) - g.Vols.append(vols...) + g.Vols = append(g.Vols, vols...) - g.VolIDs = append(g.VolIDs, res.ids()...) + g.VolIDs = append(g.VolIDs, res.IDs()...) return nil } -func (g *Guest) SwitchVol(vol *Volume, idx int) error { +func (g *Guest) SwitchVol(vol volume.Volume, idx int) error { if idx < 0 || idx >= g.Vols.Len() { - return errors.Annotatef(errors.ErrInvalidValue, "must in range 0 to %d", g.Vols.Len()-1) + return errors.WithMessagef(terrors.ErrInvalidValue, "must in range 0 to %d", g.Vols.Len()-1) } g.Vols[idx] = vol - g.VolIDs[idx] = vol.ID + g.VolIDs[idx] = vol.GetID() return nil } // Load . -func (g *Guest) Load(host *Host, networkHandler handler.Handler) (err error) { +func (g *Guest) Load(host *Host, networkHandler network.Driver, opts ...Option) (err error) { + logger := log.WithFunc("Guest.Load") + op := NewOp(opts...) 
g.Host = host - if g.Img, err = LoadImage(g.ImageName, g.ImageUser); err != nil { - return errors.Trace(err) - } - - if g.Vols, err = LoadVolumes(g.VolIDs); err != nil { - return errors.Trace(err) + if g.Vols, err = volFact.LoadVolumes(g.VolIDs); err != nil { + return errors.WithMessagef(err, "failed to load volumes %v", g.VolIDs) } if err = g.LoadIPs(networkHandler); err != nil { - return errors.Trace(err) + return errors.WithMessage(err, "failed to load IPs") } + if g.Img, err = vmiFact.LoadImage(context.TODO(), g.ImageName); err != nil { + if op.IgnoreLoadImageErr { + logger.Warnf(context.TODO(), "failed to load image %s: %s", g.ImageName, err) + } else { + return errors.Wrapf(terrors.ErrLoadImage, "failed to load image %s: %s", g.ImageName, err) + } + } return nil } // LoadIPs . -func (g *Guest) LoadIPs(networkHandler handler.Handler) (err error) { +func (g *Guest) LoadIPs(networkHandler network.Driver) (err error) { for _, ipn := range g.IPNets { ipn.Assigned = true } @@ -195,15 +211,15 @@ func (g *Guest) Save() error { // Delete . func (g *Guest) Delete(force bool) error { - if err := g.setStatus(StatusDestroyed, force); err != nil { - return errors.Trace(err) + if err := g.SetStatus(meta.StatusDestroyed, force); err != nil { + return errors.WithMessagef(err, "Delete: failed to set status to %s", meta.StatusDestroyed) } var keys = []string{ g.MetaKey(), newHostGuest(g.HostName, g.ID).MetaKey(), } - keys = append(keys, g.Vols.deleteKeys()...) + keys = append(keys, g.Vols.MetaKeys()...) var vers = map[string]int64{g.MetaKey(): g.GetVer()} for _, vol := range g.Vols { @@ -217,20 +233,20 @@ func (g *Guest) Delete(force bool) error { } // SysVolume . -func (g *Guest) SysVolume() (*Volume, error) { +func (g *Guest) SysVolume() (volume.Volume, error) { for _, vol := range g.Vols { if vol.IsSys() { return vol, nil } } - return nil, errors.ErrSysVolumeNotExists + return nil, terrors.ErrSysVolumeNotExists } // HealthCheck . func (g *Guest) HealthCheck() (HealthCheck, error) { hcb, err := g.healthCheckBridge() if err != nil { - return HealthCheck{}, errors.Trace(err) + return HealthCheck{}, errors.Wrap(err, "") } return hcb.healthCheck(g) } @@ -239,7 +255,7 @@ func (g *Guest) HealthCheck() (HealthCheck, error) { func (g *Guest) PublishPorts() ([]int, error) { hcb, err := g.healthCheckBridge() if err != nil { - return []int{}, errors.Trace(err) + return []int{}, errors.Wrap(err, "") } return hcb.publishPorts() } @@ -258,16 +274,10 @@ func (g *Guest) MemoryInMiB() int64 { return utils.ConvToMB(g.Memory) } -// SocketFilepath shows the socket filepath of the guest on the host. -func (g *Guest) SocketFilepath() string { - var fn = fmt.Sprintf("%s.sock", g.ID) - return filepath.Join(configs.Conf.VirtSockDir, fn) -} - // NetworkPairName . func (g *Guest) NetworkPairName() string { switch { - case g.NetworkMode == vnet.NetworkCalico: + case g.NetworkMode == network.CalicoMode: fallthrough case len(g.NetworkPair) > 0: return g.NetworkPair @@ -277,65 +287,83 @@ func (g *Guest) NetworkPairName() string { } } +// Generate cloud-init config from guest. 
+func (g *Guest) GenCloudInit() (*types.CloudInitConfig, error) { + cidr := g.IPNets[0].CIDR() + gwAddr := g.IPNets[0].GatewayAddr() + inSubnet := netx.InSubnet(gwAddr, cidr) + obj := &types.CloudInitConfig{ + IFName: "ens5", + CIDR: cidr, + MAC: g.MAC, + MTU: g.MTU, + DefaultGW: types.CloudInitGateway{ + IP: gwAddr, + OnLink: !inSubnet, + }, + } + if bs, ok := g.JSONLabels["instance/cloud-init"]; ok { + if err := json.Unmarshal([]byte(bs), &obj); err != nil { + return nil, errors.Wrap(err, "") + } + } else { + obj.Username = configs.Conf.VMAuth.Username + obj.Password = configs.Conf.VMAuth.Password + } + if obj.Hostname == "" { + obj.Hostname = interutils.RandomString(10) + } + if obj.InstanceID == "" { + obj.InstanceID = obj.Hostname + } + return obj, nil +} + func newGuest() *Guest { return &Guest{ - Generic: newGeneric(), - Vols: Volumes{}, + Generic: meta.NewGeneric(), + Vols: volFact.Volumes{}, IPs: IPs{}, } } // LoadGuest . func LoadGuest(id string) (*Guest, error) { - return manager.LoadGuest(id) -} - -// CreateGuest . -func CreateGuest(opts types.GuestCreateOption, host *Host, vols []*Volume) (*Guest, error) { - return manager.CreateGuest(opts, host, vols) -} - -// NewGuest creates a new guest. -func NewGuest(host *Host, img Image) (*Guest, error) { - return manager.NewGuest(host, img) -} - -// GetNodeGuests gets all guests which belong to the node. -func GetNodeGuests(nodename string) ([]*Guest, error) { - return manager.GetNodeGuests(nodename) -} - -// GetAllGuests . -func GetAllGuests() ([]*Guest, error) { - return manager.GetAllGuests() -} - -// LoadGuest . -func (m *Manager) LoadGuest(id string) (*Guest, error) { - g, err := m.NewGuest(nil, nil) - if err != nil { - return nil, errors.Trace(err) - } - + g := newGuest() g.ID = id if err := meta.Load(g); err != nil { - return nil, errors.Trace(err) + return nil, errors.WithMessagef(err, "load guest %s", id) } return g, nil } // CreateGuest . 
-func (m *Manager) CreateGuest(opts types.GuestCreateOption, host *Host, vols []*Volume) (*Guest, error) { - img, err := LoadImage(opts.ImageName, opts.ImageUser) +func CreateGuest(opts types.GuestCreateOption, host *Host, vols []volume.Volume) (*Guest, error) { + img, err := vmiFact.LoadImage(context.TODO(), opts.ImageName) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrapf(err, "failed to load image %s", opts.ImageName) } - guest, err := m.NewGuest(host, img) - if err != nil { - return nil, errors.Trace(err) + var guest = newGuest() + guest.Host = host + guest.HostName = guest.Host.Name + guest.NetworkMode = opts.Labels[network.ModeLabelKey] + if guest.NetworkMode == "" { + guest.NetworkMode = host.DefaultNetworkMode + opts.Labels[network.ModeLabelKey] = guest.NetworkMode + } + guest.MTU = 1500 + + guest.Img = img + guest.ImageName = img.Fullname() + // Create sys volume when user doesn't specify one + if len(vols) == 0 || (!vols[0].IsSys()) { + sysVol := local.NewSysVolume(img.VirtualSize, img.Fullname()) + if err := guest.AppendVols(sysVol); err != nil { + return nil, errors.WithMessagef(err, "Create: failed to append volume %s", sysVol) + } } guest.ID = idgen.Next() @@ -344,48 +372,60 @@ func (m *Manager) CreateGuest(opts types.GuestCreateOption, host *Host, vols []* guest.DmiUUID = opts.DmiUUID guest.JSONLabels = opts.Labels - if guest.NetworkMode == vnet.NetworkCalico { - guest.EnabledCalicoCNI = configs.Conf.EnabledCalicoCNI - } - if opts.Lambda { guest.LambdaOption = &LambdaOptions{ Cmd: opts.Cmd, CmdOutput: nil, } } + log.Debugf(context.TODO(), "Resources: %v", opts.Resources) + if bs, ok := opts.Resources["gpu"]; ok { + var eParams gputypes.EngineParams + if err := json.Unmarshal(bs, &eParams); err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal gpu params") + } + guest.GPUEngineParams = &eParams + } + if bs, ok := opts.Resources["bandwidth"]; ok { + var eParams bdtypes.EngineParams + if err := json.Unmarshal(bs, &eParams); err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal bandwidth params") + } + guest.BDEngineParams = &eParams + } if err := guest.AppendVols(vols...); err != nil { - return nil, errors.Trace(err) + return nil, errors.WithMessagef(err, "CreateGuest: failed to append volumes %v", vols) } if err := guest.Check(); err != nil { - return nil, errors.Trace(err) + return nil, errors.WithMessagef(err, "CreateGuest: failed to check guest %v", guest) } if err := guest.Create(); err != nil { - return nil, errors.Trace(err) + return nil, err } return guest, nil } // NewGuest creates a new guest. -func (m *Manager) NewGuest(host *Host, img Image) (*Guest, error) { +func NewGuest(host *Host, img *vmitypes.Image) (*Guest, error) { var guest = newGuest() if host != nil { guest.Host = host guest.HostName = guest.Host.Name - guest.NetworkMode = guest.Host.NetworkMode + guest.NetworkMode = guest.Host.DefaultNetworkMode } if img != nil { guest.Img = img - guest.ImageName = guest.Img.GetName() - guest.ImageUser = guest.Img.GetUser() - if err := guest.AppendVols(guest.Img.NewSysVolume()); err != nil { - return nil, errors.Trace(err) + guest.ImageName = img.Fullname() + + sysVol := local.NewSysVolume(img.VirtualSize, img.Fullname()) + if err := guest.AppendVols(sysVol); err != nil { + return nil, errors.WithMessagef(err, "NewGuest: failed to append volume %s", sysVol) } } @@ -393,13 +433,13 @@ func (m *Manager) NewGuest(host *Host, img Image) (*Guest, error) { } // GetNodeGuests gets all guests which belong to the node. 
-func (m *Manager) GetNodeGuests(nodename string) ([]*Guest, error) { +func GetNodeGuests(nodename string) ([]*Guest, error) { ctx, cancel := meta.Context(context.Background()) defer cancel() data, _, err := store.GetPrefix(ctx, meta.HostGuestsPrefix(nodename), 0) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "failed to get prefix") } guests := []*Guest{} @@ -410,9 +450,9 @@ func (m *Manager) GetNodeGuests(nodename string) ([]*Guest, error) { continue } - g, err := m.LoadGuest(gid) + g, err := LoadGuest(gid) if err != nil { - return nil, errors.Trace(err) + return nil, errors.WithMessagef(err, "GetNodeGuests: failed to load guest %s", gid) } guests = append(guests, g) @@ -422,13 +462,13 @@ func (m *Manager) GetNodeGuests(nodename string) ([]*Guest, error) { } // GetAllGuests . -func (m *Manager) GetAllGuests() ([]*Guest, error) { +func GetAllGuests() ([]*Guest, error) { var ctx, cancel = meta.Context(context.Background()) defer cancel() var data, vers, err = store.GetPrefix(ctx, meta.GuestsPrefix(), 0) if err != nil { - return nil, errors.Trace(err) + return nil, errors.WithMessagef(err, "GetAllGuests: failed to get prefix") } var guests = []*Guest{} @@ -436,12 +476,12 @@ func (m *Manager) GetAllGuests() ([]*Guest, error) { for key, val := range data { var ver, exists = vers[key] if !exists { - return nil, errors.Annotatef(errors.ErrKeyBadVersion, key) + return nil, errors.Wrapf(terrors.ErrKeyBadVersion, key) } var g = newGuest() if err := utils.JSONDecode(val, g); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrapf(err, "GetAllGuests: failed to decode guest %s", key) } g.SetVer(ver) diff --git a/internal/models/guest_status.go b/internal/models/guest_status.go index d3367f3..e28dce1 100644 --- a/internal/models/guest_status.go +++ b/internal/models/guest_status.go @@ -1,87 +1,87 @@ package models import ( + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" ) // ForwardCreating . func (g *Guest) ForwardCreating() error { - return g.ForwardStatus(StatusCreating, false) + return g.ForwardStatus(meta.StatusCreating, false) } // ForwardStarting . -func (g *Guest) ForwardStarting() error { - return g.ForwardStatus(StatusStarting, false) +func (g *Guest) ForwardStarting(force bool) error { + return g.ForwardStatus(meta.StatusStarting, force) } // ForwardStopped . func (g *Guest) ForwardStopped(force bool) error { - return g.ForwardStatus(StatusStopped, force) + return g.ForwardStatus(meta.StatusStopped, force) } // ForwardStopping . func (g *Guest) ForwardStopping() error { - return g.ForwardStatus(StatusStopping, false) + return g.ForwardStatus(meta.StatusStopping, false) } // ForwardCaptured . func (g *Guest) ForwardCaptured() error { - return g.ForwardStatus(StatusCaptured, false) + return g.ForwardStatus(meta.StatusCaptured, false) } // ForwardCapturing . func (g *Guest) ForwardCapturing() error { - return g.ForwardStatus(StatusCapturing, false) + return g.ForwardStatus(meta.StatusCapturing, false) } // ForwardDestroying . func (g *Guest) ForwardDestroying(force bool) error { - return g.ForwardStatus(StatusDestroying, force) + return g.ForwardStatus(meta.StatusDestroying, force) } // ForwardRunning . func (g *Guest) ForwardRunning() error { - return g.ForwardStatus(StatusRunning, false) + return g.ForwardStatus(meta.StatusRunning, false) } // ForwardPaused . 
func (g *Guest) ForwardPaused() error { - return g.ForwardStatus(StatusPaused, false) + return g.ForwardStatus(meta.StatusPaused, false) } // ForwardPausing . func (g *Guest) ForwardPausing() error { - return g.ForwardStatus(StatusPausing, false) + return g.ForwardStatus(meta.StatusPausing, false) } // ForwardResuming . func (g *Guest) ForwardResuming() error { - return g.ForwardStatus(StatusResuming, false) + return g.ForwardStatus(meta.StatusResuming, false) } // ForwardResizing . func (g *Guest) ForwardResizing() error { - return g.ForwardStatus(StatusResizing, false) + return g.ForwardStatus(meta.StatusResizing, false) } // ForwardMigrating . func (g *Guest) ForwardMigrating() error { - return g.ForwardStatus(StatusMigrating, false) + return g.ForwardStatus(meta.StatusMigrating, false) } // ForwardStatus . func (g *Guest) ForwardStatus(st string, force bool) error { - if err := g.setStatus(st, force); err != nil { - return errors.Trace(err) + if err := g.SetStatus(st, force); err != nil { + return errors.WithMessagef(err, "ForwardStatus: failed to set guest status to %s", st) } - if err := g.Vols.setStatus(st, force); err != nil { - return errors.Trace(err) + if err := g.Vols.SetStatus(st, force); err != nil { + return errors.WithMessagef(err, "ForwardStatus: failed to set volumes status to %s", st) } var res = meta.Resources{g} - res.Concate(g.Vols.resources()) + res.Concate(g.Vols.Resources()) return meta.Save(res) } diff --git a/internal/models/guest_test.go b/internal/models/guest_test.go index 0b1ae5c..be40b6b 100644 --- a/internal/models/guest_test.go +++ b/internal/models/guest_test.go @@ -2,8 +2,6 @@ package models import ( "context" - "fmt" - "strconv" "testing" erucluster "github.com/projecteru2/core/cluster" @@ -11,7 +9,6 @@ import ( eruutils "github.com/projecteru2/core/utils" "github.com/projecteru2/yavirt/pkg/test/assert" - "github.com/projecteru2/yavirt/pkg/utils" ) func TestEmptyLabels(t *testing.T) { @@ -51,120 +48,120 @@ func TestValidLabels(t *testing.T) { assert.Equal(t, 200, hc.HTTPCode) } -func TestRemoveVol(t *testing.T) { - testcases := []struct { - orig []int - rm int - ids []int - }{ - // removes the first item. - { - []int{0}, - 0, - []int{}, - }, - { - []int{0, 1}, - 0, - []int{1}, - }, - { - []int{0, 1, 2}, - 0, - []int{2, 1}, - }, - { - []int{0, 1, 2, 3}, - 0, - []int{3, 1, 2}, - }, - // removes the last item. - { - []int{0, 1}, - 1, - []int{0}, - }, - { - []int{0, 1, 2}, - 2, - []int{0, 1}, - }, - { - []int{0, 1, 2, 3}, - 3, - []int{0, 1, 2}, - }, - // removes the medium item. - { - []int{0, 1, 2}, - 1, - []int{0, 2}, - }, - { - []int{0, 1, 2, 3}, - 2, - []int{0, 1, 3}, - }, - { - []int{0, 1, 2, 3}, - 1, - []int{0, 3, 2}, - }, - // duplicated - { - []int{0, 0, 0}, - 0, - []int{}, - }, - { - []int{0, 0, 1}, - 0, - []int{1}, - }, - { - []int{0, 1, 1}, - 1, - []int{0}, - }, - { - []int{0, 1, 0, 1}, - 0, - []int{1, 1}, - }, - { - []int{0, 1, 0, 1}, - 1, - []int{0, 0}, - }, - { - []int{0, 1, 1, 0}, - 0, - []int{1, 1}, - }, - { - []int{0, 1, 1, 0}, - 1, - []int{0, 0}, - }, - } +// func TestRemoveVol(t *testing.T) { +// testcases := []struct { +// orig []int +// rm int +// ids []int +// }{ +// // removes the first item. +// { +// []int{0}, +// 0, +// []int{}, +// }, +// { +// []int{0, 1}, +// 0, +// []int{1}, +// }, +// { +// []int{0, 1, 2}, +// 0, +// []int{2, 1}, +// }, +// { +// []int{0, 1, 2, 3}, +// 0, +// []int{3, 1, 2}, +// }, +// // removes the last item. 
+// { +// []int{0, 1}, +// 1, +// []int{0}, +// }, +// { +// []int{0, 1, 2}, +// 2, +// []int{0, 1}, +// }, +// { +// []int{0, 1, 2, 3}, +// 3, +// []int{0, 1, 2}, +// }, +// // removes the medium item. +// { +// []int{0, 1, 2}, +// 1, +// []int{0, 2}, +// }, +// { +// []int{0, 1, 2, 3}, +// 2, +// []int{0, 1, 3}, +// }, +// { +// []int{0, 1, 2, 3}, +// 1, +// []int{0, 3, 2}, +// }, +// // duplicated +// { +// []int{0, 0, 0}, +// 0, +// []int{}, +// }, +// { +// []int{0, 0, 1}, +// 0, +// []int{1}, +// }, +// { +// []int{0, 1, 1}, +// 1, +// []int{0}, +// }, +// { +// []int{0, 1, 0, 1}, +// 0, +// []int{1, 1}, +// }, +// { +// []int{0, 1, 0, 1}, +// 1, +// []int{0, 0}, +// }, +// { +// []int{0, 1, 1, 0}, +// 0, +// []int{1, 1}, +// }, +// { +// []int{0, 1, 1, 0}, +// 1, +// []int{0, 0}, +// }, +// } - for _, tc := range testcases { - g := newGuest() - for _, id := range tc.orig { - vol, err := NewDataVolume(fmt.Sprintf("/data%d", id), utils.GB) - assert.NilErr(t, err) +// for _, tc := range testcases { +// g := newGuest() +// for _, id := range tc.orig { +// vol, err := NewDataVolume(fmt.Sprintf("/data%d", id), utils.GB, "") +// assert.NilErr(t, err) - vol.ID = strconv.Itoa(id) - assert.NilErr(t, g.AppendVols(vol)) - } +// vol.ID = strconv.Itoa(id) +// assert.NilErr(t, g.AppendVols(vol)) +// } - g.RemoveVol(strconv.Itoa(tc.rm)) - assert.Equal(t, len(tc.ids), g.Vols.Len()) - assert.Equal(t, len(tc.ids), len(g.VolIDs)) +// g.RemoveVol(strconv.Itoa(tc.rm)) +// assert.Equal(t, len(tc.ids), g.Vols.Len()) +// assert.Equal(t, len(tc.ids), len(g.VolIDs)) - for i, id := range tc.ids { - assert.Equal(t, strconv.Itoa(id), g.Vols[i].ID) - assert.Equal(t, strconv.Itoa(id), g.VolIDs[i]) - } - } -} +// for i, id := range tc.ids { +// assert.Equal(t, strconv.Itoa(id), g.Vols[i].GetID()) +// assert.Equal(t, strconv.Itoa(id), g.VolIDs[i]) +// } +// } +// } diff --git a/internal/models/health_check.go b/internal/models/health_check.go index c1082b8..2dc2ff5 100644 --- a/internal/models/health_check.go +++ b/internal/models/health_check.go @@ -8,15 +8,16 @@ import ( erucluster "github.com/projecteru2/core/cluster" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" ) func (g *Guest) healthCheckBridge() (*HealthCheckBridge, error) { raw, exists := g.JSONLabels[erucluster.LabelMeta] if !exists { - return nil, errors.Annotatef(errors.ErrKeyNotExists, "no such label: %s", erucluster.LabelMeta) + return nil, errors.Wrapf(terrors.ErrKeyNotExists, "no such label: %s", erucluster.LabelMeta) } hcb := &HealthCheckBridge{} diff --git a/internal/models/host.go b/internal/models/host.go index 47886a6..e5b286d 100644 --- a/internal/models/host.go +++ b/internal/models/host.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/eru/resources" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/netx" ) // Host . 
@@ -14,42 +14,56 @@ import ( // /hosts:counter // /hosts/ type Host struct { - *Generic + *meta.Generic - ID uint32 `json:"id"` - Name string `json:"name"` - Type string `json:"type"` - Subnet int64 `json:"subnet"` - CPU int `json:"cpu"` - Memory int64 `json:"mem"` - Storage int64 `json:"storage"` - NetworkMode string `json:"network,omitempty"` + ID uint32 `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Subnet int64 `json:"subnet"` + CPU int `json:"cpu"` + Memory int64 `json:"mem"` + Storage int64 `json:"storage"` + NetworkModes []string `json:"network,omitempty"` + DefaultNetworkMode string `json:"default_network,omitempty"` } // LoadHost . func LoadHost() (*Host, error) { - host := &Host{ - Generic: newGeneric(), - Name: configs.Conf.Host.Name, - Type: HostVirtType, - Subnet: int64(configs.Conf.Host.Subnet), - CPU: configs.Conf.Host.CPU, - Memory: int64(configs.Conf.Host.Memory), - Storage: int64(configs.Conf.Host.Storage), - NetworkMode: configs.Conf.Host.NetworkMode, + cfg := &configs.Conf + cpu, mem, sto := cfg.Host.CPU, int64(cfg.Host.Memory), int64(cfg.Host.Storage) + // update cpu, memory, storage using hardware information + if cpu == 0 || mem == 0 { + cpumem := resources.GetManager().FetchCPUMem() + if cpu == 0 { + cpu = int(cpumem.CPU) + } + if mem == 0 { + mem = cpumem.Memory + } } - dec, err := netx.IPv4ToInt(configs.Conf.Host.Addr) - if err != nil { - return nil, err + if sto == 0 { + storage := resources.GetManager().FetchStorage() + sto = storage.Storage } - host.ID = uint32(dec) + host := &Host{ + Generic: meta.NewGeneric(), + ID: cfg.Host.ID, + Name: cfg.Host.Name, + Type: HostVirtType, + Subnet: int64(cfg.Host.Subnet), + CPU: cpu, + Memory: mem, + Storage: sto, + NetworkModes: cfg.Network.Modes, + DefaultNetworkMode: cfg.Network.DefaultMode, + } return host, nil } // NewHost . func NewHost() *Host { - return &Host{Generic: newGeneric()} + return &Host{Generic: meta.NewGeneric()} } // MetaKey . diff --git a/internal/models/host_test.go b/internal/models/host_test.go index e18a307..1c29ea2 100644 --- a/internal/models/host_test.go +++ b/internal/models/host_test.go @@ -1,11 +1,9 @@ package models import ( - "time" - "github.com/projecteru2/yavirt/pkg/idgen" ) func init() { - idgen.Setup(0, time.Now()) + idgen.Setup(0) } diff --git a/internal/models/image.go b/internal/models/image.go deleted file mode 100644 index 7555a00..0000000 --- a/internal/models/image.go +++ /dev/null @@ -1,53 +0,0 @@ -package models - -import ( - "os" - - "github.com/projecteru2/yavirt/pkg/errors" -) - -// Image wraps a few methods about Image. -type Image interface { //nolint - GetName() string - GetUser() string - GetDistro() string - GetID() string - GetType() string - GetHash() string - UpdateHash() (string, error) - - NewSysVolume() *Volume - Delete() error - - String() string - Filepath() string - Filename() string -} - -// LoadImage loads an Image. -func LoadImage(name, user string) (Image, error) { - if len(user) > 0 { - return LoadUserImage(user, name) - } - return LoadSysImage(name) -} - -// ListImages lists all images which belong to a specific user, or system-wide type. -func ListImages(user string) ([]Image, error) { - if len(user) > 0 { - return ListUserImages(user) - } - return ListSysImages() -} - -// ImageExists whether the image file exists. 
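Host now advertises a list of supported network modes plus a default instead of the old single NetworkMode field, and LoadHost falls back to detected CPU/memory/storage when the configured values are zero. The helper below is purely hypothetical (name and behaviour invented for illustration); it only sketches how a requested mode might be checked against NetworkModes with DefaultNetworkMode as the fallback.

package main

import "fmt"

// pickNetworkMode is a hypothetical helper: it returns the requested mode if
// the host supports it, otherwise the host's default mode.
func pickNetworkMode(requested string, supported []string, def string) string {
	if requested == "" {
		return def
	}
	for _, m := range supported {
		if m == requested {
			return requested
		}
	}
	return def
}

func main() {
	modes := []string{"calico", "vlan"}
	fmt.Println(pickNetworkMode("calico", modes, "calico")) // calico
	fmt.Println(pickNetworkMode("ovn", modes, "calico"))    // falls back to calico
}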
-func ImageExists(img Image) (bool, error) { - switch _, err := os.Stat(img.Filepath()); { - case err == nil: - return true, nil - case os.IsNotExist(err): - return false, nil - default: - return false, errors.Trace(err) - } -} diff --git a/internal/models/ipblock.go b/internal/models/ipblock.go index 5730cf4..abb8d71 100644 --- a/internal/models/ipblock.go +++ b/internal/models/ipblock.go @@ -3,9 +3,10 @@ package models import ( "net" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" "github.com/projecteru2/yavirt/pkg/netx" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" ) @@ -22,7 +23,7 @@ func (bs *IPBlocks) Append(block ...*IPBlock) { // IPBlock . type IPBlock struct { - *Generic + *meta.Generic IPs *utils.Bitmap32 `json:"ips"` @@ -32,12 +33,12 @@ type IPBlock struct { func newIPBlock(ipp *IPPool, ipn *net.IPNet) *IPBlock { block := &IPBlock{ - Generic: newGeneric(), + Generic: meta.NewGeneric(), ippool: ipp, ipnet: ipn, } - block.Status = StatusRunning + block.Status = meta.StatusRunning block.IPs = utils.NewBitmap32(block.ipCount()) return block @@ -46,17 +47,17 @@ func newIPBlock(ipp *IPPool, ipn *net.IPNet) *IPBlock { // Release . func (b *IPBlock) Release(ipn *net.IPNet) error { if !b.ippool.Contains(ipn) { - return errors.Annotatef(errors.ErrInsufficientIP, "IP %s not found", ipn.IP) + return errors.Wrapf(terrors.ErrInsufficientIP, "IP %s not found", ipn.IP) } offset := b.getIPIndex(ipn.IP) if err := b.IPs.Unset(offset); err != nil { - return errors.Annotatef(err, "release %d IP %s failed", offset, ipn) + return errors.Wrapf(err, "release %d IP %s failed", offset, ipn) } if err := b.save(); err != nil { b.IPs.Set(offset) //nolint - return errors.Trace(err) + return errors.Wrapf(err, "release %d IP %s failed", offset, ipn) } return nil @@ -90,7 +91,7 @@ func (b *IPBlock) Assign() (ipn *net.IPNet, err error) { }) if err == nil && ipn == nil { - err = errors.Annotatef(errors.ErrInsufficientIP, + err = errors.Wrapf(terrors.ErrInsufficientIP, "block %s hasn't free IP", b.ipnet) } @@ -104,12 +105,12 @@ func (b *IPBlock) assign(offset int) (*net.IPNet, error) { } if err := b.IPs.Set(offset); err != nil { - return nil, errors.Annotatef(err, "assign %d IP %s failed", offset, ipn) + return nil, errors.Wrapf(err, "assign %d IP %s failed", offset, ipn) } if err := b.save(); err != nil { b.IPs.Unset(offset) //nolint - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return ipn, nil diff --git a/internal/models/ippool.go b/internal/models/ippool.go index 4016748..fe2996a 100644 --- a/internal/models/ippool.go +++ b/internal/models/ippool.go @@ -7,16 +7,17 @@ import ( clientv3 "go.etcd.io/etcd/client/v3" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" "github.com/projecteru2/yavirt/pkg/netx" "github.com/projecteru2/yavirt/pkg/store" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" ) // IPPool . 
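IPBlock records assignments in a utils.Bitmap32, so each address is simply the block's base IP plus a bit offset. The standalone sketch below shows the underlying IPv4/integer arithmetic using encoding/binary, assuming plain IPv4; yavirt itself goes through its netx helpers rather than this code.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// ipv4ToUint32 converts a dotted IPv4 address to its integer form.
func ipv4ToUint32(ip net.IP) uint32 {
	return binary.BigEndian.Uint32(ip.To4())
}

// uint32ToIPv4 converts the integer form back to net.IP.
func uint32ToIPv4(v uint32) net.IP {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, v)
	return net.IP(b)
}

func main() {
	base := net.ParseIP("10.0.0.0")
	// Offset 5 in the block's bitmap corresponds to 10.0.0.5.
	offset := uint32(5)
	fmt.Println(uint32ToIPv4(ipv4ToUint32(base) + offset)) // 10.0.0.5
}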
type IPPool struct { - *Generic + *meta.Generic Name string `json:"name"` Raw string `json:"raw"` @@ -34,7 +35,7 @@ func LoadIPPool(name string) (*IPPool, error) { ipp := newIPPool(name) if err := meta.Load(ipp); err != nil { - return nil, errors.Annotatef(err, "load IPPool %s failed", name) + return nil, errors.WithMessagef(err, "load IPPool %s failed", name) } return ipp, ipp.parse() @@ -52,27 +53,27 @@ func NewIPPool(name, cidr string) (ipp *IPPool, err error) { func newIPPool(name string) *IPPool { ipp := &IPPool{ - Generic: newGeneric(), + Generic: meta.NewGeneric(), Name: name, blocks: IPBlocks{}, sync: true, } - ipp.Status = StatusRunning + ipp.Status = meta.StatusRunning return ipp } func (ipp *IPPool) parse() (err error) { if _, ipp.ipnet, err = netx.ParseCIDR(ipp.Raw); err != nil { - return errors.Annotatef(err, "parse CIDR %s failed", ipp.Raw) + return errors.Wrapf(err, "parse CIDR %s failed", ipp.Raw) } switch { case ipp.MaskBits() > MaxMaskBits: - return errors.Annotatef(errors.ErrTooLargeMaskBits, "at most", MaxMaskBits) + return errors.Wrapf(terrors.ErrTooLargeMaskBits, "at most", MaxMaskBits) case ipp.MaskBits() < MinMaskBits: - return errors.Annotatef(errors.ErrTooSmallMaskBits, "at least", MinMaskBits) + return errors.Wrapf(terrors.ErrTooSmallMaskBits, "at least", MinMaskBits) } ipp.CIDR = fmt.Sprintf("%s/%d", ipp.Subnet().String(), ipp.MaskBits()) @@ -89,7 +90,7 @@ func (ipp *IPPool) Assign() (ipn *net.IPNet, err error) { } defer func() { if ue := unlock(context.Background()); ue != nil { - err = errors.Wrap(err, ue) + err = errors.CombineErrors(err, ue) } }() @@ -125,7 +126,7 @@ func (ipp *IPPool) getAvailBlock() (block *IPBlock, err error) { // there's no any available block. if err == nil && block == nil { - err = errors.Annotatef(errors.ErrInsufficientIP, + err = errors.Wrapf(terrors.ErrInsufficientIP, "%s CIDR %s hasn't free IP", ipp.Name, ipp.ipnet) } @@ -135,7 +136,7 @@ func (ipp *IPPool) getAvailBlock() (block *IPBlock, err error) { // Release . func (ipp *IPPool) Release(ipn *net.IPNet) (err error) { if !ipp.Contains(ipn) { - return errors.Annotatef(errors.ErrInvalidValue, "%s doesn't contain %s", ipp.Name, ipn) + return errors.Wrapf(terrors.ErrInvalidValue, "%s doesn't contain %s", ipp.Name, ipn) } var unlock utils.Unlocker @@ -144,7 +145,7 @@ func (ipp *IPPool) Release(ipn *net.IPNet) (err error) { } defer func() { if ue := unlock(context.Background()); ue != nil { - err = errors.Wrap(err, ue) + err = errors.CombineErrors(err, ue) } }() @@ -165,7 +166,7 @@ func (ipp *IPPool) Release(ipn *net.IPNet) (err error) { // IsAssigned . 
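The deferred unlocks in Assign and Release above now merge an unlock failure into the primary error with errors.CombineErrors instead of the old errors.Wrap(err, ue), so neither failure is silently dropped. A minimal standalone sketch of that deferred-unlock pattern, with a toy unlocker standing in for utils.Unlocker:

package main

import (
	"context"
	"fmt"

	"github.com/cockroachdb/errors"
)

// unlocker mimics the shape of utils.Unlocker used above.
type unlocker func(context.Context) error

func assign(lock func() (unlocker, error)) (err error) {
	unlock, err := lock()
	if err != nil {
		return err
	}
	defer func() {
		// Keep both the primary error and the unlock error, if any.
		if ue := unlock(context.Background()); ue != nil {
			err = errors.CombineErrors(err, ue)
		}
	}()

	// ... do the actual assignment here ...
	return errors.New("no free IP") // stand-in for the real failure
}

func main() {
	err := assign(func() (unlocker, error) {
		return func(context.Context) error { return errors.New("unlock failed") }, nil
	})
	fmt.Println(err) // both errors are reported
}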
func (ipp *IPPool) IsAssigned(ipn *net.IPNet) (assigned bool, err error) { if !ipp.Contains(ipn) { - return false, errors.Annotatef(errors.ErrInvalidValue, "%s doesn't contain %s", ipp.Name, ipn) + return false, errors.Wrapf(terrors.ErrInvalidValue, "%s doesn't contain %s", ipp.Name, ipn) } var unlock utils.Unlocker @@ -174,7 +175,7 @@ func (ipp *IPPool) IsAssigned(ipn *net.IPNet) (assigned bool, err error) { } defer func() { if ue := unlock(context.Background()); ue != nil { - err = errors.Wrap(err, ue) + err = errors.CombineErrors(err, ue) } }() @@ -194,7 +195,7 @@ func (ipp *IPPool) getBlock(ipn *net.IPNet) (*IPBlock, error) { i := ipp.getBlockIndex(ipn.IP) if int64(ipp.blocks.Len()) <= i { - return nil, errors.Annotatef(errors.ErrInsufficientBlocks, + return nil, errors.Wrapf(terrors.ErrInsufficientBlocks, "block %s not found", netx.Int2ip(ipp.intSubnet()+i)) } @@ -222,7 +223,7 @@ func (ipp *IPPool) Contains(ipn *net.IPNet) bool { func (ipp *IPPool) spawnBlock(offset int) (block *IPBlock, err error) { if err = ipp.Flags.Set(offset); err != nil { - return nil, errors.Annotatef(err, "spawn %d block %s failed", + return nil, errors.Wrapf(err, "spawn %d block %s failed", offset, netx.Int2ip(ipp.intSubnet()+int64(offset))) } @@ -240,7 +241,7 @@ func (ipp *IPPool) spawnBlock(offset int) (block *IPBlock, err error) { func (ipp *IPPool) reload() error { newOne := newIPPool(ipp.Name) if err := meta.Load(newOne); err != nil { - return errors.Annotatef(err, "load IPPool %s failed", ipp.Name) + return errors.Wrapf(err, "load IPPool %s failed", ipp.Name) } ipp.Flags = newOne.Flags @@ -257,11 +258,11 @@ func (ipp *IPPool) reloadBlocks() error { data, vers, err := store.GetPrefix(ctx, prefix, int64(ipp.blockCount())) if err != nil { // there's no any block yet. 
- if errors.Contain(err, errors.ErrKeyNotExists) { + if errors.Is(err, terrors.ErrKeyNotExists) { return nil } - return errors.Annotatef(err, "get IPPool %s all blocks failed", ipp.Name) + return errors.Wrapf(err, "get IPPool %s all blocks failed", ipp.Name) } delete(data, prefix) @@ -275,31 +276,31 @@ func (ipp *IPPool) parseBlocksBytes(data map[string][]byte, vers map[string]int6 for key, bytes := range data { ver, exists := vers[key] if !exists { - return errors.Annotatef(errors.ErrKeyBadVersion, key) + return errors.Wrapf(terrors.ErrKeyBadVersion, key) } ipn, err := ipp.parseBlockMetaKey(key) if err != nil { - return errors.Annotatef(err, "parse block key %s failed", key) + return errors.Wrapf(err, "parse block key %s failed", key) } block := newIPBlock(ipp, ipn) if err := utils.JSONDecode(bytes, block); err != nil { - return errors.Annotatef(err, "decode IPBlock bytes %s failed", bytes) + return errors.Wrapf(err, "decode IPBlock bytes %s failed", bytes) } block.SetVer(ver) i := ipp.getBlockIndex(block.BlockIP()) if int64(blocks.Len()) <= i { - return errors.Annotatef(errors.ErrInsufficientBlocks, "%d block %s not found", i, ipn) + return errors.Wrapf(terrors.ErrInsufficientBlocks, "%d block %s not found", i, ipn) } blocks[i] = block } if err := ipp.checkBlocks(blocks); err != nil { - return errors.Trace(err) + return errors.WithMessagef(err, "parse blocks %s failed", blocks) } ipp.blocks = blocks @@ -318,7 +319,7 @@ func (ipp *IPPool) checkBlocks(blocks IPBlocks) (err error) { return true } - err = errors.Annotatef(errors.ErrInvalidValue, + err = errors.Wrapf(terrors.ErrInvalidValue, "IPPool %s %d block %s should be spawned (%t) but not", ipp.ipnet, offset, ipp.getBlockIPNet(offset), set) @@ -346,12 +347,12 @@ func (ipp *IPPool) save(block *IPBlock) error { ippBytes, err := ipp.Marshal() if err != nil { - return errors.Trace(err) + return errors.Wrapf(err, "failed to marshal %s", ipp) } blockBytes, err := block.Marshal() if err != nil { - return errors.Trace(err) + return errors.Wrapf(err, "failed to marshal %s", block) } ops := []clientv3.Op{ @@ -364,9 +365,9 @@ func (ipp *IPPool) save(block *IPBlock) error { switch succ, err := store.BatchOperate(ctx, ops); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "failed to batch operate") case !succ: - return errors.Annotatef(errors.ErrBatchOperate, + return errors.Wrapf(terrors.ErrBatchOperate, "put: %s / %s", ipp.MetaKey(), block.MetaKey()) } diff --git a/internal/models/mocks/Manageable.go b/internal/models/mocks/Manageable.go deleted file mode 100644 index 192e1ed..0000000 --- a/internal/models/mocks/Manageable.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mocks - -import ( - models "github.com/projecteru2/yavirt/internal/models" - mock "github.com/stretchr/testify/mock" - - types "github.com/projecteru2/yavirt/internal/virt/types" -) - -// Manageable is an autogenerated mock type for the Manageable type -type Manageable struct { - mock.Mock -} - -// CreateGuest provides a mock function with given fields: opts, host, vols -func (_m *Manageable) CreateGuest(opts types.GuestCreateOption, host *models.Host, vols []*models.Volume) (*models.Guest, error) { - ret := _m.Called(opts, host, vols) - - var r0 *models.Guest - if rf, ok := ret.Get(0).(func(types.GuestCreateOption, *models.Host, []*models.Volume) *models.Guest); ok { - r0 = rf(opts, host, vols) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Guest) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(types.GuestCreateOption, *models.Host, []*models.Volume) error); ok { - r1 = rf(opts, host, vols) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetAllGuests provides a mock function with given fields: -func (_m *Manageable) GetAllGuests() ([]*models.Guest, error) { - ret := _m.Called() - - var r0 []*models.Guest - if rf, ok := ret.Get(0).(func() []*models.Guest); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*models.Guest) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNodeGuests provides a mock function with given fields: nodename -func (_m *Manageable) GetNodeGuests(nodename string) ([]*models.Guest, error) { - ret := _m.Called(nodename) - - var r0 []*models.Guest - if rf, ok := ret.Get(0).(func(string) []*models.Guest); ok { - r0 = rf(nodename) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*models.Guest) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(nodename) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LoadGuest provides a mock function with given fields: id -func (_m *Manageable) LoadGuest(id string) (*models.Guest, error) { - ret := _m.Called(id) - - var r0 *models.Guest - if rf, ok := ret.Get(0).(func(string) *models.Guest); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Guest) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewGuest provides a mock function with given fields: host, img -func (_m *Manageable) NewGuest(host *models.Host, img models.Image) (*models.Guest, error) { - ret := _m.Called(host, img) - - var r0 *models.Guest - if rf, ok := ret.Get(0).(func(*models.Host, models.Image) *models.Guest); ok { - r0 = rf(host, img) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Guest) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*models.Host, models.Image) error); ok { - r1 = rf(host, img) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/internal/models/mocks/mock.go b/internal/models/mocks/mock.go deleted file mode 100644 index d1d7baf..0000000 --- a/internal/models/mocks/mock.go +++ /dev/null @@ -1,10 +0,0 @@ -package mocks - -import "github.com/projecteru2/yavirt/internal/models" - -func Mock() (*Manageable, func()) { - var origManager = models.GetManager() - var mockManager = &Manageable{} - models.SetManager(mockManager) - return mockManager, func() { models.SetManager(origManager) } -} diff --git a/internal/models/model.go b/internal/models/model.go 
deleted file mode 100644 index a7c4709..0000000 --- a/internal/models/model.go +++ /dev/null @@ -1,27 +0,0 @@ -package models - -import "github.com/projecteru2/yavirt/internal/virt/types" - -type Manageable interface { - GetAllGuests() ([]*Guest, error) - GetNodeGuests(nodename string) ([]*Guest, error) - LoadGuest(id string) (*Guest, error) - CreateGuest(opts types.GuestCreateOption, host *Host, vols []*Volume) (*Guest, error) - NewGuest(host *Host, img Image) (*Guest, error) -} - -type Manager struct{} - -var manager Manageable - -func Setup() { - manager = &Manager{} -} - -func GetManager() Manageable { - return manager -} - -func SetManager(m Manageable) { - manager = m -} diff --git a/internal/models/option.go b/internal/models/option.go new file mode 100644 index 0000000..c3fd227 --- /dev/null +++ b/internal/models/option.go @@ -0,0 +1,21 @@ +package models + +type Op struct { + IgnoreLoadImageErr bool +} + +type Option func(*Op) + +func IgnoreLoadImageErrOption() Option { + return func(op *Op) { + op.IgnoreLoadImageErr = true + } +} + +func NewOp(opts ...Option) *Op { + op := &Op{} + for _, opt := range opts { + opt(op) + } + return op +} diff --git a/internal/models/sys_image.go b/internal/models/sys_image.go deleted file mode 100644 index 122b0ea..0000000 --- a/internal/models/sys_image.go +++ /dev/null @@ -1,185 +0,0 @@ -package models - -import ( - "context" - "crypto/sha256" - "fmt" - "io" - "math" - "os" - - "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/store" - "github.com/projecteru2/yavirt/pkg/utils" -) - -// SysImage indicates a system image -type SysImage struct { - *Generic - ParentName string `json:"parent,omitempty"` - Name string `json:"name"` - Size int64 `json:"size"` - Hash string `json:"sha256"` -} - -// NewSysImage creates a new system-wide image. -func NewSysImage() *SysImage { - return &SysImage{Generic: newGeneric()} -} - -// ListSysImages lists all system-wide images. -func ListSysImages() ([]Image, error) { - ctx, cancel := meta.Context(context.TODO()) - defer cancel() - - prefix := meta.SysImagePrefix() - data, vers, err := store.GetPrefix(ctx, prefix, math.MaxInt64) - if err != nil { - if errors.Contain(err, errors.ErrKeyNotExists) { - return nil, nil - } - return nil, errors.Annotatef(err, "get sys images failed") - } - - delete(data, prefix) - - return parseSysImages(data, vers) -} - -func parseSysImages(data map[string][]byte, vers map[string]int64) ([]Image, error) { - imgs := make([]Image, 0, len(data)) - - for key, bytes := range data { - ver, exists := vers[key] - if !exists { - return nil, errors.Annotatef(errors.ErrKeyBadVersion, key) - } - - img := NewSysImage() - if err := utils.JSONDecode(bytes, img); err != nil { - return nil, errors.Annotatef(err, "decode SysImage bytes %s failed", bytes) - } - - img.SetVer(ver) - - imgs = append(imgs, img) - } - - return imgs, nil -} - -// LoadSysImage loads a system-wide image. -func LoadSysImage(name string) (*SysImage, error) { - img := NewSysImage() - img.Name = name - if err := meta.Load(img); err != nil { - return nil, errors.Trace(err) - } - return img, nil -} - -// String . -func (img *SysImage) String() string { - return fmt.Sprintf("sys-image: %s", img.GetName()) -} - -// GetType gets the image's type. -func (img *SysImage) GetType() string { - return ImageSys -} - -// GetHash gets the image's hash. -func (img *SysImage) GetHash() string { - return img.Hash -} - -// UpdateHash update and return the image's hash . 
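The new internal/models/option.go introduces a small functional-options type (Op, Option, NewOp, IgnoreLoadImageErrOption). The standalone sketch below mirrors that pattern and adds a hypothetical consumer (loadGuest here is invented for illustration) to show how call sites are expected to pass options through:

package main

import "fmt"

// Mirror of the Op/Option pattern added in internal/models/option.go.
type Op struct {
	IgnoreLoadImageErr bool
}

type Option func(*Op)

func IgnoreLoadImageErrOption() Option {
	return func(op *Op) { op.IgnoreLoadImageErr = true }
}

func NewOp(opts ...Option) *Op {
	op := &Op{}
	for _, opt := range opts {
		opt(op)
	}
	return op
}

// loadGuest is a hypothetical consumer showing how the options are unpacked.
func loadGuest(id string, opts ...Option) {
	op := NewOp(opts...)
	fmt.Printf("loading %s, ignore image errors: %v\n", id, op.IgnoreLoadImageErr)
}

func main() {
	loadGuest("g-1")                             // defaults
	loadGuest("g-1", IgnoreLoadImageErrOption()) // opt in
}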
-func (img *SysImage) UpdateHash() (string, error) { - exists, err := ImageExists(img) - if err != nil { - return "", err - } - if !exists { - // TODO: Pull image? - return "", errors.ErrImageFileNotExists - } - - f, err := os.Open(img.Filepath()) - if err != nil { - return "", err - } - defer f.Close() - - hash := sha256.New() - if _, err := io.Copy(hash, f); err != nil { - return "", err - } - - img.Hash = fmt.Sprintf("%x", hash.Sum(nil)) - - return img.Hash, img.Save() -} - -// Save updated metadata. -func (img *SysImage) Save() error { - return meta.Save(meta.Resources{img}) -} - -// GetID gets the image's ID which will be uploaded to image hub. -func (img *SysImage) GetID() string { - return img.Name -} - -// GetName gets image's name -func (img *SysImage) GetName() string { - return img.Name -} - -// GetUser gets the system-wide image's owner name -func (img *SysImage) GetUser() string { - return "" -} - -// Create . -func (img *SysImage) Create() error { - img.Status = StatusRunning - return meta.Create(meta.Resources{img}) -} - -// Delete removes the system-wide image. -func (img *SysImage) Delete() error { - ctx, cancel := meta.Context(context.TODO()) - defer cancel() - - return store.Delete( - ctx, - []string{img.MetaKey()}, - map[string]int64{img.MetaKey(): img.GetVer()}, - ) -} - -// MetaKey . -func (img *SysImage) MetaKey() string { - return meta.SysImageKey(img.Name) -} - -// NewSysVolume generates a new volume for OS' system disk. -func (img *SysImage) NewSysVolume() *Volume { - return NewSysVolume(img.Size, img.Name) -} - -// Filepath gets a system-wide image's absolute filepath. -func (img *SysImage) Filepath() string { - return img.JoinVirtPath(img.Filename()) -} - -// Filename generates a system-wide image's filename without any path info. -func (img *SysImage) Filename() string { - return fmt.Sprintf("%s.img", img.Name) -} - -// GetDistro gets the system-wide image's distro. -func (img *SysImage) GetDistro() string { - return img.Name[:6] -} diff --git a/internal/models/user_image.go b/internal/models/user_image.go deleted file mode 100644 index c856166..0000000 --- a/internal/models/user_image.go +++ /dev/null @@ -1,146 +0,0 @@ -package models - -import ( - "context" - "fmt" - "math" - - "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/store" - "github.com/projecteru2/yavirt/pkg/utils" -) - -// UserImage . -type UserImage struct { - *SysImage - User string `json:"user"` - Distro string `json:"distro"` - Version int64 `json:"version"` -} - -// NewUserImage creates a new user captured image. -func NewUserImage(user, name string, size int64) *UserImage { - img := &UserImage{SysImage: NewSysImage()} - img.Name = name - img.Size = size - img.User = user - return img -} - -// ListUserImages list all images which belongs to the user. 
-func ListUserImages(user string) ([]Image, error) { - ctx, cancel := meta.Context(context.TODO()) - defer cancel() - - prefix := meta.UserImagePrefix(user) - data, vers, err := store.GetPrefix(ctx, prefix, math.MaxInt64) - if err != nil { - if errors.Contain(err, errors.ErrKeyNotExists) { - return nil, nil - } - return nil, errors.Annotatef(err, "get sys images failed") - } - - delete(data, prefix) - - return parseUserImages(data, vers) -} - -func parseUserImages(data map[string][]byte, vers map[string]int64) ([]Image, error) { - imgs := make([]Image, 0, len(data)) - - for key, bytes := range data { - ver, exists := vers[key] - if !exists { - return nil, errors.Annotatef(errors.ErrKeyBadVersion, key) - } - - img := &UserImage{SysImage: NewSysImage()} - if err := utils.JSONDecode(bytes, img); err != nil { - return nil, errors.Annotatef(err, "decode SysImage bytes %s failed", bytes) - } - - img.SetVer(ver) - - imgs = append(imgs, img) - } - - return imgs, nil -} - -// LoadUserImage loads a user captured image. -func LoadUserImage(user, name string) (*UserImage, error) { - i := NewUserImage(user, name, 0) - return i, meta.Load(i) -} - -// String . -func (i UserImage) String() string { - return fmt.Sprintf("usr-image: %s, distro: %s, owner: %s", i.GetName(), i.GetDistro(), i.GetUser()) -} - -// GetType gets the image's type. -func (i UserImage) GetType() string { - return ImageUser -} - -// GetID gets the user captured image's ID which will be pushed to image hub. -func (i UserImage) GetID() string { - return fmt.Sprintf("%s_%s", i.User, i.Name) -} - -// GetUser gets the user captured image's owner name. -func (i UserImage) GetUser() string { - return i.User -} - -// Filepath gets a user captured image's absolute filepath. -func (i UserImage) Filepath() string { - return i.JoinVirtPath(i.Filename()) -} - -// Filename generates a user captured image's filename without any path info. -func (i UserImage) Filename() string { - return fmt.Sprintf("%s-%s-%s-%d.uimg", i.Distro, i.User, i.Name, i.Version) -} - -// Delete removes the system-wide image -func (i UserImage) Delete() error { - ctx, cancel := meta.Context(context.TODO()) - defer cancel() - - return store.Delete( - ctx, - []string{i.MetaKey()}, - map[string]int64{i.MetaKey(): i.GetVer()}, - ) -} - -// NextVersion . -func (i *UserImage) NextVersion() error { - // TODO - // it should be distributed calculation/update, which means unique in global. - return nil -} - -// Save updates metadata. -func (i *UserImage) Save() error { - return meta.Save(meta.Resources{i}) -} - -// Create creates a new user image to metadata. -func (i *UserImage) Create() error { - i.Status = StatusRunning - return meta.Create(meta.Resources{i}) -} - -// MetaKey . -func (i *UserImage) MetaKey() string { - return meta.UserImageKey(i.User, i.Name) -} - -// GetDistro gets the user captured image's distro. -func (i UserImage) GetDistro() string { - return i.Distro -} diff --git a/internal/models/volume.go b/internal/models/volume.go deleted file mode 100644 index 099d647..0000000 --- a/internal/models/volume.go +++ /dev/null @@ -1,380 +0,0 @@ -package models - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/idgen" - "github.com/projecteru2/yavirt/pkg/store" - "github.com/projecteru2/yavirt/pkg/utils" -) - -// Volume . 
-// etcd keys: -// -// /vols/ -type Volume struct { - *Generic - Type string `json:"type"` - MountDir string `json:"mount,omitempty"` - HostDir string `json:"host_dir,omitempty"` - Capacity int64 `json:"capacity"` - Format string `json:"format"` - HostName string `json:"host"` - GuestID string `json:"guest"` - ImageName string `json:"image,omitempty"` - SnapIDs []string `json:"snaps"` - BaseSnapshotID string `json:"base_snapshot_id"` - - Snaps Snapshots `json:"-"` -} - -// LoadVolume . -func LoadVolume(id string) (*Volume, error) { - var vol = newVolume() - vol.ID = id - - if err := meta.Load(vol); err != nil { - return nil, err - } - - return vol, vol.Load() -} - -// NewDataVolume . -func NewDataVolume(mnt string, cap int64) (*Volume, error) { - mnt = strings.TrimSpace(mnt) - - src, dest := utils.PartRight(mnt, ":") - src = strings.TrimSpace(src) - dest = filepath.Join("/", strings.TrimSpace(dest)) - - if len(src) > 0 { - src = filepath.Join("/", src) - } - - var vol = NewVolume(VolDataType, cap) - vol.HostDir = src - vol.MountDir = dest - - return vol, vol.Check() -} - -// NewSysVolume . -func NewSysVolume(cap int64, imageName string) *Volume { - vol := NewVolume(VolSysType, cap) - vol.ImageName = imageName - return vol -} - -// NewVolume . -func NewVolume(vtype string, cap int64) *Volume { - var vol = newVolume() - vol.Type = vtype - vol.Capacity = cap - return vol -} - -func newVolume() *Volume { - return &Volume{Generic: newGeneric(), Format: VolQcow2Format} -} - -// Load . -func (v *Volume) Load() (err error) { - if v.Snaps, err = LoadSnapshots(v.SnapIDs); err != nil { - return errors.Trace(err) - } - - return nil -} - -// Delete . -func (v *Volume) Delete(force bool) error { - if err := v.setStatus(StatusDestroyed, force); err != nil { - return errors.Trace(err) - } - - keys := []string{v.MetaKey()} - vers := map[string]int64{v.MetaKey(): v.GetVer()} - - ctx, cancel := meta.Context(context.Background()) - defer cancel() - - return store.Delete(ctx, keys, vers) -} - -// Amplify . -func (v *Volume) Amplify(cap int64) error { - v.Capacity = cap - return v.Save() -} - -// AppendSnaps . -func (v *Volume) AppendSnaps(snaps ...*Snapshot) error { - if v.Snaps.Len()+len(snaps) > configs.Conf.MaxSnapshotsCount { - return errors.Annotatef(errors.ErrTooManyVolumes, "at most %d", configs.Conf.MaxSnapshotsCount) - } - - res := Snapshots(snaps) - - v.Snaps.append(snaps...) - - v.SnapIDs = append(v.SnapIDs, res.ids()...) - - return nil -} - -// RemoveSnaps Remove snapshots meta by preserving the order. -func (v *Volume) RemoveSnap(snapID string) { - keep := 0 - - for i := 0; i < v.Snaps.Len(); i++ { - if v.Snaps[i].ID == snapID { - continue - } - - v.Snaps[keep] = v.Snaps[i] - v.SnapIDs[keep] = v.SnapIDs[i] - keep++ - } - - v.Snaps = v.Snaps[:keep] - v.SnapIDs = v.SnapIDs[:keep] -} - -// Save updates metadata to persistence store. -func (v *Volume) Save() error { - return meta.Save(meta.Resources{v}) -} - -// MetaKey . -func (v *Volume) MetaKey() string { - return meta.VolumeKey(v.ID) -} - -// GenerateID . -func (v *Volume) GenerateID() { - v.genID() -} - -func (v *Volume) genID() { - v.ID = idgen.Next() -} - -// GetDevicePathBySerialNumber . -func (v *Volume) GetDevicePathBySerialNumber(sn int) string { - return v.GetDevicePathByName(v.GetDeviceName(sn)) -} - -// GetDevicePathByName . -func (v *Volume) GetDevicePathByName(name string) string { - return GetDevicePathByName(name) -} - -// GetDeviceName . 
-func (v *Volume) GetDeviceName(sn int) string { - return GetDeviceName(sn) -} - -// GetDevicePathByName . -func GetDevicePathByName(name string) string { - return filepath.Join("/dev", name) -} - -// GetDeviceName . -func GetDeviceName(sn int) string { - return fmt.Sprintf("vd%s", string(utils.LowerLetters[sn])) -} - -func (v *Volume) GetMountDir() string { - if len(v.MountDir) > 0 { - return v.MountDir - } - return "/" -} - -func (v *Volume) String() string { - var mnt = "/" - if len(v.MountDir) > 0 { - mnt = v.MountDir - } - return fmt.Sprintf("%s, %s, %s:%s, size: %d", v.Filepath(), v.Status, v.GuestID, mnt, v.Capacity) -} - -// Filepath . -func (v *Volume) Filepath() string { - if len(v.HostDir) > 0 { - return filepath.Join(v.HostDir, v.Name()) - } - return v.JoinVirtPath(v.Name()) -} - -// Name . -func (v *Volume) Name() string { - return fmt.Sprintf("%s-%s.vol", v.Type, v.ID) -} - -// Check . -func (v *Volume) Check() error { - switch { - case v.Capacity < configs.Conf.MinVolumeCap || v.Capacity > configs.Conf.MaxVolumeCap: - return errors.Annotatef(errors.ErrInvalidValue, "capacity: %d", v.Capacity) - case v.HostDir == "/": - return errors.Annotatef(errors.ErrInvalidValue, "host dir: %s", v.HostDir) - case v.MountDir == "/": - return errors.Annotatef(errors.ErrInvalidValue, "mount dir: %s", v.MountDir) - default: - return nil - } -} - -// IsSys . -func (v *Volume) IsSys() bool { - return v.Type == VolSysType -} - -// LoadVolumes . -func LoadVolumes(ids []string) (vols Volumes, err error) { - vols = make(Volumes, len(ids)) - - for i, id := range ids { - if vols[i], err = LoadVolume(id); err != nil { - return nil, errors.Trace(err) - } - } - - return vols, nil -} - -// Volumes . -type Volumes []*Volume - -// Check . -func (vols Volumes) Check() error { - for _, v := range vols { - if v == nil { - return errors.Annotatef(errors.ErrInvalidValue, "nil *Volume") - } - if err := v.Check(); err != nil { - return errors.Trace(err) - } - } - return nil -} - -// Find . -func (vols Volumes) Find(volID string) (*Volume, error) { - for _, v := range vols { - if v.ID == volID { - return v, nil - } - } - - return nil, errors.Annotatef(errors.ErrInvalidValue, "volID %s not exists", volID) -} - -func (vols Volumes) resources() meta.Resources { - var r = make(meta.Resources, len(vols)) - for i, v := range vols { - r[i] = v - } - return r -} - -func (vols *Volumes) append(vol ...*Volume) { - *vols = append(*vols, vol...) -} - -func (vols Volumes) setGuestID(id string) { - for _, vol := range vols { - vol.GuestID = id - } -} - -func (vols Volumes) setHostName(name string) { - for _, vol := range vols { - vol.HostName = name - } -} - -func (vols Volumes) ids() []string { - var v = make([]string, len(vols)) - for i, vol := range vols { - v[i] = vol.ID - } - return v -} - -func (vols Volumes) genID() { - for _, vol := range vols { - vol.genID() - } -} - -func (vols Volumes) setStatus(st string, force bool) error { - for _, vol := range vols { - if err := vol.setStatus(st, force); err != nil { - return errors.Trace(err) - } - } - return nil -} - -func (vols Volumes) deleteKeys() []string { - var keys = make([]string, len(vols)) - for i, vol := range vols { - keys[i] = vol.MetaKey() - } - return keys -} - -// Exists checks the volume if exists, in which mounted the directory. -func (vols Volumes) Exists(mnt string) bool { - for _, vol := range vols { - switch { - case vol.IsSys(): - continue - case vol.MountDir == mnt: - return true - } - } - return false -} - -// Len . 
-func (vols Volumes) Len() int { - return len(vols) -} - -// GetMntVol return the vol of a path if exists . -func (vols Volumes) GetMntVol(path string) (*Volume, error) { - path = filepath.Dir(path) - if path[0] != '/' { - return nil, errors.ErrDestinationInvalid - } - - var sys, maxVol *Volume - maxLen := -1 - for _, vol := range vols { - if vol.IsSys() { - sys = vol - continue - } - - mntDirLen := len(vol.MountDir) - if mntDirLen > maxLen && strings.Index(path, vol.MountDir) == 0 { - maxLen = mntDirLen - maxVol = vol - } - } - - if maxLen < 1 { - return sys, nil - } - return maxVol, nil -} diff --git a/internal/network/const.go b/internal/network/const.go new file mode 100644 index 0000000..bdb987a --- /dev/null +++ b/internal/network/const.go @@ -0,0 +1,18 @@ +package network + +const ( + // CalicoMode . + CalicoMode = "calico" + // Network CalicoCNI + CalicoCNIMode = "calico-cni" + // VlanMode . + VlanMode = "vlan" + // OVNMode + OVNMode = "ovn" + // FakeMode + FakeMode = "fake" + + ModeLabelKey = "network/mode" + CalicoLabelKey = "network/calico" + OVNLabelKey = "network/ovn" +) diff --git a/internal/network/drivers/calico/const.go b/internal/network/drivers/calico/const.go new file mode 100644 index 0000000..1737648 --- /dev/null +++ b/internal/network/drivers/calico/const.go @@ -0,0 +1,14 @@ +package calico + +import "net" + +const ( + // OrchestratorID . + OrchestratorID = "yavirt" + + // CalicoIPv4Version . + CalicoIPv4Version = 4 +) + +// AllonesMask . +var AllonesMask = net.CIDRMask(32, net.IPv4len*8) diff --git a/internal/network/drivers/calico/dhcp.go b/internal/network/drivers/calico/dhcp.go new file mode 100644 index 0000000..94e3910 --- /dev/null +++ b/internal/network/drivers/calico/dhcp.go @@ -0,0 +1,120 @@ +package calico + +import ( + "context" + "net" + + "github.com/alphadose/haxmap" + "github.com/cockroachdb/errors" + "github.com/insomniacslk/dhcp/dhcpv4" + "github.com/insomniacslk/dhcp/dhcpv4/server4" + "github.com/projecteru2/core/log" +) + +type singleServer struct { + *server4.Server + Iface string + IP net.IP +} + +type DHCPServer struct { + Servers haxmap.Map[string, *singleServer] + Port int + gwIP net.IP +} + +func NewDHCPServer(gw net.IP) *DHCPServer { + return &DHCPServer{ + Port: 67, + gwIP: gw, + } +} + +func (srv *DHCPServer) AddInterface(iface string, ip net.IP, ipNet *net.IPNet) error { + logger := log.WithFunc("DHCPServer.AddInterface") + laddr := net.UDPAddr{ + IP: net.ParseIP("0.0.0.0"), + Port: srv.Port, + } + dhcpSrv, err := server4.NewServer(iface, &laddr, srv.NewHandler(ip, ipNet)) + if err != nil { + return errors.Wrapf(err, "fialed to create dhcpv4 server") + } + if oldSrv, exists := srv.Servers.Get(iface); exists { + logger.Infof(context.TODO(), "Stop old dhcp server for interface %s", iface) + oldSrv.Close() + } + srv.Servers.Set(iface, &singleServer{ + Server: dhcpSrv, + Iface: iface, + IP: ip, + }) + go func() { + defer logger.Infof(context.TODO(), "dhcp server for %s: %s exits", iface, ip.String()) + logger.Infof(context.TODO(), "starting dhcp server for %s: %s", iface, ip.String()) + _ = dhcpSrv.Serve() + }() + return nil +} + +func (srv *DHCPServer) RemoveInterface(iface string) { + ssrv, exists := srv.Servers.GetAndDel(iface) + if !exists { + return + } + ssrv.Close() +} + +func (srv *DHCPServer) NewHandler(ip net.IP, ipNet *net.IPNet) server4.Handler { + return func(conn net.PacketConn, peer net.Addr, m *dhcpv4.DHCPv4) { + // Process DHCP requests (DISCOVER, OFFER, REQUEST, DECLINE, RELEASE) + logger := log.WithFunc("dhcp.handler") + 
logger.Debugf(context.TODO(), m.Summary()) + leaseTime := uint32(3600) + + switch m.MessageType() { + case dhcpv4.MessageTypeDiscover: + // Offer an IP address from the subnet based on server IP + offer, err := dhcpv4.NewReplyFromRequest( + m, + dhcpv4.WithMessageType(dhcpv4.MessageTypeOffer), + dhcpv4.WithYourIP(ip), + dhcpv4.WithClientIP(m.ClientIPAddr), + dhcpv4.WithLeaseTime(leaseTime), + dhcpv4.WithNetmask(ipNet.Mask), + dhcpv4.WithGatewayIP(srv.gwIP), + ) + if err != nil { + logger.Errorf(context.TODO(), err, "Failed to create DHCP offer") + return + } + if _, err := conn.WriteTo(offer.ToBytes(), peer); err != nil { + logger.Error(context.TODO(), err, "failed to write offer packet.") + } + case dhcpv4.MessageTypeRequest: + // Check if requested IP is within our range and send ACK + if m.YourIPAddr.Equal(ip) { + ack, err := dhcpv4.NewReplyFromRequest( + m, + dhcpv4.WithMessageType(dhcpv4.MessageTypeAck), + dhcpv4.WithYourIP(ip), + dhcpv4.WithClientIP(m.ClientIPAddr), + // dhcpv4.WithServerIP(ip), + dhcpv4.WithLeaseTime(leaseTime), + dhcpv4.WithNetmask(ipNet.Mask), + dhcpv4.WithGatewayIP(srv.gwIP), + ) + if err != nil { + logger.Errorf(context.TODO(), err, "Failed to create DHCP ACK") + } + if _, err := conn.WriteTo(ack.ToBytes(), peer); err != nil { + logger.Errorf(context.TODO(), err, "failed to write ACK package.") + } + } else { + logger.Warnf(context.TODO(), "Invalid IP request from %s for %s", peer, m.YourIPAddr) + } + default: + logger.Warnf(context.TODO(), "Unhandled DHCP message type: %d", m.MessageType()) + } + } +} diff --git a/internal/network/drivers/calico/driver.go b/internal/network/drivers/calico/driver.go new file mode 100644 index 0000000..40bbd03 --- /dev/null +++ b/internal/network/drivers/calico/driver.go @@ -0,0 +1,160 @@ +package calico + +import ( + "context" + "net" + "strings" + "sync" + + calitype "github.com/projectcalico/api/pkg/apis/projectcalico/v3" + "github.com/projectcalico/calico/libcalico-go/lib/apiconfig" + libcaliapi "github.com/projectcalico/calico/libcalico-go/lib/apis/v3" + "github.com/projectcalico/calico/libcalico-go/lib/clientv3" + "github.com/projectcalico/calico/libcalico-go/lib/options" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/network/utils/device" + "github.com/projecteru2/yavirt/pkg/netx" + "github.com/projecteru2/yavirt/pkg/terrors" + + "github.com/cockroachdb/errors" +) + +// Driver . +type Driver struct { + sync.Mutex + + clientv3.Interface + + mCol *MetricsCollector + dev *device.Driver + + gateway *device.Dummy + gatewayWorkloadEndpoint *libcaliapi.WorkloadEndpoint + + nodename string + hostIP string + poolNames map[string]struct{} + dhcp *DHCPServer +} + +// NewDriver . 
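The handler above answers DISCOVER with an OFFER and REQUEST with an ACK, always handing out the single fixed IP bound to that tap interface, with 169.254.1.1 as the gateway. A hedged usage sketch of the surrounding DHCPServer type follows; the interface name and addresses below are invented, error handling trimmed, and the function itself is illustrative rather than shipped code.

package calico

import (
	"net"
)

// exampleServeEndpoint is an illustrative (not shipped) helper showing how the
// pieces above fit together: one DHCP server process-wide, one interface
// registered per workload endpoint.
func exampleServeEndpoint() error {
	// The guests' default route points at the link-local gateway 169.254.1.1.
	srv := NewDHCPServer(net.ParseIP("169.254.1.1"))

	// Values below are made up for the example.
	ip, ipNet, err := net.ParseCIDR("10.233.0.5/32")
	if err != nil {
		return err
	}
	// Serve DHCP on the endpoint's tap device, always answering with its fixed IP.
	return srv.AddInterface("cali-example0", ip, ipNet)
}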
+func NewDriver(cfg *configs.CalicoConfig) (*Driver, error) { + configFile, poolNames := cfg.ConfigFile, cfg.PoolNames + dev, err := device.New() + if err != nil { + return nil, errors.Wrap(err, "") + } + + hostIP, err := netx.GetOutboundIP("8.8.8.8:53") + if err != nil { + return nil, errors.Wrap(err, "") + } + + caliConf, err := apiconfig.LoadClientConfig(configFile) + if err != nil { + return nil, errors.Wrap(err, "") + } + + cali, err := clientv3.New(*caliConf) + if err != nil { + return nil, errors.Wrap(err, "") + } + + var driver = &Driver{ + Interface: cali, + mCol: &MetricsCollector{}, + nodename: cfg.Nodename, + dev: dev, + hostIP: hostIP, + poolNames: map[string]struct{}{}, + } + + for _, pn := range poolNames { + driver.poolNames[pn] = struct{}{} + } + + return driver, nil +} + +func (d *Driver) CheckHealth(ctx context.Context) (err error) { + defer func() { + if err != nil { + d.mCol.healthy.Store(false) + } else { + d.mCol.healthy.Store(true) + } + }() + n, err := d.Nodes().Get(ctx, d.nodename, options.GetOptions{}) + if err != nil { + return err + } + if n == nil { + return errors.Newf("calico node %s not found", d.nodename) + } + if err := CheckNodeStatus(); err != nil { + return err + } + return nil +} + +func (d *Driver) InitDHCP() error { + logger := log.WithFunc("calico.initDHCP") + gwIP := net.ParseIP("169.254.1.1") + dhcpSrv := NewDHCPServer(gwIP) + weps, err := d.ListWEP() + if err != nil { + return err + } + for _, wep := range weps { + iface := wep.Spec.InterfaceName + if len(wep.Spec.IPNetworks) == 0 { + continue + } + ip, ipNet, err := net.ParseCIDR(wep.Spec.IPNetworks[0]) + if err != nil { + logger.Errorf(context.TODO(), err, "failed to parse cidr: %s", wep.Spec.IPNetworks[0]) + continue + } + if err := dhcpSrv.AddInterface(iface, ip, ipNet); err != nil { + logger.Errorf(context.TODO(), err, "failed to add interface to dhcp.") + } + } + d.dhcp = dhcpSrv + return nil +} + +func (d *Driver) getIPPool(poolName string) (pool *calitype.IPPool, err error) { + if poolName != "" { + return d.IPPools().Get(context.Background(), poolName, options.GetOptions{}) + } + pools, err := d.IPPools().List(context.Background(), options.ListOptions{}) + switch { + case err != nil: + return pool, errors.Wrap(err, "") + case len(pools.Items) < 1: + return pool, errors.Wrap(terrors.ErrCalicoPoolNotExists, "") + } + + if len(d.poolNames) < 1 { + return &pools.Items[0], nil + } + + for _, p := range pools.Items { + if _, exists := d.poolNames[p.Name]; exists { + return &p, nil + } + } + + return pool, errors.Wrapf(terrors.ErrCalicoPoolNotExists, "no such pool names: %s", d.poolNamesStr()) +} + +func (d *Driver) poolNamesStr() string { + names := d.PoolNames() + return strings.Join(names, ", ") +} + +// Ipam . +func (d *Driver) Ipam() *Ipam { + return newIpam(d) +} diff --git a/internal/network/drivers/calico/driver_test.go b/internal/network/drivers/calico/driver_test.go new file mode 100644 index 0000000..e0c0841 --- /dev/null +++ b/internal/network/drivers/calico/driver_test.go @@ -0,0 +1,20 @@ +package calico + +import ( + "strings" + "testing" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/projecteru2/yavirt/pkg/test/assert" +) + +func TestPoolNameStr(t *testing.T) { + d := &Driver{ + poolNames: map[string]struct{}{"a": {}, "b": {}, "c": {}}, + } + ss := d.poolNamesStr() + l := strings.Split(ss, ", ") + s1 := mapset.NewSet[string](l...) 
+ diff := s1.Difference(mapset.NewSet("a", "b", "c")) + assert.Equal(t, diff.Cardinality(), 0) +} diff --git a/internal/network/drivers/calico/endpoint.go b/internal/network/drivers/calico/endpoint.go new file mode 100644 index 0000000..e5bb803 --- /dev/null +++ b/internal/network/drivers/calico/endpoint.go @@ -0,0 +1,137 @@ +package calico + +import ( + "context" + "fmt" + "strings" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/projecteru2/yavirt/internal/network/utils/device" + "github.com/projecteru2/yavirt/pkg/terrors" + "github.com/projecteru2/yavirt/pkg/utils" +) + +const ( + calicoMTU = 1500 +) + +// CreateEndpointNetwork . +func (h *Driver) CreateEndpointNetwork(args types.EndpointArgs) (types.EndpointArgs, func() error, error) { + // Create network policy if necessary + // TODO: maybe we can create network policy when create new user. + if err := h.CreateNetworkPolicy(args.Calico.Namespace); err != nil { + return args, nil, errors.Wrapf(err, "failed to create network policy") + } + + h.Lock() + defer h.Unlock() + + // alloc an ip for this endpoint + ip, err := h.assignIP(&args) + if err != nil { + return args, nil, errors.Wrap(err, "") + } + rollbackIP := func() error { + return h.releaseIPs(ip) + } + + args.IPs = append(args.IPs, ip) + + if args.EndpointID, err = h.generateEndpointID(); err != nil { + return args, rollbackIP, errors.Wrap(err, "") + } + + dev, err := h.createTap() + if err != nil { + return args, rollbackIP, errors.Wrap(err, "") + } + args.DevName = dev.Name() + if err = dev.Up(); err != nil { + return args, rollbackIP, errors.Wrap(err, "") + } + + // qemu will create TAP device when start, so we can delete it here + defer func() { + // try to delete tap device, we ignore the error + err = h.deleteTap(dev) + log.Debugf(context.TODO(), "After delete tap device(%v): %v", dev.Name(), err) + }() + + if _, err = h.createWEP(args); err != nil { + return args, rollbackIP, err + } + rollback := func() error { + err1 := rollbackIP() + err2 := h.DeleteEndpointNetwork(args) + return errors.CombineErrors(err1, err2) + } + args.MTU = calicoMTU + return args, rollback, err +} + +// JoinEndpointNetwork . +func (h *Driver) JoinEndpointNetwork(args types.EndpointArgs) (func() error, error) { + if err := args.Check(); err != nil { + return nil, errors.Wrap(err, "") + } + + h.Lock() + defer h.Unlock() + + devDriver, err := device.New() + if err != nil { + return nil, errors.Wrap(err, "") + } + dev, err := devDriver.ShowLink(args.DevName) + if err != nil { + return nil, errors.Wrap(err, "failed to get link") + } + + for _, ip := range args.IPs { + ip.BindDevice(dev) + + if err := dev.AddRoute(ip.IPAddr(), h.hostIP); err != nil { + if !terrors.IsVirtLinkRouteExistsErr(err) { + return nil, errors.Wrap(err, "") + } + } + } + + if h.dhcp != nil && len(args.IPs) > 0 { + ip := args.IPs[0] + if err := h.dhcp.AddInterface(dev.Name(), ip.NetIP(), ip.IPNetwork()); err != nil { + return nil, errors.Wrap(err, "Failed to add interface to dhcp server") + } + } + rollback := func() error { + var err error + for _, ip := range args.IPs { + cidr := fmt.Sprintf("%s/32", ip.IPAddr()) + if err1 := dev.DeleteRoute(cidr); err1 != nil { + err = errors.CombineErrors(err, err1) + } + } + return err + } + + return rollback, nil +} + +// DeleteEndpointNetwork . +func (h *Driver) DeleteEndpointNetwork(args types.EndpointArgs) error { + h.Lock() + defer h.Unlock() + err1 := h.releaseIPs(args.IPs...) 
+ err2 := h.deleteWEP(&args) + return errors.CombineErrors(err1, err2) +} + +func (h *Driver) generateEndpointID() (string, error) { + var uuid, err = utils.UUIDStr() + if err != nil { + return "", errors.Wrap(err, "") + } + return strings.ReplaceAll(uuid, "-", ""), nil +} diff --git a/internal/network/drivers/calico/gateway.go b/internal/network/drivers/calico/gateway.go new file mode 100644 index 0000000..3f01898 --- /dev/null +++ b/internal/network/drivers/calico/gateway.go @@ -0,0 +1,163 @@ +package calico + +import ( + "net" + + libcaliapi "github.com/projectcalico/calico/libcalico-go/lib/apis/v3" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/projecteru2/yavirt/internal/network/utils/device" + "github.com/projecteru2/yavirt/pkg/terrors" +) + +// InitGateway . +func (h *Driver) InitGateway(gwName string) error { + dev, err := device.New() + if err != nil { + return errors.Wrap(err, "") + } + + h.Lock() + defer h.Unlock() + + gw, err := dev.ShowLink(gwName) + if err != nil { + if terrors.IsVirtLinkNotExistsErr(err) { + gw, err = dev.AddLink(device.LinkTypeDummy, gwName) + } + + if err != nil { + return errors.Wrap(err, "") + } + } + + var ok bool + if h.gateway, ok = gw.(*device.Dummy); !ok { + return errors.Wrapf(terrors.ErrInvalidValue, "expect *device.Dummy, but %v", gw) + } + + if err := h.gateway.Up(); err != nil { + return errors.Wrap(err, "") + } + + if err := h.loadGateway(); err != nil { + return errors.Wrap(err, "") + } + + gwIPs, err := h.gatewayIPs() + if err != nil { + return errors.Wrap(err, "") + } + + return h.bindGatewayIPs(gwIPs...) +} + +// Gateway . +func (h *Driver) Gateway() *device.Dummy { + h.Lock() + defer h.Unlock() + return h.gateway +} + +// GatewayWorkloadEndpoint . +func (h *Driver) GatewayWorkloadEndpoint() *libcaliapi.WorkloadEndpoint { + h.Lock() + defer h.Unlock() + return h.gatewayWorkloadEndpoint +} + +func (h *Driver) bindGatewayIPs(ips ...meta.IP) error { + for _, ip := range ips { + var addr, err = h.dev.ParseCIDR(ip.CIDR()) + if err != nil { + return errors.Wrap(err, "") + } + + addr.IPNet = &net.IPNet{ + IP: addr.IPNet.IP, + Mask: AllonesMask, + } + + if err := h.gateway.BindAddr(addr); err != nil && !terrors.IsVirtLinkAddrExistsErr(err) { + return errors.Wrap(err, "") + } + + if err := h.gateway.ClearRoutes(); err != nil { + return errors.Wrap(err, "") + } + } + + return nil +} + +// RefreshGateway refreshes gateway data. +func (h *Driver) RefreshGateway() error { + h.Lock() + defer h.Unlock() + return h.loadGateway() +} + +func (h *Driver) loadGateway() error { + hn := configs.Hostname() + + var args types.EndpointArgs + args.Hostname = hn + //TODO: better way to set namespace here + args.Calico.Namespace = hn + args.EndpointID = configs.Conf.Network.Calico.GatewayName + + wep, err := h.getWEP(args) + if err != nil { + if terrors.IsCalicoEndpointNotExistsErr(err) { + return nil + } + return errors.Wrap(err, "") + } + + h.gatewayWorkloadEndpoint = wep + + return nil +} + +// GetGatewayIP gets a gateway IP which could serve the ip. 
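CreateEndpointNetwork and JoinEndpointNetwork above hand rollback closures back to the caller and merge partial-cleanup failures with errors.CombineErrors. The standalone sketch below shows the shape of that pattern with the calico-specific steps replaced by stubs (names are invented):

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

// step is a stand-in for "assign IP", "create WEP", etc.; each returns its own rollback.
func step(name string, fail bool) (rollback func() error, err error) {
	if fail {
		return nil, errors.Newf("%s failed", name)
	}
	return func() error {
		fmt.Println("rolled back:", name)
		return nil
	}, nil
}

func createEndpoint() (rollback func() error, err error) {
	rbIP, err := step("assign IP", false)
	if err != nil {
		return nil, err
	}

	rbWEP, err := step("create WEP", false)
	if err != nil {
		// Undo what already succeeded before returning.
		return nil, errors.CombineErrors(err, rbIP())
	}

	// Compose the rollbacks so the caller can undo everything in one call.
	return func() error {
		return errors.CombineErrors(rbIP(), rbWEP())
	}, nil
}

func main() {
	rollback, err := createEndpoint()
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	// Later, if a subsequent stage fails, the caller invokes the combined rollback.
	_ = rollback()
}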
+func (h *Driver) GetGatewayIP(ip meta.IP) (meta.IP, error) { + h.Lock() + defer h.Unlock() + return h.getGatewayIP(ip) +} + +func (h *Driver) getGatewayIP(ip meta.IP) (meta.IP, error) { + if h.gatewayWorkloadEndpoint == nil { + return nil, errors.Wrapf(terrors.ErrCalicoGatewayIPNotExists, "no such gateway WorkloadEndpoint") + } + + for _, cidr := range h.gatewayWorkloadEndpoint.Spec.IPNetworks { + var gwIP, err = ParseCIDR(cidr) + if err != nil { + return nil, errors.Wrap(err, "") + } + + if h.isUnderGateway(gwIP, ip) { + return gwIP, nil + } + } + + return nil, errors.Wrapf(terrors.ErrCalicoGatewayIPNotExists, "for %s", ip) +} + +func (h *Driver) isUnderGateway(gatewayIP, ip meta.IP) bool { + var ipn = &net.IPNet{} + ipn.IP = gatewayIP.NetIP() + ipn.Mask = net.CIDRMask(ip.Prefix(), net.IPv4len*8) + return ipn.Contains(ip.NetIP()) +} + +func (h *Driver) gatewayIPs() (ips []meta.IP, err error) { + if h.gatewayWorkloadEndpoint != nil { + ips, err = ConvIPs(h.gatewayWorkloadEndpoint) + } + return +} diff --git a/internal/network/drivers/calico/health.go b/internal/network/drivers/calico/health.go new file mode 100644 index 0000000..730a221 --- /dev/null +++ b/internal/network/drivers/calico/health.go @@ -0,0 +1,275 @@ +// Copyright (c) 2016 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package calico + +import ( + "bufio" + "context" + "fmt" + "net" + "regexp" + "strings" + "time" + + "reflect" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/internal/utils" + "github.com/samber/lo" + "github.com/shirou/gopsutil/process" +) + +// CheckNodeStatus prints status of the node and returns error (if any) +func CheckNodeStatus() error { + // Go through running processes and check if `calico-felix` processes is not running + processes, err := process.Processes() + if err != nil { + return err + } + + // For older versions of calico/node, the process was called `calico-felix`. Newer ones use `calico-node -felix`. 
+ if !utils.PSContains([]string{"calico-felix"}, processes) && !utils.PSContains([]string{"calico-node", "-felix"}, processes) { + // Return and print message if calico-node is not running + return errors.New("calico process is not running") + } + + if !utils.PSContains([]string{"bird"}, processes) { + return errors.New("BIRDv4 process: 'bird' is not running") + } + peers, err := getBIRDPeers("4") + if err != nil { + return err + } + if checkActivePeers(peers) != nil { + return errors.New("No active IPV4 peers") + } + // // Check if birdv6 process is running, print the BGP peer table if it is, else print a warning + // if psContains([]string{"bird6"}, processes) { + // if peers, err := getBIRDPeers("6"); err != nil { + // return err + // } + // } else { + // fmt.Printf("\nINFO: BIRDv6 process: 'bird6' is not running.\n") + // } + + return nil +} + +// Check for Word_ where every octate is separated by "_", regardless of IP protocols +// Example match: "Mesh_192_168_56_101" or "Mesh_fd80_24e2_f998_72d7__2" +var bgpPeerRegex = regexp.MustCompile(`^(Global|Node|Mesh)_(.+)$`) + +// Mapping the BIRD/GoBGP type extracted from the peer name to the display type. +var bgpTypeMap = map[string]string{ + "Global": "global", + "Mesh": "node-to-node mesh", + "Node": "node specific", +} + +// Timeout for querying BIRD +var birdTimeOut = 2 * time.Second + +// Expected BIRD protocol table columns +var birdExpectedHeadings = []string{"name", "proto", "table", "state", "since", "info"} + +// bgpPeer is a structure containing details about a BGP peer. +type bgpPeer struct { + PeerIP string + PeerType string + State string + Since string + BGPState string + Info string +} + +// Unmarshal a peer from a line in the BIRD protocol output. Returns true if +// successful, false otherwise. +func (b *bgpPeer) unmarshalBIRD(line, ipSep string) bool { + // Split into fields. We expect at least 6 columns: + // name, proto, table, state, since and info. + // The info column contains the BGP state plus possibly some additional + // info (which will be columns > 6). + // + // Peer names will be of the format described by bgpPeerRegex. + log.Debugf(context.TODO(), "Parsing line: %s", line) + columns := strings.Fields(line) + if len(columns) < 6 { + log.Debug(context.TODO(), "Not a valid line: fewer than 6 columns") + return false + } + if columns[1] != "BGP" { + log.Debug(context.TODO(), "Not a valid line: protocol is not BGP") + return false + } + + // Check the name of the peer is of the correct format. This regex + // returns two components: + // - A type (Global|Node|Mesh) which we can map to a display type + // - An IP address (with _ separating the octets) + sm := bgpPeerRegex.FindStringSubmatch(columns[0]) + if len(sm) != 3 { + log.Debugf(context.TODO(), "Not a valid line: peer name '%s' is not correct format", columns[0]) + return false + } + var ok bool + b.PeerIP = strings.ReplaceAll(sm[2], "_", ipSep) + if b.PeerType, ok = bgpTypeMap[sm[1]]; !ok { + log.Debugf(context.TODO(), "Not a valid line: peer type '%s' is not recognized", sm[1]) + return false + } + + // Store remaining columns (piecing back together the info string) + b.State = columns[3] + b.Since = columns[4] + b.BGPState = columns[5] + if len(columns) > 6 { + b.Info = strings.Join(columns[6:], " ") + } + + return true +} + +// getBIRDPeers queries BIRD and displays the local peers in table format. 
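unmarshalBIRD above consumes the six-column rows of BIRD's `show protocols` table (a sample of that output appears as a comment in scanBIRDPeers further down). A tiny standalone illustration of how one such row (taken from that sample) splits into the fields the parser reads:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// One data row from BIRD's "show protocols" output.
	line := "Mesh_172_17_8_102 BGP      master   up     2016-11-21  Established"

	cols := strings.Fields(line)
	// cols[0] encodes the peer type and IP: Mesh_172_17_8_102 is a node-to-node
	// mesh peer at 172.17.8.102.
	name := cols[0]
	peerIP := strings.ReplaceAll(strings.TrimPrefix(name, "Mesh_"), "_", ".")

	fmt.Println("proto:", cols[1]) // BGP
	fmt.Println("state:", cols[3]) // up
	fmt.Println("since:", cols[4]) // 2016-11-21
	fmt.Println("info :", cols[5]) // Established
	fmt.Println("peer :", peerIP)  // 172.17.8.102
}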
+func getBIRDPeers(ipv string) ([]bgpPeer, error) { + log.Debugf(context.TODO(), "Print BIRD peers for IPv%s", ipv) + birdSuffix := "" + if ipv == "6" { + birdSuffix = "6" + } + + log.Debugf(context.TODO(), "IPv%s BGP status", ipv) + + // Try connecting to the bird socket in `/var/run/calico/` first to get the data + c, err := net.Dial("unix", fmt.Sprintf("/var/run/calico/bird%s.ctl", birdSuffix)) + if err != nil { + // If that fails, try connecting to bird socket in `/var/run/bird` (which is the + // default socket location for bird install) for non-containerized installs + log.Debug(context.TODO(), "Failed to connect to BIRD socket in /var/run/calic, trying /var/run/bird \n") + c, err = net.Dial("unix", fmt.Sprintf("/var/run/bird/bird%s.ctl", birdSuffix)) + if err != nil { + return nil, errors.Wrapf(err, "Error querying BIRD: unable to connect to BIRDv%s socket: %v", ipv, err) + } + } + defer c.Close() + + // To query the current state of the BGP peers, we connect to the BIRD + // socket and send a "show protocols" message. BIRD responds with + // peer data in a table format. + // + // Send the request. + _, err = c.Write([]byte("show protocols\n")) + if err != nil { + return nil, errors.Wrapf(err, "Error executing command: unable to write to BIRD socket") + } + + // Scan the output and collect parsed BGP peers + log.Debug(context.TODO(), "Reading output from BIRD\n") + peers, err := scanBIRDPeers(ipv, c) + if err != nil { + return nil, errors.Wrapf(err, "Error executing command") + } + + return peers, nil +} + +// scanBIRDPeers scans through BIRD output to return a slice of bgpPeer +// structs. +// +// We split this out from the main printBIRDPeers() function to allow us to +// test this processing in isolation. +func scanBIRDPeers(ipv string, conn net.Conn) ([]bgpPeer, error) { + // Determine the separator to use for an IP address, based on the + // IP version. + ipSep := "." + if ipv == "6" { + ipSep = ":" + } + + // The following is sample output from BIRD + // + // 0001 BIRD 1.5.0 ready. + // 2002-name proto table state since info + // 1002-kernel1 Kernel master up 2016-11-21 + // device1 Device master up 2016-11-21 + // direct1 Direct master up 2016-11-21 + // Mesh_172_17_8_102 BGP master up 2016-11-21 Established + // 0000 + scanner := bufio.NewScanner(conn) + peers := []bgpPeer{} + + // Set a time-out for reading from the socket connection. + err := conn.SetReadDeadline(time.Now().Add(birdTimeOut)) + if err != nil { + return nil, errors.New("failed to set time-out") + } + + for scanner.Scan() { + // Process the next line that has been read by the scanner. + str := scanner.Text() + log.Debugf(context.TODO(), "Read: %s\n", str) + + if strings.HasPrefix(str, "0000") { //nolint + // "0000" means end of data + break + } else if strings.HasPrefix(str, "0001") { //nolint + // "0001" code means BIRD is ready. + } else if strings.HasPrefix(str, "2002") { + // "2002" code means start of headings + f := strings.Fields(str[5:]) + if !reflect.DeepEqual(f, birdExpectedHeadings) { + return nil, errors.New("unknown BIRD table output format") + } + } else if strings.HasPrefix(str, "1002") { + // "1002" code means first row of data. + peer := bgpPeer{} + if peer.unmarshalBIRD(str[5:], ipSep) { + peers = append(peers, peer) + } + } else if strings.HasPrefix(str, " ") { + // Row starting with a " " is another row of data. + peer := bgpPeer{} + if peer.unmarshalBIRD(str[1:], ipSep) { + peers = append(peers, peer) + } + } else { + // Format of row is unexpected. 
+ return nil, errors.New("unexpected output line from BIRD") + } + + // Before reading the next line, adjust the time-out for + // reading from the socket connection. + err = conn.SetReadDeadline(time.Now().Add(birdTimeOut)) + if err != nil { + return nil, errors.New("failed to adjust time-out") + } + } + + return peers, scanner.Err() +} + +func checkActivePeers(peers []bgpPeer) error { + activePeers := lo.Reduce(peers, func(agg int, peer bgpPeer, _ int) int { + // log.Infof(context.TODO(), "+++++++++ %v, %s", peer, peer.State) + if peer.State == "up" { + agg++ + } + return agg + }, 0) + if activePeers <= 0 { + return errors.New("no active peers") + } + return nil +} diff --git a/internal/vnet/calico/ip.go b/internal/network/drivers/calico/ip.go similarity index 88% rename from internal/vnet/calico/ip.go rename to internal/network/drivers/calico/ip.go index c5d6aca..3ff1cc3 100644 --- a/internal/vnet/calico/ip.go +++ b/internal/network/drivers/calico/ip.go @@ -7,10 +7,10 @@ import ( calinet "github.com/projectcalico/calico/libcalico-go/lib/net" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/internal/vnet" - "github.com/projecteru2/yavirt/internal/vnet/device" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/network/utils/device" "github.com/projecteru2/yavirt/pkg/netx" ) @@ -31,7 +31,7 @@ type IP struct { func ParseCIDR(cidr string) (*IP, error) { var _, ipn, err = parseCIDR(cidr) if err != nil { - return nil, errors.Annotatef(err, cidr) + return nil, errors.Wrap(err, cidr) } return NewIP(ipn), nil } @@ -68,7 +68,7 @@ func (ip *IP) BindGuestID(guestID string) { // IntIP . func (ip *IP) IntIP() (v int64) { - v, _ = netx.IPv4ToInt(ip.IP.String()) //nolint + v, _ = netx.IPv4ToInt(ip.IP.String()) return } @@ -91,7 +91,7 @@ func (ip *IP) String() string { func (ip *IP) AutoRouteCIDR() (string, error) { var _, ipn, err = netx.ParseCIDR(ip.CIDR()) if err != nil { - return "", errors.Trace(err) + return "", errors.Wrap(err, "") } return ipn.String(), nil } @@ -134,7 +134,7 @@ func (ip *IP) SubnetAddr() string { // IntGateway . func (ip *IP) IntGateway() (v int64) { - v, _ = netx.IPv4ToInt(ip.GatewayAddr()) //nolint + v, _ = netx.IPv4ToInt(ip.GatewayAddr()) return v } @@ -162,7 +162,7 @@ func (ip *IP) MetaKey() string { // NetworkMode . func (ip *IP) NetworkMode() string { - return vnet.NetworkCalico + return network.CalicoMode } // NetworkName . diff --git a/internal/network/drivers/calico/ipam.go b/internal/network/drivers/calico/ipam.go new file mode 100644 index 0000000..d8a8087 --- /dev/null +++ b/internal/network/drivers/calico/ipam.go @@ -0,0 +1,224 @@ +package calico + +import ( + "context" + "net" + "sync" + + libcaliipam "github.com/projectcalico/calico/libcalico-go/lib/ipam" + libcalinet "github.com/projectcalico/calico/libcalico-go/lib/net" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/projecteru2/yavirt/pkg/netx" + "github.com/projecteru2/yavirt/pkg/store/etcd" + "github.com/projecteru2/yavirt/pkg/terrors" +) + +// Ipam . +type Ipam struct { + *Driver + lck sync.Mutex +} + +func newIpam(driver *Driver) *Ipam { + return &Ipam{Driver: driver} +} + +// Assign . 
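+// A minimal usage sketch (the pool name and the receiver variable are
+// illustrative assumptions; the calico Driver's AssignIP wraps this call and
+// additionally binds the 169.254.1.1/32 link-local gateway to the result):
+//
+//	args := &types.EndpointArgs{}
+//	args.Calico.IPPool = "pool0"
+//	ip, err := ipam.Assign(context.TODO(), args)
+//	// on success, ip is a *calico.IP carved out of the pool's CIDR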
+func (ipam *Ipam) Assign(_ context.Context, args *types.EndpointArgs) (meta.IP, error) { + hn := configs.Hostname() + + ipam.lck.Lock() + defer ipam.lck.Unlock() + + ipn, err := ipam.getIPv4Net(args.Calico.IPPool) + if err != nil { + return nil, errors.Wrap(err, "") + } + + caliArgs := libcaliipam.AutoAssignArgs{ + Num4: 1, + Hostname: hn, + IPv4Pools: []libcalinet.IPNet{*ipn}, + IntendedUse: "Workload", + } + + return ipam.assign(caliArgs) +} + +func (ipam *Ipam) assign(args libcaliipam.AutoAssignArgs) (meta.IP, error) { + var ipv4s, err = ipam.autoAssign(args) + if err != nil { + return nil, errors.Wrap(err, "") + } + + if len(ipv4s.IPs) < 1 { + return nil, errors.Wrap(terrors.ErrInsufficientIP, "") + } + + var ip = ipv4s.IPs[0] + var ones, _ = ip.Mask.Size() + if ones >= 30 { + return nil, errors.Wrapf(terrors.ErrCalicoTooSmallSubnet, "/%d", ones) + } + + if err := netx.CheckIPv4(ip.IP, ip.Mask); err != nil { + if !terrors.IsIPv4IsNetworkNumberErr(err) && !terrors.IsIPv4IsBroadcastErr(err) { + return nil, errors.Wrap(err, "") + } + + // Occupies the network no. and broadcast addr., + // doesn't release them to Calico unallocated pool. + // and then retry to assign. + log.Warnf(context.TODO(), "occupy %s as it's a network no. or broadcast addr.", ip) + return ipam.assign(args) + } + + return NewIP(&ip), nil +} + +func (ipam *Ipam) autoAssign(args libcaliipam.AutoAssignArgs) (ipv4s *libcaliipam.IPAMAssignments, err error) { + etcd.RetryTimedOut(func() error { //nolint + ipv4s, _, err = ipam.IPAM().AutoAssign(context.Background(), args) + return err + }, 3) //nolint:gomnd // try 3 times + return +} + +func (ipam *Ipam) getIPv4Net(poolName string) (*libcalinet.IPNet, error) { + pool, err := ipam.getIPPool(poolName) + if err != nil { + return nil, errors.Wrap(err, "") + } + + _, ipn, err := libcalinet.ParseCIDR(pool.Spec.CIDR) + + switch { + case err != nil: + return nil, errors.Wrap(err, pool.Spec.CIDR) + + case ipn.Version() != CalicoIPv4Version: + return nil, errors.Wrapf(terrors.ErrCalicoIPv4Only, "%d", ipn.Version()) + } + + return ipn, err +} + +// Release . +func (ipam *Ipam) Release(ctx context.Context, ips ...meta.IP) error { + ipam.lck.Lock() + defer ipam.lck.Unlock() + + var releaseOpts = make([]libcaliipam.ReleaseOptions, len(ips)) + for i := range ips { + var ip, ok = ips[i].(*IP) + if !ok { + return errors.Wrapf(terrors.ErrInvalidValue, "expect *IP, but %v", ips[i]) + } + + releaseOpts[i] = libcaliipam.ReleaseOptions{Address: ip.IP.String()} + } + + return etcd.RetryTimedOut(func() error { + var _, err = ipam.IPAM().ReleaseIPs(ctx, releaseOpts...) + return err + }, 3) //nolint:gomnd // try 3 times +} + +// Query . +func (ipam *Ipam) Query(ctx context.Context, args meta.IPNets) ([]meta.IP, error) { + ipam.lck.Lock() + defer ipam.lck.Unlock() + + var ips = make([]meta.IP, len(args)) + var err error + + for i := range args { + if ips[i], err = ipam.load(ctx, args[i]); err != nil { + return nil, errors.Wrap(err, "") + } + } + + return ips, nil +} + +func (ipam *Ipam) load(_ context.Context, arg *meta.IPNet) (*IP, error) { + var ip, err = ParseCIDR(arg.CIDR()) + if err != nil { + return nil, errors.Wrap(err, "") + } + + gwIPNet, err := ipam.getGatewayIPNet(arg) + if err != nil { + return nil, errors.Wrap(err, "") + } + + ip.BindGatewayIPNet(gwIPNet) + + return ip, nil +} + +func (ipam *Ipam) getGatewayIPNet(arg *meta.IPNet) (*net.IPNet, error) { + return netx.ParseCIDR2(arg.GatewayCIDR()) +} + +// NewIP . 
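+// The network-name parameter is ignored; a minimal sketch (the CIDR value is
+// illustrative only):
+//
+//	ip, err := d.NewIP("", "10.233.1.5/26") // equivalent to ParseCIDR("10.233.1.5/26")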
+func (h *Driver) NewIP(_, cidr string) (meta.IP, error) { + return ParseCIDR(cidr) +} + +// AssignIP . +func (h *Driver) AssignIP(args *types.EndpointArgs) (ip meta.IP, err error) { + h.Lock() + defer h.Unlock() + return h.assignIP(args) +} + +func (h *Driver) assignIP(args *types.EndpointArgs) (ip meta.IP, err error) { + if ip, err = h.ipam().Assign(context.TODO(), args); err != nil { + return nil, errors.Wrap(err, "") + } + + var roll = ip + defer func() { + if err != nil && roll != nil { + if re := h.releaseIPs(roll); re != nil { + err = errors.CombineErrors(err, re) + } + } + }() + + _, gwIPNet, err := net.ParseCIDR("169.254.1.1/32") + ip.BindGatewayIPNet(gwIPNet) + return ip, err +} + +// ReleaseIPs . +func (h *Driver) ReleaseIPs(ips ...meta.IP) error { + h.Lock() + defer h.Unlock() + return h.releaseIPs(ips...) +} + +func (h *Driver) releaseIPs(ips ...meta.IP) error { + return h.ipam().Release(context.Background(), ips...) +} + +// QueryIPs . +func (h *Driver) QueryIPs(ipns meta.IPNets) ([]meta.IP, error) { + return h.ipam().Query(context.Background(), ipns) +} + +func (h *Driver) ipam() network.Ipam { + return h.Ipam() +} + +// QueryIPv4 . +func (h *Driver) QueryIPv4(_ string) (meta.IP, error) { + return nil, errors.Wrap(terrors.ErrNotImplemented, "QueryIPv4 error") +} diff --git a/internal/network/drivers/calico/ippool.go b/internal/network/drivers/calico/ippool.go new file mode 100644 index 0000000..750d70b --- /dev/null +++ b/internal/network/drivers/calico/ippool.go @@ -0,0 +1,24 @@ +package calico + +import ( + "context" + + "github.com/projectcalico/calico/libcalico-go/lib/options" +) + +// PoolNames . +func (h *Driver) PoolNames() (ans []string) { + for name := range h.poolNames { + ans = append(ans, name) + } + return +} + +// GetIPPoolCidr . 
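+// A minimal sketch (the pool name and returned CIDR are illustrative
+// assumptions):
+//
+//	cidr, err := d.GetIPPoolCidr(ctx, "default-ipv4-ippool")
+//	// cidr holds the pool's CIDR string, e.g. "10.233.64.0/18"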
+func (h *Driver) GetIPPoolCidr(ctx context.Context, name string) (string, error) { + ipPool, err := h.IPPools().Get(ctx, name, options.GetOptions{}) + if err != nil { + return "", err + } + return ipPool.Spec.CIDR, nil +} diff --git a/internal/network/drivers/calico/metrics.go b/internal/network/drivers/calico/metrics.go new file mode 100644 index 0000000..ddd3130 --- /dev/null +++ b/internal/network/drivers/calico/metrics.go @@ -0,0 +1,39 @@ +package calico + +import ( + "sync/atomic" + + "github.com/projecteru2/core/utils" + "github.com/projecteru2/yavirt/configs" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + calicoHealthyDesc = prometheus.NewDesc( + prometheus.BuildFQName("node", "calico", "healthy"), + "calico healthy status.", + []string{"node"}, + nil) +) + +type MetricsCollector struct { + healthy atomic.Bool +} + +func (d *Driver) GetMetricsCollector() prometheus.Collector { + return d.mCol +} + +func (e *MetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- calicoHealthyDesc +} + +func (e *MetricsCollector) Collect(ch chan<- prometheus.Metric) { + healthy := utils.Bool2Int(e.healthy.Load()) + ch <- prometheus.MustNewConstMetric( + calicoHealthyDesc, + prometheus.GaugeValue, + float64(healthy), + configs.Hostname(), + ) +} diff --git a/internal/network/drivers/calico/policy.go b/internal/network/drivers/calico/policy.go new file mode 100644 index 0000000..219f7f4 --- /dev/null +++ b/internal/network/drivers/calico/policy.go @@ -0,0 +1,147 @@ +package calico + +import ( + "context" + "fmt" + + apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3" + libcalierr "github.com/projectcalico/calico/libcalico-go/lib/errors" + libcaliopt "github.com/projectcalico/calico/libcalico-go/lib/options" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/pkg/store/etcd" + "github.com/projecteru2/yavirt/pkg/terrors" +) + +const ( + policyName = "deny-namespaces" +) + +func (d *Driver) CreateNetworkPolicy(ns string) (err error) { + d.Lock() + defer d.Unlock() + + cwe := genCalicoNetworkPolicy(ns) + err = etcd.RetryTimedOut(func() error { + var created, ce = d.NetworkPolicies().Create(context.Background(), cwe, libcaliopt.SetOptions{}) + if ce != nil { + if _, ok := ce.(libcalierr.ErrorResourceAlreadyExists); !ok { + return ce + } + } + + cwe = created + + return nil + }, 3) + + return +} + +func (d *Driver) DeleteNetworkPolicy(ns string) (err error) { + d.Lock() + defer d.Unlock() + + return etcd.RetryTimedOut(func() error { + _, err := d.NetworkPolicies().Delete( + context.Background(), + ns, + policyName, + libcaliopt.DeleteOptions{}, + ) + if err != nil { + if _, ok := err.(libcalierr.ErrorResourceDoesNotExist); !ok { + return err + } + } + return nil + }, 3) +} + +func (d *Driver) GetNetworkPolicy(ns string) (cwe *apiv3.NetworkPolicy, err error) { + d.Lock() + defer d.Unlock() + + etcd.RetryTimedOut(func() error { //nolint + cwe, err = d.NetworkPolicies().Get(context.Background(), ns, policyName, libcaliopt.GetOptions{}) + if err != nil { + if _, ok := err.(libcalierr.ErrorResourceDoesNotExist); ok { //nolint + err = errors.Wrapf(terrors.ErrCalicoEndpointNotExists, "%s on %s", ns, policyName) + } + } + + return err + }, 3) + return +} + +// apiVersion: projectcalico.org/v3 +// kind: NetworkPolicy +// metadata: +// +// name: allow-pool2 +// namespace: testpool2 +// +// spec: +// +// types: +// - Ingress +// - Egress +// ingress: +// - action: Allow +// source: +// selector: projectcalico.org/namespace == 'testpool2' +// - action: Allow +// 
source:
+// notNets:
+// - '10.0.0.0/8'
+// egress:
+// - action: Allow
+// destination:
+// selector: projectcalico.org/namespace == 'testpool2'
+// - action: Allow
+// destination:
+// notNets:
+// - '10.0.0.0/8'
+func genCalicoNetworkPolicy(ns string) *apiv3.NetworkPolicy {
+    p := apiv3.NewNetworkPolicy()
+    p.Name = policyName
+
+    p.ObjectMeta.Namespace = ns
+    p.Spec.Types = []apiv3.PolicyType{apiv3.PolicyTypeIngress, apiv3.PolicyTypeEgress}
+    p.Spec.Ingress = []apiv3.Rule{
+        {
+            Action: apiv3.Allow,
+            Source: apiv3.EntityRule{
+                Selector: fmt.Sprintf("projectcalico.org/namespace == '%s'", ns),
+            },
+        },
+        {
+            Action: apiv3.Allow,
+            Source: apiv3.EntityRule{
+                NotNets: []string{
+                    "10.0.0.0/8",
+                    "192.168.0.0/16",
+                },
+            },
+        },
+    }
+    p.Spec.Egress = []apiv3.Rule{
+        {
+            Action: apiv3.Allow,
+            Destination: apiv3.EntityRule{
+                Selector: fmt.Sprintf("projectcalico.org/namespace == '%s'", ns),
+            },
+        },
+        {
+            Action: apiv3.Allow,
+            Destination: apiv3.EntityRule{
+                NotNets: []string{
+                    "10.0.0.0/8",
+                    "192.168.0.0/16",
+                },
+            },
+        },
+    }
+    return p
+}
diff --git a/internal/network/drivers/calico/tap.go b/internal/network/drivers/calico/tap.go
new file mode 100644
index 0000000..f1e1724
--- /dev/null
+++ b/internal/network/drivers/calico/tap.go
@@ -0,0 +1,46 @@
+package calico
+
+import (
+    "fmt"
+
+    "github.com/cockroachdb/errors"
+    "github.com/projecteru2/yavirt/configs"
+    "github.com/projecteru2/yavirt/internal/network/utils/device"
+    "github.com/projecteru2/yavirt/pkg/terrors"
+    "github.com/projecteru2/yavirt/pkg/utils"
+)
+
+func (d *Driver) createTap() (device.VirtLink, error) {
+    var name, err = d.randTapName()
+    if err != nil {
+        return nil, errors.Wrap(err, "")
+    }
+
+    for {
+        var tap, err = d.dev.AddLink(device.LinkTypeTuntap, name)
+        if err != nil {
+            if errors.Is(err, terrors.ErrVirtLinkExists) {
+                // The generated name collides with an existing link; pick a
+                // fresh name before retrying so the loop can terminate.
+                if name, err = d.randTapName(); err != nil {
+                    return nil, errors.Wrap(err, "")
+                }
+                continue
+            }
+
+            return nil, errors.Wrap(err, "")
+        }
+
+        return tap, nil
+    }
+}
+
+func (d *Driver) deleteTap(dev device.VirtLink) error {
+    return d.dev.DeleteLink(dev.Name())
+}
+
+func (d *Driver) randTapName() (string, error) {
+    var endpID, err = d.generateEndpointID()
+    if err != nil {
+        return "", errors.Wrap(err, "")
+    }
+
+    var name = fmt.Sprintf("%s%s", configs.Conf.Network.Calico.IFNamePrefix, endpID[:utils.Min(12, len(endpID))])
+
+    return name, nil
+}
diff --git a/internal/network/drivers/calico/workload_endpoint.go b/internal/network/drivers/calico/workload_endpoint.go
new file mode 100644
index 0000000..28f1978
--- /dev/null
+++ b/internal/network/drivers/calico/workload_endpoint.go
@@ -0,0 +1,246 @@
+package calico
+
+import (
+    "context"
+    "net"
+    "time"
+
+    libcaliapi "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
+    libcalierr "github.com/projectcalico/calico/libcalico-go/lib/errors"
+    libcalinames "github.com/projectcalico/calico/libcalico-go/lib/names"
+    libcalinet "github.com/projectcalico/calico/libcalico-go/lib/net"
+    libcaliopt "github.com/projectcalico/calico/libcalico-go/lib/options"
+    k8smeta "k8s.io/apimachinery/pkg/apis/meta/v1"
+    k8stypes "k8s.io/apimachinery/pkg/types"
+
+    "github.com/cockroachdb/errors"
+    "github.com/projecteru2/yavirt/internal/meta"
+    "github.com/projecteru2/yavirt/internal/network/types"
+    "github.com/projecteru2/yavirt/pkg/netx"
+    "github.com/projecteru2/yavirt/pkg/store/etcd"
+    "github.com/projecteru2/yavirt/pkg/terrors"
+)
+
+// Get .
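+// A minimal usage sketch (all field values are illustrative assumptions):
+//
+//	args := types.EndpointArgs{Hostname: "node-1", EndpointID: endpID}
+//	args.Calico.Namespace = "default"
+//	wep, err := d.GetWEP(args)
+//	// a missing endpoint is reported as terrors.ErrCalicoEndpointNotExists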
+func (d *Driver) GetWEP(args types.EndpointArgs) (*libcaliapi.WorkloadEndpoint, error) { + d.Lock() + defer d.Unlock() + return d.getWEP(args) +} + +func (d *Driver) getWEP(args types.EndpointArgs) (cwe *libcaliapi.WorkloadEndpoint, err error) { + var endpName string + if endpName, err = d.generateEndpointName(args.Hostname, args.EndpointID); err != nil { + return nil, errors.Wrap(err, "") + } + + etcd.RetryTimedOut(func() error { //nolint + cwe, err = d.WorkloadEndpoints().Get(context.Background(), args.Calico.Namespace, endpName, libcaliopt.GetOptions{}) + if err != nil { + if _, ok := err.(libcalierr.ErrorResourceDoesNotExist); ok { //nolint + err = errors.Wrapf(terrors.ErrCalicoEndpointNotExists, "%s on %s", endpName, args.Hostname) + } + } + + return err + }, 3) + + return +} + +// Create . +func (d *Driver) CreateWEP(args types.EndpointArgs) (cwe *libcaliapi.WorkloadEndpoint, err error) { + d.Lock() + defer d.Unlock() + + return d.createWEP(args) +} + +func (d *Driver) createWEP(args types.EndpointArgs) (cwe *libcaliapi.WorkloadEndpoint, err error) { + if cwe, err = d.getCalicoWorkloadEndpoint(args); err != nil { + return nil, errors.Wrap(err, "") + } + + err = etcd.RetryTimedOut(func() error { + var created, ce = d.WorkloadEndpoints().Create(context.Background(), cwe, libcaliopt.SetOptions{}) + if ce != nil { + if _, ok := ce.(libcalierr.ErrorResourceAlreadyExists); !ok { + return ce + } + } + + cwe = created + + return nil + }, 3) + + return +} + +// Update . +func (d *Driver) UpdateWEP(args types.EndpointArgs) (cwe *libcaliapi.WorkloadEndpoint, err error) { + d.Lock() + defer d.Unlock() + return d.updateWEP(args) +} + +func (d *Driver) updateWEP(args types.EndpointArgs) (cwe *libcaliapi.WorkloadEndpoint, err error) { + cwe, err = d.getCalicoWorkloadEndpoint(args) + if err != nil { + return nil, errors.Wrap(err, "") + } + + cwe.ObjectMeta.UID = k8stypes.UID(args.Calico.UID) + cwe.ObjectMeta.CreationTimestamp = k8smeta.NewTime(time.Now().UTC()) + + err = etcd.RetryTimedOut(func() error { + var updated, ue = d.WorkloadEndpoints().Update(context.Background(), cwe, libcaliopt.SetOptions{}) + if ue != nil { + return ue + } + + cwe = updated + + return nil + }, 3) + + return +} + +// Delete . 
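+// A minimal sketch (values are illustrative); deletion is effectively
+// idempotent because a missing endpoint is not treated as an error:
+//
+//	args := types.EndpointArgs{Hostname: "node-1", EndpointID: endpID}
+//	args.Calico.Namespace = "default"
+//	err := d.DeleteWEP(args)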
+func (d *Driver) DeleteWEP(args types.EndpointArgs) error { + d.Lock() + defer d.Unlock() + return d.deleteWEP(&args) +} + +// func (we *Driver) delete(endpName, namespace string) (err error) { +func (d *Driver) deleteWEP(args *types.EndpointArgs) (err error) { + endpName, err := d.generateEndpointName(args.Hostname, args.EndpointID) + if err != nil { + return errors.Wrap(err, "") + } + + return etcd.RetryTimedOut(func() error { + _, err := d.WorkloadEndpoints().Delete( + context.TODO(), + args.Calico.Namespace, + endpName, + libcaliopt.DeleteOptions{}, + ) + if _, ok := err.(libcalierr.ErrorResourceDoesNotExist); !ok { + return err + } + return nil + }, 3) +} + +func (d *Driver) ListWEP() ([]libcaliapi.WorkloadEndpoint, error) { + var ident = libcalinames.WorkloadEndpointIdentifiers{ + Node: d.nodename, + } + namePrefix, _ := ident.CalculateWorkloadEndpointName(true) + ans, err := d.WorkloadEndpoints().List(context.TODO(), libcaliopt.ListOptions{ + Name: namePrefix, + Prefix: true, + }) + if err != nil { + return nil, err + } + return ans.Items, nil +} + +func (d *Driver) getCalicoWorkloadEndpoint(args types.EndpointArgs) (*libcaliapi.WorkloadEndpoint, error) { + endpName, err := d.generateEndpointName(args.Hostname, args.EndpointID) + if err != nil { + return nil, errors.Wrap(err, "") + } + + ipNets, err := d.convCalicoIPNetworks(args.IPs) + if err != nil { + return nil, errors.Wrap(err, "") + } + + profile, err := d.getProfile(args.Calico.IPPool) + if err != nil { + return nil, errors.Wrap(err, "") + } + + wep := libcaliapi.NewWorkloadEndpoint() + wep.Name = endpName + wep.ObjectMeta.Namespace = args.Calico.Namespace + wep.ObjectMeta.ResourceVersion = args.Calico.ResourceVersion + wep.Spec.Endpoint = args.EndpointID + wep.Spec.Node = args.Hostname + wep.Spec.Orchestrator = OrchestratorID + wep.Spec.Workload = OrchestratorID + wep.Spec.InterfaceName = args.DevName + wep.Spec.MAC = args.MAC + wep.Spec.IPNetworks = append(wep.Spec.IPNetworks, ipNets...) + wep.Spec.Profiles = d.mergeProfile(args.Calico.Profiles, profile) + + return wep, nil +} + +func (d *Driver) mergeProfile(profiles []string, other string) []string { + if len(profiles) < 1 { + return []string{other} + } + + for _, p := range profiles { + if p == other { + return profiles + } + } + + return append(profiles, other) +} + +func (d *Driver) generateEndpointName(hostname, endpointID string) (string, error) { + var ident = libcalinames.WorkloadEndpointIdentifiers{ + Node: hostname, + Orchestrator: OrchestratorID, + Workload: OrchestratorID, + Endpoint: endpointID, + } + return ident.CalculateWorkloadEndpointName(false) +} + +func (d *Driver) convCalicoIPNetworks(ips []meta.IP) ([]string, error) { + var ipNets = make([]string, len(ips)) + + for i, ip := range ips { + ipv4, _, err := netx.ParseCIDR(ip.CIDR()) + if err != nil { + return nil, errors.Wrap(err, "") + } + + ipNets[i] = libcalinet.IPNet{IPNet: net.IPNet{ + IP: ipv4, + Mask: net.CIDRMask(net.IPv4len*8, net.IPv4len*8), + }}.String() + } + + return ipNets, nil +} + +func (d *Driver) getProfile(poolName string) (string, error) { + var pool, err = d.getIPPool(poolName) + if err != nil { + return "", errors.Wrap(err, "") + } + return pool.ObjectMeta.Name, nil +} + +// ConvIPs . 
+func ConvIPs(cwe *libcaliapi.WorkloadEndpoint) (ips []meta.IP, err error) {
+    ips = make([]meta.IP, len(cwe.Spec.IPNetworks))
+
+    for i, cidr := range cwe.Spec.IPNetworks {
+        if ips[i], err = ParseCIDR(cidr); err != nil {
+            return nil, errors.Wrap(err, "")
+        }
+    }
+
+    return ips, nil
+}
diff --git a/internal/network/drivers/cni/calico/network.go b/internal/network/drivers/cni/calico/network.go
new file mode 100644
index 0000000..cf4da90
--- /dev/null
+++ b/internal/network/drivers/cni/calico/network.go
@@ -0,0 +1,181 @@
+package calico
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "io"
+    "os"
+    "path/filepath"
+    "time"
+
+    "github.com/cockroachdb/errors"
+    current "github.com/containernetworking/cni/pkg/types/100"
+    "github.com/projecteru2/core/log"
+    "github.com/projecteru2/yavirt/configs"
+    "github.com/projecteru2/yavirt/internal/meta"
+    calihandler "github.com/projecteru2/yavirt/internal/network/drivers/calico"
+    "github.com/projecteru2/yavirt/internal/network/types"
+    netutils "github.com/projecteru2/yavirt/internal/network/utils"
+    "github.com/projecteru2/yavirt/pkg/sh"
+    "github.com/projecteru2/yavirt/pkg/terrors"
+    "github.com/projecteru2/yavirt/pkg/utils"
+)
+
+const (
+    cniCmdAdd            = "ADD"
+    cniCmdDel            = "DEL"
+    calicoIPPoolLabelKey = "calico/ippool"
+    calicoNSLabelKey     = "calico/namespace"
+)
+
+type Driver struct {
+    *calihandler.Driver
+    cfg *configs.CNIConfig
+}
+
+func NewDriver(cfg *configs.CNIConfig, cali *calihandler.Driver) (*Driver, error) {
+    return &Driver{
+        Driver: cali,
+        cfg:    cfg,
+    }, nil
+}
+
+func (d *Driver) QueryIPs(meta.IPNets) ([]meta.IP, error) {
+    return nil, nil
+}
+
+func (d *Driver) CreateEndpointNetwork(args types.EndpointArgs) (types.EndpointArgs, func() error, error) {
+    var err error
+    if args.EndpointID, err = netutils.GenEndpointID(); err != nil {
+        return args, nil, errors.Wrap(err, "")
+    }
+
+    stdout, execDel, err := d.calicoCNIAdd(&args, true)
+    if err != nil {
+        return args, nil, errors.Wrap(err, "")
+    }
+
+    if err := d.populateIPFromAddResult(stdout, &args); err != nil {
+        // Roll back the CNI ADD and always surface the populate error,
+        // even when the rollback itself succeeds.
+        if de := execDel(); de != nil {
+            err = errors.CombineErrors(err, de)
+        }
+        return args, nil, err
+    }
+
+    return args, execDel, nil
+}
+
+func (d *Driver) JoinEndpointNetwork(args types.EndpointArgs) (func() error, error) {
+    _, _, err := d.calicoCNIAdd(&args, false)
+    return nil, errors.Wrap(err, "")
+}
+
+func (d *Driver) DeleteEndpointNetwork(args types.EndpointArgs) error {
+    return d.calicoCNIDel(&args)
+}
+
+func (d *Driver) calicoCNIDel(args *types.EndpointArgs) error {
+    env := d.makeCNIEnv(args)
+    env["CNI_COMMAND"] = cniCmdDel
+
+    dat, err := d.readCNIConfig()
+    if err != nil {
+        return errors.Wrap(err, "")
+    }
+
+    _, err = execCNIPlugin(env, bytes.NewBuffer(dat), d.cfg.PluginPath)
+    return err
+}
+
+func (d *Driver) calicoCNIAdd(args *types.EndpointArgs, needRollback bool) (stdout []byte, rollback func() error, err error) {
+    env := d.makeCNIEnv(args)
+    env["CNI_COMMAND"] = cniCmdAdd
+
+    var dat []byte
+    if dat, err = d.readCNIConfig(); err != nil {
+        return nil, nil, errors.Wrap(err, "")
+    }
+
+    if stdout, err = execCNIPlugin(env, bytes.NewBuffer(dat), d.cfg.PluginPath); err != nil {
+        return nil, nil, errors.Wrap(err, "")
+    }
+
+    execDel := func() error {
+        env["CNI_COMMAND"] = cniCmdDel
+        _, err := execCNIPlugin(env, bytes.NewBuffer(dat), d.cfg.PluginPath)
+        return err
+    }
+
+    defer func() {
+        if err != nil && needRollback {
+            if de := execDel(); de != nil {
+                err = errors.CombineErrors(err, de)
+            }
+            execDel = nil
+        }
+    }()
+
+    // Refreshes gateway for
non-Calico-CNI operations. + if err = d.RefreshGateway(); err != nil { + return nil, nil, errors.Wrap(err, "") + } + + return stdout, execDel, nil +} + +func (d *Driver) populateIPFromAddResult(dat []byte, args *types.EndpointArgs) error { + var result current.Result + if err := json.Unmarshal(dat, &result); err != nil { + return errors.Wrap(err, "") + } + if len(result.IPs) < 1 { + return errors.Wrap(terrors.ErrIPIsnotAssigned, "") + } + + for _, ipConf := range result.IPs { + ip, err := calihandler.ParseCIDR(ipConf.Address.String()) + if err != nil { + return errors.Wrap(err, "") + } + + gwip, err := d.GetGatewayIP(ip) + if err != nil { + return errors.Wrap(err, "") + } + + ip.BindGatewayIPNet(gwip.IPNetwork()) + args.IPs = append(args.IPs, ip) + } + + return nil +} + +func (d *Driver) readCNIConfig() ([]byte, error) { + // TODO: follows the CNI policy, rather than hard code absolute path here. + return os.ReadFile(d.cfg.ConfigPath) +} + +func (d *Driver) makeCNIEnv(args *types.EndpointArgs) map[string]string { + networkPair := d.cfg.IFNamePrefix + args.EndpointID[:utils.Min(12, len(args.EndpointID))] + return map[string]string{ + "CNI_CONTAINERID": args.GuestID, + "CNI_ARGS": "IgnoreUnknown=1;MAC=" + args.MAC, + "CNI_IFNAME": networkPair, + "CNI_PATH": filepath.Dir(d.cfg.PluginPath), + "CNI_NETNS": "yap", + } +} + +func execCNIPlugin(env map[string]string, stdin io.Reader, plugin string) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*8) + defer cancel() + + log.Debugf(context.TODO(), "CNI Plugin env: %v", env) + so, se, err := sh.ExecInOut(ctx, env, stdin, plugin) + + if err != nil { + err = errors.Wrapf(err, "Failed to exec %s with %v: %s: %s", plugin, string(so), string(se)) + } + + return so, err +} diff --git a/internal/network/drivers/fake/fake.go b/internal/network/drivers/fake/fake.go new file mode 100644 index 0000000..6caf083 --- /dev/null +++ b/internal/network/drivers/fake/fake.go @@ -0,0 +1,41 @@ +package fake + +import ( + "context" + + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/projecteru2/yavirt/internal/network/utils/device" + "github.com/prometheus/client_golang/prometheus" +) + +type Driver struct{} + +func (d *Driver) CheckHealth(_ context.Context) error { + return nil +} +func (d *Driver) GetMetricsCollector() prometheus.Collector { + return nil +} + +func (d *Driver) QueryIPs(meta.IPNets) ([]meta.IP, error) { + return nil, nil +} +func (d *Driver) CreateEndpointNetwork(types.EndpointArgs) (types.EndpointArgs, func() error, error) { + return types.EndpointArgs{}, nil, nil +} +func (d *Driver) JoinEndpointNetwork(types.EndpointArgs) (func() error, error) { + return nil, nil //nolint:nilnil +} +func (d *Driver) DeleteEndpointNetwork(types.EndpointArgs) error { + return nil +} +func (d *Driver) GetEndpointDevice(string) (device.VirtLink, error) { + return nil, nil //nolint +} +func (d *Driver) CreateNetworkPolicy(string) error { + return nil +} +func (d *Driver) DeleteNetworkPolicy(string) error { + return nil +} diff --git a/internal/network/drivers/ovn/ip.go b/internal/network/drivers/ovn/ip.go new file mode 100644 index 0000000..55beff7 --- /dev/null +++ b/internal/network/drivers/ovn/ip.go @@ -0,0 +1,173 @@ +package ovn + +import ( + "fmt" + "net" + "strings" + + "github.com/pkg/errors" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/network/utils/device" + 
"github.com/projecteru2/yavirt/pkg/netx" +) + +// IP . +// etcd keys: +// +// /ips//free/ +// /ips//occupied/ +type IP struct { + IPNet *net.IPNet + IP net.IP + Device device.VirtLink + GatewayIPNet *net.IPNet + GuestID string `json:"guest"` + *meta.Ver // just for fulfilling meta.IP, it doesn't be referred. +} + +// NewIP . +func NewIP(ipStr, subnet string) (*IP, error) { + ip, ipnet, err := net.ParseCIDR(subnet) + if err != nil { + return nil, err + } + if ipStr != "" { + ip = net.ParseIP(ipStr) + } + gwIP, err := netx.DefaultGatewayIP(ipnet) + if err != nil { + return nil, err + } + gwIPNet := &net.IPNet{ + IP: gwIP, + Mask: ipnet.Mask, + } + ans := &IP{ + IPNet: ipnet, + IP: ip, + GatewayIPNet: gwIPNet, + } + return ans, nil +} + +// BindGatewayIPNet . +func (ip *IP) BindGatewayIPNet(ipn *net.IPNet) { + ip.GatewayIPNet = ipn +} + +// BindDevice . +func (ip *IP) BindDevice(dev device.VirtLink) { + ip.Device = dev +} + +// BindGuestID . +func (ip *IP) BindGuestID(guestID string) { + ip.GuestID = guestID +} + +// IntIP . +func (ip *IP) IntIP() (v int64) { + v, _ = netx.IPv4ToInt(ip.IP.String()) + return +} + +// IntSubnet . +func (ip *IP) IntSubnet() int64 { + return 0 +} + +// Prefix . +func (ip *IP) Prefix() int { + var prefix, _ = ip.IPNet.Mask.Size() + return prefix +} + +func (ip *IP) String() string { + return ip.CIDR() +} + +// AutoRouteCIDR . +func (ip *IP) AutoRouteCIDR() (string, error) { + var _, ipn, err = netx.ParseCIDR(ip.CIDR()) + if err != nil { + return "", errors.Wrap(err, "") + } + return ipn.String(), nil +} + +// CIDR . +func (ip *IP) CIDR() string { + return ip.IPNet.String() +} + +// NetIP . +func (ip *IP) NetIP() net.IP { + return ip.IP +} + +// IPNetwork . +func (ip *IP) IPNetwork() *net.IPNet { + return ip.IPNet +} + +// Netmask . +func (ip *IP) Netmask() string { + mask := ip.IPNet.Mask + var s = make([]string, len(mask)) + + for i, byte := range mask { + s[i] = fmt.Sprintf("%d", byte) + } + + return strings.Join(s, ".") +} + +// IPAddr . +func (ip *IP) IPAddr() string { + return ip.IP.String() +} + +// SubnetAddr . +func (ip *IP) SubnetAddr() string { + return "" +} + +// IntGateway . +func (ip *IP) IntGateway() (v int64) { + v, _ = netx.IPv4ToInt(ip.GatewayAddr()) + return v +} + +// GatewayPrefix . +func (ip *IP) GatewayPrefix() int { + var prefix, _ = ip.IPNet.Mask.Size() + return prefix +} + +// GatewayAddr . +func (ip *IP) GatewayAddr() string { + return ip.GatewayIPNet.IP.String() +} + +// IsAssigned always returns true, +// the unassigned IPs're only stored in Calico itself. +func (ip *IP) IsAssigned() bool { + return true +} + +// MetaKey . +func (ip *IP) MetaKey() string { + // DOES NOT STORE + return "" +} + +// NetworkMode . +func (ip *IP) NetworkMode() string { + return network.OVNMode +} + +// NetworkName . 
+func (ip *IP) NetworkName() (s string) { + return +} diff --git a/internal/network/drivers/ovn/metrics.go b/internal/network/drivers/ovn/metrics.go new file mode 100644 index 0000000..27dbfa6 --- /dev/null +++ b/internal/network/drivers/ovn/metrics.go @@ -0,0 +1,39 @@ +package ovn + +import ( + "sync/atomic" + + "github.com/projecteru2/core/utils" + "github.com/projecteru2/yavirt/configs" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + ovnHealthyDesc = prometheus.NewDesc( + prometheus.BuildFQName("node", "ovn", "healthy"), + "ovn healthy status.", + []string{"node"}, + nil) +) + +type MetricsCollector struct { + healthy atomic.Bool +} + +func (d *Driver) GetMetricsCollector() prometheus.Collector { + return d.mCol +} + +func (e *MetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- ovnHealthyDesc +} + +func (e *MetricsCollector) Collect(ch chan<- prometheus.Metric) { + healthy := utils.Bool2Int(e.healthy.Load()) + ch <- prometheus.MustNewConstMetric( + ovnHealthyDesc, + prometheus.GaugeValue, + float64(healthy), + configs.Hostname(), + ) +} diff --git a/internal/network/drivers/ovn/models.go b/internal/network/drivers/ovn/models.go new file mode 100644 index 0000000..7c37f67 --- /dev/null +++ b/internal/network/drivers/ovn/models.go @@ -0,0 +1,35 @@ +package ovn + +type LogicalSwitch struct { + UUID string `ovsdb:"_uuid"` // _uuid tag is mandatory + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` + Config map[string]string `ovsdb:"other_config"` +} + +type LogicalSwitchPort struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + Type string `ovsdb:"type"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` + Addresses []string `ovsdb:"addresses"` + PortSecurity []string `ovsdb:"port_security"` + DynamicAddresses *string `ovsdb:"dynamic_addresses"` + ParentName *string `ovsdb:"parent_name"` + Tag *int `ovsdb:"tag"` + TagRequest *int `ovsdb:"tag_request"` + Up *bool `ovsdb:"up"` +} + +type Interface struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + Type string `ovsdb:"type"` + Error *string `ovsdb:"error"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Statistics map[string]int `ovsdb:"statistics"` + Config map[string]string `ovsdb:"other_config"` + AdminState *string `ovsdb:"admin_state"` + LinkState *string `ovsdb:"link_state"` +} diff --git a/internal/network/drivers/ovn/ovn.go b/internal/network/drivers/ovn/ovn.go new file mode 100644 index 0000000..fbd74cc --- /dev/null +++ b/internal/network/drivers/ovn/ovn.go @@ -0,0 +1,163 @@ +package ovn + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/cockroachdb/errors" + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/network/types" + netutils "github.com/projecteru2/yavirt/internal/network/utils" + "github.com/projecteru2/yavirt/internal/utils" + "github.com/shirou/gopsutil/process" +) + +const ( + ovnMTU = 1442 +) + +type Driver struct { + sync.Mutex + mCol *MetricsCollector + cfg *configs.OVNConfig + nbClientDBModel model.ClientDBModel + nbCli client.Client + ovsClientDBModel model.ClientDBModel + ovsCli client.Client +} + +func NewDriver(cfg *configs.OVNConfig) (*Driver, error) { + return &Driver{cfg: cfg, mCol: &MetricsCollector{}}, nil +} + +func (d *Driver) CheckHealth(_ context.Context) (err error) { + defer func() { + if err != 
nil { + d.mCol.healthy.Store(false) + } else { + d.mCol.healthy.Store(true) + } + }() + processes, err := process.Processes() + if err != nil { + return err + } + binaries := []string{ + "ovn-controller", + "ovsdb-server", + "ovs-vswitchd", + } + for _, name := range binaries { + if !utils.PSContains([]string{name}, processes) { + return errors.Newf("%s is not running", name) + } + } + + return nil +} + +func (d *Driver) QueryIPs(args meta.IPNets) ([]meta.IP, error) { + if len(args) == 0 { + return nil, nil + } + ans := make([]meta.IP, 0, len(args)) + for _, ipn := range args { + ip, err := NewIP("", ipn.CIDR()) + if err != nil { + return nil, err + } + ans = append(ans, ip) + } + return ans, nil +} + +func (d *Driver) CreateEndpointNetwork(args types.EndpointArgs) (types.EndpointArgs, func() error, error) { + var ( + err error + ls *LogicalSwitch + lsp *LogicalSwitchPort + ) + + if args.DevName, err = netutils.GenDevName(configs.Conf.Network.OVN.IFNamePrefix); err != nil { + return types.EndpointArgs{}, nil, err + } + // get LogicalSwitch + if args.OVN.LogicalSwitchUUID != "" { + if ls, err = d.getLogicalSwitch(args.OVN.LogicalSwitchUUID); err != nil { + return types.EndpointArgs{}, nil, err + } + } else { + lsList, err := d.getLogicalSwitchByName(args.OVN.LogicalSwitchName) + if err != nil { + return types.EndpointArgs{}, nil, err + } + switch len(lsList) { + case 1: + ls = lsList[0] + case 0: + return types.EndpointArgs{}, nil, fmt.Errorf("logical switch %s not found", args.OVN.LogicalSwitchName) + default: + return types.EndpointArgs{}, nil, fmt.Errorf("multiple logical switch %s found", args.OVN.LogicalSwitchName) + } + } + // create LogicalSwitchPort if necessary and then get it + if args.OVN.LogicalSwitchPortName != "" { + if lsp, err = d.getLogicalSwitchPortByName(args.OVN.LogicalSwitchPortName); err != nil { + return types.EndpointArgs{}, nil, err + } + } else { + lspUUID, err := d.createLogicalSwitchPort(&args) + if err != nil { + return types.EndpointArgs{}, nil, err + } + if lsp, err = d.getLogicalSwitchPort(lspUUID); err != nil { + return types.EndpointArgs{}, nil, err + } + } + subnet := ls.Config["subnet"] + addrs := strings.Split(*lsp.DynamicAddresses, " ") + mac := addrs[0] + ipv4 := addrs[1] + args.MAC = mac + args.MTU = ovnMTU + ip, err := NewIP(ipv4, subnet) + if err != nil { + return types.EndpointArgs{}, nil, err + } + args.IPs = append(args.IPs, ip) + return args, nil, nil +} + +func (d *Driver) JoinEndpointNetwork(args types.EndpointArgs) (func() error, error) { + lspName := args.OVN.LogicalSwitchPortName + if lspName == "" { + lspName = LSPName(args.GuestID) + } + err := d.setExternalID(args.DevName, "iface-id", lspName) + return nil, err +} + +func (d *Driver) DeleteEndpointNetwork(args types.EndpointArgs) error { + // lsp is provided by other component, so we ignore deleting lsp here + if args.OVN.LogicalSwitchPortName != "" { + return nil + } + return d.deleteLogicalSwitchPort(&args) +} + +func (d *Driver) CreateNetworkPolicy(string) error { + return nil +} + +func (d *Driver) DeleteNetworkPolicy(string) error { + return nil +} + +func LSPName(guestID string) string { + return fmt.Sprintf("lsp-%s", guestID) +} diff --git a/internal/network/drivers/ovn/ovn_api.go b/internal/network/drivers/ovn/ovn_api.go new file mode 100644 index 0000000..625e031 --- /dev/null +++ b/internal/network/drivers/ovn/ovn_api.go @@ -0,0 +1,402 @@ +package ovn + +import ( + "context" + "fmt" + "log/slog" + "strings" + "time" + + "github.com/cenkalti/backoff/v4" + 
"github.com/cockroachdb/errors" + "github.com/go-logr/logr" + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/samber/lo" + slogzerolog "github.com/samber/slog-zerolog/v2" +) + +func normalizeAddr(addr string) string { + if !(strings.HasPrefix(addr, "tcp:") || strings.HasPrefix(addr, "unix:")) { + if strings.HasPrefix(addr, "/") { + addr = "unix:" + addr + } else { + addr = "tcp:" + addr + } + } + return addr +} + +func getCli(addrs []string, dbModelReq *model.ClientDBModel, monitor bool) (client.Client, error) { + opts := make([]client.Option, 0, len(addrs)+3) + for _, addr := range addrs { + addr = normalizeAddr(addr) + opts = append(opts, client.WithEndpoint(addr)) + } + + zlogger := log.GetGlobalLogger() + slogHandler := slogzerolog.Option{Level: slog.LevelDebug, Logger: zlogger}.NewZerologHandler() + ovsLogger := logr.FromSlogHandler(slogHandler) + opts = append(opts, []client.Option{ + client.WithReconnect(15*time.Second, backoff.NewExponentialBackOff()), + // client.WithInactivityCheck(1*time.Minute, 10*time.Second, backoff.NewExponentialBackOff()), + client.WithLogger(&ovsLogger), + }...) + cli, err := client.NewOVSDBClient(*dbModelReq, opts...) + if err != nil { + return nil, err + } + if err = cli.Connect(context.TODO()); err != nil { + return nil, err + } + if monitor { + _, err = cli.MonitorAll(context.TODO()) + } + return cli, err +} + +func (d *Driver) getNBCli() (cli client.Client, err error) { + if d.nbCli == nil { + d.nbClientDBModel, _ = model.NewClientDBModel("OVN_Northbound", map[string]model.Model{ + "Logical_Switch": &LogicalSwitch{}, + "Logical_Switch_Port": &LogicalSwitchPort{}, + }) + // dbModelReq.SetIndexes(map[string][]model.ClientIndex{ + // "Logical_Switch": { + // { + // Columns: []model.ColumnKey{ + // { + // Column: "name", + // }, + // }, + // }, + // }, + // }) + d.nbCli, err = getCli(d.cfg.NBAddrs, &d.nbClientDBModel, false) + } + return d.nbCli, err +} + +func (d *Driver) getOVSDBCli() (cli client.Client, err error) { + if d.ovsCli == nil { + d.ovsClientDBModel, _ = model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ + "Interface": &Interface{}, + }) + d.ovsCli, err = getCli([]string{d.cfg.OVSDBAddr}, &d.ovsClientDBModel, true) + } + return d.ovsCli, err +} + +func (d *Driver) setExternalID(ifaceName, key, value string) error { + iface := &Interface{ + Name: ifaceName, + } + cli, err := d.getOVSDBCli() + if err != nil { + return err + } + + err = cli.Get(context.TODO(), iface) + if err != nil { + return errors.Wrapf(err, "failed to get interface %s", ifaceName) + } + iface.ExternalIDs[key] = value + + ops, err := cli.Where(iface).Update(iface) + if err != nil { + return errors.Wrapf(err, "failed to set external_id %s", key) + } + _, err = cli.Transact(context.TODO(), ops...) + if err != nil { + return errors.Wrapf(err, "failed to set external_id %s", key) + } + return nil +} + +func (d *Driver) createLogicalSwitch(name string, subnet string) (string, error) { //nolint:unused + cli, err := d.getNBCli() + if err != nil { + return "", err + } + obj := &LogicalSwitch{ + Name: name, + } + if subnet != "" { + obj.Config = map[string]string{ + "subnet": subnet, + } + } + ops, err := cli.Create(obj) + if err != nil { + return "", errors.Wrapf(err, "failed to create logical switch %s", name) + } + opsRes, err := cli.Transact(context.TODO(), ops...) 
+ if err != nil { + return "", errors.Wrapf(err, "failed to create logical switch %s", name) + } + var ansUUID string + if len(opsRes) > 0 { + ansUUID = opsRes[0].UUID.GoUUID + } + return ansUUID, nil +} + +func (d *Driver) getLogicalSwitch(uuid string) (*LogicalSwitch, error) { + cli, err := d.getNBCli() + if err != nil { + return nil, err + } + obj := &LogicalSwitch{ + UUID: uuid, + } + v, err := selectHelper(cli, "Logical_Switch", &d.nbClientDBModel, obj) + if err != nil { + return nil, err + } + if ans, ok := v.(*LogicalSwitch); !ok { + return nil, errors.New("failed to convert to LogicalSwitch") + } else { //nolint + return ans, nil + } +} + +func (d *Driver) getLogicalSwitchByName(name string) ([]*LogicalSwitch, error) { + cli, err := d.getNBCli() + if err != nil { + return nil, err + } + obj := &LogicalSwitch{ + Name: name, + } + ans, err := selectHelper(cli, "Logical_Switch", &d.nbClientDBModel, obj) + if err != nil { + return nil, err + } + if ans == nil { + return nil, nil + } + switch v := ans.(type) { + case *LogicalSwitch: + return []*LogicalSwitch{v}, nil + case []*LogicalSwitch: + return v, nil + default: + return nil, errors.New("failed to convert to LogicalSwitch") + } +} + +func (d *Driver) getOneLogicalSwitchByName(name string) (*LogicalSwitch, error) { + lsList, err := d.getLogicalSwitchByName(name) + if err != nil { + return nil, err + } + switch len(lsList) { + case 1: + return lsList[0], nil + case 0: + return nil, fmt.Errorf("logical switch %s not found", name) + default: + return nil, fmt.Errorf("multiple logical switch %s found", name) + } +} + +func (d *Driver) deleteLogicalSwitch(name string) error { //nolint:unused + cli, err := d.getNBCli() + if err != nil { + return err + } + ops, err := cli.Where(&LogicalSwitch{ + Name: name, + }).Delete() + if err != nil { + return errors.Wrapf(err, "failed to delete logical switch %s", name) + } + _, err = cli.Transact(context.TODO(), ops...) + return errors.Wrapf(err, "failed to delete logical switch %s", name) +} + +func (d *Driver) createLogicalSwitchPort(args *types.EndpointArgs) (string, error) { + cli, err := d.getNBCli() + if err != nil { + return "", err + } + namedUUID, err := newRowUUID() + if err != nil { + return "", err + } + obj := &LogicalSwitchPort{ + UUID: namedUUID, + Name: LSPName(args.GuestID), + // Type: "internal", + Addresses: []string{ + fmt.Sprintf("%s dynamic", args.MAC), + }, + PortSecurity: []string{ + args.MAC, + }, + } + ops, err := cli.Create(obj) + if err != nil { + return "", errors.Wrapf(err, "failed to create logical switch port %s", LSPName(args.GuestID)) + } + + ls := &LogicalSwitch{ + UUID: args.OVN.LogicalSwitchUUID, + } + if ls.UUID == "" { + ls, err = d.getOneLogicalSwitchByName(args.OVN.LogicalSwitchName) + if err != nil { + return "", err + } + } + lsOps, err := cli.Where(ls).Mutate(ls, model.Mutation{ + Field: &ls.Ports, + Mutator: ovsdb.MutateOperationInsert, + Value: []string{namedUUID}, + }) + if err != nil { + return "", errors.Wrapf(err, "failed to create logical switch port %s", LSPName(args.GuestID)) + } + ops = append(ops, lsOps...) + + opsRes, err := cli.Transact(context.TODO(), ops...) 
+ if err != nil { + return "", errors.Wrapf(err, "failed to create logical switch port %s", LSPName(args.GuestID)) + } + err = lo.Reduce(opsRes, func(r error, op ovsdb.OperationResult, _ int) error { + if op.Error != "" { + return errors.CombineErrors(r, errors.Newf("%s: %s", op.Error, op.Details)) + } + return r + }, nil) + if err != nil { + return "", err + } + var ansUUID string + if len(opsRes) > 0 { + ansUUID = opsRes[0].UUID.GoUUID + } + return ansUUID, nil +} + +func (d *Driver) deleteLogicalSwitchPort(args *types.EndpointArgs) error { + cli, err := d.getNBCli() + if err != nil { + return err + } + lsUUID := args.OVN.LogicalSwitchUUID + if lsUUID == "" { + ls, err := d.getOneLogicalSwitchByName(args.OVN.LogicalSwitchName) + if err != nil { + return err + } + lsUUID = ls.UUID + } + ls := &LogicalSwitch{ + UUID: lsUUID, + } + lsp, err := d.getLogicalSwitchPortByName(LSPName(args.GuestID)) + if err != nil { + return err + } + uuid := lsp.UUID + ops, err := cli.Where(ls).Mutate(ls, model.Mutation{ + Field: &ls.Ports, + Mutator: ovsdb.MutateOperationDelete, + Value: []string{uuid}, + }) + if err != nil { + return err + } + _, err = cli.Transact(context.TODO(), ops...) + if err != nil { + return err + } + return nil +} + +func selectHelper(cli client.Client, table string, cModel *model.ClientDBModel, obj any) (any, error) { + ops, err := cli.Select(obj) + if err != nil { + return nil, err + } + opsRes, err := cli.Transact(context.TODO(), ops...) + if err != nil { + return nil, errors.Wrapf(err, "failed to run transaction for selection") + } + if opsRes[0].Error != "" { + return nil, errors.Newf("%s:%s", opsRes[0].Error, opsRes[0].Details) + } + if len(opsRes[0].Rows) == 0 { + return nil, nil //nolint:nilnil + } + dbModel, errList := model.NewDatabaseModel(cli.Schema(), *cModel) + err = lo.Reduce(errList, func(r error, e error, _ int) error { + return errors.CombineErrors(r, e) + }, nil) + if err != nil { + return nil, err + } + + ans := make([]any, 0, len(opsRes[0].Rows)) + for idx := range opsRes[0].Rows { + row := opsRes[0].Rows[idx] + var uuid string + if v, ok := row["_uuid"]; ok { + uuid = v.(ovsdb.UUID).GoUUID + } + mod, err := model.CreateModel(dbModel, table, &row, uuid) + if err != nil { + return nil, err + } + ans = append(ans, mod) + } + if len(ans) == 1 { + return ans[0], nil + } + return ans, nil +} + +func (d *Driver) getLogicalSwitchPort(uuid string) (*LogicalSwitchPort, error) { + cli, err := d.getNBCli() + if err != nil { + return nil, err + } + obj := &LogicalSwitchPort{ + UUID: uuid, + } + v, err := selectHelper(cli, "Logical_Switch_Port", &d.nbClientDBModel, obj) + if err != nil { + return nil, err + } + if v == nil { + return nil, nil //nolint:nilnil + } + if ans, ok := v.(*LogicalSwitchPort); !ok { + return nil, errors.New("failed to convert to LogicalSwitchPort") + } else { //nolint + return ans, nil + } +} + +func (d *Driver) getLogicalSwitchPortByName(name string) (*LogicalSwitchPort, error) { + cli, err := d.getNBCli() + if err != nil { + return nil, err + } + obj := &LogicalSwitchPort{ + Name: name, + } + v, err := selectHelper(cli, "Logical_Switch_Port", &d.nbClientDBModel, obj) + if err != nil { + return nil, err + } + if ans, ok := v.(*LogicalSwitchPort); !ok { + return nil, errors.New("failed to convert to LogicalSwitchPort") + } else { //nolint + return ans, nil + } +} diff --git a/internal/network/drivers/ovn/ovn_api_test.go b/internal/network/drivers/ovn/ovn_api_test.go new file mode 100644 index 0000000..882d472 --- /dev/null +++ 
b/internal/network/drivers/ovn/ovn_api_test.go @@ -0,0 +1,95 @@ +package ovn + +import ( + "fmt" + "testing" + + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/projecteru2/yavirt/pkg/test/assert" +) + +func TestLS(t *testing.T) { + cfg := &configs.OVNConfig{ + NBAddrs: []string{ + "tcp:192.168.160.25:6641", + }, + } + d, err := NewDriver(cfg) + assert.Nil(t, err) + // err = d.createLogicalSwitch("ut-test", "192.168.111.0/24") + // err = d.createLogicalSwitch("ut-test", "") + // assert.Nil(t, err) + newLS, err := d.getLogicalSwitch("c343cbde-67b4-4b86-9cc1-9452b270f6f6") + assert.Nil(t, err) + fmt.Printf("+++++++ new ls: %v\n", newLS) +} + +func TestLSP(t *testing.T) { + cfg := &configs.OVNConfig{ + NBAddrs: []string{ + "tcp:192.168.160.25:6641", + }, + } + d, err := NewDriver(cfg) + assert.Nil(t, err) + lsUUID, err := d.createLogicalSwitch("ut-test", "192.168.111.0/24") + assert.Nil(t, err) + // defer d.deleteLogicalSwitch("ut-test") + // lsUUID := "8c896d59-20d7-4b5f-9690-5b75ae03f787" + args := &types.EndpointArgs{ + GuestID: "00052017027003203659270000000003", + MAC: "00:55:00:00:00:03", + OVN: types.OVNArgs{ + LogicalSwitchUUID: lsUUID, + }, + } + uuid, err := d.createLogicalSwitchPort(args) + assert.Nil(t, err) + fmt.Printf("++++++++ uuid: %s\n", uuid) + // time.Sleep(time.Millisecond) + // d.nbCli = nil + lsp, err := d.getLogicalSwitchPort(uuid) + assert.Nil(t, err) + fmt.Printf("++++++++ lsp: %v\n", lsp) + assert.NotNil(t, lsp.DynamicAddresses) + lsp1, err := d.getLogicalSwitchPortByName(LSPName(args.GuestID)) + assert.Nil(t, err) + fmt.Printf("++++++++ lsp1: %v\n", lsp1) +} + +func TestDeleteLSP(t *testing.T) { + // lsUUID := "46333352-cfbb-45e2-a172-c0076b985718" + guestID := "00009017034928197341160000000001" + cfg := &configs.OVNConfig{ + NBAddrs: []string{ + "tcp:192.168.160.25:6641", + }, + } + d, err := NewDriver(cfg) + assert.Nil(t, err) + ls, err := d.getLogicalSwitchPortByName(LSPName(guestID)) + assert.Nil(t, err) + fmt.Printf("+++++++++++ %v\n", ls) + err = d.deleteLogicalSwitchPort(&types.EndpointArgs{ + GuestID: guestID, + // OVNLogicSwitchUUID: lsUUID, + OVN: types.OVNArgs{ + LogicalSwitchName: "ut-test", + }, + }) + assert.Nil(t, err) +} + +func TestSetExternalIDs(t *testing.T) { + cfg := &configs.OVNConfig{ + NBAddrs: []string{ + "tcp:192.168.160.25:6641", + }, + OVSDBAddr: "tcp:192.168.160.26:6640", + } + d, err := NewDriver(cfg) + assert.Nil(t, err) + err = d.setExternalID("vm2", "iface-id", "vm2222222") + assert.Nil(t, err) +} diff --git a/internal/network/drivers/ovn/ovn_test.go b/internal/network/drivers/ovn/ovn_test.go new file mode 100644 index 0000000..6a7d89f --- /dev/null +++ b/internal/network/drivers/ovn/ovn_test.go @@ -0,0 +1,67 @@ +package ovn + +import ( + "testing" + + . 
"github.com/agiledragon/gomonkey/v2" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/stretchr/testify/assert" +) + +func TestCreateEndpointNetwork(t *testing.T) { + inputArgs := types.EndpointArgs{ + GuestID: "00052017027003203659270000000003", + MAC: "00:55:00:00:00:03", + OVN: types.OVNArgs{ + LogicalSwitchUUID: "haha-kaka", + }, + } + lspUUID := "kaka-123456" + cfg := &configs.OVNConfig{ + NBAddrs: []string{ + "tcp:127.0.0.1:6641", + }, + } + d, err := NewDriver(cfg) + assert.Nil(t, err) + assert.NotNil(t, d) + patches := ApplyPrivateMethod(d, "createLogicalSwitchPort", func(_ *Driver, args *types.EndpointArgs) (string, error) { + assert.Equal(t, inputArgs.OVN.LogicalSwitchUUID, args.OVN.LogicalSwitchUUID) + return lspUUID, nil + }) + defer patches.Reset() + + ls := &LogicalSwitch{ + UUID: "haha-kaka", + Name: "test-ls", + Config: map[string]string{ + "subnet": "192.168.110.1/24", + }, + } + patches = ApplyPrivateMethod(d, "getLogicalSwitch", func(_ *Driver, uuid string) (*LogicalSwitch, error) { + assert.Equal(t, inputArgs.OVN.LogicalSwitchUUID, uuid) + return ls, nil + }) + defer patches.Reset() + + addr := "00:11:22:33:44:55 192.168.110.3" + lsp := &LogicalSwitchPort{ + UUID: "haha-kaka", + Name: "test-lsp", + Addresses: []string{ + "00:11:22:33:44:55 dynamic", + }, + DynamicAddresses: &addr, + } + patches = ApplyPrivateMethod(d, "getLogicalSwitchPort", func(d *Driver, uuid string) (*LogicalSwitchPort, error) { + assert.Equal(t, lspUUID, uuid) + return lsp, nil + }) + defer patches.Reset() + args, rollback, err := d.CreateEndpointNetwork(inputArgs) + assert.Nil(t, err) + assert.Nil(t, rollback) + assert.Len(t, args.IPs, 1) + assert.Equal(t, args.IPs[0].IPAddr(), "192.168.110.3") +} diff --git a/internal/network/drivers/ovn/utils.go b/internal/network/drivers/ovn/utils.go new file mode 100644 index 0000000..cf31410 --- /dev/null +++ b/internal/network/drivers/ovn/utils.go @@ -0,0 +1,30 @@ +package ovn + +import ( + "encoding/hex" + + "github.com/google/uuid" +) + +func encodeHex(dst []byte, id uuid.UUID) { + hex.Encode(dst, id[:4]) + dst[8] = '_' + hex.Encode(dst[9:13], id[4:6]) + dst[13] = '_' + hex.Encode(dst[14:18], id[6:8]) + dst[18] = '_' + hex.Encode(dst[19:23], id[8:10]) + dst[23] = '_' + hex.Encode(dst[24:], id[10:]) +} + +func newRowUUID() (string, error) { + id, err := uuid.NewRandom() + if err != nil { + return "", err + } + var buf [36 + 3]byte + copy(buf[:], "row") + encodeHex(buf[3:], id) + return string(buf[:]), nil +} diff --git a/internal/vnet/vlan/ip.go b/internal/network/drivers/vlan/ip.go similarity index 92% rename from internal/vnet/vlan/ip.go rename to internal/network/drivers/vlan/ip.go index ca61e04..bb13cab 100644 --- a/internal/vnet/vlan/ip.go +++ b/internal/network/drivers/vlan/ip.go @@ -6,8 +6,8 @@ import ( "net" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/internal/vnet" - "github.com/projecteru2/yavirt/internal/vnet/device" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/network/utils/device" "github.com/projecteru2/yavirt/pkg/netx" ) @@ -69,7 +69,7 @@ func (ip *IP) BindGuestID(guestID string) { // Create . func (ip *IP) Create() error { - var ipam = NewIpam("", ip.Subnet.IntSubnet()) + var ipam = NewIpam(ip.Subnet.IntSubnet()) var ctx, cancel = meta.Context(context.Background()) defer cancel() @@ -126,7 +126,7 @@ func (ip *IP) equal(b *IP) bool { //nolint // NetworkMode . 
func (ip *IP) NetworkMode() string { - return vnet.NetworkVlan + return network.VlanMode } // NetworkName . diff --git a/internal/vnet/vlan/ipam.go b/internal/network/drivers/vlan/ipam.go similarity index 74% rename from internal/vnet/vlan/ipam.go rename to internal/network/drivers/vlan/ipam.go index 358c5af..c27f1c8 100644 --- a/internal/vnet/vlan/ipam.go +++ b/internal/network/drivers/vlan/ipam.go @@ -5,40 +5,40 @@ import ( clientv3 "go.etcd.io/etcd/client/v3" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/internal/meta" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/internal/network/types" "github.com/projecteru2/yavirt/pkg/store" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" ) // Ipam . type Ipam struct { - subnet int64 - guestID string + subnet int64 } // NewIpam . -func NewIpam(guestID string, subnet int64) *Ipam { +func NewIpam(subnet int64) *Ipam { return &Ipam{ - subnet: subnet, - guestID: guestID, + subnet: subnet, } } // Assign . -func (ipam *Ipam) Assign(ctx context.Context) (ip meta.IP, err error) { +func (ipam *Ipam) Assign(ctx context.Context, args *types.EndpointArgs) (ip meta.IP, err error) { var unlock utils.Unlocker if unlock, err = ipam.Lock(ctx); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } defer func() { - if ue := unlock(context.Background()); ue != nil { - err = errors.Wrap(err, ue) + if ue := unlock(context.TODO()); ue != nil { + err = errors.CombineErrors(err, ue) } }() - return ipam.assign(ctx, ipam.guestID) + return ipam.assign(ctx, args.GuestID) } // Release . @@ -49,12 +49,12 @@ func (ipam *Ipam) Release(ctx context.Context, ips ...meta.IP) (err error) { var unlock utils.Unlocker if unlock, err = ipam.Lock(ctx); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } defer func() { if ue := unlock(context.Background()); ue != nil { - err = errors.Wrap(err, ue) + err = errors.CombineErrors(err, ue) } }() @@ -69,21 +69,21 @@ func (ipam *Ipam) Insert(ctx context.Context, ip *IP) (err error) { var unlock utils.Unlocker if unlock, err = ipam.Lock(ctx); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } defer func() { if ue := unlock(context.Background()); ue != nil { - err = errors.Wrap(err, ue) + err = errors.CombineErrors(err, ue) } }() var exists bool switch exists, err = ipam.exists(ctx, ip); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "") case exists: - return errors.Annotatef(errors.ErrKeyExists, ip.CIDR()) + return errors.Wrapf(terrors.ErrKeyExists, ip.CIDR()) } return ipam.insert(ip) @@ -96,7 +96,7 @@ func (ipam *Ipam) Query(ctx context.Context, args meta.IPNets) ([]meta.IP, error for i := range args { if ips[i], err = ipam.load(ctx, args[i]); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } } @@ -106,7 +106,7 @@ func (ipam *Ipam) Query(ctx context.Context, args meta.IPNets) ([]meta.IP, error func (ipam *Ipam) load(_ context.Context, arg *meta.IPNet) (*IP, error) { subn, err := LoadSubnet(arg.IntSubnet) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } var ip = NewIP() @@ -115,7 +115,7 @@ func (ipam *Ipam) load(_ context.Context, arg *meta.IPNet) (*IP, error) { ip.occupied = arg.Assigned if err := meta.Load(ip); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return ip, nil @@ -128,7 +128,7 @@ func (ipam *Ipam) insert(ip *IP) error { func (ipam *Ipam) check(ips 
...meta.IP) error { for _, ip := range ips { if ip.IntSubnet() != ipam.subnet { - return errors.Annotatef(errors.ErrInvalidValue, "invalid subnet: %s", ip.SubnetAddr()) + return errors.Wrapf(terrors.ErrInvalidValue, "invalid subnet: %s", ip.SubnetAddr()) } } return nil @@ -138,7 +138,7 @@ func (ipam *Ipam) exists(ctx context.Context, ip *IP) (bool, error) { var keys = []string{ip.freeKey(), ip.occupiedKey()} var exists, err = store.Exists(ctx, keys) if err != nil { - return false, errors.Trace(err) + return false, errors.Wrap(err, "") } for _, v := range exists { @@ -159,7 +159,7 @@ func (ipam *Ipam) release(ctx context.Context, ips ...meta.IP) error { ip.BindGuestID("") if err := ipam.doop(ctx, ip, putkey, delkey); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } } @@ -169,12 +169,12 @@ func (ipam *Ipam) release(ctx context.Context, ips ...meta.IP) error { func (ipam *Ipam) assign(ctx context.Context, guestID string) (*IP, error) { var ip, err = ipam.pickup(ctx) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } ip.GuestID = guestID if err := ipam.occupy(ctx, ip); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return ip, nil @@ -189,7 +189,7 @@ func (ipam *Ipam) occupy(ctx context.Context, ip *IP) error { func (ipam *Ipam) doop(ctx context.Context, ip meta.IP, putkey, delkey string) error { var enc, err = utils.JSONEncode(ip, "\t") if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } var ops = []clientv3.Op{ @@ -199,9 +199,9 @@ func (ipam *Ipam) doop(ctx context.Context, ip meta.IP, putkey, delkey string) e switch succ, err := store.BatchOperate(ctx, ops); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "") case !succ: - return errors.Annotatef(errors.ErrOperateIP, "put: %s, del: %s", putkey, delkey) + return errors.Wrapf(terrors.ErrOperateIP, "put: %s, del: %s", putkey, delkey) } return nil @@ -210,12 +210,12 @@ func (ipam *Ipam) doop(ctx context.Context, ip meta.IP, putkey, delkey string) e func (ipam *Ipam) pickup(ctx context.Context) (*IP, error) { var subnet = NewSubnet(ipam.subnet) if err := meta.Load(subnet); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } var data, vers, err = store.GetPrefix(ctx, meta.FreeIPPrefix(ipam.subnet), 1) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } var ip = NewIP() @@ -224,11 +224,11 @@ func (ipam *Ipam) pickup(ctx context.Context) (*IP, error) { for key, val := range data { var ver, exists = vers[key] if !exists { - return nil, errors.Annotatef(errors.ErrKeyBadVersion, key) + return nil, errors.Wrapf(terrors.ErrKeyBadVersion, key) } if err := utils.JSONDecode(val, ip); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } ip.SetVer(ver) diff --git a/internal/network/drivers/vlan/metrics.go b/internal/network/drivers/vlan/metrics.go new file mode 100644 index 0000000..7449a05 --- /dev/null +++ b/internal/network/drivers/vlan/metrics.go @@ -0,0 +1,39 @@ +package vlan + +import ( + "sync/atomic" + + "github.com/projecteru2/core/utils" + "github.com/projecteru2/yavirt/configs" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + vlanHealthyDesc = prometheus.NewDesc( + prometheus.BuildFQName("network", "vlan", "healthy"), + "vlan healthy status.", + []string{"node"}, + nil) +) + +type MetricsCollector struct { + healthy atomic.Bool +} + +func (d *Handler) GetMetricsCollector() prometheus.Collector { + return 
d.mCol +} + +func (e *MetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- vlanHealthyDesc +} + +func (e *MetricsCollector) Collect(ch chan<- prometheus.Metric) { + healthy := utils.Bool2Int(e.healthy.Load()) + ch <- prometheus.MustNewConstMetric( + vlanHealthyDesc, + prometheus.GaugeValue, + float64(healthy), + configs.Hostname(), + ) +} diff --git a/internal/vnet/vlan/subnet.go b/internal/network/drivers/vlan/subnet.go similarity index 100% rename from internal/vnet/vlan/subnet.go rename to internal/network/drivers/vlan/subnet.go diff --git a/internal/network/drivers/vlan/vlan.go b/internal/network/drivers/vlan/vlan.go new file mode 100644 index 0000000..5f348d2 --- /dev/null +++ b/internal/network/drivers/vlan/vlan.go @@ -0,0 +1,84 @@ +package vlan + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/projecteru2/yavirt/pkg/terrors" +) + +// Handler . +type Handler struct { + mCol *MetricsCollector + subnet int64 +} + +// New . +func New(subnet int64) *Handler { + return &Handler{subnet: subnet, mCol: &MetricsCollector{}} +} + +func (h *Handler) CheckHealth(_ context.Context) error { + return nil +} + +// NewIP . +func (h *Handler) NewIP(_, _ string) (meta.IP, error) { + return nil, errors.Wrap(terrors.ErrNotImplemented, "NewIP error") +} + +// QueryIPs . +func (h *Handler) QueryIPs(ipns meta.IPNets) ([]meta.IP, error) { + return h.ipam().Query(context.TODO(), ipns) +} + +func (h *Handler) ipam() network.Ipam { + return NewIpam(h.subnet) +} + +// CreateEndpointNetwork . +func (h *Handler) CreateEndpointNetwork(args types.EndpointArgs) (resp types.EndpointArgs, rollback func() error, err error) { + ip, err := h.ipam().Assign(context.TODO(), &args) + if err != nil { + return + } + resp = args + resp.IPs = append(resp.IPs, ip) + rollback = func() error { + return h.ipam().Release(context.Background(), ip) + } + return +} + +// JoinEndpointNetwork . +func (h *Handler) JoinEndpointNetwork(types.EndpointArgs) (rollback func() error, err error) { + // DO NOTHING + return +} + +// DeleteEndpointNetwork . +func (h *Handler) DeleteEndpointNetwork(args types.EndpointArgs) error { + return h.ipam().Release(context.Background(), args.IPs...) +} + +// QueryIPv4 . +func (h *Handler) QueryIPv4(string) (meta.IP, error) { + return nil, errors.Wrapf(terrors.ErrNotImplemented, "QueryIPv4 error") +} + +// GetCidr . 
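// CreateEndpointNetwork above returns a rollback closure alongside the result, so the caller can
// undo the IPAM assignment if a later provisioning step fails. A minimal, standalone sketch of
// that pattern (allocate/doNextStep and the address are placeholders, not the yavirt API):
package main

import (
	"errors"
	"fmt"
)

// allocate hands out a resource plus a rollback that releases it.
func allocate() (string, func() error, error) {
	res := "192.168.110.3"
	rollback := func() error {
		fmt.Println("released", res)
		return nil
	}
	return res, rollback, nil
}

// doNextStep simulates a follow-up step that fails.
func doNextStep(string) error { return errors.New("attach failed") }

func provision() error {
	res, rollback, err := allocate()
	if err != nil {
		return err
	}
	if err := doNextStep(res); err != nil {
		// Undo the allocation before surfacing the error.
		return errors.Join(err, rollback())
	}
	return nil
}

func main() { fmt.Println(provision()) }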
+func (h *Handler) GetCidr() string { + ip := IP{Value: h.subnet, Subnet: &Subnet{SubnetPrefix: 0}} + return ip.CIDR() +} + +func (h *Handler) CreateNetworkPolicy(string) error { + return nil +} + +func (h *Handler) DeleteNetworkPolicy(string) error { + return nil +} diff --git a/internal/network/factory/factory.go b/internal/network/factory/factory.go new file mode 100644 index 0000000..12c01dd --- /dev/null +++ b/internal/network/factory/factory.go @@ -0,0 +1,135 @@ +package factory + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/network/drivers/calico" + cniCalico "github.com/projecteru2/yavirt/internal/network/drivers/cni/calico" + "github.com/projecteru2/yavirt/internal/network/drivers/fake" + "github.com/projecteru2/yavirt/internal/network/drivers/ovn" + "github.com/projecteru2/yavirt/internal/network/drivers/vlan" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + gF *Factory +) + +type Factory struct { + Config *configs.NetworkConfig + networkHandlers map[string]network.Driver +} + +func Setup(cfg *configs.NetworkConfig) (err error) { + gF, err = New(cfg) + return err +} + +func CheckHealth(ctx context.Context) error { + for _, d := range gF.networkHandlers { + if err := d.CheckHealth(ctx); err != nil { + return err + } + } + return nil +} + +func GetMetricsCollectors() (ans []prometheus.Collector) { + for _, d := range gF.networkHandlers { + col := d.GetMetricsCollector() + if col == nil { + continue + } + ans = append(ans, col) + } + return ans +} + +func GetDriver(mode string) network.Driver { + return gF.networkHandlers[mode] +} + +func ListDrivers() map[string]network.Driver { + return gF.networkHandlers +} + +func New(cfg *configs.NetworkConfig) (*Factory, error) { + f := &Factory{ + Config: cfg, + networkHandlers: map[string]network.Driver{ + network.FakeMode: &fake.Driver{}, + }, + } + err := f.setupDrivers() + return f, err +} + +func (f *Factory) setupDrivers() error { + cfg := f.Config + for _, mode := range cfg.Modes { + switch mode { + case network.CalicoMode: + if err := f.setupCalico(); err != nil { + return errors.Wrap(err, "") + } + case network.CalicoCNIMode: + if err := f.setupCalicoCNI(); err != nil { + return errors.Wrap(err, "") + } + case network.VlanMode: + if err := f.setupVlan(); err != nil { + return errors.Wrap(err, "") + } + case network.OVNMode: + if err := f.setupOVN(); err != nil { + return errors.Wrap(err, "") + } + default: + return errors.Newf("invalid network mode: %s", mode) + } + } + return nil +} + +func (f *Factory) setupCalicoCNI() error { + cali, err := calico.NewDriver(&f.Config.Calico) + if err != nil { + return errors.Wrap(err, "") + } + + driver, err := cniCalico.NewDriver(&f.Config.CNI, cali) + if err != nil { + return errors.Wrap(err, "") + } + f.networkHandlers[network.CalicoCNIMode] = driver + return nil +} + +func (f *Factory) setupVlan() error { //nolint + cfg := f.Config.Vlan + f.networkHandlers[network.VlanMode] = vlan.New(int64(cfg.Subnet)) + return nil +} + +func (f *Factory) setupOVN() (err error) { + cfg := &f.Config.OVN + f.networkHandlers[network.OVNMode], err = ovn.NewDriver(cfg) + return +} + +func (f *Factory) setupCalico() error { + cali, err := calico.NewDriver(&f.Config.Calico) + if err != nil { + return errors.Wrap(err, "") + } + + // if err := svc.caliHandler.InitGateway(f.Config.Calico.GatewayName); err != nil { + // return errors.Wrap(err, "") + // } + + 
f.networkHandlers[network.CalicoMode] = cali + return nil +} diff --git a/internal/network/network.go b/internal/network/network.go new file mode 100644 index 0000000..f7eb42e --- /dev/null +++ b/internal/network/network.go @@ -0,0 +1,32 @@ +package network + +import ( + "context" + + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/network/types" + "github.com/prometheus/client_golang/prometheus" +) + +type Driver interface { + // Prepare(ctx context.Context) error + + CheckHealth(ctx context.Context) error + QueryIPs(meta.IPNets) ([]meta.IP, error) + + CreateEndpointNetwork(types.EndpointArgs) (types.EndpointArgs, func() error, error) + JoinEndpointNetwork(types.EndpointArgs) (func() error, error) + DeleteEndpointNetwork(types.EndpointArgs) error + + CreateNetworkPolicy(string) error + DeleteNetworkPolicy(string) error + + GetMetricsCollector() prometheus.Collector +} + +// Ipam . +type Ipam interface { + Assign(ctx context.Context, args *types.EndpointArgs) (meta.IP, error) + Release(context.Context, ...meta.IP) error + Query(context.Context, meta.IPNets) ([]meta.IP, error) +} diff --git a/internal/network/types/types.go b/internal/network/types/types.go new file mode 100644 index 0000000..d8f7add --- /dev/null +++ b/internal/network/types/types.go @@ -0,0 +1,68 @@ +package types + +import ( + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/pkg/terrors" +) + +type CalicoArgs struct { + IPPool string `json:"ippool"` + Namespace string `json:"namespace"` + + ResourceVersion string + UID string + Profiles []string +} + +type OVNArgs struct { + LogicalSwitchUUID string `json:"logical_switch_uuid"` + LogicalSwitchName string `json:"logical_switch_name"` + LogicalSwitchPortName string `json:"logical_switch_port_name"` +} + +type VlanArgs struct { + // TODO +} + +type CNIArgs struct { +} + +// EndpointArgs . +type EndpointArgs struct { + GuestID string + EndpointID string + IPs []meta.IP + DevName string + MAC string + MTU int + Hostname string + + Calico CalicoArgs + OVN OVNArgs + Vlan VlanArgs + CNI CNIArgs +} + +// Check . +func (a EndpointArgs) Check() error { + switch { + case len(a.EndpointID) < 1: + return errors.Wrapf(terrors.ErrInvalidValue, "EndpointID is empty") + + case len(a.IPs) < 1: + return errors.Wrapf(terrors.ErrInvalidValue, "IPs is empty") + + case a.DevName == "": + return errors.Wrapf(terrors.ErrInvalidValue, "Device is nil") + + case len(a.MAC) < 1: + return errors.Wrapf(terrors.ErrInvalidValue, "MAC is empty") + + case len(a.Hostname) < 1: + return errors.Wrapf(terrors.ErrInvalidValue, "Hostname is empty") + + default: + return nil + } +} diff --git a/internal/vnet/device/addr.go b/internal/network/utils/device/addr.go similarity index 87% rename from internal/vnet/device/addr.go rename to internal/network/utils/device/addr.go index d007f89..5a1a818 100644 --- a/internal/vnet/device/addr.go +++ b/internal/network/utils/device/addr.go @@ -5,7 +5,7 @@ import ( "github.com/vishvananda/netlink" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" ) // Addr . 
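// Each driver registered above exposes a prometheus.Collector (see the vlan MetricsCollector
// earlier in this diff, and GetMetricsCollector in the Driver interface), emitting a const gauge
// on every scrape. A standalone collector in the same style; the metric name and label value are
// placeholders:
package main

import (
	"fmt"
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
)

var healthyDesc = prometheus.NewDesc("example_healthy", "healthy status.", []string{"node"}, nil)

type healthCollector struct{ healthy atomic.Bool }

func (c *healthCollector) Describe(ch chan<- *prometheus.Desc) { ch <- healthyDesc }

func (c *healthCollector) Collect(ch chan<- prometheus.Metric) {
	v := 0.0
	if c.healthy.Load() {
		v = 1.0
	}
	ch <- prometheus.MustNewConstMetric(healthyDesc, prometheus.GaugeValue, v, "node-1")
}

func main() {
	col := &healthCollector{}
	col.healthy.Store(true)

	reg := prometheus.NewRegistry()
	reg.MustRegister(col)

	mfs, err := reg.Gather()
	fmt.Println(len(mfs), err) // 1 <nil>
}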
@@ -49,12 +49,12 @@ func (a Addrs) Len() int { func (d *Driver) BindAddr(cidr, linkName string) error { addr, err := d.ParseCIDR(cidr) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } link, err := d.ShowLink(linkName) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } return link.BindAddr(addr) @@ -64,7 +64,7 @@ func (d *Driver) BindAddr(cidr, linkName string) error { func (d *Driver) ParseCIDR(cidr string) (*Addr, error) { var raw, err = netlink.ParseAddr(cidr) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return &Addr{Addr: raw}, nil @@ -74,7 +74,7 @@ func (d *Driver) ParseCIDR(cidr string) (*Addr, error) { func (d *Driver) ListAddrs(linkName string, _ int) (Addrs, error) { link, err := d.ShowLink(linkName) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return link.ListAddr() diff --git a/internal/vnet/device/attr.go b/internal/network/utils/device/attr.go similarity index 79% rename from internal/vnet/device/attr.go rename to internal/network/utils/device/attr.go index dd04d0a..9d3a2dd 100644 --- a/internal/vnet/device/attr.go +++ b/internal/network/utils/device/attr.go @@ -6,7 +6,8 @@ import ( "github.com/vishvananda/netlink" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" ) @@ -34,11 +35,11 @@ func newHardwareAddr(linkType string) (net.HardwareAddr, error) { mac, err = newTuntapMAC() default: - err = errors.Annotatef(errors.ErrInvalidValue, "unexpected link type: %s", linkType) + err = errors.Wrapf(terrors.ErrInvalidValue, "unexpected link type: %s", linkType) } if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return net.ParseMAC(mac) @@ -47,7 +48,7 @@ func newHardwareAddr(linkType string) (net.HardwareAddr, error) { func newTuntapMAC() (string, error) { var buf, err = utils.RandBuf(3) if err != nil { - return "", errors.Trace(err) + return "", errors.Wrap(err, "") } return fmt.Sprintf("fe:54:00:%02x:%02x:%02x", buf[0], buf[1], buf[2]), nil } diff --git a/internal/vnet/device/const.go b/internal/network/utils/device/const.go similarity index 100% rename from internal/vnet/device/const.go rename to internal/network/utils/device/const.go diff --git a/internal/vnet/device/driver.go b/internal/network/utils/device/driver.go similarity index 82% rename from internal/vnet/device/driver.go rename to internal/network/utils/device/driver.go index 0ddc9db..2c4be70 100644 --- a/internal/vnet/device/driver.go +++ b/internal/network/utils/device/driver.go @@ -5,7 +5,7 @@ import ( "github.com/vishvananda/netlink" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" ) // Driver . 
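// newTuntapMAC in attr.go above derives a MAC from three random bytes under the fe:54:00 prefix;
// 0xfe has the locally-administered bit set and the multicast bit clear, so the address is a
// valid unicast MAC that cannot collide with vendor-assigned ones. A standalone sketch of the
// same idea using crypto/rand:
package main

import (
	"crypto/rand"
	"fmt"
	"net"
)

func randomMAC() (net.HardwareAddr, error) {
	buf := make([]byte, 3)
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}
	s := fmt.Sprintf("fe:54:00:%02x:%02x:%02x", buf[0], buf[1], buf[2])
	return net.ParseMAC(s)
}

func main() {
	mac, err := randomMAC()
	fmt.Println(mac, err)
}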
@@ -19,7 +19,7 @@ type Driver struct { func New() (*Driver, error) { var handle, err = netlink.NewHandle() if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return &Driver{Handle: handle}, nil diff --git a/internal/vnet/device/dummy.go b/internal/network/utils/device/dummy.go similarity index 100% rename from internal/vnet/device/dummy.go rename to internal/network/utils/device/dummy.go diff --git a/internal/vnet/device/generic.go b/internal/network/utils/device/generic.go similarity index 86% rename from internal/vnet/device/generic.go rename to internal/network/utils/device/generic.go index 4a91868..b80d0b1 100644 --- a/internal/vnet/device/generic.go +++ b/internal/network/utils/device/generic.go @@ -4,9 +4,10 @@ import ( "fmt" "net" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/vishvananda/netlink" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" ) type genericLink struct { @@ -28,7 +29,7 @@ func (g *genericLink) BindAddr(addr *Addr) error { var err = g.AddrAdd(g, addr.Addr) if err != nil && isFileExistsErr(err) { - return errors.Annotatef(errors.ErrVirtLinkAddrExists, "%s", addr) + return errors.Wrapf(terrors.ErrVirtLinkAddrExists, "%s", addr) } return err @@ -37,7 +38,7 @@ func (g *genericLink) BindAddr(addr *Addr) error { func (g *genericLink) ListAddr() (Addrs, error) { var raw, err = g.AddrList(g, FamilyIPv4) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return newAddrs(raw), nil } @@ -47,7 +48,7 @@ func (g *genericLink) Add() error { defer g.Unlock() if _, err := g.showLink(g.Name()); err == nil { - return errors.Annotatef(errors.ErrVirtLinkExists, g.Name()) + return errors.Wrapf(terrors.ErrVirtLinkExists, g.Name()) } return g.LinkAdd(g.Link) @@ -77,12 +78,12 @@ func (g *genericLink) ClearRoutes() error { var raw, err = g.RouteList(g, FamilyIPv4) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } for _, r := range raw { if err := g.deleteRoute(r.Dst); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } } @@ -95,7 +96,7 @@ func (g *genericLink) DeleteRoute(cidr string) error { var raw, err = g.RouteList(g, FamilyIPv4) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } for _, r := range raw { diff --git a/internal/vnet/device/link.go b/internal/network/utils/device/link.go similarity index 73% rename from internal/vnet/device/link.go rename to internal/network/utils/device/link.go index a8c6988..da2fbf8 100644 --- a/internal/vnet/device/link.go +++ b/internal/network/utils/device/link.go @@ -3,9 +3,10 @@ package device import ( "strings" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/vishvananda/netlink" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" ) // VirtLink . 
@@ -31,7 +32,7 @@ type VirtLink interface { //nolint func createVirtLink(linkType, name string, d *Driver) (VirtLink, error) { var hwAddr, err = newHardwareAddr(linkType) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } var attrs = NewAttrs(name, hwAddr) @@ -44,7 +45,7 @@ func createVirtLink(linkType, name string, d *Driver) (VirtLink, error) { return createTuntap(attrs, d), nil default: - return nil, errors.Annotatef(errors.ErrInvalidValue, "unexpected link type: %s", linkType) + return nil, errors.Wrapf(terrors.ErrInvalidValue, "unexpected link type: %s", linkType) } } @@ -59,7 +60,7 @@ func newVirtLink(raw netlink.Link, d *Driver) (VirtLink, error) { return newTuntap(raw, d), nil default: - return nil, errors.Annotatef(errors.ErrInvalidValue, "unexpected link type: %s", raw.Type()) + return nil, errors.Wrapf(terrors.ErrInvalidValue, "unexpected link type: %s", raw.Type()) } } @@ -70,7 +71,7 @@ func (d *Driver) ShowLinkByIndex(index int) (VirtLink, error) { var raw, err = d.LinkByIndex(index) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return newVirtLink(raw, d) @@ -87,10 +88,10 @@ func (d *Driver) showLink(name string) (VirtLink, error) { var raw, err = d.LinkByName(name) if err != nil { if err.Error() == "Link not found" { - err = errors.Trace(errors.ErrVirtLinkNotExists) + err = errors.Wrapf(terrors.ErrVirtLinkNotExists, "failed to get link %s", name) } - return nil, errors.Annotatef(err, name) + return nil, errors.Wrap(err, name) } return newVirtLink(raw, d) @@ -103,14 +104,14 @@ func (d *Driver) ListLinks() (VirtLinks, error) { var raws, err = d.LinkList() if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } var links = make(VirtLinks, len(raws)) for i, raw := range raws { if links[i], err = newVirtLink(raw, d); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } } @@ -121,16 +122,28 @@ func (d *Driver) ListLinks() (VirtLinks, error) { func (d *Driver) AddLink(linkType, name string) (VirtLink, error) { var link, err = createVirtLink(linkType, name, d) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } if err := link.Add(); err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return link, nil } +// delete link by name +func (d *Driver) DeleteLink(name string) error { + var raw, err = d.LinkByName(name) + if err != nil { + if err.Error() == "Link not found" { + err = errors.Wrapf(terrors.ErrVirtLinkNotExists, "failed to delete link %s", name) + } + return err + } + return d.LinkDel(raw) +} + // CheckLinkType . func (d *Driver) CheckLinkType(linkType string) bool { return linkType == LinkTypeDummy || diff --git a/internal/vnet/device/link_test.go b/internal/network/utils/device/link_test.go similarity index 100% rename from internal/vnet/device/link_test.go rename to internal/network/utils/device/link_test.go diff --git a/internal/vnet/device/route.go b/internal/network/utils/device/route.go similarity index 86% rename from internal/vnet/device/route.go rename to internal/network/utils/device/route.go index c34f24e..5cfc189 100644 --- a/internal/vnet/device/route.go +++ b/internal/network/utils/device/route.go @@ -7,8 +7,9 @@ import ( "github.com/vishvananda/netlink" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/pkg/netx" + "github.com/projecteru2/yavirt/pkg/terrors" ) // Route . 
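// Throughout this diff the project's old errors helpers (Trace/Annotatef) are replaced with
// github.com/cockroachdb/errors plus sentinel values in pkg/terrors. The property relied on is
// that Wrap/Wrapf keep the cause chain intact, so callers can still match sentinels with
// errors.Is. A minimal, standalone sketch (the sentinel name is illustrative):
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

var errNotExists = errors.New("virt link not exists")

func showLink(name string) error {
	return errors.Wrapf(errNotExists, "failed to get link %s", name)
}

func main() {
	err := showLink("tap0")
	fmt.Println(errors.Is(err, errNotExists)) // true
	fmt.Println(err)                          // failed to get link tap0: virt link not exists
}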
@@ -38,7 +39,7 @@ func (r *Route) add() error { var err = r.RouteAdd(r.Route) if err != nil && isFileExistsErr(err) { - return errors.Trace(errors.ErrVirtLinkRouteExists) + return errors.Wrap(terrors.ErrVirtLinkRouteExists, "failed to add route") } return err @@ -49,7 +50,7 @@ func (r *Route) delete() error { } func (r *Route) String() string { - var linkName, _ = r.linkName() //nolint + var linkName, _ = r.linkName() if r.isGw() { return fmt.Sprintf("default via %s dev %s", r.Gw, linkName) @@ -67,7 +68,7 @@ func (r *Route) linkName() (string, error) { var name = "UNKNOWN" var link, err = r.Link() if err != nil { - return name, errors.Trace(err) + return name, errors.Wrap(err, "") } return link.Name(), nil } @@ -83,7 +84,7 @@ type Routes []*Route func newRoutes(raw []netlink.Route, d *Driver) Routes { var routes = make(Routes, len(raw)) for i, r := range raw { - routes[i] = newRoute(&r, d) //nolint + routes[i] = newRoute(&r, d) } return routes } @@ -108,12 +109,12 @@ func (d *Driver) ListRoute(dest string) (Routes, error) { var ipn, err = netx.ParseCIDROrIP(dest) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } raw, err := d.RouteGet(ipn.IP) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return newRoutes(raw, d), nil @@ -123,7 +124,7 @@ func (d *Driver) ListRoute(dest string) (Routes, error) { func (d *Driver) ClearRoutes(linkName string) error { var link, err = d.ShowLink(linkName) if err != nil { - return errors.Annotatef(err, linkName) + return errors.Wrap(err, linkName) } return link.ClearRoutes() } @@ -132,7 +133,7 @@ func (d *Driver) ClearRoutes(linkName string) error { func (d *Driver) DeleteRoute(dest string) error { var ipn, err = netx.ParseCIDROrIP(dest) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } d.Lock() @@ -153,7 +154,7 @@ func (d *Driver) AddRoute(dest, src, linkName string) error { link, err := d.showLink(linkName) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } return d.addRoute(dest, src, link) @@ -162,7 +163,7 @@ func (d *Driver) AddRoute(dest, src, linkName string) error { func (d *Driver) addRoute(dest, src string, link VirtLink) error { ipn, err := netx.ParseCIDROrIP(dest) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } var route = newDefaultRoute(d, ipn) diff --git a/internal/vnet/device/route_test.go b/internal/network/utils/device/route_test.go similarity index 100% rename from internal/vnet/device/route_test.go rename to internal/network/utils/device/route_test.go diff --git a/internal/vnet/device/tuntap.go b/internal/network/utils/device/tuntap.go similarity index 100% rename from internal/vnet/device/tuntap.go rename to internal/network/utils/device/tuntap.go diff --git a/internal/network/utils/utils.go b/internal/network/utils/utils.go new file mode 100644 index 0000000..232a83c --- /dev/null +++ b/internal/network/utils/utils.go @@ -0,0 +1,27 @@ +package utils + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + pkgutils "github.com/projecteru2/yavirt/pkg/utils" +) + +func GenEndpointID() (string, error) { + var uuid, err = pkgutils.UUIDStr() + if err != nil { + return "", errors.Wrap(err, "") + } + return strings.ReplaceAll(uuid, "-", ""), nil +} + +func GenDevName(prefix string) (string, error) { + var endpID, err = GenEndpointID() + if err != nil { + return "", errors.Wrap(err, "") + } + var name = fmt.Sprintf("%s%s", prefix, endpID[:pkgutils.Min(12, len(endpID))]) + return name, nil + +} diff 
--git a/internal/server/grpc/grpc_app.go b/internal/rpc/grpc_app.go similarity index 58% rename from internal/server/grpc/grpc_app.go rename to internal/rpc/grpc_app.go index f18dcf8..739e8c0 100644 --- a/internal/server/grpc/grpc_app.go +++ b/internal/rpc/grpc_app.go @@ -9,17 +9,20 @@ import ( pb "github.com/projecteru2/libyavirt/grpc/gen" "github.com/projecteru2/libyavirt/types" - - virtypes "github.com/projecteru2/yavirt/internal/virt/types" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/log" - - "github.com/projecteru2/yavirt/internal/server" + "github.com/samber/lo" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/service" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/utils" + vmiFact "github.com/yuyang0/vmimage/factory" ) // GRPCYavirtd . type GRPCYavirtd struct { - service *server.Service + service service.Service } // Ping . @@ -29,9 +32,14 @@ func (y *GRPCYavirtd) Ping(_ context.Context, _ *pb.Empty) (*pb.PingMessage, err } // GetInfo . -func (y *GRPCYavirtd) GetInfo(_ context.Context, _ *pb.Empty) (*pb.InfoMessage, error) { - log.Infof("[grpcserver] get host info") - info := y.service.Info() +func (y *GRPCYavirtd) GetInfo(ctx context.Context, _ *pb.Empty) (*pb.InfoMessage, error) { + if configs.Conf.Log.Verbose { + log.Debug(ctx, "[grpcserver] get host info") + } + info, err := y.service.Info() + if err != nil { + return nil, errors.Wrap(err, "") + } return &pb.InfoMessage{ Id: info.ID, Cpu: int64(info.CPU), @@ -43,14 +51,14 @@ func (y *GRPCYavirtd) GetInfo(_ context.Context, _ *pb.Empty) (*pb.InfoMessage, // GetGuest . func (y *GRPCYavirtd) GetGuest(ctx context.Context, opts *pb.GetGuestOptions) (*pb.GetGuestMessage, error) { - log.Infof("[grpcserver] get guest: %s", opts.Id) + log.Infof(ctx, "[grpcserver] get guest: %s", opts.Id) guestReq := types.GuestReq{ID: opts.Id} - guest, err := y.service.GetGuest(y.service.VirtContext(ctx), guestReq.VirtID()) + guest, err := y.service.GetGuest(ctx, guestReq.VirtID()) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return &pb.GetGuestMessage{ - Id: guest.ID, + Id: types.EruID(guest.ID), Status: guest.Status, TransitStatus: guest.TransitStatus, CreateTime: guest.CreateTime, @@ -62,26 +70,34 @@ func (y *GRPCYavirtd) GetGuest(ctx context.Context, opts *pb.GetGuestOptions) (* ImageId: guest.ImageID, ImageName: guest.ImageName, Networks: guest.Networks, + Ips: guest.IPs, + Labels: guest.Labels, + Hostname: guest.Hostname, + Running: guest.Running, }, nil } // GetGuestIDList gets all local vms' domain names regardless of their metadata validility. 
func (y *GRPCYavirtd) GetGuestIDList(ctx context.Context, _ *pb.GetGuestIDListOptions) (*pb.GetGuestIDListMessage, error) { - log.Infof("[grpcserver] get guest id list") - ids, err := y.service.GetGuestIDList(y.service.VirtContext(ctx)) + log.Info(ctx, "[grpcserver] get guest id list") + ids, err := y.service.GetGuestIDList(ctx) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } - return &pb.GetGuestIDListMessage{Ids: ids}, nil + eruIDs := lo.Map(ids, func(id string, _ int) string { + return types.EruID(id) + }) + return &pb.GetGuestIDListMessage{Ids: eruIDs}, nil } // Events func (y *GRPCYavirtd) Events(_ *pb.EventsOptions, server pb.YavirtdRPC_EventsServer) error { - log.Infof("[grpcserver] events method calling") - defer log.Infof("[grpcserver] events method completed") - ctx := server.Context() - watcher, err := y.service.WatchGuestEvents(y.service.VirtContext(ctx)) + + log.Info(ctx, "[grpcserver] events method calling") + defer log.Info(ctx, "[grpcserver] events method completed") + + watcher, err := y.service.WatchGuestEvents(ctx) if err != nil { return err } @@ -91,25 +107,25 @@ func (y *GRPCYavirtd) Events(_ *pb.EventsOptions, server pb.YavirtdRPC_EventsSer wg.Add(1) go func() { + defer log.Info(ctx, "[grpcserver] events goroutine has done") defer wg.Done() - defer log.Infof("[grpcserver] events goroutine has done") defer watcher.Stop() for { select { case event := <-watcher.Events(): if err := server.Send(parseEvent(event)); err != nil { - log.ErrorStack(err) + log.Error(ctx, err) return } case <-watcher.Done(): // The watcher already has been stopped. - log.Infof("[grpcserver] watcher has done") + log.Info(ctx, "[grpcserver] watcher has done") return case <-ctx.Done(): - log.Infof("[grpcserver] ctx done") + log.Info(ctx, "[grpcserver] ctx done") return } } @@ -118,23 +134,23 @@ func (y *GRPCYavirtd) Events(_ *pb.EventsOptions, server pb.YavirtdRPC_EventsSer return nil } -func parseEvent(event virtypes.Event) *pb.EventMessage { +func parseEvent(event intertypes.Event) *pb.EventMessage { return &pb.EventMessage{ Id: types.EruID(event.ID), Type: event.Type, - Action: event.Action, + Action: string(event.Op), TimeNano: event.Time.UnixNano(), } } // GetGuestUUID . func (y *GRPCYavirtd) GetGuestUUID(ctx context.Context, opts *pb.GetGuestOptions) (*pb.GetGuestUUIDMessage, error) { - log.Infof("[grpcserver] get guest UUID: %s", opts.Id) + log.Infof(ctx, "[grpcserver] get guest UUID: %s", opts.Id) guestReq := types.GuestReq{ID: opts.Id} - uuid, err := y.service.GetGuestUUID(y.service.VirtContext(ctx), guestReq.VirtID()) + uuid, err := y.service.GetGuestUUID(ctx, guestReq.VirtID()) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return &pb.GetGuestUUIDMessage{Uuid: uuid}, nil @@ -142,13 +158,13 @@ func (y *GRPCYavirtd) GetGuestUUID(ctx context.Context, opts *pb.GetGuestOptions // CreateGuest . 
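// The Events handler above fans the watcher's channel out to the gRPC stream in a goroutine and
// exits on whichever happens first: the watcher stopping or the request context being cancelled.
// A standalone sketch of that select loop (the event type and send function are placeholders):
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func stream(ctx context.Context, events <-chan string, send func(string) error) {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case ev, ok := <-events:
				if !ok {
					return // watcher stopped
				}
				if err := send(ev); err != nil {
					return // stream broken
				}
			case <-ctx.Done():
				return // client went away
			}
		}
	}()
	wg.Wait()
}

func main() {
	events := make(chan string, 1)
	events <- "create"
	close(events)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	stream(ctx, events, func(ev string) error { fmt.Println("sent", ev); return nil })
}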
func (y *GRPCYavirtd) CreateGuest(ctx context.Context, opts *pb.CreateGuestOptions) (*pb.CreateGuestMessage, error) { - log.Infof("[grpcserver] create guest: %q", opts) - guest, err := y.service.CreateGuest(y.service.VirtContext(ctx), virtypes.ConvertGRPCCreateOptions(opts)) + log.Infof(ctx, "[grpcserver] create guest: %q", opts) + guest, err := y.service.CreateGuest(ctx, intertypes.ConvertGRPCCreateOptions(opts)) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return &pb.CreateGuestMessage{ - Id: guest.ID, + Id: types.EruID(guest.ID), Status: guest.Status, TransitStatus: guest.TransitStatus, CreateTime: guest.CreateTime, @@ -166,52 +182,29 @@ func (y *GRPCYavirtd) CreateGuest(ctx context.Context, opts *pb.CreateGuestOptio // CaptureGuest . func (y *GRPCYavirtd) CaptureGuest(ctx context.Context, opts *pb.CaptureGuestOptions) (*pb.UserImageMessage, error) { - log.Infof("[grpcserver] capture guest: %q", opts) - - req := types.CaptureGuestReq{ - Name: opts.Name, - User: opts.User, - Overridden: opts.Overridden, - } - req.ID = opts.Id - - virtCtx := y.service.VirtContext(ctx) + log.Infof(ctx, "[grpcserver] capture guest: %q", opts) - uimg, err := y.service.CaptureGuest(virtCtx, req) + imgName := vmiFact.NewImageName(opts.User, opts.Name) + uimg, err := y.service.CaptureGuest(ctx, utils.VirtID(opts.Id), imgName, opts.Overridden) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return &pb.UserImageMessage{ - Id: uimg.GetID(), - Name: uimg.Name, - Distro: uimg.Distro, - LatestVersion: uimg.Version, - Size: uimg.Size, + Name: uimg.Fullname(), + Distro: uimg.OS.Distrib, + Size: uimg.VirtualSize, }, nil } // ResizeGuest . func (y *GRPCYavirtd) ResizeGuest(ctx context.Context, opts *pb.ResizeGuestOptions) (*pb.ControlGuestMessage, error) { - log.Infof("[grpcserver] resize guest: %q", opts) + log.Infof(ctx, "[grpcserver] resize guest: %q", opts) msg := &pb.ControlGuestMessage{Msg: "ok"} - virtCtx := y.service.VirtContext(ctx) - - req := types.ResizeGuestReq{ - CPU: int(opts.Cpu), - Mem: opts.Memory, - Resources: opts.Resources, - } - req.Volumes = make([]types.Volume, len(opts.Volumes)) - for i, vol := range opts.Volumes { - req.Volumes[i].Mount = vol.Mount - req.Volumes[i].Capacity = vol.Capacity - req.Volumes[i].IO = vol.Io - } - req.ID = opts.Id - err := y.service.ResizeGuest(virtCtx, req) + req := intertypes.ConvertGRPCResizeOptions(opts) + err := y.service.ResizeGuest(ctx, utils.VirtID(opts.Id), req) if err != nil { msg.Msg = fmt.Sprintf("%s", err) } @@ -221,50 +214,45 @@ func (y *GRPCYavirtd) ResizeGuest(ctx context.Context, opts *pb.ResizeGuestOptio // ControlGuest . func (y *GRPCYavirtd) ControlGuest(ctx context.Context, opts *pb.ControlGuestOptions) (_ *pb.ControlGuestMessage, err error) { - log.Infof("[grpcserver] control guest: %q", opts) - req := types.GuestReq{ID: opts.Id} - virtCtx := y.service.VirtContext(ctx) - err = y.service.ControlGuest(virtCtx, req.VirtID(), opts.Operation, opts.Force) + log.Infof(ctx, "[grpcserver] control guest: %q", opts) + err = y.service.ControlGuest(ctx, utils.VirtID(opts.Id), opts.Operation, opts.Force) msg := "ok" if err != nil { msg = fmt.Sprintf("%s", err) } - return &pb.ControlGuestMessage{Msg: msg}, errors.Trace(err) + return &pb.ControlGuestMessage{Msg: msg}, errors.Wrap(err, "") } // AttachGuest . 
func (y *GRPCYavirtd) AttachGuest(server pb.YavirtdRPC_AttachGuestServer) (err error) { - defer log.Infof("[grpcserver] attach guest complete") - log.Infof("[grpcserver] attach guest start") + ctx := server.Context() + defer log.Info(ctx, "[grpcserver] attach guest complete") opts, err := server.Recv() if err != nil { return } + log.Infof(ctx, "[grpcserver] attach guest start: %v", opts) - virtCtx := y.service.VirtContext(server.Context()) - req := types.GuestReq{ID: opts.Id} serverStream := &ExecuteGuestServerStream{ ID: opts.Id, server: server, } - flags := virtypes.OpenConsoleFlags{Force: opts.Force, Safe: opts.Safe, Commands: opts.Commands} - return y.service.AttachGuest(virtCtx, req.VirtID(), serverStream, flags) + flags := intertypes.NewOpenConsoleFlags(opts.Force, opts.Safe, opts.Commands) + return y.service.AttachGuest(ctx, utils.VirtID(opts.Id), serverStream, flags) } // ResizeConsoleWindow . func (y *GRPCYavirtd) ResizeConsoleWindow(ctx context.Context, opts *pb.ResizeWindowOptions) (*pb.Empty, error) { req := types.GuestReq{ID: opts.Id} - virtCtx := y.service.VirtContext(ctx) - return nil, y.service.ResizeConsoleWindow(virtCtx, req.VirtID(), uint(opts.Height), uint(opts.Width)) + return nil, y.service.ResizeConsoleWindow(ctx, req.VirtID(), uint(opts.Height), uint(opts.Width)) } // ExecuteGuest . func (y *GRPCYavirtd) ExecuteGuest(ctx context.Context, opts *pb.ExecuteGuestOptions) (msg *pb.ExecuteGuestMessage, err error) { - log.Infof("[grpcserver] execute guest start") + log.Infof(ctx, "[grpcserver] execute guest start") req := types.GuestReq{ID: opts.Id} - virtCtx := y.service.VirtContext(ctx) - m, err := y.service.ExecuteGuest(virtCtx, req.VirtID(), opts.Commands) + m, err := y.service.ExecuteGuest(ctx, req.VirtID(), opts.Commands) if err != nil { return } @@ -275,9 +263,9 @@ func (y *GRPCYavirtd) ExecuteGuest(ctx context.Context, opts *pb.ExecuteGuestOpt }, nil } -func (y *GRPCYavirtd) ExecExitCode(_ context.Context, opts *pb.ExecExitCodeOptions) (msg *pb.ExecExitCodeMessage, err error) { - log.Infof("[grpcserver] get exit code start") - defer log.Infof("[grpcserver] get exit code done") +func (y *GRPCYavirtd) ExecExitCode(ctx context.Context, opts *pb.ExecExitCodeOptions) (msg *pb.ExecExitCodeMessage, err error) { + log.Infof(ctx, "[grpcserver] get exit code start %q", opts) + defer log.Infof(ctx, "[grpcserver] get exit code done") req := types.GuestReq{ID: opts.Id} @@ -290,7 +278,7 @@ func (y *GRPCYavirtd) ExecExitCode(_ context.Context, opts *pb.ExecExitCodeOptio // ConnectNetwork . func (y *GRPCYavirtd) ConnectNetwork(ctx context.Context, opts *pb.ConnectNetworkOptions) (*pb.ConnectNetworkMessage, error) { - log.Infof("[grpcserver] connect network start") + log.Infof(ctx, "[grpcserver] connect network start %q", opts) req := types.ConnectNetworkReq{ Network: opts.Network, @@ -298,10 +286,9 @@ func (y *GRPCYavirtd) ConnectNetwork(ctx context.Context, opts *pb.ConnectNetwor } req.ID = opts.Id - virtCtx := y.service.VirtContext(ctx) - cidr, err := y.service.ConnectNetwork(virtCtx, req.VirtID(), req.Network, req.IPv4) + cidr, err := y.service.ConnectNetwork(ctx, req.VirtID(), req.Network, req.IPv4) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } return &pb.ConnectNetworkMessage{Cidr: cidr}, nil @@ -309,15 +296,14 @@ func (y *GRPCYavirtd) ConnectNetwork(ctx context.Context, opts *pb.ConnectNetwor // DisconnectNetwork . 
func (y *GRPCYavirtd) DisconnectNetwork(ctx context.Context, opts *pb.DisconnectNetworkOptions) (*pb.DisconnectNetworkMessage, error) { - log.Infof("[grpcserver] disconnect network start") + log.Infof(ctx, "[grpcserver] disconnect network start") var req types.DisconnectNetworkReq req.ID = opts.Id req.Network = opts.Network - virtCtx := y.service.VirtContext(ctx) - if err := y.service.DisconnectNetwork(virtCtx, req.VirtID(), req.Network); err != nil { - return nil, errors.Trace(err) + if err := y.service.DisconnectNetwork(ctx, req.VirtID(), req.Network); err != nil { + return nil, errors.Wrap(err, "") } return &pb.DisconnectNetworkMessage{Msg: "ok"}, nil @@ -325,20 +311,19 @@ func (y *GRPCYavirtd) DisconnectNetwork(ctx context.Context, opts *pb.Disconnect // NetworkList . func (y *GRPCYavirtd) NetworkList(ctx context.Context, opts *pb.NetworkListOptions) (*pb.NetworkListMessage, error) { - log.Infof("[grpcserver] list network start") - defer log.Infof("[grpcserver] list network completed %v", opts) + log.Infof(ctx, "[grpcserver] list network start") + defer log.Infof(ctx, "[grpcserver] list network completed %v", opts) - virtCtx := y.service.VirtContext(ctx) - networks, err := y.service.NetworkList(virtCtx, opts.Drivers) + networks, err := y.service.NetworkList(ctx, opts.Drivers) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } msg := &pb.NetworkListMessage{Networks: make(map[string][]byte)} for _, network := range networks { content, err := json.Marshal(network.Subnets) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } msg.Networks[network.Name] = content } @@ -348,10 +333,10 @@ func (y *GRPCYavirtd) NetworkList(ctx context.Context, opts *pb.NetworkListOptio // Cat . func (y *GRPCYavirtd) Cat(opts *pb.CatOptions, srv pb.YavirtdRPC_CatServer) error { - log.Infof("[grpcserver] cat %v", opts) - defer log.Infof("[grpcserver] cat %v completed", opts) + ctx := srv.Context() + log.Infof(ctx, "[grpcserver] cat %v", opts) + defer log.Infof(ctx, "[grpcserver] cat %v completed", opts) - ctx := y.service.VirtContext(srv.Context()) req := types.GuestReq{ID: opts.Id} wc := &CatWriteCloser{srv: srv} @@ -362,8 +347,9 @@ func (y *GRPCYavirtd) Cat(opts *pb.CatOptions, srv pb.YavirtdRPC_CatServer) erro // CopyToGuest . func (y *GRPCYavirtd) CopyToGuest(server pb.YavirtdRPC_CopyToGuestServer) (err error) { - defer log.Infof("[grpcserver] copy file to guest complete") - log.Infof("[grpcserver] copy file to guest start") + ctx := server.Context() + defer log.Infof(ctx, "[grpcserver] copy file to guest complete") + log.Infof(ctx, "[grpcserver] copy file to guest start") var opts *pb.CopyOptions byteChan := make(chan []byte, 4*types.BufferSize) @@ -396,7 +382,6 @@ func (y *GRPCYavirtd) CopyToGuest(server pb.YavirtdRPC_CopyToGuestServer) (err e } }() - ctx := y.service.VirtContext(server.Context()) if err := y.service.CopyToGuest(ctx, req.VirtID(), dest, byteChan, override); err != nil { <-end return server.SendAndClose(&pb.CopyMessage{Msg: "copy failed: " + err.Error(), Failed: true}) @@ -410,30 +395,29 @@ func (y *GRPCYavirtd) CopyToGuest(server pb.YavirtdRPC_CopyToGuestServer) (err e // Log . 
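// CopyToGuest above decouples receiving chunks from the gRPC stream and writing them into the
// guest with a buffered chan []byte: one goroutine receives and pushes chunks, the consumer
// drains the channel until it is closed. A standalone sketch of that producer/consumer shape:
package main

import (
	"bytes"
	"fmt"
	"io"
)

func copyViaChannel(dst io.Writer, chunks [][]byte) error {
	ch := make(chan []byte, 4)

	// Producer: push chunks, then close to signal EOF.
	go func() {
		defer close(ch)
		for _, c := range chunks {
			ch <- c
		}
	}()

	// Consumer: drain until the channel is closed.
	for c := range ch {
		if _, err := dst.Write(c); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	err := copyViaChannel(&buf, [][]byte{[]byte("hello "), []byte("guest")})
	fmt.Println(buf.String(), err) // hello guest <nil>
}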
func (y *GRPCYavirtd) Log(opts *pb.LogOptions, srv pb.YavirtdRPC_LogServer) error { - log.Infof("[grpcserver] log start") - defer log.Infof("[grpcserver] log completed") + ctx := srv.Context() + log.Infof(ctx, "[grpcserver] log start") + defer log.Infof(ctx, "[grpcserver] log completed") - virtCtx := y.service.VirtContext(srv.Context()) req := types.GuestReq{ID: opts.Id} wc := &LogWriteCloser{srv: srv} defer wc.Close() - return y.service.Log(virtCtx, req.VirtID(), "/var/log/syslog", int(opts.N), wc) + return y.service.Log(ctx, req.VirtID(), "/var/log/syslog", int(opts.N), wc) } // WaitGuest . func (y *GRPCYavirtd) WaitGuest(ctx context.Context, opts *pb.WaitGuestOptions) (*pb.WaitGuestMessage, error) { - log.Infof("[grpcserver] wait guest") - defer log.Infof("[grpcserver] wait complete") + log.Infof(ctx, "[grpcserver] wait guest") + defer log.Infof(ctx, "[grpcserver] wait complete") req := types.GuestReq{ID: opts.Id} - virtCtx := y.service.VirtContext(ctx) - msg, code, err := y.service.Wait(virtCtx, req.VirtID(), true) + msg, code, err := y.service.Wait(ctx, req.VirtID(), true) if err != nil { return &pb.WaitGuestMessage{ - Msg: errors.Trace(err).Error(), + Msg: errors.Wrap(err, "").Error(), Code: -1, - }, errors.Trace(err) + }, errors.Wrap(err, "") } return &pb.WaitGuestMessage{Msg: msg, Code: int64(code)}, nil @@ -441,76 +425,83 @@ func (y *GRPCYavirtd) WaitGuest(ctx context.Context, opts *pb.WaitGuestOptions) // PushImage . func (y *GRPCYavirtd) PushImage(ctx context.Context, opts *pb.PushImageOptions) (*pb.PushImageMessage, error) { - log.Infof("[grpcserver] PushImage %v", opts) - defer log.Infof("[grpcserver] Push %v completed", opts) + log.Infof(ctx, "[grpcserver] PushImage %v", opts) + defer log.Infof(ctx, "[grpcserver] Push %v completed", opts) - virtCtx := y.service.VirtContext(ctx) msg := &pb.PushImageMessage{} - if err := y.service.PushImage(virtCtx, opts.ImgName, opts.User); err != nil { + // TODO add force to opts + force := false + imgName := vmiFact.NewImageName(opts.User, opts.ImgName) + rc, err := y.service.PushImage(ctx, imgName, force) + if err != nil { msg.Err = err.Error() return msg, err } + defer utils.EnsureReaderClosed(rc) return msg, nil } // RemoveImage . func (y *GRPCYavirtd) RemoveImage(ctx context.Context, opts *pb.RemoveImageOptions) (*pb.RemoveImageMessage, error) { - log.Infof("[grpcserver] RemoveImage %v", opts) - defer log.Infof("[grpcserver] Remove %v completed", opts) + log.Infof(ctx, "[grpcserver] RemoveImage %v", opts) + defer log.Infof(ctx, "[grpcserver] Remove %v completed", opts) - virtCtx := y.service.VirtContext(ctx) msg := &pb.RemoveImageMessage{} var err error - msg.Removed, err = y.service.RemoveImage(virtCtx, opts.Image, opts.User, opts.Force, opts.Prune) + imgName := vmiFact.NewImageName(opts.User, opts.Image) + msg.Removed, err = y.service.RemoveImage(ctx, imgName, opts.Force, opts.Prune) return msg, err } // ListImage . 
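// PushImage and PullImage above defer utils.EnsureReaderClosed on the reader returned by the
// image service. Assuming that helper drains and then closes the reader (so the underlying
// push/pull isn't cut short and the connection can be reused), a standalone sketch of such a
// helper would look like this — the name and behaviour here are an assumption, not the yavirt
// implementation:
package main

import (
	"fmt"
	"io"
	"strings"
)

func ensureReaderClosed(r io.ReadCloser) {
	if r == nil {
		return
	}
	_, _ = io.Copy(io.Discard, r) // drain whatever is left
	_ = r.Close()
}

func main() {
	rc := io.NopCloser(strings.NewReader("progress output..."))
	defer ensureReaderClosed(rc)
	fmt.Println("image pushed")
}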
func (y *GRPCYavirtd) ListImage(ctx context.Context, opts *pb.ListImageOptions) (*pb.ListImageMessage, error) { - log.Infof("[grpcserver] ListImage %v", opts) - defer log.Infof("[grpcserver] ListImage %v completed", opts) - - virtCtx := y.service.VirtContext(ctx) + log.Infof(ctx, "[grpcserver] ListImage %v", opts) + defer log.Infof(ctx, "[grpcserver] ListImage %v completed", opts) - imgs, err := y.service.ListImage(virtCtx, opts.Filter) + imgs, err := y.service.ListImage(ctx, opts.Filter) if err != nil { return nil, err } - + // TODO: remove User in pb msg := &pb.ListImageMessage{Images: []*pb.ImageItem{}} for _, img := range imgs { - msg.Images = append(msg.Images, types.ToGRPCImageItem(img)) + msg.Images = append(msg.Images, &pb.ImageItem{ + Name: img.Fullname(), + Distro: img.OS.Distrib, + }) } return msg, nil } func (y *GRPCYavirtd) PullImage(ctx context.Context, opts *pb.PullImageOptions) (*pb.PullImageMessage, error) { - log.Infof("[grpcserver] PullImage %v", opts) - defer log.Infof("[grpcserver] PullImage %v completed", opts) - - virtCtx := y.service.VirtContext(ctx) + log.Infof(ctx, "[grpcserver] PullImage %v", opts) + defer log.Infof(ctx, "[grpcserver] PullImage %v completed", opts) - msg, err := y.service.PullImage(virtCtx, opts.Name, opts.All) + img, rc, err := y.service.PullImage(ctx, opts.Name) if err != nil { return nil, err } + defer utils.EnsureReaderClosed(rc) - return &pb.PullImageMessage{Result: msg}, nil + // TODO change pb to return image + msg, err := json.Marshal(img) + if err != nil { + return nil, err + } + return &pb.PullImageMessage{Result: string(msg)}, nil } // DigestImage . func (y *GRPCYavirtd) DigestImage(ctx context.Context, opts *pb.DigestImageOptions) (*pb.DigestImageMessage, error) { - log.Infof("[grpcserver] DigestImage %v", opts) - defer log.Infof("[grpcserver] DigestImage %v completed", opts) + log.Infof(ctx, "[grpcserver] DigestImage %v", opts) + defer log.Infof(ctx, "[grpcserver] DigestImage %v completed", opts) - virtCtx := y.service.VirtContext(ctx) - - digests, err := y.service.DigestImage(virtCtx, opts.ImageName, opts.Local) + digests, err := y.service.DigestImage(ctx, opts.ImageName, opts.Local) if err != nil { return nil, err } @@ -520,18 +511,16 @@ func (y *GRPCYavirtd) DigestImage(ctx context.Context, opts *pb.DigestImageOptio // ListSnapshot . func (y *GRPCYavirtd) ListSnapshot(ctx context.Context, opts *pb.ListSnapshotOptions) (*pb.ListSnapshotMessage, error) { - log.Infof("[grpcserver] list snapshot: %q", opts) - - virtCtx := y.service.VirtContext(ctx) + log.Infof(ctx, "[grpcserver] list snapshot: %q", opts) req := types.ListSnapshotReq{ ID: opts.Id, VolID: opts.VolId, } - snaps, err := y.service.ListSnapshot(virtCtx, req) + snaps, err := y.service.ListSnapshot(ctx, req) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } snapshots := []*pb.ListSnapshotMessageItem{} @@ -551,17 +540,16 @@ func (y *GRPCYavirtd) ListSnapshot(ctx context.Context, opts *pb.ListSnapshotOpt // CreateSnapshot . 
func (y *GRPCYavirtd) CreateSnapshot(ctx context.Context, opts *pb.CreateSnapshotOptions) (*pb.CreateSnapshotMessage, error) { - log.Infof("[grpcserver] create snapshot: %q", opts) + log.Infof(ctx, "[grpcserver] create snapshot: %q", opts) msg := &pb.CreateSnapshotMessage{Msg: "ok"} - virtCtx := y.service.VirtContext(ctx) req := types.CreateSnapshotReq{ ID: opts.Id, VolID: opts.VolId, } - err := y.service.CreateSnapshot(virtCtx, req) + err := y.service.CreateSnapshot(ctx, req) if err != nil { msg.Msg = fmt.Sprintf("%s", err) } @@ -571,10 +559,9 @@ func (y *GRPCYavirtd) CreateSnapshot(ctx context.Context, opts *pb.CreateSnapsho // CommitSnapshot . func (y *GRPCYavirtd) CommitSnapshot(ctx context.Context, opts *pb.CommitSnapshotOptions) (*pb.CommitSnapshotMessage, error) { - log.Infof("[grpcserver] commit snapshot: %q", opts) + log.Infof(ctx, "[grpcserver] commit snapshot: %q", opts) msg := &pb.CommitSnapshotMessage{Msg: "ok"} - virtCtx := y.service.VirtContext(ctx) req := types.CommitSnapshotReq{ ID: opts.Id, @@ -582,7 +569,7 @@ func (y *GRPCYavirtd) CommitSnapshot(ctx context.Context, opts *pb.CommitSnapsho SnapID: opts.SnapId, } - err := y.service.CommitSnapshot(virtCtx, req) + err := y.service.CommitSnapshot(ctx, req) if err != nil { msg.Msg = fmt.Sprintf("%s", err) } @@ -592,10 +579,9 @@ func (y *GRPCYavirtd) CommitSnapshot(ctx context.Context, opts *pb.CommitSnapsho // RestoreSnapshot . func (y *GRPCYavirtd) RestoreSnapshot(ctx context.Context, opts *pb.RestoreSnapshotOptions) (*pb.RestoreSnapshotMessage, error) { - log.Infof("[grpcserver] restore snapshot: %q", opts) + log.Infof(ctx, "[grpcserver] restore snapshot: %q", opts) msg := &pb.RestoreSnapshotMessage{Msg: "ok"} - virtCtx := y.service.VirtContext(ctx) req := types.RestoreSnapshotReq{ ID: opts.Id, @@ -603,10 +589,29 @@ func (y *GRPCYavirtd) RestoreSnapshot(ctx context.Context, opts *pb.RestoreSnaps SnapID: opts.SnapId, } - err := y.service.RestoreSnapshot(virtCtx, req) + err := y.service.RestoreSnapshot(ctx, req) if err != nil { msg.Msg = fmt.Sprintf("%s", err) } return msg, err } + +// ExecuteGuest . +func (y *GRPCYavirtd) RawEngine(ctx context.Context, opts *pb.RawEngineOptions) (msg *pb.RawEngineMessage, err error) { + logger := log.WithFunc("RawEngine").WithField("id", opts.Id).WithField("op", opts.Op) + logger.Infof(ctx, "[grpcserver] raw engine operation, params: %s", string(opts.Params)) + req := types.RawEngineReq{ + ID: opts.Id, + Op: opts.Op, + Params: opts.Params, + } + m, err := y.service.RawEngine(ctx, utils.VirtID(opts.Id), req) + if err != nil { + return + } + return &pb.RawEngineMessage{ + Id: opts.Id, + Data: m.Data, + }, nil +} diff --git a/internal/rpc/grpc_server.go b/internal/rpc/grpc_server.go new file mode 100644 index 0000000..d026f42 --- /dev/null +++ b/internal/rpc/grpc_server.go @@ -0,0 +1,117 @@ +package grpcserver + +import ( + "context" + "crypto/tls" + "net" + "path/filepath" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/reflection" + + "github.com/projecteru2/core/auth" + "github.com/projecteru2/core/log" + pb "github.com/projecteru2/libyavirt/grpc/gen" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/service" + "github.com/projecteru2/yavirt/pkg/utils" +) + +// GRPCServer . 
+type GRPCServer struct { + server *grpc.Server + app pb.YavirtdRPCServer + quit chan struct{} +} + +func loadTLSCredentials(dir string) (credentials.TransportCredentials, error) { + // Load server's certificate and private key + certFile := filepath.Join(dir, "server-cert.pem") + keyFile := filepath.Join(dir, "server-key.pem") + if (!utils.FileExists(certFile)) || (!utils.FileExists(keyFile)) { + return nil, nil //nolint + } + serverCert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + + // Create the credentials and return it + config := &tls.Config{ + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{serverCert}, + ClientAuth: tls.NoClientCert, + } + + return credentials.NewTLS(config), nil +} + +func New(cfg *configs.Config, svc service.Service, quit chan struct{}) (*GRPCServer, error) { + logger := log.WithFunc("rpc.New") + opts := []grpc.ServerOption{} + certDir := filepath.Join(cfg.CertPath, "yavirt") + tlsCredentials, err := loadTLSCredentials(certDir) + if err != nil { + return nil, err + } + if tlsCredentials != nil { + logger.Infof(context.TODO(), "grpc server tls enable.") + opts = append(opts, grpc.Creds(tlsCredentials)) + } + if cfg.Auth.Username != "" { + logger.Infof(context.TODO(), "grpc server auth enable.") + auth := auth.NewAuth(cfg.Auth) + opts = append(opts, grpc.StreamInterceptor(auth.StreamInterceptor)) + opts = append(opts, grpc.UnaryInterceptor(auth.UnaryInterceptor)) + logger.Infof(context.TODO(), "username %s password %s", cfg.Auth.Username, cfg.Auth.Password) + } + srv := &GRPCServer{ + server: grpc.NewServer(opts...), + app: &GRPCYavirtd{service: svc}, + quit: quit, + } + reflection.Register(srv.server) + + return srv, nil +} + +// Serve . +func (s *GRPCServer) Serve() error { + defer func() { + log.WithFunc("rpc.Serve").Warnf(context.TODO(), "[grpcserver] main loop %p exit", s) + }() + lis, err := net.Listen("tcp", configs.Conf.BindGRPCAddr) + if err != nil { + return err + } + pb.RegisterYavirtdRPCServer(s.server, s.app) + + return s.server.Serve(lis) +} + +// Close . 
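// loadTLSCredentials above makes server TLS opt-in: when the cert/key pair is missing it returns
// nil and the server starts in plaintext, otherwise the credentials are passed via grpc.Creds.
// A standalone sketch of the TLS branch ("server-cert.pem"/"server-key.pem" are placeholder
// paths):
package main

import (
	"crypto/tls"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func serverCreds(certFile, keyFile string) (credentials.TransportCredentials, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return credentials.NewTLS(&tls.Config{
		MinVersion:   tls.VersionTLS12,
		Certificates: []tls.Certificate{cert},
	}), nil
}

func main() {
	opts := []grpc.ServerOption{}
	if creds, err := serverCreds("server-cert.pem", "server-key.pem"); err == nil {
		opts = append(opts, grpc.Creds(creds))
	} else {
		fmt.Println("TLS disabled:", err)
	}
	_ = grpc.NewServer(opts...)
}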
+func (s *GRPCServer) Stop(force bool) { + logger := log.WithFunc("rpc.Close") + if force { + logger.Warnf(context.TODO(), "[grpcserver] terminate grpc server forcefully") + s.server.Stop() + return + } + + gracefulDone := make(chan struct{}) + go func() { + defer close(gracefulDone) + s.server.GracefulStop() + }() + + gracefulTimer := time.NewTimer(configs.Conf.GracefulTimeout) + select { + case <-gracefulDone: + logger.Infof(context.TODO(), "[grpcserver] terminate grpc server gracefully") + case <-gracefulTimer.C: + logger.Warnf(context.TODO(), "[grpcserver] terminate grpc server forcefully") + s.server.Stop() + } +} diff --git a/internal/server/grpc/types.go b/internal/rpc/types.go similarity index 100% rename from internal/server/grpc/types.go rename to internal/rpc/types.go diff --git a/internal/server/calico.go b/internal/server/calico.go deleted file mode 100644 index 59c89b9..0000000 --- a/internal/server/calico.go +++ /dev/null @@ -1,58 +0,0 @@ -package server - -import ( - "os" - - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/internal/vnet" - "github.com/projecteru2/yavirt/internal/vnet/calico" - "github.com/projecteru2/yavirt/internal/vnet/device" - calihandler "github.com/projecteru2/yavirt/internal/vnet/handler/calico" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/netx" -) - -func (svc *Service) setupCalico() error { - if !svc.couldSetupCalico() { - if svc.Host.NetworkMode == vnet.NetworkCalico { - return errors.Annotatef(errors.ErrInvalidValue, "invalid Calico config") - } - return nil - } - - if err := svc.setupCalicoHandler(); err != nil { - return errors.Trace(err) - } - - if err := svc.caliHandler.InitGateway(configs.Conf.CalicoGatewayName); err != nil { - return errors.Trace(err) - } - - return nil -} - -func (svc *Service) setupCalicoHandler() error { - cali, err := calico.NewDriver(configs.Conf.CalicoConfigFile, configs.Conf.CalicoPoolNames) - if err != nil { - return errors.Trace(err) - } - - dev, err := device.New() - if err != nil { - return errors.Trace(err) - } - - outboundIP, err := netx.GetOutboundIP(configs.Conf.Core.Addrs[0]) - if err != nil { - return errors.Trace(err) - } - - svc.caliHandler = calihandler.New(dev, cali, configs.Conf.CalicoPoolNames, outboundIP) - - return nil -} - -func (svc *Service) couldSetupCalico() bool { - var env = configs.Conf.CalicoETCDEnv - return len(configs.Conf.CalicoConfigFile) > 0 || len(os.Getenv(env)) > 0 -} diff --git a/internal/server/conv.go b/internal/server/conv.go deleted file mode 100644 index 9efdfc4..0000000 --- a/internal/server/conv.go +++ /dev/null @@ -1,62 +0,0 @@ -package server - -import ( - "strings" - - pb "github.com/projecteru2/core/rpc/gen" - - "github.com/projecteru2/libyavirt/types" - "github.com/projecteru2/yavirt/internal/models" -) - -func convGuestIDsResp(localIDs []string) []string { - eruIDs := make([]string, len(localIDs)) - for i, id := range localIDs { - eruIDs[i] = types.EruID(id) - } - return eruIDs -} - -func convGuestResp(g *models.Guest) (resp *types.Guest) { - resp = &types.Guest{} - resp.ID = types.EruID(g.ID) - resp.Status = g.Status - resp.CreateTime = g.CreatedTime - resp.UpdateTime = g.UpdatedTime - resp.ImageName = g.ImageName - resp.ImageUser = g.ImageUser - resp.CPU = g.CPU - resp.Mem = g.Memory - - if len(g.IPs) > 0 { - var ips = make([]string, len(g.IPs)) - for i, ip := range g.IPs { - ips[i] = ip.IPAddr() - } - resp.Networks = map[string]string{"IP": strings.Join(ips, ", ")} - } - - return -} - -// 
ConvSetWorkloadsStatusOptions . -func ConvSetWorkloadsStatusOptions(gss []types.EruGuestStatus) *pb.SetWorkloadsStatusOptions { - css := make([]*pb.WorkloadStatus, len(gss)) - for i, gs := range gss { - css[i] = convWorkloadStatus(gs) - } - - return &pb.SetWorkloadsStatusOptions{ - Status: css, - } -} - -func convWorkloadStatus(gs types.EruGuestStatus) *pb.WorkloadStatus { - return &pb.WorkloadStatus{ - Id: gs.EruGuestID, - Running: gs.Running, - Healthy: gs.Healthy, - Ttl: int64(gs.TTL.Seconds()), - Networks: map[string]string{"IP": gs.GetIPAddrs()}, - } -} diff --git a/internal/server/grpc/grpc_server.go b/internal/server/grpc/grpc_server.go deleted file mode 100644 index f42bfbc..0000000 --- a/internal/server/grpc/grpc_server.go +++ /dev/null @@ -1,77 +0,0 @@ -package grpcserver - -import ( - "time" - - "google.golang.org/grpc" - - pb "github.com/projecteru2/libyavirt/grpc/gen" - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/internal/server" - "github.com/projecteru2/yavirt/pkg/log" -) - -// GRPCServer . -type GRPCServer struct { - *server.Server - - server *grpc.Server - app pb.YavirtdRPCServer -} - -// Listen . -func Listen(svc *server.Service) (srv *GRPCServer, err error) { - srv = &GRPCServer{} - if srv.Server, err = server.Listen(configs.Conf.BindGRPCAddr, svc); err != nil { - return - } - - srv.server = grpc.NewServer() - srv.app = &GRPCYavirtd{service: svc} - - return -} - -// Reload . -func (s *GRPCServer) Reload() error { - return nil -} - -// Serve . -func (s *GRPCServer) Serve() error { - defer func() { - log.Warnf("[grpcserver] main loop %p exit", s) - s.Close() - }() - - pb.RegisterYavirtdRPCServer(s.server, s.app) - - return s.server.Serve(s.Listener) -} - -// Close . -func (s *GRPCServer) Close() { - s.Exit.Do(func() { - close(s.Exit.Ch) - - gracefulDone := make(chan struct{}) - go func() { - defer close(gracefulDone) - s.server.GracefulStop() - }() - - gracefulTimer := time.NewTimer(configs.Conf.GracefulTimeout.Duration()) - select { - case <-gracefulDone: - log.Infof("[grpcserver] terminate grpc server gracefully") - case <-gracefulTimer.C: - log.Warnf("[grpcserver] terminate grpc server forcefully") - s.server.Stop() - } - }) -} - -// ExitCh . 
-func (s *GRPCServer) ExitCh() chan struct{} { - return s.Exit.Ch -} diff --git a/internal/server/http/apiserver.go b/internal/server/http/apiserver.go deleted file mode 100644 index db12499..0000000 --- a/internal/server/http/apiserver.go +++ /dev/null @@ -1,120 +0,0 @@ -package httpserver - -import ( - "context" - "net/http" - - "github.com/gin-gonic/gin" - - "github.com/projecteru2/libyavirt/types" - "github.com/projecteru2/yavirt/internal/models" - "github.com/projecteru2/yavirt/internal/server" - "github.com/projecteru2/yavirt/internal/virt" - "github.com/projecteru2/yavirt/pkg/errors" -) - -func newAPIHandler(svc *server.Service) http.Handler { - gin.SetMode(gin.ReleaseMode) - - var api = &apiServer{service: svc} - var router = gin.Default() - - var v1 = router.Group("/v1") - { - v1.GET("/ping", api.Ping) - v1.GET("/info", api.Info) - v1.GET("/guests/:id", api.GetGuest) - v1.GET("/guests/:id/uuid", api.GetGuestUUID) - v1.POST("/guests", api.CreateGuest) - v1.POST("/guests/stop", api.StopGuest) - v1.POST("/guests/start", api.StartGuest) - v1.POST("/guests/destroy", api.DestroyGuest) - v1.POST("/guests/execute", api.ExecuteGuest) - v1.POST("/guests/resize", api.ResizeGuest) - v1.POST("/guests/capture", api.CaptureGuest) - v1.POST("/guests/connect", api.ConnectNetwork) - v1.POST("/guests/resize_window", api.ResizeConsoleWindow) - // v1.POST("/guests/snapshot/list", api.ListSnapshot) - v1.POST("/guests/snapshot/create", api.CreateSnapshot) - v1.POST("/guests/snapshot/commit", api.CommitSnapshot) - v1.POST("/guests/snapshot/restore", api.RestoreSnapshot) - } - - return router -} - -type apiServer struct { - service *server.Service -} - -func (s *apiServer) host() *models.Host { //nolint - return s.service.Host -} - -func (s *apiServer) Info(c *gin.Context) { - s.renderOK(c, s.service.Info()) -} - -func (s *apiServer) Ping(c *gin.Context) { - c.JSON(http.StatusOK, s.service.Ping()) -} - -func (s *apiServer) dispatchMsg(c *gin.Context, req any, fn func(virt.Context) error) { - s.dispatch(c, req, func(ctx virt.Context) (any, error) { - return nil, fn(ctx) - }) -} - -type operate func(virt.Context) (any, error) - -func (s *apiServer) dispatch(c *gin.Context, req any, fn operate) { - if req != nil { - if err := s.bind(c, req); err != nil { - s.renderErr(c, err) - return - } - } - - var resp, err = fn(s.virtContext()) - if err != nil { - s.renderErr(c, err) - return - } - - if resp == nil { - s.renderOKMsg(c) - } else { - s.renderOK(c, resp) - } -} - -func (s *apiServer) bind(c *gin.Context, req any) error { - switch c.Request.Method { - case http.MethodGet: - return c.ShouldBindUri(req) - - case http.MethodPost: - return c.ShouldBind(req) - - default: - return errors.Errorf("invalid HTTP method: %s", c.Request.Method) - } -} - -var okMsg = types.NewMsg("ok") - -func (s *apiServer) renderOKMsg(c *gin.Context) { - s.renderOK(c, okMsg) -} - -func (s *apiServer) renderOK(c *gin.Context, resp any) { - c.JSON(http.StatusOK, resp) -} - -func (s *apiServer) renderErr(c *gin.Context, err error) { - c.JSON(http.StatusInternalServerError, err.Error()) -} - -func (s *apiServer) virtContext() virt.Context { - return s.service.VirtContext(context.Background()) -} diff --git a/internal/server/http/guest.go b/internal/server/http/guest.go deleted file mode 100644 index 9144d31..0000000 --- a/internal/server/http/guest.go +++ /dev/null @@ -1,133 +0,0 @@ -package httpserver - -import ( - "github.com/gin-gonic/gin" - - "github.com/projecteru2/libyavirt/types" - - "github.com/projecteru2/yavirt/internal/virt" - 
virtypes "github.com/projecteru2/yavirt/internal/virt/types" -) - -func (s *apiServer) GetGuest(c *gin.Context) { - var req types.GuestReq - s.dispatch(c, &req, func(ctx virt.Context) (any, error) { - return s.service.GetGuest(ctx, req.VirtID()) - }) -} - -func (s *apiServer) GetGuestIDList(c *gin.Context) { - s.dispatch(c, nil, func(ctx virt.Context) (any, error) { - return s.service.GetGuestIDList(ctx) - }) -} - -func (s *apiServer) GetGuestUUID(c *gin.Context) { - var req types.GuestReq - s.dispatch(c, &req, func(ctx virt.Context) (any, error) { - return s.service.GetGuestUUID(ctx, req.VirtID()) - }) -} - -func (s *apiServer) CaptureGuest(c *gin.Context) { - var req types.CaptureGuestReq - s.dispatch(c, &req, func(ctx virt.Context) (any, error) { - return s.service.CaptureGuest(ctx, req) - }) -} - -func (s *apiServer) ResizeGuest(c *gin.Context) { - var req types.ResizeGuestReq - s.dispatchMsg(c, &req, func(ctx virt.Context) error { - return s.service.ResizeGuest(ctx, req) - }) -} - -func (s *apiServer) DestroyGuest(c *gin.Context) { - var req types.GuestReq - s.dispatchMsg(c, &req, func(ctx virt.Context) error { - return s.service.ControlGuest(ctx, req.VirtID(), types.OpDestroy, req.Force) - }) -} - -func (s *apiServer) StopGuest(c *gin.Context) { - var req types.GuestReq - s.dispatchMsg(c, &req, func(ctx virt.Context) error { - return s.service.ControlGuest(ctx, req.VirtID(), types.OpStop, req.Force) - }) -} - -func (s *apiServer) StartGuest(c *gin.Context) { - var req types.GuestReq - s.dispatchMsg(c, &req, func(ctx virt.Context) error { - return s.service.ControlGuest(ctx, req.VirtID(), types.OpStart, false) - }) -} - -func (s *apiServer) CreateGuest(c *gin.Context) { - var req types.CreateGuestReq - - s.dispatch(c, &req, func(ctx virt.Context) (any, error) { - return s.service.CreateGuest( - ctx, - virtypes.GuestCreateOption{ - CPU: req.CPU, - Mem: req.Mem, - ImageName: req.ImageName, - Volumes: req.Volumes, - DmiUUID: req.DmiUUID, - Labels: req.Labels, - ImageUser: req.ImageUser, - }, - ) - }) -} - -func (s *apiServer) ExecuteGuest(c *gin.Context) { - var req types.ExecuteGuestReq - s.dispatch(c, &req, func(ctx virt.Context) (any, error) { - return s.service.ExecuteGuest(ctx, req.VirtID(), req.Commands) - }) -} - -func (s *apiServer) ConnectNetwork(c *gin.Context) { - var req types.ConnectNetworkReq - s.dispatch(c, &req, func(ctx virt.Context) (any, error) { - return s.service.ConnectNetwork(ctx, req.VirtID(), req.Network, req.IPv4) - }) -} - -func (s *apiServer) ResizeConsoleWindow(c *gin.Context) { - var req types.ResizeConsoleWindowReq - s.dispatch(c, &req, func(ctx virt.Context) (any, error) { - return nil, s.service.ResizeConsoleWindow(ctx, req.VirtID(), req.Height, req.Width) - }) -} - -func (s *apiServer) ListSnapshot(c *gin.Context) { - var req types.ListSnapshotReq - s.dispatch(c, &req, func(ctx virt.Context) (any, error) { - return s.service.ListSnapshot(ctx, req) - }) -} - -func (s *apiServer) CreateSnapshot(c *gin.Context) { - var req types.CreateSnapshotReq - s.dispatchMsg(c, &req, func(ctx virt.Context) error { - return s.service.CreateSnapshot(ctx, req) - }) -} - -func (s *apiServer) CommitSnapshot(c *gin.Context) { - var req types.CommitSnapshotReq - s.dispatchMsg(c, &req, func(ctx virt.Context) error { - return s.service.CommitSnapshot(ctx, req) - }) -} - -func (s *apiServer) RestoreSnapshot(c *gin.Context) { - var req types.RestoreSnapshotReq - s.dispatchMsg(c, &req, func(ctx virt.Context) error { - return s.service.RestoreSnapshot(ctx, req) - }) -} diff --git 
a/internal/server/http/http_server.go b/internal/server/http/http_server.go deleted file mode 100644 index 50be34e..0000000 --- a/internal/server/http/http_server.go +++ /dev/null @@ -1,93 +0,0 @@ -package httpserver - -import ( - "context" - "net/http" - - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/internal/metrics" - "github.com/projecteru2/yavirt/internal/server" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/log" -) - -// HTTPServer . -type HTTPServer struct { - *server.Server - - httpServer *http.Server -} - -// Listen . -func Listen(svc *server.Service) (srv *HTTPServer, err error) { - srv = &HTTPServer{} - if srv.Server, err = server.Listen(configs.Conf.BindHTTPAddr, svc); err != nil { - return - } - - srv.httpServer = srv.newHTTPServer() - - return srv, nil -} - -func (s *HTTPServer) newHTTPServer() *http.Server { - var mux = http.NewServeMux() - mux.Handle("/metrics", metrics.Handler()) - mux.Handle("/", newAPIHandler(s.Service)) - return &http.Server{Handler: mux} //nolint -} - -// Reload . -func (s *HTTPServer) Reload() error { - return nil -} - -// Serve . -func (s *HTTPServer) Serve() (err error) { - defer func() { - log.Warnf("[httpserver] main loop %p exit", s) - s.Close() - }() - - var errCh = make(chan error, 1) - go func() { - defer func() { - log.Warnf("[httpserver] HTTP server %p exit", s.httpServer) - }() - errCh <- s.httpServer.Serve(s.Listener) - }() - - select { - case <-s.Exit.Ch: - return nil - case err = <-errCh: - return errors.Trace(err) - } -} - -// Close . -func (s *HTTPServer) Close() { - s.Exit.Do(func() { - close(s.Exit.Ch) - - var err error - defer func() { - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - }() - - var ctx, cancel = context.WithTimeout(context.Background(), configs.Conf.GracefulTimeout.Duration()) - defer cancel() - - if err = s.httpServer.Shutdown(ctx); err != nil { - return - } - }) -} - -// ExitCh . -func (s *HTTPServer) ExitCh() chan struct{} { - return s.Exit.Ch -} diff --git a/internal/server/server.go b/internal/server/server.go deleted file mode 100644 index 26a9962..0000000 --- a/internal/server/server.go +++ /dev/null @@ -1,50 +0,0 @@ -package server - -import ( - "net" - "sync" - - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/pkg/netx" -) - -// Server . -type Serverable interface { - Reload() error - Serve() error - Close() - ExitCh() chan struct{} -} - -// Server . -type Server struct { - Addr string - Listener net.Listener - Service *Service - Exit struct { - sync.Once - Ch chan struct{} - } -} - -// Listen . -func Listen(addr string, svc *Service) (srv *Server, err error) { - srv = &Server{Service: svc} - srv.Exit.Ch = make(chan struct{}, 1) - srv.Listener, srv.Addr, err = srv.Listen(addr) - return -} - -// Listen . 
-func (s *Server) Listen(addr string) (lis net.Listener, ip string, err error) { - var network = "tcp" - if lis, err = net.Listen(network, addr); err != nil { - return - } - - if ip, err = netx.GetOutboundIP(configs.Conf.Core.Addrs[0]); err != nil { - return - } - - return -} diff --git a/internal/server/service.go b/internal/server/service.go deleted file mode 100644 index 9e133da..0000000 --- a/internal/server/service.go +++ /dev/null @@ -1,517 +0,0 @@ -package server - -import ( - "context" - "fmt" - "io" - - "github.com/projecteru2/libyavirt/types" - - "github.com/robfig/cron/v3" - - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/internal/metrics" - "github.com/projecteru2/yavirt/internal/models" - "github.com/projecteru2/yavirt/internal/ver" - "github.com/projecteru2/yavirt/internal/virt" - "github.com/projecteru2/yavirt/internal/virt/guest/manager" - virtypes "github.com/projecteru2/yavirt/internal/virt/types" - "github.com/projecteru2/yavirt/internal/vnet" - calihandler "github.com/projecteru2/yavirt/internal/vnet/handler/calico" - vlanhandler "github.com/projecteru2/yavirt/internal/vnet/handler/vlan" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/log" - "github.com/projecteru2/yavirt/pkg/utils" -) - -// Service . -type Service struct { - Host *models.Host - BootGuestCh chan<- string - caliHandler *calihandler.Handler - guest manager.Manageable - - pid2ExitCode *utils.ExitCodeMap - RecoverGuestCh chan<- string -} - -// SetupYavirtdService . -func SetupYavirtdService() (*Service, error) { - svc := &Service{ - guest: manager.New(), - pid2ExitCode: utils.NewSyncMap(), - } - - return svc, svc.setup() -} - -func (svc *Service) setup() (err error) { - if svc.Host, err = models.LoadHost(); err != nil { - return errors.Trace(err) - } - - if err := svc.setupCalico(); err != nil { - return errors.Trace(err) - } - - // Start watching all local guests' changes. - svc.guest.StartWatch() - - /* - if err := svc.ScheduleSnapshotCreate(); err != nil { - return errors.Trace(err) - } - */ - - return nil -} - -// TODO: Decide time -func (svc *Service) ScheduleSnapshotCreate() error { - c := cron.New() - - // Everyday 3am - if _, err := c.AddFunc("0 3 * * *", svc.batchCreateSnapshot); err != nil { - return errors.Trace(err) - } - - // Every Sunday 1am - if _, err := c.AddFunc("0 1 * * SUN", svc.batchCommitSnapshot); err != nil { - return errors.Trace(err) - } - - // Start job asynchronously - c.Start() - - return nil -} - -func (svc *Service) batchCreateSnapshot() { - guests, err := models.GetAllGuests() - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return - } - - for _, g := range guests { - for _, volID := range g.VolIDs { - req := types.CreateSnapshotReq{ - ID: g.ID, - VolID: volID, - } - - if err := svc.CreateSnapshot( - virt.NewContext(context.Background(), svc.caliHandler), req, - ); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - } - } -} - -func (svc *Service) batchCommitSnapshot() { - guests, err := models.GetAllGuests() - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return - } - - for _, g := range guests { - for _, volID := range g.VolIDs { - if err := svc.CommitSnapshotByDay( - virt.NewContext(context.Background(), svc.caliHandler), - g.ID, - volID, - configs.Conf.SnapshotRestorableDay, - ); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - } - } -} - -// VirtContext . 
-func (svc *Service) VirtContext(ctx context.Context) virt.Context { - return virt.NewContext(ctx, svc.caliHandler) -} - -// Ping . -func (svc *Service) Ping() map[string]string { - return map[string]string{"version": ver.Version()} -} - -// Info . -func (svc *Service) Info() types.HostInfo { - return types.HostInfo{ - ID: fmt.Sprintf("%d", svc.Host.ID), - CPU: svc.Host.CPU, - Mem: svc.Host.Memory, - Storage: svc.Host.Storage, - Resources: map[string][]byte{}, - } -} - -// GetGuest . -func (svc *Service) GetGuest(ctx virt.Context, id string) (*types.Guest, error) { - vg, err := svc.guest.Load(ctx, id) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return nil, err - } - return convGuestResp(vg.Guest), nil -} - -// GetGuestIDList . -func (svc *Service) GetGuestIDList(ctx virt.Context) ([]string, error) { - ids, err := svc.guest.ListLocalIDs(ctx) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return nil, err - } - return convGuestIDsResp(ids), err -} - -// GetGuestUUID . -func (svc *Service) GetGuestUUID(ctx virt.Context, id string) (string, error) { - uuid, err := svc.guest.LoadUUID(ctx, id) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return "", err - } - return uuid, nil -} - -// CreateGuest . -func (svc *Service) CreateGuest(ctx virt.Context, opts virtypes.GuestCreateOption) (*types.Guest, error) { - vols := []*models.Volume{} - for _, v := range opts.Volumes { - vol, err := models.NewDataVolume(v.Mount, v.Capacity) - if err != nil { - return nil, errors.Trace(err) - } - vols = append(vols, vol) - } - - if opts.CPU == 0 { - opts.CPU = utils.Min(svc.Host.CPU, configs.Conf.MaxCPU) - } - if opts.Mem == 0 { - opts.Mem = utils.Min(svc.Host.Memory, configs.Conf.MaxMemory) - } - - g, err := svc.guest.Create(ctx, opts, svc.Host, vols) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return nil, err - } - - go func() { - svc.BootGuestCh <- g.ID - }() - - return convGuestResp(g.Guest), nil -} - -// CaptureGuest . -func (svc *Service) CaptureGuest(ctx virt.Context, req types.CaptureGuestReq) (uimg *models.UserImage, err error) { - if uimg, err = svc.guest.Capture(ctx, req.VirtID(), req.User, req.Name, req.Overridden); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// ResizeGuest . -func (svc *Service) ResizeGuest(ctx virt.Context, req types.ResizeGuestReq) (err error) { - vols := map[string]int64{} - for _, vol := range req.Volumes { - vols[vol.Mount] = vol.Capacity - } - if err = svc.guest.Resize(ctx, req.VirtID(), req.CPU, req.Mem, vols); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// ControlGuest . -func (svc *Service) ControlGuest(ctx virt.Context, id, operation string, force bool) (err error) { - switch operation { - case types.OpStart: - err = svc.guest.Start(ctx, id) - case types.OpStop: - err = svc.guest.Stop(ctx, id, force) - case types.OpDestroy: - _, err = svc.guest.Destroy(ctx, id, force) - } - - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return errors.Trace(err) - } - - return nil -} - -// ListSnapshot . 
-func (svc *Service) ListSnapshot(ctx virt.Context, req types.ListSnapshotReq) (snaps types.Snapshots, err error) { - volSnap, err := svc.guest.ListSnapshot(ctx, req.ID, req.VolID) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - - for vol, s := range volSnap { - for _, snap := range s { - snaps = append(snaps, &types.Snapshot{ - VolID: vol.ID, - VolMountDir: vol.GetMountDir(), - SnapID: snap.ID, - CreatedTime: snap.CreatedTime, - }) - } - } - - return -} - -// CreateSnapshot . -func (svc *Service) CreateSnapshot(ctx virt.Context, req types.CreateSnapshotReq) (err error) { - if err = svc.guest.CreateSnapshot(ctx, req.ID, req.VolID); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// CommitSnapshot . -func (svc *Service) CommitSnapshot(ctx virt.Context, req types.CommitSnapshotReq) (err error) { - if err = svc.guest.CommitSnapshot(ctx, req.ID, req.VolID, req.SnapID); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// CommitSnapshotByDay . -func (svc *Service) CommitSnapshotByDay(ctx virt.Context, id, volID string, day int) (err error) { - if err = svc.guest.CommitSnapshotByDay(ctx, id, volID, day); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// RestoreSnapshot . -func (svc *Service) RestoreSnapshot(ctx virt.Context, req types.RestoreSnapshotReq) (err error) { - if err = svc.guest.RestoreSnapshot(ctx, req.ID, req.VolID, req.SnapID); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// ConnectNetwork . -func (svc *Service) ConnectNetwork(ctx virt.Context, id, network, ipv4 string) (cidr string, err error) { - if cidr, err = svc.guest.ConnectExtraNetwork(ctx, id, network, ipv4); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// DisconnectNetwork . -func (svc *Service) DisconnectNetwork(ctx virt.Context, id, network string) (err error) { - if err = svc.guest.DisconnectExtraNetwork(ctx, id, network); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// NetworkList . -func (svc *Service) NetworkList(ctx virt.Context, drivers []string) ([]*types.Network, error) { - drv := map[string]struct{}{} - for _, driver := range drivers { - drv[driver] = struct{}{} - } - - networks := []*types.Network{} - switch svc.Host.NetworkMode { - case vnet.NetworkCalico: - if _, ok := drv[vnet.NetworkCalico]; svc.caliHandler == nil || !ok { - break - } - for _, poolName := range svc.caliHandler.PoolNames() { - subnet, err := svc.caliHandler.GetIPPoolCidr(ctx.Context, poolName) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return nil, err - } - - networks = append(networks, &types.Network{ - Name: poolName, - Subnets: []string{subnet}, - }) - } - return networks, nil - case vnet.NetworkVlan: // vlan - if _, ok := drv[vnet.NetworkVlan]; !ok { - break - } - handler := vlanhandler.New("", svc.Host.Subnet) - networks = append(networks, &types.Network{ - Name: "vlan", - Subnets: []string{handler.GetCidr()}, - }) - } - - return networks, nil -} - -// AttachGuest . -func (svc *Service) AttachGuest(ctx virt.Context, id string, stream io.ReadWriteCloser, flags virtypes.OpenConsoleFlags) (err error) { - if err = svc.guest.AttachConsole(ctx, id, stream, flags); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// ResizeConsoleWindow . 
-func (svc *Service) ResizeConsoleWindow(ctx virt.Context, id string, height, width uint) (err error) { - if err = svc.guest.ResizeConsoleWindow(ctx, id, height, width); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// ExecuteGuest . -func (svc *Service) ExecuteGuest(ctx virt.Context, id string, commands []string) (*types.ExecuteGuestMessage, error) { - stdout, exitCode, pid, err := svc.guest.ExecuteCommand(ctx, id, commands) - if err != nil { - log.WarnStack(err) - metrics.IncrError() - } - svc.pid2ExitCode.Put(id, pid, exitCode) - return &types.ExecuteGuestMessage{ - Pid: pid, - Data: stdout, - ExitCode: exitCode, - }, err -} - -// ExecExitCode . -func (svc *Service) ExecExitCode(id string, pid int) (int, error) { - exitCode, err := svc.pid2ExitCode.Get(id, pid) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return 0, err - } - return exitCode, nil -} - -// Cat . -func (svc *Service) Cat(ctx virt.Context, id, path string, dest io.WriteCloser) (err error) { - if err = svc.guest.Cat(ctx, id, path, dest); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// CopyToGuest . -func (svc *Service) CopyToGuest(ctx virt.Context, id, dest string, content chan []byte, override bool) (err error) { - if err = svc.guest.CopyToGuest(ctx, id, dest, content, override); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// Log . -func (svc *Service) Log(ctx virt.Context, id, logPath string, n int, dest io.WriteCloser) (err error) { - if err = svc.guest.Log(ctx, id, logPath, n, dest); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -// Wait . -func (svc *Service) Wait(ctx virt.Context, id string, block bool) (msg string, code int, err error) { - err = svc.guest.Stop(ctx, id, !block) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - return "stop error", -1, err - } - if msg, code, err = svc.guest.Wait(ctx, id, block); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -func (svc *Service) PushImage(_ virt.Context, _, _ string) (err error) { - // todo - return -} - -func (svc *Service) RemoveImage(ctx virt.Context, imageName, user string, force, prune bool) (removed []string, err error) { - if removed, err = svc.guest.RemoveImage(ctx, imageName, user, force, prune); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -func (svc *Service) ListImage(ctx virt.Context, filter string) ([]types.SysImage, error) { - imgs, err := svc.guest.ListImage(ctx, filter) - if err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - - images := []types.SysImage{} - for _, img := range imgs { - images = append(images, types.SysImage{ - Name: img.GetName(), - User: img.GetUser(), - Distro: img.GetDistro(), - ID: img.GetID(), - Type: img.GetType(), - }) - } - - return images, err -} - -func (svc *Service) PullImage(virt.Context, string, bool) (msg string, err error) { - // todo - return -} - -func (svc *Service) DigestImage(ctx virt.Context, imageName string, local bool) (digest []string, err error) { - if digest, err = svc.guest.DigestImage(ctx, imageName, local); err != nil { - log.ErrorStack(err) - metrics.IncrError() - } - return -} - -func (svc *Service) WatchGuestEvents(virt.Context) (*manager.Watcher, error) { - return svc.guest.NewWatcher() -} diff --git a/internal/server/service_test.go b/internal/server/service_test.go deleted file mode 100644 index fa940cb..0000000 --- a/internal/server/service_test.go +++ /dev/null @@ -1,104 +0,0 
@@ -package server - -import ( - "context" - "testing" - - "github.com/projecteru2/libyavirt/types" - "github.com/projecteru2/yavirt/internal/models" - "github.com/projecteru2/yavirt/internal/virt" - vg "github.com/projecteru2/yavirt/internal/virt/guest" - managerocks "github.com/projecteru2/yavirt/internal/virt/guest/manager/mocks" - virtypes "github.com/projecteru2/yavirt/internal/virt/types" - "github.com/projecteru2/yavirt/pkg/test/assert" - "github.com/projecteru2/yavirt/pkg/test/mock" - "github.com/projecteru2/yavirt/pkg/utils" -) - -func init() { - models.Setup() -} - -func TestCreateGuest(t *testing.T) { - svc := testService(t) - - svc.guest.(*managerocks.Manageable).On("Create", - mock.Anything, // ctx - mock.Anything, // cpu - mock.Anything, // memory - mock.Anything, // vols - mock.Anything, // imgName - mock.Anything, // imgUser - mock.Anything, // host - mock.Anything, // dmiUUID - mock.Anything, // labels - ).Return(testVirtGuest(t), nil) - _, err := svc.CreateGuest(testVirtContext(t), virtypes.GuestCreateOption{ - CPU: 1, - Mem: utils.GB, - ImageName: "ubuntu", - ImageUser: "anrs", - Volumes: nil, - DmiUUID: "uuid", - Labels: nil, - }) - assert.NilErr(t, err) -} - -func TestGetGuest(t *testing.T) { - svc := testService(t) - svc.guest.(*managerocks.Manageable).On("Load", mock.Anything, mock.Anything).Return(testVirtGuest(t), nil) - _, err := svc.GetGuest(testVirtContext(t), "id") - assert.NilErr(t, err) -} - -func TestGetGuestIDList(t *testing.T) { - localIDs := []string{"ya0", "ya1", "ya2"} - svc := testService(t) - svc.guest.(*managerocks.Manageable).On("ListLocalIDs", mock.Anything, mock.Anything).Return(localIDs, nil).Once() - - ids, err := svc.GetGuestIDList(testVirtContext(t)) - assert.NilErr(t, err) - - eruIDs := []string{types.EruID("ya0"), types.EruID("ya1"), types.EruID("ya2")} - assert.Equal(t, eruIDs, ids) -} - -func TestGetGuestUUID(t *testing.T) { - svc := testService(t) - svc.guest.(*managerocks.Manageable).On("LoadUUID", mock.Anything, mock.Anything).Return("uuid", nil) - _, err := svc.GetGuestUUID(testVirtContext(t), "id") - assert.NilErr(t, err) -} - -func TestCopyToGuest(t *testing.T) { - svc := testService(t) - svc.guest.(*managerocks.Manageable).On("CopyToGuest", - mock.Anything, // ctx - mock.Anything, // id - mock.Anything, // dest - mock.Anything, // content - mock.Anything, // override - ).Return(nil) - err := svc.CopyToGuest(testVirtContext(t), "id", "dest", nil, true) - assert.NilErr(t, err) -} - -func testVirtGuest(t *testing.T) *vg.Guest { - mg, err := models.NewGuest(nil, nil) - assert.NilErr(t, err) - assert.NotNil(t, mg) - return vg.New(testVirtContext(t), mg) -} - -func testVirtContext(t *testing.T) virt.Context { - return virt.NewContext(context.Background(), nil) -} - -func testService(t *testing.T) *Service { - return &Service{ - Host: &models.Host{}, - guest: &managerocks.Manageable{}, - BootGuestCh: make(chan string, 1), - } -} diff --git a/internal/service/boar/boar.go b/internal/service/boar/boar.go new file mode 100644 index 0000000..fda3212 --- /dev/null +++ b/internal/service/boar/boar.go @@ -0,0 +1,415 @@ +package boar + +import ( + "context" + "fmt" + "net" + "strings" + "sync" + "testing" + "time" + + "github.com/projecteru2/libyavirt/types" + "github.com/prometheus/client_golang/prometheus" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/eru/agent" + "github.com/projecteru2/yavirt/internal/eru/recycle" + 
"github.com/projecteru2/yavirt/internal/eru/resources" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/metrics" + "github.com/projecteru2/yavirt/internal/models" + networkFactory "github.com/projecteru2/yavirt/internal/network/factory" + intertypes "github.com/projecteru2/yavirt/internal/types" + interutils "github.com/projecteru2/yavirt/internal/utils" + "github.com/projecteru2/yavirt/internal/ver" + "github.com/projecteru2/yavirt/internal/virt/guest" + "github.com/projecteru2/yavirt/internal/vmcache" + "github.com/projecteru2/yavirt/pkg/idgen" + "github.com/projecteru2/yavirt/pkg/notify/bison" + "github.com/projecteru2/yavirt/pkg/store" + "github.com/projecteru2/yavirt/pkg/utils" + vmiFact "github.com/yuyang0/vmimage/factory" + vmitypes "github.com/yuyang0/vmimage/types" +) + +// Boar . +type Boar struct { + Host *models.Host + cfg *configs.Config + pool *taskPool + BootGuestCh chan<- string + + pid2ExitCode *utils.ExitCodeMap + RecoverGuestCh chan<- string + + watchers *interutils.Watchers + + imageMutex sync.Mutex + agt *agent.Manager + mCol *MetricsCollector +} + +func New(ctx context.Context, cfg *configs.Config, t *testing.T) (br *Boar, err error) { + var cols []prometheus.Collector + + br = &Boar{ + cfg: cfg, + mCol: &MetricsCollector{}, + pid2ExitCode: utils.NewSyncMap(), + watchers: interutils.NewWatchers(), + } + // setup notify + if err := bison.Setup(&cfg.Notify, t); err != nil { + return br, errors.Wrap(err, "failed to setup notify") + } + + resMgr, err := resources.Setup(ctx, cfg, t) + if err != nil { + return nil, err + } + cols = append(cols, resMgr.GetMetricsCollector()) + + br.Host, err = models.LoadHost() + if err != nil { + return nil, err + } + br.pool, err = newTaskPool(cfg.MaxConcurrency) + if err != nil { + return nil, err + } + if err := idgen.Setup(br.Host.ID); err != nil { + return nil, err + } + + if err = store.Setup(configs.Conf, t); err != nil { + return nil, err + } + go br.watchers.Run(ctx) + if err := vmcache.Setup(ctx, cfg, br.watchers); err != nil { + return br, errors.Wrap(err, "failed to setup vmcache") + } + if err := vmiFact.Setup(&cfg.ImageHub); err != nil { + return br, errors.Wrap(err, "failed to setup vmimage") + } + if err := networkFactory.Setup(&cfg.Network); err != nil { + return br, errors.Wrap(err, "failed to setup calico") + } + cols = append(cols, networkFactory.GetMetricsCollectors()...) + + if cfg.Eru.Enable { + if err = recycle.Setup(ctx, &configs.Conf, t); err != nil { + return br, errors.Wrap(err, "failed to setup recycle") + } + recycle.Run(ctx, br) + + parts := strings.Split(cfg.BindGRPCAddr, ":") + if len(parts) != 2 { + return br, errors.Newf("invalid bind addr %s", cfg.BindGRPCAddr) + } + grpcPort := parts[1] + endpoint := fmt.Sprintf( //nolint + "virt-grpc://%s:%s@%s:%s", + cfg.Auth.Username, cfg.Auth.Password, cfg.Host.Addr, grpcPort, + ) + br.agt, err = agent.NewManager(ctx, br, &cfg.Eru, endpoint, t) + if err != nil { + return br, errors.Wrap(err, "failed to setup agent") + } + go br.agt.Run(ctx) //nolint + cols = append(cols, br.agt.GetMetricsCollector()) + } + cols = append(cols, br.GetMetricsCollector()) + metrics.Setup(cfg.Host.Name, cols...) + /* + if err := svc.ScheduleSnapshotCreate(); err != nil { + return errors.Wrap(err, "") + } + */ + + return br, nil +} + +func (svc *Boar) Close() { + _ = svc.agt.Exit() + svc.pool.Release() + store.Close() +} + +// Ping . +func (svc *Boar) Ping() map[string]string { + return map[string]string{"version": ver.Version()} +} + +// Info . 
+func (svc *Boar) Info() (*types.HostInfo, error) { + res, err := resources.GetManager().FetchResources() + if err != nil { + return nil, err + } + return &types.HostInfo{ + ID: fmt.Sprintf("%d", svc.Host.ID), + CPU: svc.Host.CPU, + Mem: svc.Host.Memory, + Storage: svc.Host.Storage, + Resources: res, + }, nil +} + +func (svc *Boar) IsHealthy(ctx context.Context) (ans bool) { + logger := log.WithFunc("boar.Healthy") + var err error + // check image service + if err1 := vmiFact.CheckHealth(ctx); err1 != nil { + svc.mCol.imageHealthy.Store(false) + err = errors.CombineErrors(err, errors.WithMessagef(err1, "failed to check image hub")) + } else { + svc.mCol.imageHealthy.Store(true) + } + // check libvirt health + if err1 := checkLibvirtSocket(); err1 != nil { + svc.mCol.libvirtHealthy.Store(false) + err = errors.CombineErrors(err, errors.WithMessagef(err1, "failed to check libvirt socket")) + } else { + svc.mCol.libvirtHealthy.Store(true) + } + // check network drivers, including clico, ovn etc + if err1 := networkFactory.CheckHealth(ctx); err1 != nil { + err = errors.CombineErrors(err, errors.WithMessagef(err1, "failed to check network drivers")) + } + //TODO:Check more things + + if err != nil { + logger.Errorf(ctx, err, "failed to check health") + return false + } + return true +} + +// GetGuest . +func (svc *Boar) GetGuest(ctx context.Context, id string) (*types.Guest, error) { + vg, err := svc.loadGuest(ctx, id) + if err != nil { + log.WithFunc("boar.GetGuest").Error(ctx, err) + metrics.IncrError() + return nil, err + } + resp := convGuestResp(vg.Guest) + return resp, nil +} + +// GetGuestIDList . +func (svc *Boar) GetGuestIDList(ctx context.Context) ([]string, error) { + ids, err := svc.ListLocalIDs(ctx, true) + if err != nil { + log.WithFunc("boar.GetGuestIDList").Error(ctx, err) + metrics.IncrError() + return nil, err + } + return ids, nil +} + +// GetGuestUUID . +func (svc *Boar) GetGuestUUID(ctx context.Context, id string) (string, error) { + uuid, err := svc.LoadUUID(ctx, id) + if err != nil { + log.WithFunc("boar.GetGuestUUID").Error(ctx, err) + metrics.IncrError() + return "", err + } + return uuid, nil +} + +// CaptureGuest . +func (svc *Boar) CaptureGuest(ctx context.Context, id string, imgName string, overridden bool) (uimg *vmitypes.Image, err error) { + defer logErr(err) + + g, err := svc.loadGuest(ctx, id) + if err != nil { + return nil, err + } + uImg, err := g.Capture(imgName, overridden) + if err != nil { + return nil, err + } + return uImg, nil +} + +// ResizeGuest re-allocates spec or volumes. +func (svc *Boar) ResizeGuest(ctx context.Context, id string, opts *intertypes.GuestResizeOption) (err error) { + defer logErr(err) + + vols, err := extractVols(opts.Resources) + if err != nil { + return err + } + cpumem, err := extractCPUMem(opts.Resources) + if err != nil { + return err + } + gpu, err := extractGPU(opts.Resources) + if err != nil { + return err + } + g, err := svc.loadGuest(ctx, id) + if err != nil { + return err + } + do := func(_ context.Context) (any, error) { + return nil, g.Resize(cpumem, gpu, vols) + } + _, err = svc.do(ctx, id, intertypes.ResizeOp, do, nil) + return +} + +// Wait . 
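+// Wait stops the guest (forcefully unless block is set), waits until it
+// reaches StatusStopped, and for lambda guests returns the recorded command
+// output and exit code.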
+func (svc *Boar) Wait(ctx context.Context, id string, block bool) (msg string, code int, err error) { + defer logErr(err) + + err = svc.stopGuest(ctx, id, !block) + if err != nil { + return "stop error", -1, err + } + + err = svc.ctrl(ctx, id, intertypes.MiscOp, func(g *guest.Guest) error { + if err = g.Wait(meta.StatusStopped, block); err != nil { + return err + } + + if g.LambdaOption != nil { + msg = string(g.LambdaOption.CmdOutput) + code = g.LambdaOption.ExitCode + } + + return nil + }, nil) + return msg, code, err +} + +// ListLocals lists all local guests. +func (svc *Boar) ListLocalIDs(ctx context.Context, onlyERU bool) ([]string, error) { + ids, err := guest.ListLocalIDs(ctx) + if err != nil { + return nil, err + } + if !onlyERU { + return ids, nil + } + var ans []string + for _, id := range ids { + if idgen.CheckID(id) { + ans = append(ans, id) + } + } + return ans, nil +} + +// LoadUUID read a guest's UUID. +func (svc *Boar) LoadUUID(ctx context.Context, id string) (string, error) { + g, err := svc.loadGuest(ctx, id) + if err != nil { + return "", err + } + return g.GetUUID() +} + +// loadGuest read a guest from metadata. +func (svc *Boar) loadGuest(ctx context.Context, id string, opts ...models.Option) (*guest.Guest, error) { + g, err := models.LoadGuest(id) + if err != nil { + return nil, err + } + + var vg = guest.New(ctx, g) + if err := vg.Load(opts...); err != nil { + return nil, err + } + if err = vg.UpdateStateIfNecessary(); err != nil { + log.WithFunc("boar.loadGuest").Warnf(ctx, "update state error: %s", err) + } + return vg, nil +} + +func (svc *Boar) WatchGuestEvents(context.Context) (*interutils.Watcher, error) { + return svc.watchers.Get() +} + +func logErr(err error) { + if err != nil { + log.Error(context.TODO(), err) + metrics.IncrError() + } +} + +type ctrlFunc func(*guest.Guest) error +type rollbackFunc func() + +func (svc *Boar) ctrl(ctx context.Context, id string, op intertypes.Operator, fn ctrlFunc, rollback rollbackFunc) error { //nolint + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id) + if err != nil { + return nil, err + } + return nil, fn(g) + } + _, err := svc.do(ctx, id, op, do, rollback) + return err +} + +type doFunc func(context.Context) (any, error) + +func (svc *Boar) do(ctx context.Context, id string, op intertypes.Operator, fn doFunc, rollback rollbackFunc) (result any, err error) { + defer func() { + if err != nil && rollback != nil { + rollback() + } + }() + + // add a max timeout + ctx1, cancel := context.WithTimeout(ctx, configs.Conf.VirtTimeout) + defer cancel() + + t := newTask(ctx1, id, op, fn) + + if err := svc.pool.SubmitTask(t); err != nil { + return nil, err + } + + metrics.Incr(metrics.MetricSvcTasks, nil) //nolint:errcheck + defer metrics.Decr(metrics.MetricSvcTasks, nil) //nolint:errcheck + + select { + case <-t.Done(): + result, err = t.result() + case <-ctx1.Done(): + err = ctx1.Err() + } + if err != nil { + metrics.IncrError() + return + } + + svc.watchers.Watched(intertypes.Event{ + ID: id, + Type: guestEventType, + Op: op, + Time: time.Now().UTC(), + }) + + return +} + +const guestEventType = "guest" + +func checkLibvirtSocket() error { + socketPath := "/var/run/libvirt/libvirt-sock" + // Dial the Unix domain socket + conn, err := net.DialTimeout("unix", socketPath, 3*time.Second) + if err != nil { + return err + } + defer conn.Close() + return nil +} diff --git a/internal/service/boar/boar_test.go b/internal/service/boar/boar_test.go new file mode 100644 index 0000000..1c6071a --- /dev/null 
+++ b/internal/service/boar/boar_test.go @@ -0,0 +1,85 @@ +package boar + +// func TestCreateGuest(t *testing.T) { +// svc := testService(t) + +// svc.guest.(*managerocks.Manageable).On("Create", +// mock.Anything, // ctx +// mock.Anything, // cpu +// mock.Anything, // memory +// mock.Anything, // vols +// mock.Anything, // imgName +// mock.Anything, // imgUser +// mock.Anything, // host +// mock.Anything, // dmiUUID +// mock.Anything, // labels +// ).Return(testVirtGuest(t), nil) +// _, err := svc.CreateGuest(testVirtContext(t), virtypes.GuestCreateOption{ +// CPU: 1, +// Mem: utils.GB, +// ImageName: "ubuntu", +// ImageUser: "anrs", +// Volumes: nil, +// DmiUUID: "uuid", +// Labels: nil, +// }) +// assert.NilErr(t, err) +// } + +// func TestGetGuest(t *testing.T) { +// svc := testService(t) +// svc.guest.(*managerocks.Manageable).On("Load", mock.Anything, mock.Anything).Return(testVirtGuest(t), nil) +// _, err := svc.GetGuest(testVirtContext(t), "id") +// assert.NilErr(t, err) +// } + +// func TestGetGuestIDList(t *testing.T) { +// localIDs := []string{"ya0", "ya1", "ya2"} +// svc := testService(t) +// svc.guest.(*managerocks.Manageable).On("ListLocalIDs", mock.Anything, mock.Anything).Return(localIDs, nil).Once() + +// ids, err := svc.GetGuestIDList(testVirtContext(t)) +// assert.NilErr(t, err) + +// eruIDs := []string{types.EruID("ya0"), types.EruID("ya1"), types.EruID("ya2")} +// assert.Equal(t, eruIDs, ids) +// } + +// func TestGetGuestUUID(t *testing.T) { +// svc := testService(t) +// svc.guest.(*managerocks.Manageable).On("LoadUUID", mock.Anything, mock.Anything).Return("uuid", nil) +// _, err := svc.GetGuestUUID(testVirtContext(t), "id") +// assert.NilErr(t, err) +// } + +// func TestCopyToGuest(t *testing.T) { +// svc := testService(t) +// svc.guest.(*managerocks.Manageable).On("CopyToGuest", +// mock.Anything, // ctx +// mock.Anything, // id +// mock.Anything, // dest +// mock.Anything, // content +// mock.Anything, // override +// ).Return(nil) +// err := svc.CopyToGuest(testVirtContext(t), "id", "dest", nil, true) +// assert.NilErr(t, err) +// } + +// func testVirtGuest(t *testing.T) *vg.Guest { +// mg, err := models.NewGuest(nil, nil) +// assert.NilErr(t, err) +// assert.NotNil(t, mg) +// return vg.New(testVirtContext(t), mg) +// } + +// func testVirtContext(t *testing.T) context.Context { +// return util.SetCalicoHandler(context.Background(), nil) +// } + +// func testService(t *testing.T) *Boar { +// return &Boar{ +// Host: &models.Host{}, +// guest: &managerocks.Manageable{}, +// BootGuestCh: make(chan string, 1), +// } +// } diff --git a/internal/service/boar/console.go b/internal/service/boar/console.go new file mode 100644 index 0000000..ab2d1e3 --- /dev/null +++ b/internal/service/boar/console.go @@ -0,0 +1,29 @@ +package boar + +import ( + "context" + "io" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/internal/meta" + intertypes "github.com/projecteru2/yavirt/internal/types" +) + +// AttachGuest . 
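+// AttachGuest attaches the caller's stream to the guest console; for lambda
+// guests it first waits until the guest is running and injects the lambda
+// command into the console flags.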
+func (svc *Boar) AttachGuest(ctx context.Context, id string, stream io.ReadWriteCloser, flags intertypes.OpenConsoleFlags) (err error) { + defer logErr(err) + + g, err := svc.loadGuest(ctx, id) + if err != nil { + return errors.Wrap(err, "") + } + + if g.LambdaOption != nil { + if err = g.Wait(meta.StatusRunning, false); err != nil { + return errors.Wrap(err, "") + } + flags.Commands = g.LambdaOption.Cmd + } + + return g.AttachConsole(ctx, stream, flags) +} diff --git a/internal/service/boar/control.go b/internal/service/boar/control.go new file mode 100644 index 0000000..d1d55d5 --- /dev/null +++ b/internal/service/boar/control.go @@ -0,0 +1,165 @@ +package boar + +import ( + "context" + "time" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/internal/metrics" + "github.com/projecteru2/yavirt/internal/models" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/utils" +) + +// ControlGuest . +func (svc *Boar) ControlGuest(ctx context.Context, id, operation string, force bool) (err error) { + var errCh <-chan error + switch operation { + case types.OpStart: + err = svc.startGuest(ctx, id, force) + case types.OpStop: + err = svc.stopGuest(ctx, id, force) + case types.OpDestroy: + errCh, err = svc.destroyGuest(ctx, id, force) + if err != nil { + break + } + select { + case <-ctx.Done(): + err = ctx.Err() + case err = <-errCh: + } + case types.OpSuspend: + err = svc.suspendGuest(ctx, id) + case types.OpResume: + err = svc.resumeGuest(ctx, id) + } + + if err != nil { + log.WithFunc("boar.ControlGuest").Error(ctx, err) + metrics.IncrError() + return errors.Wrap(err, "") + } + + return nil +} + +// destroyGuest destroys a guest. +func (svc *Boar) destroyGuest(ctx context.Context, id string, force bool) (<-chan error, error) { + var done <-chan error + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id, models.IgnoreLoadImageErrOption()) + if err != nil { + return nil, errors.Wrap(err, "") + } + if done, err = g.Destroy(ctx, force); err != nil { + return nil, errors.Wrap(err, "") + } + + return nil, nil //nolint + } + _, err := svc.do(ctx, id, intertypes.DestroyOp, do, nil) + return done, err +} + +// stopGuest stops a guest. +func (svc *Boar) stopGuest(ctx context.Context, id string, force bool) error { + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id, models.IgnoreLoadImageErrOption()) + if err != nil { + return nil, errors.Wrap(err, "") + } + if err := g.Stop(ctx, force); err != nil { + return nil, errors.Wrap(err, "") + } + + return nil, nil //nolint + } + _, err := svc.do(ctx, id, intertypes.StopOp, do, nil) + + // eru agent only track start and die event, + // so also send a die event here + if err == nil { + svc.watchers.Watched(intertypes.Event{ + ID: id, + Type: guestEventType, + Op: intertypes.DieOp, + Time: time.Now().UTC(), + }) + } + return err +} + +// startGuest boots a guest. 
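+// startGuest releases the creation-session file lock taken during create,
+// boots the domain, and for non-stdin lambda guests runs the lambda command
+// and persists its output, exit code and pid.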
+func (svc *Boar) startGuest(ctx context.Context, id string, force bool) error { + logger := log.WithFunc("boar.startGuest") + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id, models.IgnoreLoadImageErrOption()) + if err != nil { + return nil, errors.Wrap(err, "") + } + // we need to release the creation session locker here + lck := utils.NewCreateSessionFlock(g.ID) + defer func() { + logger.Debugf(ctx, "[session unlocker] %s", g.ID) + if err := lck.RemoveFile(); err != nil { + logger.Warnf(ctx, "failed to remove session locker file %s", err) + } + }() + + if err := g.Start(ctx, force); err != nil { + return nil, errors.Wrap(err, "") + } + if g.LambdaOption != nil && !g.LambdaStdin { + output, exitCode, pid, err := g.ExecuteCommand(ctx, g.LambdaOption.Cmd) + if err != nil { + return nil, errors.Wrap(err, "") + } + g.LambdaOption.CmdOutput = output + g.LambdaOption.ExitCode = exitCode + g.LambdaOption.Pid = pid + + if err = g.Save(); err != nil { + return nil, errors.Wrap(err, "") + } + } + return nil, nil //nolint + } + defer logger.Debugf(ctx, "exit startGuest") + _, err := svc.do(ctx, id, intertypes.StartOp, do, nil) + return err +} + +// suspendGuest suspends a guest. +func (svc *Boar) suspendGuest(ctx context.Context, id string) error { + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id, models.IgnoreLoadImageErrOption()) + if err != nil { + return nil, errors.Wrap(err, "") + } + if err := g.Suspend(); err != nil { + return nil, errors.Wrap(err, "") + } + return nil, nil //nolint + } + _, err := svc.do(ctx, id, intertypes.SuspendOp, do, nil) + return err +} + +// resumeGuest resumes a suspended guest. +func (svc *Boar) resumeGuest(ctx context.Context, id string) error { + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id, models.IgnoreLoadImageErrOption()) + if err != nil { + return nil, errors.Wrap(err, "") + } + if err := g.Resume(); err != nil { + return nil, errors.Wrap(err, "") + } + return nil, nil //nolint + } + _, err := svc.do(ctx, id, intertypes.ResumeOp, do, nil) + return err +} diff --git a/internal/service/boar/create.go b/internal/service/boar/create.go new file mode 100644 index 0000000..e7430d7 --- /dev/null +++ b/internal/service/boar/create.go @@ -0,0 +1,114 @@ +package boar + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/metrics" + "github.com/projecteru2/yavirt/internal/models" + interutils "github.com/projecteru2/yavirt/internal/utils" + "github.com/projecteru2/yavirt/pkg/utils" + + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/virt/guest" +) + +// CreateGuest . 
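+// CreateGuest fills missing CPU/memory defaults from the host and configured
+// caps, runs creation with a rollback list so partial failures are undone,
+// and on success hands the new guest ID to BootGuestCh for booting.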
+func (svc *Boar) CreateGuest(ctx context.Context, opts intertypes.GuestCreateOption) (*types.Guest, error) { + logger := log.WithFunc("boar.CreateGuest") + if opts.CPU == 0 { + opts.CPU = utils.Min(svc.Host.CPU, configs.Conf.Resource.MaxCPU) + } + if opts.Mem == 0 { + opts.Mem = utils.Min(svc.Host.Memory, configs.Conf.Resource.MaxMemory) + } + ctx = interutils.NewRollbackListContext(ctx) + g, err := svc.Create(ctx, opts, svc.Host) + if err != nil { + logger.Error(ctx, err) + metrics.IncrError() + rl := interutils.GetRollbackListFromContext(ctx) + for { + fn, msg := rl.Pop() + if fn == nil { + break + } + logger.Infof(ctx, "start to rollback<%s>", msg) + if err := fn(); err != nil { + log.Errorf(ctx, err, "failed to rollback<%s>", msg) + } + } + return nil, err + } + + go func() { + svc.BootGuestCh <- g.ID + }() + + return convGuestResp(g.Guest), nil +} + +// Create creates a new guest. +func (svc *Boar) Create(ctx context.Context, opts intertypes.GuestCreateOption, host *models.Host) (*guest.Guest, error) { + logger := log.WithFunc("boar.Create") + vols, err := extractVols(opts.Resources) + if err != nil { + return nil, err + } + + // Creates metadata. + g, err := models.CreateGuest(opts, host, vols) + if err != nil { + return nil, errors.Wrap(err, "") + } + rl := interutils.GetRollbackListFromContext(ctx) + + // session locker is locked here and is released in start + lck := interutils.NewCreateSessionFlock(g.ID) + if err := lck.Trylock(); err != nil { + logger.Warnf(ctx, "failed to lock create seesion id<%s> %s", g.ID, err) + } else { + rl.Append(func() error { return lck.RemoveFile() }, "release creation session locker") + } + + rl.Append(func() error { return g.Delete(true) }, "Delete guest model") + + logger.Debugf(ctx, "Guest Created: %+v", g) + // Destroys resource and delete metadata while rolling back. + vg := guest.New(ctx, g) + + // Creates the resource. 
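+	// The closure below wraps svc.create (image cache, network, volumes,
+	// domain definition) and is executed through svc.do as a CreateOp task,
+	// bounded by the configured VirtTimeout.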
+ create := func(ctx context.Context) (any, error) { + err := svc.create(ctx, vg) + return nil, err + } + + _, err = svc.do(ctx, g.ID, intertypes.CreateOp, create, nil) + return vg, err +} + +func (svc *Boar) create(ctx context.Context, vg *guest.Guest) (err error) { + logger := log.WithFunc("Boar.create").WithField("guest", vg.ID) + logger.Debugf(ctx, "starting to cache image") + if err := vg.CacheImage(&svc.imageMutex); err != nil { + return errors.Wrap(err, "") + } + + logger.Debug(ctx, "creating network") + if err = vg.CreateNetwork(ctx); err != nil { + return err + } + logger.Debug(ctx, "preparing volumes") + if err = vg.PrepareVolumesForCreate(ctx); err != nil { + return err + } + logger.Debug(ctx, "defining guest") + if err = vg.DefineGuestForCreate(ctx); err != nil { + return errors.Wrap(err, "") + } + + return nil +} diff --git a/internal/service/boar/image.go b/internal/service/boar/image.go new file mode 100644 index 0000000..1e9f2f9 --- /dev/null +++ b/internal/service/boar/image.go @@ -0,0 +1,109 @@ +package boar + +import ( + "context" + "fmt" + "io" + "regexp" + "strings" + + "github.com/cockroachdb/errors" + vmiFact "github.com/yuyang0/vmimage/factory" + vmitypes "github.com/yuyang0/vmimage/types" +) + +func (svc *Boar) PushImage(ctx context.Context, imgName string, force bool) (rc io.ReadCloser, err error) { + svc.imageMutex.Lock() + defer svc.imageMutex.Unlock() + + img, err := vmiFact.NewImage(imgName) + if err != nil { + return nil, err + } + if rc, err = vmiFact.Push(ctx, img, force); err != nil { + err = errors.Wrap(err, "") + return + } + return +} + +func (svc *Boar) RemoveImage(ctx context.Context, imageName string, force, prune bool) (removed []string, err error) { //nolint + defer logErr(err) + + img, err := vmiFact.LoadImage(ctx, imageName) + if err != nil { + return nil, errors.Wrap(err, "") + } + + svc.imageMutex.Lock() + defer svc.imageMutex.Unlock() + + if err = vmiFact.RemoveLocal(ctx, img); err != nil { + return nil, errors.Wrap(err, "") + } + + return []string{img.Fullname()}, nil +} + +func (svc *Boar) ListImage(ctx context.Context, filter string) (ans []*vmitypes.Image, err error) { + defer logErr(err) + + imgs, err := vmiFact.ListLocalImages(ctx, "") + if err != nil { + return nil, err + } + + images := []*vmitypes.Image{} + if len(filter) < 1 { + images = imgs + } else { + var regExp *regexp.Regexp + filter = strings.ReplaceAll(filter, "*", ".*") + if regExp, err = regexp.Compile(fmt.Sprintf("%s%s%s", "^", filter, "$")); err != nil { + return nil, err + } + + for _, img := range imgs { + if regExp.MatchString(img.Fullname()) { + images = append(images, img) + } + } + } + + return images, err +} + +func (svc *Boar) PullImage(ctx context.Context, imgName string) (img *vmitypes.Image, rc io.ReadCloser, err error) { + svc.imageMutex.Lock() + defer svc.imageMutex.Unlock() + + img, err = vmiFact.NewImage(imgName) + if err != nil { + err = errors.Wrap(err, "") + return + } + if rc, err = vmiFact.Pull(ctx, img, vmitypes.PullPolicyAlways); err != nil { + err = errors.Wrap(err, "") + return + } + return +} + +func (svc *Boar) DigestImage(ctx context.Context, imageName string, local bool) (digest []string, err error) { + defer logErr(err) + + if !local { + // TODO: wait for image-hub implementation and calico update + return []string{""}, nil + } + + // If not exists return error + // If exists return digests + + img, err := vmiFact.LoadImage(ctx, imageName) + if err != nil { + return nil, err + } + + return []string{img.GetDigest()}, nil +} diff --git 
a/internal/service/boar/metrics.go b/internal/service/boar/metrics.go new file mode 100644 index 0000000..e020fee --- /dev/null +++ b/internal/service/boar/metrics.go @@ -0,0 +1,51 @@ +package boar + +import ( + "sync/atomic" + + "github.com/projecteru2/core/utils" + "github.com/projecteru2/yavirt/configs" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + imageHubHealthyDesc = prometheus.NewDesc( + prometheus.BuildFQName("node", "image_hub", "healthy"), + "image hub healthy status.", + []string{"node"}, + nil) + libvirtHealthyDesc = prometheus.NewDesc( + prometheus.BuildFQName("node", "libvirt", "healthy"), + "libvirt healthy status.", + []string{"node"}, + nil) +) + +type MetricsCollector struct { + imageHealthy atomic.Bool + libvirtHealthy atomic.Bool +} + +func (d *Boar) GetMetricsCollector() prometheus.Collector { + return d.mCol +} + +func (e *MetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- imageHubHealthyDesc + ch <- libvirtHealthyDesc +} + +func (e *MetricsCollector) Collect(ch chan<- prometheus.Metric) { + ch <- prometheus.MustNewConstMetric( + imageHubHealthyDesc, + prometheus.GaugeValue, + float64(utils.Bool2Int(e.imageHealthy.Load())), + configs.Hostname(), + ) + ch <- prometheus.MustNewConstMetric( + libvirtHealthyDesc, + prometheus.GaugeValue, + float64(utils.Bool2Int(e.libvirtHealthy.Load())), + configs.Hostname(), + ) +} diff --git a/internal/service/boar/network.go b/internal/service/boar/network.go new file mode 100644 index 0000000..f821f04 --- /dev/null +++ b/internal/service/boar/network.go @@ -0,0 +1,92 @@ +package boar + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/metrics" + "github.com/projecteru2/yavirt/internal/network" + networkFactory "github.com/projecteru2/yavirt/internal/network/factory" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/virt/guest" + + calihandler "github.com/projecteru2/yavirt/internal/network/drivers/calico" + vlanhandler "github.com/projecteru2/yavirt/internal/network/drivers/vlan" +) + +// ConnectNetwork . +func (svc *Boar) ConnectNetwork(ctx context.Context, id, network, ipv4 string) (cidr string, err error) { + var ip meta.IP + + if err := svc.ctrl(ctx, id, intertypes.MiscOp, func(g *guest.Guest) (ce error) { + ip, ce = g.ConnectExtraNetwork(network, ipv4) + return ce + }, nil); err != nil { + log.WithFunc("boar.ConnectNetwork").Error(ctx, err) + metrics.IncrError() + return "", errors.Wrap(err, "") + } + + return ip.CIDR(), nil +} + +// DisconnectNetwork . +func (svc *Boar) DisconnectNetwork(ctx context.Context, id, network string) (err error) { + err = svc.ctrl(ctx, id, intertypes.MiscOp, func(g *guest.Guest) error { + return g.DisconnectExtraNetwork(network) + }, nil) + if err != nil { + log.WithFunc("DisconnectNetwork").Error(ctx, err) + metrics.IncrError() + } + return +} + +// NetworkList . 
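+// NetworkList walks the registered network drivers and, for the requested
+// driver names, returns the Calico IP-pool subnets or the host's VLAN subnet.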
+func (svc *Boar) NetworkList(ctx context.Context, drivers []string) ([]*types.Network, error) { + drv := map[string]struct{}{} + for _, driver := range drivers { + drv[driver] = struct{}{} + } + + networks := []*types.Network{} + for mode, hand := range networkFactory.ListDrivers() { + switch mode { + case network.CalicoMode: + if _, ok := drv[network.CalicoMode]; !ok { + break + } + caliHandler, ok := hand.(*calihandler.Driver) + if !ok { + break + } + for _, poolName := range caliHandler.PoolNames() { + subnet, err := caliHandler.GetIPPoolCidr(ctx, poolName) + if err != nil { + log.WithFunc("NetworkList").Error(ctx, err) + metrics.IncrError() + return nil, err + } + + networks = append(networks, &types.Network{ + Name: poolName, + Subnets: []string{subnet}, + }) + } + return networks, nil + case network.VlanMode: // vlan + if _, ok := drv[network.VlanMode]; !ok { + break + } + handler := vlanhandler.New(svc.Host.Subnet) + networks = append(networks, &types.Network{ + Name: "vlan", + Subnets: []string{handler.GetCidr()}, + }) + } + } + return networks, nil +} diff --git a/internal/service/boar/operation.go b/internal/service/boar/operation.go new file mode 100644 index 0000000..f0409a2 --- /dev/null +++ b/internal/service/boar/operation.go @@ -0,0 +1,108 @@ +package boar + +import ( + "context" + "io" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/internal/metrics" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/virt/guest" + "github.com/projecteru2/yavirt/pkg/terrors" +) + +// ResizeConsoleWindow . +func (svc *Boar) ResizeConsoleWindow(ctx context.Context, id string, height, width uint) (err error) { + defer logErr(err) + + g, err := svc.loadGuest(ctx, id) + if err != nil { + return errors.Wrap(err, "") + } + return g.ResizeConsoleWindow(ctx, height, width) +} + +type executeResult struct { + output []byte + exitCode int + pid int +} + +// ExecuteGuest . +func (svc *Boar) ExecuteGuest(ctx context.Context, id string, commands []string) (_ *types.ExecuteGuestMessage, err error) { + defer logErr(err) + + exec := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id) + if err != nil { + return nil, errors.Wrap(err, "") + } + output, exitCode, pid, err := g.ExecuteCommand(ctx, commands) + if err != nil { + return nil, errors.Wrap(err, "") + } + return &executeResult{output: output, exitCode: exitCode, pid: pid}, nil + } + + res, err := svc.do(ctx, id, intertypes.ExecuteOp, exec, nil) + if err != nil { + return nil, errors.Wrap(err, "") + } + + er, ok := res.(*executeResult) + if !ok { + return nil, errors.Wrapf(terrors.ErrInvalidValue, "expect *executeResult but it's %v", res) + } + svc.pid2ExitCode.Put(id, er.pid, er.exitCode) + return &types.ExecuteGuestMessage{ + Pid: er.pid, + Data: er.output, + ExitCode: er.exitCode, + }, err +} + +// ExecExitCode . +func (svc *Boar) ExecExitCode(id string, pid int) (int, error) { + exitCode, err := svc.pid2ExitCode.Get(id, pid) + if err != nil { + log.WithFunc("ExecExitCode").Error(context.TODO(), err) + metrics.IncrError() + return 0, err + } + return exitCode, nil +} + +// Cat . +func (svc *Boar) Cat(ctx context.Context, id, path string, dest io.WriteCloser) (err error) { + defer logErr(err) + + return svc.ctrl(ctx, id, intertypes.MiscOp, func(g *guest.Guest) error { + return g.Cat(ctx, path, dest) + }, nil) +} + +// CopyToGuest . 
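+// It streams the byte chunks received on content into the file at dest inside
+// the guest; the override flag controls whether an existing file may be replaced.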
+func (svc *Boar) CopyToGuest(ctx context.Context, id, dest string, content chan []byte, override bool) (err error) { + defer logErr(err) + + return svc.ctrl(ctx, id, intertypes.MiscOp, func(g *guest.Guest) error { + return g.CopyToGuest(ctx, dest, content, override) + }, nil) +} + +// Log . +func (svc *Boar) Log(ctx context.Context, id, logPath string, n int, dest io.WriteCloser) (err error) { + defer logErr(err) + + return svc.ctrl(ctx, id, intertypes.MiscOp, func(g *guest.Guest) error { + if g.LambdaOption == nil { + return g.Log(ctx, n, logPath, dest) + } + + defer dest.Close() + _, err := dest.Write(g.LambdaOption.CmdOutput) + return err + }, nil) +} diff --git a/internal/service/boar/raw_engine.go b/internal/service/boar/raw_engine.go new file mode 100644 index 0000000..e170131 --- /dev/null +++ b/internal/service/boar/raw_engine.go @@ -0,0 +1,201 @@ +package boar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/internal/meta" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/vmcache" + volFact "github.com/projecteru2/yavirt/internal/volume/factory" + vmiFact "github.com/yuyang0/vmimage/factory" +) + +type VMParams struct { + DeviceName string `json:"device_name"` +} + +func (svc *Boar) RawEngine(ctx context.Context, id string, req types.RawEngineReq) (types.RawEngineResp, error) { + switch req.Op { + case "vm-get-vnc-port": + return svc.getVNCPort(ctx, id) + case "vm-init-sys-disk": + return svc.InitSysDisk(ctx, id, req.Params) + case "vm-fs-freeze-all": + return svc.fsFreezeAll(ctx, id) + case "vm-fs-thaw-all": + return svc.fsThawAll(ctx, id) + case "vm-list-vols", "vm-list-vol", "vm-list-volume", "vm-list-volumes": + return svc.listVolumes(ctx, id) + case "vm-fs-freeze-status": + return svc.fsFreezeStatus(ctx, id) + default: + return types.RawEngineResp{}, errors.Errorf("invalid operation %s", req.Op) + } +} + +func (svc *Boar) getVNCPort(_ context.Context, id string) (types.RawEngineResp, error) { + entry := vmcache.FetchDomainEntry(id) + var port int + if entry != nil { + port = entry.VNCPort + } + obj := map[string]int{ + "port": port, + } + bs, _ := json.Marshal(obj) + resp := types.RawEngineResp{ + Data: bs, + } + return resp, nil +} + +type VolItem struct { + Name string `json:"name"` + Size int64 `json:"size"` + Device string `json:"device"` +} + +func guestVolumes2VolItemList(vols volFact.Volumes) []VolItem { + if len(vols) == 0 { + return nil + } + volItemList := make([]VolItem, 0) + for _, item := range vols { + volItem := VolItem{ + Name: item.Name(), + Size: item.GetSize(), + Device: item.GetDevice(), + } + volItemList = append(volItemList, volItem) + } + return volItemList +} + +func (svc *Boar) listVolumes(ctx context.Context, id string) (types.RawEngineResp, error) { + g, err := svc.loadGuest(ctx, id) + if err != nil { + return types.RawEngineResp{}, errors.Wrap(err, "") + } + + volItemList := guestVolumes2VolItemList(g.Vols) + if volItemList == nil { + return types.RawEngineResp{}, nil + } + + bs, _ := json.Marshal(volItemList) + resp := types.RawEngineResp{ + Data: bs, + } + return resp, nil +} + +func (svc *Boar) fsFreezeAll(ctx context.Context, id string) (types.RawEngineResp, error) { + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id) + if err != nil { + return nil, errors.Wrap(err, "") + } + if nFS, err := g.FSFreezeAll(ctx); err 
!= nil { + return nil, errors.Wrap(err, "") + } else { //nolint + return nFS, nil + } + } + nFSRaw, err := svc.do(ctx, id, intertypes.FSThawOP, do, nil) + if err != nil { + return types.RawEngineResp{}, errors.Wrap(err, "") + } + nFS, _ := nFSRaw.(int) + return types.RawEngineResp{ + Data: []byte(fmt.Sprintf(`{"fs_count": %d}`, nFS)), + }, nil +} + +func (svc *Boar) fsThawAll(ctx context.Context, id string) (types.RawEngineResp, error) { + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id) + if err != nil { + return nil, errors.Wrap(err, "") + } + if nFS, err := g.FSThawAll(ctx); err != nil { + return nil, errors.Wrap(err, "") + } else { //nolint + return nFS, nil + } + } + nFSRaw, err := svc.do(ctx, id, intertypes.FSThawOP, do, nil) + if err != nil { + return types.RawEngineResp{}, errors.Wrap(err, "") + } + nFS, _ := nFSRaw.(int) + return types.RawEngineResp{ + Data: []byte(fmt.Sprintf(`{"fs_count": %d}`, nFS)), + }, nil +} + +func (svc *Boar) fsFreezeStatus(ctx context.Context, id string) (types.RawEngineResp, error) { + g, err := svc.loadGuest(ctx, id) + if err != nil { + return types.RawEngineResp{}, errors.Wrap(err, "") + } + status, err := g.FSFreezeStatus(ctx) + if err != nil { + return types.RawEngineResp{}, errors.Wrap(err, "") + } + return types.RawEngineResp{ + Data: []byte(fmt.Sprintf(`{"status": "%s"}`, status)), + }, nil +} + +func (svc *Boar) InitSysDisk(ctx context.Context, id string, rawParams []byte) (types.RawEngineResp, error) { + logger := log.WithFunc("boar.InitSysDisk") + args := &intertypes.InitSysDiskArgs{} + if err := json.Unmarshal(rawParams, args); err != nil { + return types.RawEngineResp{}, errors.Wrapf(err, "failed to unmarshal params") + } + logger.Infof(ctx, "[InitSysDisk] params: %v", args) + // prepare image + img, err := vmiFact.LoadImage(ctx, args.Image) + if err != nil { + return types.RawEngineResp{}, errors.Wrapf(err, "failed to load image %s", args.Image) + } + vols, err := extractVols(args.Resources) + if err != nil { + return types.RawEngineResp{}, errors.Wrapf(err, "failed to extract new sys volume") + } + if len(vols) != 1 || (!vols[0].IsSys()) { + return types.RawEngineResp{}, errors.Wrapf(err, "need a new sys volume, but gives %v", vols[0]) + } + newSysVol := vols[0] + do := func(ctx context.Context) (any, error) { + g, err := svc.loadGuest(ctx, id) + if err != nil { + return nil, errors.Wrap(err, "") + } + if g.Status != meta.StatusStopped { + return nil, errors.Newf("guest should in stopped state") + } + + if err := g.InitSysDisk(ctx, img, args, newSysVol); err != nil { + return nil, errors.Wrap(err, "") + } + if err := g.Start(ctx, true); err != nil { + return nil, errors.Wrapf(err, "failed to start guest %s", g.ID) + } + + return nil, nil //nolint + } + if _, err := svc.do(ctx, id, intertypes.StopOp, do, nil); err != nil { + return types.RawEngineResp{}, errors.Wrap(err, "") + } + msg := `{"success":true}` + resp := types.RawEngineResp{ + Data: []byte(msg), + } + return resp, nil +} diff --git a/internal/service/boar/snapshot.go b/internal/service/boar/snapshot.go new file mode 100644 index 0000000..5dab545 --- /dev/null +++ b/internal/service/boar/snapshot.go @@ -0,0 +1,224 @@ +package boar + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/metrics" + 
"github.com/projecteru2/yavirt/internal/models" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/virt/guest" + "github.com/robfig/cron/v3" +) + +// ListSnapshot . +func (svc *Boar) ListSnapshot(ctx context.Context, req types.ListSnapshotReq) (snaps types.Snapshots, err error) { + defer logErr(err) + + g, err := svc.loadGuest(ctx, req.ID) + if err != nil { + return nil, errors.Wrap(err, "") + } + + volSnap, err := g.ListSnapshot(req.VolID) + + for vol, s := range volSnap { + for _, snap := range s { + snaps = append(snaps, &types.Snapshot{ + VolID: vol.GetID(), + VolMountDir: vol.GetMountDir(), + SnapID: snap.GetID(), + CreatedTime: snap.GetCreatedTime(), + }) + } + } + + return +} + +// CreateSnapshot . +func (svc *Boar) CreateSnapshot(ctx context.Context, req types.CreateSnapshotReq) (err error) { + defer logErr(err) + volID := req.VolID + + return svc.ctrl(ctx, req.ID, intertypes.CreateSnapshotOp, func(g *guest.Guest) error { + suspended := false + stopped := false + if g.Status == meta.StatusRunning { + if err := g.Suspend(); err != nil { + return err + } + suspended = true + } + + if err := g.CreateSnapshot(volID); err != nil { + return err + } + + if err := g.CheckVolume(volID); err != nil { + + if suspended { + if err := g.Stop(ctx, true); err != nil { + return err + } + suspended = false + stopped = true + } + + if err := g.RepairVolume(volID); err != nil { + return err + } + } + + if suspended { + return g.Resume() + } else if stopped { + return g.Start(ctx, false) + } + return nil + }, nil) +} + +// CommitSnapshot . +func (svc *Boar) CommitSnapshot(ctx context.Context, req types.CommitSnapshotReq) (err error) { + defer logErr(err) + + return svc.ctrl(ctx, req.ID, intertypes.CommitSnapshotOp, func(g *guest.Guest) error { + stopped := false + if g.Status == meta.StatusRunning { + if err := g.Stop(ctx, true); err != nil { + return err + } + stopped = true + } + + if err := g.CommitSnapshot(req.VolID, req.SnapID); err != nil { + return err + } + + if stopped { + return g.Start(ctx, false) + } + return nil + }, nil) +} + +// CommitSnapshotByDay . +func (svc *Boar) CommitSnapshotByDay(ctx context.Context, id, volID string, day int) (err error) { + defer logErr(err) + + return svc.ctrl(ctx, id, intertypes.CommitSnapshotOp, func(g *guest.Guest) error { + stopped := false + if g.Status == meta.StatusRunning { + if err := g.Stop(ctx, true); err != nil { + return err + } + stopped = true + } + + if err := g.CommitSnapshotByDay(volID, day); err != nil { + return err + } + + if stopped { + return g.Start(ctx, false) + } + return nil + }, nil) +} + +// RestoreSnapshot . 
+func (svc *Boar) RestoreSnapshot(ctx context.Context, req types.RestoreSnapshotReq) (err error) { + defer logErr(err) + + return svc.ctrl(ctx, req.ID, intertypes.RestoreSnapshotOp, func(g *guest.Guest) error { + stopped := false + if g.Status == meta.StatusRunning { + if err := g.Stop(ctx, true); err != nil { + return err + } + stopped = true + } + + if err := g.RestoreSnapshot(req.VolID, req.SnapID); err != nil { + return err + } + + if stopped { + return g.Start(ctx, false) + } + return nil + }, nil) +} + +// TODO: Decide time +func (svc *Boar) ScheduleSnapshotCreate() error { + c := cron.New() + + // Everyday 3am + if _, err := c.AddFunc("0 3 * * *", svc.batchCreateSnapshot); err != nil { + return errors.Wrap(err, "") + } + + // Every Sunday 1am + if _, err := c.AddFunc("0 1 * * SUN", svc.batchCommitSnapshot); err != nil { + return errors.Wrap(err, "") + } + + // Start job asynchronously + c.Start() + + return nil +} + +func (svc *Boar) batchCreateSnapshot() { + logger := log.WithFunc("Boar.batchCreateSnapshot") + guests, err := models.GetAllGuests() + if err != nil { + logger.Error(context.TODO(), err) + metrics.IncrError() + return + } + + for _, g := range guests { + for _, volID := range g.VolIDs { + req := types.CreateSnapshotReq{ + ID: g.ID, + VolID: volID, + } + + if err := svc.CreateSnapshot(context.TODO(), req); err != nil { + logger.Error(context.TODO(), err) + metrics.IncrError() + } + } + } +} + +func (svc *Boar) batchCommitSnapshot() { + logger := log.WithFunc("Boar.batchCommitSnapshot") + guests, err := models.GetAllGuests() + if err != nil { + logger.Error(context.TODO(), err) + metrics.IncrError() + return + } + + for _, g := range guests { + for _, volID := range g.VolIDs { + if err := svc.CommitSnapshotByDay( + context.TODO(), + g.ID, + volID, + configs.Conf.SnapshotRestorableDay, + ); err != nil { + logger.Error(context.TODO(), err) + metrics.IncrError() + } + } + } +} diff --git a/internal/service/boar/task.go b/internal/service/boar/task.go new file mode 100644 index 0000000..7354e4a --- /dev/null +++ b/internal/service/boar/task.go @@ -0,0 +1,215 @@ +package boar + +import ( + "context" + "fmt" + "sync" + + llq "github.com/emirpasic/gods/queues/linkedlistqueue" + "github.com/panjf2000/ants/v2" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/internal/types" +) + +type taskNotifier struct { + id string + err error +} + +type task struct { + mu sync.Mutex + + id string + op types.Operator + ctx context.Context + do func(context.Context) (any, error) + res any + err error + done struct { + once sync.Once + c chan struct{} + } +} + +func newTask(ctx context.Context, id string, op types.Operator, fn doFunc) *task { + t := &task{ + id: id, + op: op, + ctx: ctx, + do: fn, + } + t.done.c = make(chan struct{}) + return t +} + +// String . 
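+// It renders the task as "id <op>".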
+func (t *task) String() string { + return fmt.Sprintf("%s <%s>", t.id, t.op) +} + +func (t *task) Done() <-chan struct{} { + return t.done.c +} + +func (t *task) run(ctx context.Context) error { + defer t.finish() + + var ( + res any + err error + ) + + select { + case <-ctx.Done(): + err = ctx.Err() + default: + res, err = t.do(ctx) + } + t.setResult(res, err) + return err +} + +func (t *task) finish() { + t.done.once.Do(func() { + close(t.done.c) + }) +} + +func (t *task) result() (any, error) { + t.mu.Lock() + defer t.mu.Unlock() + return t.res, t.err +} + +func (t *task) setResult(res any, err error) { + t.mu.Lock() + defer t.mu.Unlock() + + t.res, t.err = res, err +} + +// all tasks in a task queue have same guest id. +type taskQueue struct { + *llq.Queue + id string +} + +func newTaskQueue(id string) *taskQueue { + return &taskQueue{ + Queue: llq.New(), + id: id, + } +} + +func (tq *taskQueue) revertAll(err error) { + for { + obj, ok := tq.Dequeue() + if !ok { + break + } + t, _ := obj.(*task) + t.finish() + t.setResult(nil, err) + + } +} + +type taskPool struct { + mu sync.Mutex + // pool size + size int + // key is guest id + mgr map[string]*taskQueue + pool *ants.Pool + notifier chan taskNotifier +} + +func newTaskPool(max int) (*taskPool, error) { + p, err := ants.NewPool(max, ants.WithNonblocking(true)) + if err != nil { + return nil, err + } + tp := &taskPool{ + size: max, + pool: p, + mgr: make(map[string]*taskQueue), + notifier: make(chan taskNotifier, 10), + } + go tp.loop() + return tp, nil +} + +func (p *taskPool) SubmitTask(t *task) (err error) { + needNotify := false + err = p.withLocker(func() error { + if _, ok := p.mgr[t.id]; !ok { + if p.size > 0 && len(p.mgr) >= p.size { + return fmt.Errorf("task pool is full") + } + p.mgr[t.id] = newTaskQueue(t.id) + needNotify = true + } + p.mgr[t.id].Enqueue(t) + return nil + }) + if err != nil { + return err + } + if needNotify { + p.notifier <- taskNotifier{ + id: t.id, + err: nil, + } + } + return +} + +func (p *taskPool) loop() { + logger := log.WithFunc("taskPool.loop") + for v := range p.notifier { + var t *task + _ = p.withLocker(func() error { + if v.err != nil { + // when error, revert all tasks in the same queue + tq := p.mgr[v.id] + tq.revertAll(v.err) + } + tq := p.mgr[v.id] + if tq.Empty() { + delete(p.mgr, v.id) + return nil + } + obj, _ := tq.Dequeue() + t, _ = obj.(*task) + return nil + }) + + if t == nil { + continue + } + err := p.pool.Submit(func() { + if err := t.run(t.ctx); err != nil { + logger.Error(context.TODO(), err) + } + _, err := t.result() + p.notifier <- taskNotifier{ + id: t.id, + err: err, + } + }) + if err != nil { + // the pool is full, it never happens, because when submitting task, the size is already checked + logger.Errorf(context.TODO(), err, "BUG: failed to submit task<%s> %s", t.id, err) + } + } +} + +func (p *taskPool) withLocker(f func() error) error { + p.mu.Lock() + defer p.mu.Unlock() + return f() +} + +func (p *taskPool) Release() { + p.pool.Release() +} diff --git a/internal/service/boar/task_test.go b/internal/service/boar/task_test.go new file mode 100644 index 0000000..741873b --- /dev/null +++ b/internal/service/boar/task_test.go @@ -0,0 +1,183 @@ +package boar + +import ( + "context" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/pkg/test/assert" +) + +func TestTask(t *testing.T) { + var counter int32 + timeout := 3 * time.Second + start := time.Now() + p, err := newTaskPool(100) + assert.Nil(t, err) 
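+	// a single submitted task must run exactly once; Done() only unblocks
+	// after the task function has returned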
+ tsk := newTask(context.Background(), "test", types.OpStart, func(ctx context.Context) (any, error) { + atomic.AddInt32(&counter, 1) + time.Sleep(timeout) + return nil, nil + }) + p.SubmitTask(tsk) + <-tsk.Done() + assert.Equal(t, int32(1), atomic.LoadInt32(&counter)) + assert.True(t, time.Since(start) > timeout) + assert.True(t, time.Since(start) < timeout+3*time.Second) +} + +func TestSingleTypeTask(t *testing.T) { + var counter int32 + timeout := 3 * time.Second + nTasks := 4 + tasks := make([]*task, nTasks) + start := time.Now() + p, err := newTaskPool(100) + assert.Nil(t, err) + + for i := 0; i < nTasks; i++ { + tsk := newTask(context.Background(), "test", types.OpStart, func(ctx context.Context) (any, error) { + atomic.AddInt32(&counter, 1) + time.Sleep(timeout) + return nil, nil + }) + p.SubmitTask(tsk) + tasks[i] = tsk + } + for i := 0; i < nTasks; i++ { + <-tasks[i].Done() + } + assert.Equal(t, int32(nTasks), atomic.LoadInt32(&counter)) + assert.True(t, time.Since(start) > timeout*time.Duration(nTasks)) + assert.True(t, time.Since(start) < timeout*time.Duration(nTasks)+3*time.Second) +} + +func TestSingleTypeTaskSeq(t *testing.T) { + var counter int32 + timeout := 3 * time.Second + nTasks := 4 + tasks := make([]*task, nTasks) + start := time.Now() + p, err := newTaskPool(100) + assert.Nil(t, err) + + for i := 0; i < nTasks; i++ { + tsk := newTask(context.Background(), "test", types.OpStart, func(ctx context.Context) (any, error) { + atomic.AddInt32(&counter, 1) + time.Sleep(timeout) + return nil, nil + }) + p.SubmitTask(tsk) + tasks[i] = tsk + <-tasks[i].Done() + } + assert.Equal(t, int32(nTasks), atomic.LoadInt32(&counter)) + assert.True(t, time.Since(start) > timeout*time.Duration(nTasks)) + assert.True(t, time.Since(start) < timeout*time.Duration(nTasks)+3*time.Second) +} + +func TestSingleTypeFailedTask(t *testing.T) { + var counter int32 + timeout := 3 * time.Second + nTasks := 3 + nFailed := 4 + tasks := make([]*task, nTasks+nFailed) + start := time.Now() + p, err := newTaskPool(100) + assert.Nil(t, err) + + for i := 0; i < nTasks; i++ { + tsk := newTask(context.Background(), "test", types.OpStart, func(ctx context.Context) (any, error) { + atomic.AddInt32(&counter, 1) + time.Sleep(timeout) + return nil, nil + }) + p.SubmitTask(tsk) + tasks[i] = tsk + } + + for i := 0; i < nFailed; i++ { + tsk := newTask(context.Background(), "test", types.OpStart, func(ctx context.Context) (any, error) { + atomic.AddInt32(&counter, 1) + return nil, fmt.Errorf("failed") + }) + p.SubmitTask(tsk) + tasks[i+nTasks] = tsk + } + for i := 0; i < len(tasks); i++ { + <-tasks[i].Done() + _, err := tasks[i].result() + if i < nTasks { + assert.Nil(t, err) + } else { + assert.NotNil(t, err) + } + } + assert.Equal(t, int32(nTasks+1), atomic.LoadInt32(&counter)) + assert.True(t, time.Since(start) > timeout*time.Duration(nTasks)) + assert.True(t, time.Since(start) < timeout*time.Duration(nTasks)+3*time.Second) +} + +func TestMultiTypeTask(t *testing.T) { + var counter int32 + timeout := 3 * time.Second + nTasks := 4 + tasks := make([]*task, nTasks) + start := time.Now() + p, err := newTaskPool(100) + assert.Nil(t, err) + + for i := 0; i < nTasks; i++ { + id := fmt.Sprintf("test%d", i) + tsk := newTask(context.Background(), id, types.OpStart, func(ctx context.Context) (any, error) { + atomic.AddInt32(&counter, 1) + time.Sleep(timeout) + return nil, nil + }) + p.SubmitTask(tsk) + tasks[i] = tsk + } + for i := 0; i < nTasks; i++ { + <-tasks[i].Done() + } + assert.Equal(t, int32(nTasks), 
atomic.LoadInt32(&counter)) + assert.True(t, time.Since(start) > timeout) + assert.True(t, time.Since(start) < timeout+3*time.Second) +} + +func TestSmallPool(t *testing.T) { + var counter int32 + poolSize := 2 + timeout := 3 * time.Second + nTasks := 4 + tasks := make([]*task, nTasks) + start := time.Now() + p, err := newTaskPool(poolSize) + assert.Nil(t, err) + + for i := 0; i < nTasks; i++ { + id := fmt.Sprintf("test%d", i) + tsk := newTask(context.Background(), id, types.OpStart, func(ctx context.Context) (any, error) { + atomic.AddInt32(&counter, 1) + time.Sleep(timeout) + return nil, nil + }) + err := p.SubmitTask(tsk) + if i < poolSize { + assert.Nil(t, err) + } else { + assert.NotNil(t, err) + tsk.finish() + } + tasks[i] = tsk + } + for i := 0; i < nTasks; i++ { + <-tasks[i].Done() + } + assert.Equal(t, int32(poolSize), atomic.LoadInt32(&counter)) + assert.True(t, time.Since(start) > timeout) + assert.True(t, time.Since(start) < timeout+3*time.Second) +} diff --git a/internal/service/boar/util.go b/internal/service/boar/util.go new file mode 100644 index 0000000..d5d3f57 --- /dev/null +++ b/internal/service/boar/util.go @@ -0,0 +1,145 @@ +package boar + +import ( + "encoding/json" + + "strings" + + pb "github.com/projecteru2/core/rpc/gen" + + "github.com/cockroachdb/errors" + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/internal/meta" + "github.com/projecteru2/yavirt/internal/models" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/volume" + "github.com/projecteru2/yavirt/internal/volume/local" + "github.com/projecteru2/yavirt/internal/volume/rbd" + + cpumemtypes "github.com/projecteru2/core/resource/plugins/cpumem/types" + stotypes "github.com/projecteru2/resource-storage/storage/types" + gputypes "github.com/yuyang0/resource-gpu/gpu/types" + rbdtypes "github.com/yuyang0/resource-rbd/rbd/types" +) + +func extractCPUMem(resources map[string][]byte) (eParams *cpumemtypes.EngineParams, err error) { + cpumemRaw, ok := resources[intertypes.PluginNameCPUMem] + if !ok { + return nil, nil //nolint + } + var ans cpumemtypes.EngineParams + err = json.Unmarshal(cpumemRaw, &ans) + return &ans, err +} + +func extractGPU(resources map[string][]byte) (eParams *gputypes.EngineParams, err error) { + gpuRaw, ok := resources[intertypes.PluginNameGPU] + if !ok { + return nil, nil //nolint + } + var ans gputypes.EngineParams + err = json.Unmarshal(gpuRaw, &ans) + return &ans, err +} + +func extractVols(resources map[string][]byte) ([]volume.Volume, error) { + var sysVol volume.Volume + vols := make([]volume.Volume, 1) // first place is for sys volume + appendVol := func(vol volume.Volume) error { + if vol.IsSys() { + if sysVol != nil { + return errors.New("multiple sys volume") + } + sysVol = vol + return nil + } + vols = append(vols, vol) //nolint + return nil + } + + stoResRaw, ok := resources[intertypes.PluginNameStorage] + if ok { + eParams := &stotypes.EngineParams{} + if err := json.Unmarshal(stoResRaw, eParams); err != nil { + return nil, errors.Wrap(err, "") + } + for _, part := range eParams.Volumes { + vol, err := local.NewVolumeFromStr(part) + if err != nil { + return nil, err + } + if err := appendVol(vol); err != nil { + return nil, err + } + } + } + rbdResRaw, ok := resources[intertypes.PluginNameRBD] + if ok { + eParams := &rbdtypes.EngineParams{} + if err := json.Unmarshal(rbdResRaw, eParams); err != nil { + return nil, errors.Wrap(err, "") + } + for _, part := range eParams.Volumes { + vol, err := 
rbd.NewFromStr(part) + if err != nil { + return nil, err + } + if err := appendVol(vol); err != nil { + return nil, err + } + } + } + if sysVol != nil { + vols[0] = sysVol + } else { + vols = vols[1:] + } + return vols, nil +} + +func convGuestResp(g *models.Guest) (resp *types.Guest) { + resp = &types.Guest{} + resp.ID = g.ID + resp.Hostname = g.HostName + resp.Status = g.Status + resp.CreateTime = g.CreatedTime + resp.UpdateTime = g.UpdatedTime + resp.ImageName = g.ImageName + resp.CPU = g.CPU + resp.Mem = g.Memory + resp.Labels = g.JSONLabels + resp.Running = (g.Status == meta.StatusRunning) + + if len(g.IPs) > 0 { + var ips = make([]string, len(g.IPs)) + for i, ip := range g.IPs { + ips[i] = ip.IPAddr() + } + resp.IPs = ips + resp.Networks = map[string]string{"IP": strings.Join(ips, ", ")} + } + + return +} + +// ConvSetWorkloadsStatusOptions . +func ConvSetWorkloadsStatusOptions(gss []types.EruGuestStatus) *pb.SetWorkloadsStatusOptions { + css := make([]*pb.WorkloadStatus, len(gss)) + for i, gs := range gss { + css[i] = convWorkloadStatus(gs) + } + + return &pb.SetWorkloadsStatusOptions{ + Status: css, + } +} + +func convWorkloadStatus(gs types.EruGuestStatus) *pb.WorkloadStatus { + return &pb.WorkloadStatus{ + Id: gs.EruGuestID, + Running: gs.Running, + Healthy: gs.Healthy, + Ttl: int64(gs.TTL.Seconds()), + Networks: map[string]string{"IP": gs.GetIPAddrs()}, + } +} diff --git a/internal/service/mocks/Service.go b/internal/service/mocks/Service.go new file mode 100644 index 0000000..fc55029 --- /dev/null +++ b/internal/service/mocks/Service.go @@ -0,0 +1,866 @@ +// Code generated by mockery v2.42.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + io "io" + + libyavirttypes "github.com/projecteru2/libyavirt/types" + mock "github.com/stretchr/testify/mock" + + types "github.com/projecteru2/yavirt/internal/types" + + utils "github.com/projecteru2/yavirt/internal/utils" + + vmimagetypes "github.com/yuyang0/vmimage/types" +) + +// Service is an autogenerated mock type for the Service type +type Service struct { + mock.Mock +} + +// AttachGuest provides a mock function with given fields: ctx, id, stream, flags +func (_m *Service) AttachGuest(ctx context.Context, id string, stream io.ReadWriteCloser, flags types.OpenConsoleFlags) error { + ret := _m.Called(ctx, id, stream, flags) + + if len(ret) == 0 { + panic("no return value specified for AttachGuest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, io.ReadWriteCloser, types.OpenConsoleFlags) error); ok { + r0 = rf(ctx, id, stream, flags) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CaptureGuest provides a mock function with given fields: ctx, id, imgName, overridden +func (_m *Service) CaptureGuest(ctx context.Context, id string, imgName string, overridden bool) (*vmimagetypes.Image, error) { + ret := _m.Called(ctx, id, imgName, overridden) + + if len(ret) == 0 { + panic("no return value specified for CaptureGuest") + } + + var r0 *vmimagetypes.Image + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) (*vmimagetypes.Image, error)); ok { + return rf(ctx, id, imgName, overridden) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) *vmimagetypes.Image); ok { + r0 = rf(ctx, id, imgName, overridden) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vmimagetypes.Image) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, bool) error); ok { + r1 = rf(ctx, id, imgName, overridden) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// Cat provides a mock function with given fields: ctx, id, path, dest +func (_m *Service) Cat(ctx context.Context, id string, path string, dest io.WriteCloser) error { + ret := _m.Called(ctx, id, path, dest) + + if len(ret) == 0 { + panic("no return value specified for Cat") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, io.WriteCloser) error); ok { + r0 = rf(ctx, id, path, dest) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CommitSnapshot provides a mock function with given fields: ctx, req +func (_m *Service) CommitSnapshot(ctx context.Context, req libyavirttypes.CommitSnapshotReq) error { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for CommitSnapshot") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, libyavirttypes.CommitSnapshotReq) error); ok { + r0 = rf(ctx, req) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CommitSnapshotByDay provides a mock function with given fields: ctx, id, volID, day +func (_m *Service) CommitSnapshotByDay(ctx context.Context, id string, volID string, day int) error { + ret := _m.Called(ctx, id, volID, day) + + if len(ret) == 0 { + panic("no return value specified for CommitSnapshotByDay") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, int) error); ok { + r0 = rf(ctx, id, volID, day) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ConnectNetwork provides a mock function with given fields: ctx, id, network, ipv4 +func (_m *Service) ConnectNetwork(ctx context.Context, id string, network string, ipv4 string) (string, error) { + ret := _m.Called(ctx, id, network, ipv4) + + if len(ret) == 0 { + panic("no return value specified for ConnectNetwork") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (string, error)); ok { + return rf(ctx, id, network, ipv4) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) string); ok { + r0 = rf(ctx, id, network, ipv4) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, id, network, ipv4) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ControlGuest provides a mock function with given fields: ctx, id, operation, force +func (_m *Service) ControlGuest(ctx context.Context, id string, operation string, force bool) error { + ret := _m.Called(ctx, id, operation, force) + + if len(ret) == 0 { + panic("no return value specified for ControlGuest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) error); ok { + r0 = rf(ctx, id, operation, force) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CopyToGuest provides a mock function with given fields: ctx, id, dest, content, override +func (_m *Service) CopyToGuest(ctx context.Context, id string, dest string, content chan []byte, override bool) error { + ret := _m.Called(ctx, id, dest, content, override) + + if len(ret) == 0 { + panic("no return value specified for CopyToGuest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, chan []byte, bool) error); ok { + r0 = rf(ctx, id, dest, content, override) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateGuest provides a mock function with given fields: ctx, opts +func (_m *Service) CreateGuest(ctx context.Context, opts types.GuestCreateOption) 
(*libyavirttypes.Guest, error) { + ret := _m.Called(ctx, opts) + + if len(ret) == 0 { + panic("no return value specified for CreateGuest") + } + + var r0 *libyavirttypes.Guest + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.GuestCreateOption) (*libyavirttypes.Guest, error)); ok { + return rf(ctx, opts) + } + if rf, ok := ret.Get(0).(func(context.Context, types.GuestCreateOption) *libyavirttypes.Guest); ok { + r0 = rf(ctx, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*libyavirttypes.Guest) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.GuestCreateOption) error); ok { + r1 = rf(ctx, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateSnapshot provides a mock function with given fields: ctx, req +func (_m *Service) CreateSnapshot(ctx context.Context, req libyavirttypes.CreateSnapshotReq) error { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for CreateSnapshot") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, libyavirttypes.CreateSnapshotReq) error); ok { + r0 = rf(ctx, req) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DigestImage provides a mock function with given fields: ctx, imageName, local +func (_m *Service) DigestImage(ctx context.Context, imageName string, local bool) ([]string, error) { + ret := _m.Called(ctx, imageName, local) + + if len(ret) == 0 { + panic("no return value specified for DigestImage") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, bool) ([]string, error)); ok { + return rf(ctx, imageName, local) + } + if rf, ok := ret.Get(0).(func(context.Context, string, bool) []string); ok { + r0 = rf(ctx, imageName, local) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { + r1 = rf(ctx, imageName, local) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DisconnectNetwork provides a mock function with given fields: ctx, id, network +func (_m *Service) DisconnectNetwork(ctx context.Context, id string, network string) error { + ret := _m.Called(ctx, id, network) + + if len(ret) == 0 { + panic("no return value specified for DisconnectNetwork") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, id, network) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExecExitCode provides a mock function with given fields: id, pid +func (_m *Service) ExecExitCode(id string, pid int) (int, error) { + ret := _m.Called(id, pid) + + if len(ret) == 0 { + panic("no return value specified for ExecExitCode") + } + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(string, int) (int, error)); ok { + return rf(id, pid) + } + if rf, ok := ret.Get(0).(func(string, int) int); ok { + r0 = rf(id, pid) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(string, int) error); ok { + r1 = rf(id, pid) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecuteGuest provides a mock function with given fields: ctx, id, commands +func (_m *Service) ExecuteGuest(ctx context.Context, id string, commands []string) (*libyavirttypes.ExecuteGuestMessage, error) { + ret := _m.Called(ctx, id, commands) + + if len(ret) == 0 { + panic("no return value specified for ExecuteGuest") + } + + var r0 *libyavirttypes.ExecuteGuestMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, 
[]string) (*libyavirttypes.ExecuteGuestMessage, error)); ok { + return rf(ctx, id, commands) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []string) *libyavirttypes.ExecuteGuestMessage); ok { + r0 = rf(ctx, id, commands) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*libyavirttypes.ExecuteGuestMessage) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []string) error); ok { + r1 = rf(ctx, id, commands) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetGuest provides a mock function with given fields: ctx, id +func (_m *Service) GetGuest(ctx context.Context, id string) (*libyavirttypes.Guest, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetGuest") + } + + var r0 *libyavirttypes.Guest + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*libyavirttypes.Guest, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *libyavirttypes.Guest); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*libyavirttypes.Guest) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetGuestIDList provides a mock function with given fields: ctx +func (_m *Service) GetGuestIDList(ctx context.Context) ([]string, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetGuestIDList") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []string); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetGuestUUID provides a mock function with given fields: ctx, id +func (_m *Service) GetGuestUUID(ctx context.Context, id string) (string, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetGuestUUID") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (string, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, string) string); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Info provides a mock function with given fields: +func (_m *Service) Info() (*libyavirttypes.HostInfo, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Info") + } + + var r0 *libyavirttypes.HostInfo + var r1 error + if rf, ok := ret.Get(0).(func() (*libyavirttypes.HostInfo, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *libyavirttypes.HostInfo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*libyavirttypes.HostInfo) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsHealthy provides a mock function with given fields: ctx +func (_m *Service) IsHealthy(ctx context.Context) bool { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for IsHealthy") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context) 
bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// ListImage provides a mock function with given fields: ctx, filter +func (_m *Service) ListImage(ctx context.Context, filter string) ([]*vmimagetypes.Image, error) { + ret := _m.Called(ctx, filter) + + if len(ret) == 0 { + panic("no return value specified for ListImage") + } + + var r0 []*vmimagetypes.Image + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]*vmimagetypes.Image, error)); ok { + return rf(ctx, filter) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []*vmimagetypes.Image); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*vmimagetypes.Image) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListSnapshot provides a mock function with given fields: ctx, req +func (_m *Service) ListSnapshot(ctx context.Context, req libyavirttypes.ListSnapshotReq) (libyavirttypes.Snapshots, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for ListSnapshot") + } + + var r0 libyavirttypes.Snapshots + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, libyavirttypes.ListSnapshotReq) (libyavirttypes.Snapshots, error)); ok { + return rf(ctx, req) + } + if rf, ok := ret.Get(0).(func(context.Context, libyavirttypes.ListSnapshotReq) libyavirttypes.Snapshots); ok { + r0 = rf(ctx, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(libyavirttypes.Snapshots) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, libyavirttypes.ListSnapshotReq) error); ok { + r1 = rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Log provides a mock function with given fields: ctx, id, logPath, n, dest +func (_m *Service) Log(ctx context.Context, id string, logPath string, n int, dest io.WriteCloser) error { + ret := _m.Called(ctx, id, logPath, n, dest) + + if len(ret) == 0 { + panic("no return value specified for Log") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, int, io.WriteCloser) error); ok { + r0 = rf(ctx, id, logPath, n, dest) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NetworkList provides a mock function with given fields: ctx, drivers +func (_m *Service) NetworkList(ctx context.Context, drivers []string) ([]*libyavirttypes.Network, error) { + ret := _m.Called(ctx, drivers) + + if len(ret) == 0 { + panic("no return value specified for NetworkList") + } + + var r0 []*libyavirttypes.Network + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []string) ([]*libyavirttypes.Network, error)); ok { + return rf(ctx, drivers) + } + if rf, ok := ret.Get(0).(func(context.Context, []string) []*libyavirttypes.Network); ok { + r0 = rf(ctx, drivers) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*libyavirttypes.Network) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { + r1 = rf(ctx, drivers) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Ping provides a mock function with given fields: +func (_m *Service) Ping() map[string]string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ping") + } + + var r0 map[string]string + if rf, ok := ret.Get(0).(func() map[string]string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +// PullImage provides 
a mock function with given fields: ctx, imgName +func (_m *Service) PullImage(ctx context.Context, imgName string) (*vmimagetypes.Image, io.ReadCloser, error) { + ret := _m.Called(ctx, imgName) + + if len(ret) == 0 { + panic("no return value specified for PullImage") + } + + var r0 *vmimagetypes.Image + var r1 io.ReadCloser + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*vmimagetypes.Image, io.ReadCloser, error)); ok { + return rf(ctx, imgName) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *vmimagetypes.Image); ok { + r0 = rf(ctx, imgName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vmimagetypes.Image) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) io.ReadCloser); ok { + r1 = rf(ctx, imgName) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(io.ReadCloser) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { + r2 = rf(ctx, imgName) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// PushImage provides a mock function with given fields: ctx, imgName, force +func (_m *Service) PushImage(ctx context.Context, imgName string, force bool) (io.ReadCloser, error) { + ret := _m.Called(ctx, imgName, force) + + if len(ret) == 0 { + panic("no return value specified for PushImage") + } + + var r0 io.ReadCloser + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, bool) (io.ReadCloser, error)); ok { + return rf(ctx, imgName, force) + } + if rf, ok := ret.Get(0).(func(context.Context, string, bool) io.ReadCloser); ok { + r0 = rf(ctx, imgName, force) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { + r1 = rf(ctx, imgName, force) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RawEngine provides a mock function with given fields: ctx, id, req +func (_m *Service) RawEngine(ctx context.Context, id string, req libyavirttypes.RawEngineReq) (libyavirttypes.RawEngineResp, error) { + ret := _m.Called(ctx, id, req) + + if len(ret) == 0 { + panic("no return value specified for RawEngine") + } + + var r0 libyavirttypes.RawEngineResp + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, libyavirttypes.RawEngineReq) (libyavirttypes.RawEngineResp, error)); ok { + return rf(ctx, id, req) + } + if rf, ok := ret.Get(0).(func(context.Context, string, libyavirttypes.RawEngineReq) libyavirttypes.RawEngineResp); ok { + r0 = rf(ctx, id, req) + } else { + r0 = ret.Get(0).(libyavirttypes.RawEngineResp) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, libyavirttypes.RawEngineReq) error); ok { + r1 = rf(ctx, id, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveImage provides a mock function with given fields: ctx, imageName, force, prune +func (_m *Service) RemoveImage(ctx context.Context, imageName string, force bool, prune bool) ([]string, error) { + ret := _m.Called(ctx, imageName, force, prune) + + if len(ret) == 0 { + panic("no return value specified for RemoveImage") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, bool, bool) ([]string, error)); ok { + return rf(ctx, imageName, force, prune) + } + if rf, ok := ret.Get(0).(func(context.Context, string, bool, bool) []string); ok { + r0 = rf(ctx, imageName, force, prune) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, bool, bool) error); ok 
{ + r1 = rf(ctx, imageName, force, prune) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResizeConsoleWindow provides a mock function with given fields: ctx, id, height, width +func (_m *Service) ResizeConsoleWindow(ctx context.Context, id string, height uint, width uint) error { + ret := _m.Called(ctx, id, height, width) + + if len(ret) == 0 { + panic("no return value specified for ResizeConsoleWindow") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint, uint) error); ok { + r0 = rf(ctx, id, height, width) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ResizeGuest provides a mock function with given fields: ctx, id, opts +func (_m *Service) ResizeGuest(ctx context.Context, id string, opts *types.GuestResizeOption) error { + ret := _m.Called(ctx, id, opts) + + if len(ret) == 0 { + panic("no return value specified for ResizeGuest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *types.GuestResizeOption) error); ok { + r0 = rf(ctx, id, opts) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RestoreSnapshot provides a mock function with given fields: ctx, req +func (_m *Service) RestoreSnapshot(ctx context.Context, req libyavirttypes.RestoreSnapshotReq) error { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for RestoreSnapshot") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, libyavirttypes.RestoreSnapshotReq) error); ok { + r0 = rf(ctx, req) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Wait provides a mock function with given fields: ctx, id, block +func (_m *Service) Wait(ctx context.Context, id string, block bool) (string, int, error) { + ret := _m.Called(ctx, id, block) + + if len(ret) == 0 { + panic("no return value specified for Wait") + } + + var r0 string + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, string, bool) (string, int, error)); ok { + return rf(ctx, id, block) + } + if rf, ok := ret.Get(0).(func(context.Context, string, bool) string); ok { + r0 = rf(ctx, id, block) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, bool) int); ok { + r1 = rf(ctx, id, block) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, string, bool) error); ok { + r2 = rf(ctx, id, block) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// WatchGuestEvents provides a mock function with given fields: _a0 +func (_m *Service) WatchGuestEvents(_a0 context.Context) (*utils.Watcher, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for WatchGuestEvents") + } + + var r0 *utils.Watcher + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*utils.Watcher, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) *utils.Watcher); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*utils.Watcher) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewService creates a new instance of Service. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewService(t interface { + mock.TestingT + Cleanup(func()) +}) *Service { + mock := &Service{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/service/service.go b/internal/service/service.go new file mode 100644 index 0000000..83f85f1 --- /dev/null +++ b/internal/service/service.go @@ -0,0 +1,60 @@ +package service + +import ( + "context" + "io" + + "github.com/projecteru2/libyavirt/types" + intertypes "github.com/projecteru2/yavirt/internal/types" + "github.com/projecteru2/yavirt/internal/utils" + vmitypes "github.com/yuyang0/vmimage/types" +) + +// Service interface +// Note: all ids passed to this interface and returned by this interface don't contains maigc prefix +type Service interface { //nolint:interfacebloat + Ping() map[string]string + Info() (*types.HostInfo, error) + IsHealthy(ctx context.Context) (ans bool) + + // Guest related functions + GetGuest(ctx context.Context, id string) (*types.Guest, error) + GetGuestIDList(ctx context.Context) ([]string, error) + GetGuestUUID(ctx context.Context, id string) (string, error) + CreateGuest(ctx context.Context, opts intertypes.GuestCreateOption) (*types.Guest, error) + CaptureGuest(ctx context.Context, id string, imgName string, overridden bool) (uimg *vmitypes.Image, err error) + ResizeGuest(ctx context.Context, id string, opts *intertypes.GuestResizeOption) (err error) + ControlGuest(ctx context.Context, id, operation string, force bool) (err error) + AttachGuest(ctx context.Context, id string, stream io.ReadWriteCloser, flags intertypes.OpenConsoleFlags) (err error) + ResizeConsoleWindow(ctx context.Context, id string, height, width uint) (err error) + Wait(ctx context.Context, id string, block bool) (msg string, code int, err error) + WatchGuestEvents(context.Context) (*utils.Watcher, error) + + // Guest utilities + ExecuteGuest(ctx context.Context, id string, commands []string) (*types.ExecuteGuestMessage, error) + ExecExitCode(id string, pid int) (int, error) + Cat(ctx context.Context, id, path string, dest io.WriteCloser) (err error) + CopyToGuest(ctx context.Context, id, dest string, content chan []byte, override bool) (err error) + Log(ctx context.Context, id, logPath string, n int, dest io.WriteCloser) (err error) + + // Snapshot + ListSnapshot(ctx context.Context, req types.ListSnapshotReq) (snaps types.Snapshots, err error) + CreateSnapshot(ctx context.Context, req types.CreateSnapshotReq) (err error) + CommitSnapshot(ctx context.Context, req types.CommitSnapshotReq) (err error) + CommitSnapshotByDay(ctx context.Context, id, volID string, day int) (err error) + RestoreSnapshot(ctx context.Context, req types.RestoreSnapshotReq) (err error) + + // Network + NetworkList(ctx context.Context, drivers []string) ([]*types.Network, error) + ConnectNetwork(ctx context.Context, id, network, ipv4 string) (cidr string, err error) + DisconnectNetwork(ctx context.Context, id, network string) (err error) + + // Image + PushImage(ctx context.Context, imgName string, force bool) (rc io.ReadCloser, err error) + RemoveImage(ctx context.Context, imageName string, force, prune bool) (removed []string, err error) + ListImage(ctx context.Context, filter string) ([]*vmitypes.Image, error) + PullImage(ctx context.Context, imgName string) (img *vmitypes.Image, rc io.ReadCloser, err error) + DigestImage(ctx context.Context, imageName string, local bool) (digest []string, err error) + + RawEngine(ctx context.Context, id string, req types.RawEngineReq) (types.RawEngineResp, error) 
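+
+	// Note: the Boar implementation dispatches RawEngine on req.Op; the operations
+	// it currently accepts include vm-get-vnc-port, vm-init-sys-disk, vm-fs-freeze-all,
+	// vm-fs-thaw-all, vm-list-volumes and vm-fs-freeze-status.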
+} diff --git a/internal/types/cloud_init.go b/internal/types/cloud_init.go new file mode 100644 index 0000000..73506d6 --- /dev/null +++ b/internal/types/cloud_init.go @@ -0,0 +1,204 @@ +package types + +import ( + "bytes" + _ "embed" + "encoding/base64" + "os" + "os/exec" + "path/filepath" + "text/template" + + "github.com/Masterminds/sprig/v3" + + "github.com/kdomanski/iso9660/util" + + "github.com/cockroachdb/errors" +) + +var ( + //go:embed templates/user-data.yaml + userData string + //go:embed templates/meta-data.yaml + metaData string + //go:embed templates/network-config.yaml + networkData string + + userDataTpl *template.Template + metaDataTpl *template.Template + networkTpl *template.Template +) + +type CloudInitGateway struct { + IP string `json:"ip"` + OnLink bool `json:"on_link"` +} + +type CloudInitConfig struct { + // use remote server to fetch cloud-init config + URL string `json:"url"` + + // use local iso to fetch cloud-init config + Username string `json:"username"` + Password string `json:"password"` + SSHPubKey string `json:"ssh_pub_key"` + Hostname string `json:"hostname"` + InstanceID string `json:"instance_id"` + Files map[string][]byte `json:"files"` + Commands []string `json:"commands"` + + MAC string `json:"-"` + CIDR string `json:"-"` + MTU int `json:"-"` + IFName string `json:"-"` + DefaultGW CloudInitGateway `json:"-"` +} + +func initTpls() (err error) { + if userDataTpl == nil { + if userDataTpl, err = template.New("userdata").Funcs(sprig.TxtFuncMap()).Parse(userData); err != nil { + return + } + } + if metaDataTpl == nil { + if metaDataTpl, err = template.New("metadata").Funcs(sprig.TxtFuncMap()).Parse(metaData); err != nil { + return + } + } + if networkTpl == nil { + if networkTpl, err = template.New("network").Funcs(sprig.TxtFuncMap()).Parse(networkData); err != nil { + return + } + } + return +} + +func (ciCfg *CloudInitConfig) GenFilesContent() (string, string, string, error) { + if err := initTpls(); err != nil { + return "", "", "", err + } + d1 := map[string]any{ + "username": ciCfg.Username, + "password": ciCfg.Password, + "sshPubKey": ciCfg.SSHPubKey, + "mac": ciCfg.MAC, + "cidr": ciCfg.CIDR, + "mtu": ciCfg.MTU, + "ifname": ciCfg.IFName, + "defaultGW": map[string]any{ + "ip": ciCfg.DefaultGW.IP, + "onLink": ciCfg.DefaultGW.OnLink, + }, + "commands": ciCfg.Commands, + "files": []map[string]any{}, + } + for k, v := range ciCfg.Files { + d1["files"] = append(d1["files"].([]map[string]any), map[string]any{ + "path": k, + "content": base64.StdEncoding.EncodeToString(v), + }) + } + var userDataBs bytes.Buffer + if err := userDataTpl.Execute(&userDataBs, d1); err != nil { + return "", "", "", err + } + + d2 := map[string]string{ + "instanceID": ciCfg.InstanceID, + "hostname": ciCfg.Hostname, + } + var metaDataBs bytes.Buffer + if err := metaDataTpl.Execute(&metaDataBs, d2); err != nil { + return "", "", "", err + } + var networkBs bytes.Buffer + if err := networkTpl.Execute(&networkBs, d1); err != nil { + return "", "", "", err + } + return userDataBs.String(), metaDataBs.String(), networkBs.String(), nil +} + +func (ciCfg *CloudInitConfig) GenerateISO(fname string) (err error) { + dir, err := os.MkdirTemp("/tmp", "cloud-init") + if err != nil { + return + } + defer os.RemoveAll(dir) + udataFname := filepath.Join(dir, "user-data") + mdataFname := filepath.Join(dir, "meta-data") + networkFname := filepath.Join(dir, "network-config") + + udata, mdata, ndata, err := ciCfg.GenFilesContent() + if err != nil { + return + } + if err := os.WriteFile(udataFname, 
[]byte(udata), 0600); err != nil { + return errors.Wrap(err, "") + } + if err := os.WriteFile(mdataFname, []byte(mdata), 0600); err != nil { + return errors.Wrap(err, "") + } + + if err := os.WriteFile(networkFname, []byte(ndata), 0600); err != nil { + return errors.Wrap(err, "") + } + // args := []string{ + // "genisoimage", "-output", fname, "-V", "cidata", "-r", "-J", "user-data", "meta-data", + // } + args := []string{ + "cloud-localds", "--network-config=network-config", fname, "user-data", "meta-data", + } + cmd := exec.Command(args[0], args[1:]...) //nolint + cmd.Dir = dir + out, err := cmd.CombinedOutput() + if err != nil { + return errors.Wrapf(err, "failed to exec genisoimage %s", out) + } + return +} + +func extractISO(isoPath, outputDir string) error { + isoFile, err := os.Open(isoPath) + if err != nil { + return err + } + defer isoFile.Close() + + return util.ExtractImageToDirectory(isoFile, outputDir) +} + +func (ciCfg *CloudInitConfig) ReplaceUserData(fname string) (err error) { + if ciCfg.Username == "" || ciCfg.Password == "" { + return + } + // if the iso file doesn't exist, it means the cloud-init DataSource is not NoCloud-local + _, err = os.Stat(fname) + if os.IsNotExist(err) { + return nil + } + + dir, err := os.MkdirTemp("/tmp", "cloud-init") + if err != nil { + return + } + defer os.RemoveAll(dir) + if err := extractISO(fname, dir); err != nil { + return err + } + udataFname := filepath.Join(dir, "user-data") + + udata, _, _, err := ciCfg.GenFilesContent() + if err := os.WriteFile(udataFname, []byte(udata), 0600); err != nil { + return errors.Wrap(err, "") + } + args := []string{ + "cloud-localds", "--network-config=network-config", fname, "user-data", "meta-data", + } + cmd := exec.Command(args[0], args[1:]...) //nolint + cmd.Dir = dir + out, err := cmd.CombinedOutput() + if err != nil { + return errors.Wrapf(err, "failed to exec genisoimage %s", out) + } + return +} diff --git a/internal/types/cloud_init_test.go b/internal/types/cloud_init_test.go new file mode 100644 index 0000000..2fea7b0 --- /dev/null +++ b/internal/types/cloud_init_test.go @@ -0,0 +1,53 @@ +package types + +import ( + "encoding/base64" + "fmt" + "strings" + "testing" + + "github.com/projecteru2/yavirt/pkg/test/assert" +) + +func TestGenContent(t *testing.T) { + cfg := &CloudInitConfig{ + Username: "root", + Password: "passwd", + } + user, _, _, err := cfg.GenFilesContent() + assert.Nil(t, err) + // it is ugly here, but using yaml lib is too complex here + assert.True(t, strings.Contains(user, "name: \"root\"")) + assert.True(t, strings.Contains(user, "plain_text_passwd: \"passwd\"")) + assert.False(t, strings.Contains(user, "- echo hello")) + assert.False(t, strings.Contains(user, "- path: foo")) + assert.False(t, strings.Contains(user, "content: ")) + + cfg.Commands = []string{"echo hello"} + cfg.Files = map[string][]byte{ + "foo": []byte("bar\nbar1"), + } + user, _, _, err = cfg.GenFilesContent() + assert.Nil(t, err) + assert.True(t, strings.Contains(user, "name: \"root\"")) + assert.True(t, strings.Contains(user, "plain_text_passwd: \"passwd\"")) + assert.True(t, strings.Contains(user, "- echo hello")) + assert.True(t, strings.Contains(user, "- path: foo")) + assert.True(t, strings.Contains(user, fmt.Sprintf("content: %s", base64.StdEncoding.EncodeToString([]byte("bar\nbar1"))))) +} + +func TestNetwork(t *testing.T) { + cfg := &CloudInitConfig{ + CIDR: "10.10.10.1/24", + DefaultGW: CloudInitGateway{ + IP: "10.10.10.111", + OnLink: true, + }, + } + _, _, network, err := cfg.GenFilesContent() 
+ assert.Nil(t, err) + // fmt.Printf("%s\n", network) + assert.True(t, strings.Contains(network, "10.10.10.1/24")) + assert.True(t, strings.Contains(network, "via: 10.10.10.111")) + assert.True(t, strings.Contains(network, "on-link: true")) +} diff --git a/internal/types/console.go b/internal/types/console.go new file mode 100644 index 0000000..534d007 --- /dev/null +++ b/internal/types/console.go @@ -0,0 +1,30 @@ +package types + +import ( + "io" + + "github.com/projecteru2/yavirt/pkg/libvirt" +) + +type Console interface { + io.ReadWriteCloser + Fd() int // need fd for epoll event +} + +// OpenConsoleFlags . +type OpenConsoleFlags struct { + libvirt.ConsoleFlags + Devname string + Commands []string +} + +// NewOpenConsoleFlags . +func NewOpenConsoleFlags(force, safe bool, cmds []string) OpenConsoleFlags { + return OpenConsoleFlags{ + ConsoleFlags: libvirt.ConsoleFlags{ + Force: force, + Safe: safe, + }, + Commands: cmds, + } +} diff --git a/internal/types/const.go b/internal/types/const.go new file mode 100644 index 0000000..2975d56 --- /dev/null +++ b/internal/types/const.go @@ -0,0 +1,9 @@ +package types + +const ( + PluginNameCPUMem = "cpumem" + PluginNameGPU = "gpu" + PluginNameRBD = "rbd" + PluginNameStorage = "storage" + PluginNameBandwidth = "bandwidth" +) diff --git a/internal/virt/types/distro.go b/internal/types/distro.go similarity index 100% rename from internal/virt/types/distro.go rename to internal/types/distro.go diff --git a/internal/types/domain.go b/internal/types/domain.go new file mode 100644 index 0000000..8d4f8f1 --- /dev/null +++ b/internal/types/domain.go @@ -0,0 +1,40 @@ +package types + +import "encoding/xml" + +type CustomDomainMetadata struct { + XMLName xml.Name `xml:"metadata"` + App App `xml:"app"` +} + +type App struct { + XMLName xml.Name `xml:"app"` + NS string `xml:"xmlns,attr"` + From string `xml:"from,attr"` + Owner AppOwner `xml:"owner"` + Name AppName `xml:"name"` + IP AppIP `xml:"ip"` + ID AppID `xml:"id"` +} + +type AppOwner struct { + XMLName xml.Name `xml:"owner"` + UserID string `xml:"id,attr"` + UserName string `xml:",chardata"` +} + +type AppName struct { + XMLName xml.Name `xml:"name"` + Name string `xml:",chardata"` +} + +type AppIP struct { + XMLName xml.Name `xml:"ip"` + IP string `xml:",chardata"` +} + +type AppID struct { + XMLName xml.Name `xml:"id"` + SID string `xml:"sid,attr"` + ID string `xml:",chardata"` +} diff --git a/internal/virt/types/error.go b/internal/types/error.go similarity index 100% rename from internal/virt/types/error.go rename to internal/types/error.go diff --git a/internal/types/event.go b/internal/types/event.go new file mode 100644 index 0000000..7f56d8f --- /dev/null +++ b/internal/types/event.go @@ -0,0 +1,39 @@ +package types + +import "time" + +const ( + DestroyOp Operator = "destroy" + DieOp Operator = "die" + StopOp Operator = "stop" + StartOp Operator = "start" + SuspendOp Operator = "suspend" + ResumeOp Operator = "resume" + CreateOp Operator = "create" + ExecuteOp Operator = "execute" + ResizeOp Operator = "resize" + ResetSysDiskOp Operator = "reset-sys-disk" + FSFreezeOP Operator = "fs-freeze" + FSThawOP Operator = "fs-thaw" + MiscOp Operator = "misc" + CreateSnapshotOp Operator = "create-snapshot" + CommitSnapshotOp Operator = "commit-snapshot" + RestoreSnapshotOp Operator = "restore-snapshot" +) + +const ( + EventTypeGuest = "guest" +) + +type Operator string + +func (op Operator) String() string { + return string(op) +} + +type Event struct { + ID string + Type string + Op Operator + Time 
time.Time +} diff --git a/internal/types/gpu.go b/internal/types/gpu.go new file mode 100644 index 0000000..6725146 --- /dev/null +++ b/internal/types/gpu.go @@ -0,0 +1,20 @@ +package types + +type GPUInfo struct { + Address string `json:"address" mapstructure:"address"` + Index int `json:"index" mapstructure:"index"` + // example value: "NVIDIA Corporation" + Vendor string `json:"vendor" mapstructure:"vendor"` + // example value: "GA104 [GeForce RTX 3070]" + Product string `json:"product" mapstructure:"product"` + + // NUMA NUMAInfo + NumaID string `json:"numa_id" mapstructure:"numa_id"` + + // Cores int `json:"cores" mapstructure:"cores"` + GMemory int64 `json:"gmemory" mapstructure:"gmemory"` +} + +type GPUEngineParams struct { + Addrs []string `json:"addrs" mapstructure:"addrs"` +} diff --git a/internal/virt/types/guest.go b/internal/types/guest.go similarity index 55% rename from internal/virt/types/guest.go rename to internal/types/guest.go index d84c735..3e9fbf0 100644 --- a/internal/virt/types/guest.go +++ b/internal/types/guest.go @@ -40,3 +40,34 @@ func ConvertGRPCCreateOptions(opts *pb.CreateGuestOptions) GuestCreateOption { } return ret } + +type GuestResizeOption struct { + ID string + CPU int + Mem int64 + Volumes []virttypes.Volume + Resources map[string][]byte +} + +func ConvertGRPCResizeOptions(opts *pb.ResizeGuestOptions) *GuestResizeOption { + ret := &GuestResizeOption{ + ID: opts.Id, + CPU: int(opts.Cpu), + Mem: opts.Memory, + Resources: opts.Resources, + } + ret.Volumes = make([]virttypes.Volume, len(opts.Volumes)) + for i, vol := range opts.Volumes { + ret.Volumes[i].Mount = vol.Mount + ret.Volumes[i].Capacity = vol.Capacity + ret.Volumes[i].IO = vol.Io + } + return ret +} + +type InitSysDiskArgs struct { + Image string `json:"image"` + Username string `json:"username"` + Password string `json:"password"` + Resources map[string][]byte `json:"resources"` +} diff --git a/internal/virt/types/guestfs.go b/internal/types/guestfs.go similarity index 100% rename from internal/virt/types/guestfs.go rename to internal/types/guestfs.go diff --git a/internal/types/templates/meta-data.yaml b/internal/types/templates/meta-data.yaml new file mode 100644 index 0000000..5ddf70e --- /dev/null +++ b/internal/types/templates/meta-data.yaml @@ -0,0 +1,3 @@ +dsmode: local +instance-id: {{ .instanceID }} +local-hostname: {{ .hostname }} \ No newline at end of file diff --git a/internal/types/templates/network-config.yaml b/internal/types/templates/network-config.yaml new file mode 100644 index 0000000..ea92997 --- /dev/null +++ b/internal/types/templates/network-config.yaml @@ -0,0 +1,22 @@ +version: 2 +ethernets: + eth0: + # cloud-init has BUG, On ubuntu if set-name is set, the network config will not take effect and we must run `netplan apply` manually + # set-name: eth0 + match: + name: en* + # dhcp4: false + # dhcp6: false + macaddress: {{ .mac }} + addresses: + - {{ .cidr }} + mtu: {{ .mtu }} + # gateway4: 169.254.1.1 + nameservers: + addresses: + - 8.8.8.8 + - 8.8.4.4 + routes: + - to: 0.0.0.0/0 + via: {{index .defaultGW "ip" }} + on-link: {{index .defaultGW "onLink" }} \ No newline at end of file diff --git a/internal/types/templates/user-data.yaml b/internal/types/templates/user-data.yaml new file mode 100644 index 0000000..2c13555 --- /dev/null +++ b/internal/types/templates/user-data.yaml @@ -0,0 +1,49 @@ +#cloud-config + +{{if gt (len .files) 0}} +write_files: + {{ range .files }} + - path: {{ .path }} + encoding: b64 + content: {{ .content }} + {{ end }} +{{ end }} + +packages: 
+ - qemu-guest-agent + +{{ if eq .username "root" }} +bootcmd: + - sed -i '/^\s*PermitRootLogin.*$/d' /etc/ssh/sshd_config + - echo "PermitRootLogin yes" >> /etc/ssh/sshd_config +{{ end }} + +runcmd: + - systemctl restart ssh + - systemctl start qemu-guest-agent +{{ range .commands }} + - {{ . }} +{{ end }} + +users: + - name: "{{ .username }}" + plain_text_passwd: "{{ .password }}" + lock_passwd: false + sudo: ALL=(ALL) NOPASSWD:ALL + groups: sudo + shell: /bin/bash + +{{ if ne .sshPubKey "" }} + ssh_authorized_keys: + - {{ .sshPubKey }} +{{ end }} + +{{ if eq .sshPubKey "" }} +ssh_pwauth: true +{{ end }} + +{{ if eq .username "root" }} +disable_root: false +{{ else }} +disable_root: true +{{ end }} \ No newline at end of file diff --git a/internal/utils/check.go b/internal/utils/check.go new file mode 100644 index 0000000..9cfcc7f --- /dev/null +++ b/internal/utils/check.go @@ -0,0 +1,85 @@ +package utils + +import ( + "context" + "net" + "net/http" + "time" + + "github.com/projecteru2/core/log" +) + +// CheckHTTP 检查一个workload的所有URL +// CheckHTTP 事实上一般也就一个 +func CheckHTTP(ctx context.Context, ID string, backends []string, code int, timeout time.Duration) bool { + logger := log.WithFunc("CheckHTTP").WithField("ID", ID).WithField("backends", backends).WithField("code", code) + for _, backend := range backends { + logger.Debug(ctx, "Check health via http") + if !checkOneURL(ctx, backend, code, timeout) { + logger.Info(ctx, "Check health failed via http") + return false + } + } + return true +} + +// CheckTCP 检查一个TCP +// 这里不支持ctx? +func CheckTCP(ctx context.Context, ID string, backends []string, timeout time.Duration) bool { + logger := log.WithFunc("CheckTCP").WithField("ID", ID).WithField("backends", backends) + for _, backend := range backends { + logger.Debug(ctx, "Check health via tcp") + conn, err := net.DialTimeout("tcp", backend, timeout) + if err != nil { + logger.Debug(ctx, "Check health failed via tcp") + return false + } + conn.Close() + } + return true +} + +// 偷来的函数 +// 谁要官方的context没有收录他 ¬ ¬ +func get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// 就先定义 [200, 500) 这个区间的 code 都算是成功吧 +func checkOneURL(ctx context.Context, url string, expectedCode int, timeout time.Duration) bool { + logger := log.WithFunc("checkOneURL").WithField("url", url) + var resp *http.Response + var err error + WithTimeout(ctx, timeout, func(ctx context.Context) { + resp, err = get(ctx, nil, url) //nolint + }) + if err != nil { + logger.Error(ctx, err, "Error when checking") + return false + } + defer resp.Body.Close() + if expectedCode == 0 { + return resp.StatusCode < 500 && resp.StatusCode >= 200 + } + if resp.StatusCode != expectedCode { + logger.Warnf(ctx, "Error when checking, expect %d, got %d", expectedCode, resp.StatusCode) + } + return resp.StatusCode == expectedCode +} diff --git a/internal/utils/check_test.go b/internal/utils/check_test.go new file mode 100644 index 0000000..47caf63 --- /dev/null +++ b/internal/utils/check_test.go @@ -0,0 +1,26 @@ +package utils + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestCheck(t *testing.T) { + go http.ListenAndServe(":12306", 
http.NotFoundHandler()) + time.Sleep(time.Second) + ctx, cancel := context.WithCancel(context.Background()) + assert.Equal(t, CheckHTTP(ctx, "", []string{"http://127.0.0.1:12306"}, 404, time.Second), true) + assert.Equal(t, CheckHTTP(ctx, "", []string{"http://127.0.0.1:12306"}, 0, time.Second), true) + assert.Equal(t, CheckHTTP(ctx, "", []string{"http://127.0.0.1:12306"}, 200, time.Second), false) + assert.Equal(t, CheckHTTP(ctx, "", []string{"http://127.0.0.1:12307"}, 200, time.Second), false) + + cancel() + assert.Equal(t, CheckHTTP(ctx, "", []string{"http://127.0.0.1:12306"}, 404, time.Second), false) + + assert.Equal(t, CheckTCP(ctx, "", []string{"127.0.0.1:12306"}, time.Second), true) + assert.Equal(t, CheckTCP(ctx, "", []string{"127.0.0.1:12307"}, time.Second), false) +} diff --git a/internal/utils/pool.go b/internal/utils/pool.go new file mode 100644 index 0000000..fd40d95 --- /dev/null +++ b/internal/utils/pool.go @@ -0,0 +1,15 @@ +package utils + +import ( + "github.com/panjf2000/ants/v2" +) + +// TODO configurableu +const size = 10000 + +// Pool . +var Pool *ants.Pool + +func init() { + Pool, _ = ants.NewPool(size, ants.WithNonblocking(true)) +} diff --git a/internal/utils/retry.go b/internal/utils/retry.go new file mode 100644 index 0000000..65ade1e --- /dev/null +++ b/internal/utils/retry.go @@ -0,0 +1,72 @@ +package utils + +import ( + "context" + "time" + + "github.com/projecteru2/core/log" +) + +// RetryTask . +type RetryTask struct { + ctx context.Context + cancel context.CancelFunc + Func func() error + MaxAttempts int +} + +// NewRetryTask . +func NewRetryTask(ctx context.Context, maxAttempts int, f func() error) *RetryTask { + // make sure to execute at least once + if maxAttempts < 1 { + maxAttempts = 1 + } + ctx, cancel := context.WithCancel(ctx) + return &RetryTask{ + ctx: ctx, + cancel: cancel, + MaxAttempts: maxAttempts, + Func: f, + } +} + +// Run start running retry task +func (r *RetryTask) Run(ctx context.Context) error { + logger := log.WithFunc("Run") + logger.Debug(ctx, "start") + defer r.Stop(ctx) + + var err error + interval := 1 + timer := time.NewTimer(0) + defer timer.Stop() + + for i := 0; i < r.MaxAttempts; i++ { + select { + case <-r.ctx.Done(): + logger.Debug(ctx, "abort") + return r.ctx.Err() + case <-timer.C: + err = r.Func() + if err == nil { + return nil + } + logger.Debugf(ctx, "will retry after %v seconds", interval) + timer.Reset(time.Duration(interval) * time.Second) + interval *= 2 + } + } + return err +} + +// Stop stops running task +func (r *RetryTask) Stop(context.Context) { + r.cancel() +} + +// BackoffRetry retries up to `maxAttempts` times, and the interval will grow exponentially +func BackoffRetry(ctx context.Context, maxAttempts int, f func() error) error { + retryTask := NewRetryTask(ctx, maxAttempts, f) + defer retryTask.Stop(ctx) + return retryTask.Run(ctx) +} diff --git a/internal/utils/retry_test.go b/internal/utils/retry_test.go new file mode 100644 index 0000000..4a000df --- /dev/null +++ b/internal/utils/retry_test.go @@ -0,0 +1,45 @@ +package utils + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBackoffRetry(t *testing.T) { + var errNotSuccess = errors.New("not success") + i := 0 + f := func() error { + i++ + if i < 4 { + return errNotSuccess + } + return nil + } + assert.Nil(t, BackoffRetry(context.Background(), 10, f)) + assert.Equal(t, 4, i) + + i = 0 + assert.Equal(t, errNotSuccess, BackoffRetry(context.Background(), 0, f)) + assert.Equal(t, 1, i) +} + 
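+// TestBackoffRetryWithCancel checks that BackoffRetry gives up with the context error once the context deadline expires before a retry succeeds.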
+func TestBackoffRetryWithCancel(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + var errNotSuccess = errors.New("not success") + i := 0 + f := func() error { + i++ + if i < 4 { + return errNotSuccess + } + return nil + } + assert.Equal(t, context.DeadlineExceeded, BackoffRetry(ctx, 10, f)) + assert.NotEqual(t, 4, i) +} diff --git a/internal/utils/rollback.go b/internal/utils/rollback.go new file mode 100644 index 0000000..2af686c --- /dev/null +++ b/internal/utils/rollback.go @@ -0,0 +1,57 @@ +package utils + +import ( + "context" + "sync" +) + +type RollbackFunc func() error + +type rollbackListEntry struct { + fn RollbackFunc + msg string +} + +type RollbackList struct { + mu sync.Mutex + List []rollbackListEntry +} + +type ctxType string + +const ( + rbKey ctxType = "rollbackList" +) + +func NewRollbackListContext(ctx context.Context) context.Context { + return context.WithValue(ctx, rbKey, &RollbackList{}) +} + +func GetRollbackListFromContext(ctx context.Context) *RollbackList { + v := ctx.Value(rbKey) + if v != nil { + return v.(*RollbackList) + } + return nil +} + +func (rl *RollbackList) Append(fn RollbackFunc, msg string) { + rl.mu.Lock() + defer rl.mu.Unlock() + rl.List = append(rl.List, rollbackListEntry{ + fn: fn, + msg: msg, + }) +} + +func (rl *RollbackList) Pop() (fn RollbackFunc, msg string) { + rl.mu.Lock() + defer rl.mu.Unlock() + n := len(rl.List) + if n > 0 { + entry := rl.List[n-1] + fn, msg = entry.fn, entry.msg + rl.List = rl.List[:n-1] + } + return +} diff --git a/internal/utils/rollback_test.go b/internal/utils/rollback_test.go new file mode 100644 index 0000000..4147827 --- /dev/null +++ b/internal/utils/rollback_test.go @@ -0,0 +1,22 @@ +package utils + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRollbacklList(t *testing.T) { + ctx := NewRollbackListContext(context.Background()) + rl := GetRollbackListFromContext(ctx) + rl.Append(func() error { return nil }, "1") + rl.Append(func() error { return nil }, "2") + rl2 := GetRollbackListFromContext(ctx) + fn, msg := rl2.Pop() + assert.Equal(t, msg, "2") + assert.Nil(t, fn()) + fn, msg = rl2.Pop() + assert.Equal(t, msg, "1") + assert.Nil(t, fn()) +} diff --git a/internal/utils/sh.go b/internal/utils/sh.go new file mode 100644 index 0000000..579057e --- /dev/null +++ b/internal/utils/sh.go @@ -0,0 +1,81 @@ +package utils + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/projecteru2/yavirt/pkg/sh" +) + +// CreateImage . +func CreateImage(ctx context.Context, fmt, path string, cap int64) error { + return sh.ExecContext(ctx, "qemu-img", "create", "-q", "-f", fmt, path, strconv.FormatInt(cap, 10)) +} + +func ResizeImage(ctx context.Context, path string, cap int64) error { + return sh.ExecContext(ctx, "qemu-img", "resize", path, strconv.FormatInt(cap, 10)) +} + +// AmplifyImage . +func AmplifyImage(ctx context.Context, path string, delta int64) error { + flag := fmt.Sprintf("%+d", delta) + return sh.ExecContext(ctx, "qemu-img", "resize", path, flag) +} + +// CommitImage . +func CommitImage(ctx context.Context, path string) error { + return sh.ExecContext(ctx, "qemu-img", "commit", path) +} + +// CreateSnapshot . +func CreateSnapshot(ctx context.Context, volPath string, newVolPath string) error { + return sh.ExecContext(ctx, "qemu-img", "create", "-f", "qcow2", "-F", "qcow2", newVolPath, "-b", volPath) +} + +// RebaseImage . 
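+// It runs "qemu-img rebase -b" to point volPath at backingVolPath as its new backing file.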
+func RebaseImage(ctx context.Context, volPath string, backingVolPath string) error { + return sh.ExecContext(ctx, "qemu-img", "rebase", "-b", backingVolPath, volPath) +} + +// Check . +func Check(ctx context.Context, volPath string) error { + return sh.ExecContext(ctx, "qemu-img", "check", volPath) +} + +// Repair . +func Repair(ctx context.Context, volPath string) error { + return sh.ExecContext(ctx, "qemu-img", "check", "-r", "all", volPath) +} + +// Write an image to a block device +func WriteBLK(ctx context.Context, imgPath string, device string, ignoreExist bool) error { + // two methods + // qemu-img convert -f qcow2 -O raw my-qcow2.img /dev/sdb + // qemu-img dd -f qcow2 -O raw bs=4M if=/vm-images/image.qcow2 of=/dev/sdd1 + // + // for Ceph RBD, use following command: + // qemu-img convert -f qcow2 -O raw debian_squeeze.qcow2 rbd:data/squeeze + err := sh.ExecContext(ctx, "qemu-img", "convert", "-f", "qcow2", "-O", "raw", imgPath, device) + if err == nil || (ignoreExist && strings.Contains(err.Error(), "error rbd create: File exists")) { + return nil + } + return err +} + +func DumpBLK(ctx context.Context, device string, imgPath string) error { + return sh.ExecContext(ctx, "qemu-img", "convert", "-f", "raw", "-O", "qcow2", device, imgPath) +} + +func ForceWriteBLK(ctx context.Context, imgPath string, device string) error { + // two methods + // qemu-img convert -f qcow2 -O raw my-qcow2.img /dev/sdb + // qemu-img dd -f qcow2 -O raw bs=4M if=/vm-images/image.qcow2 of=/dev/sdd1 + // + // for Ceph RBD, use following command: + // qemu-img convert -f qcow2 -O raw debian_squeeze.qcow2 rbd:data/squeeze + cmdStr := fmt.Sprintf("qemu-img dd -f qcow2 -O raw bs=4M if=%s of=%s --skip-create", imgPath, device) + cmds := strings.Split(cmdStr, " ") + return sh.ExecContext(ctx, cmds[0], cmds[1:]...) +} diff --git a/internal/utils/sync.go b/internal/utils/sync.go new file mode 100644 index 0000000..fe7900b --- /dev/null +++ b/internal/utils/sync.go @@ -0,0 +1,36 @@ +package utils + +import ( + "sync" +) + +// GroupCAS indicates cas locks which are grouped by keys. +type GroupCAS struct { + sync.Mutex + locks map[string]struct{} +} + +// NewGroupCAS . +func NewGroupCAS() *GroupCAS { + return &GroupCAS{ + locks: map[string]struct{}{}, + } +} + +// Acquire tries to acquire a cas lock. 
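+// It returns a release callback and true on success, or nil and false when the key is already held.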
+func (g *GroupCAS) Acquire(key string) (free func(), acquired bool) { + g.Lock() + defer g.Unlock() + if _, ok := g.locks[key]; ok { + return nil, false + } + + g.locks[key] = struct{}{} + free = func() { + g.Lock() + defer g.Unlock() + delete(g.locks, key) + } + + return free, true +} diff --git a/internal/utils/sync_test.go b/internal/utils/sync_test.go new file mode 100644 index 0000000..df1b406 --- /dev/null +++ b/internal/utils/sync_test.go @@ -0,0 +1,62 @@ +package utils + +import ( + "fmt" + "sync" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCAS(t *testing.T) { + key := "key" + cas := NewGroupCAS() + + free, acquired := cas.Acquire(key) + require.True(t, acquired) + require.NotNil(t, free) + + free1, acquired1 := cas.Acquire(key) + require.False(t, acquired1) + require.Nil(t, free1) + + free() + free1, acquired1 = cas.Acquire(key) + require.True(t, acquired1) + require.NotNil(t, free1) +} + +func TestCASConccurently(t *testing.T) { + var wg sync.WaitGroup + cas := NewGroupCAS() + + n := 5000 + key := "key" + var sum int32 + wg.Add(n) + + for i := 0; i < n; i++ { + go func(idx int) { + defer wg.Done() + + _, acq := cas.Acquire(fmt.Sprintf("%d", idx)) + require.True(t, acq) + + free, acquired := cas.Acquire(key) + t.Logf("idx: %d, acquired: %t", idx, acquired) + if !acquired { + return + } + + // makes sure that there're only one thread has been acquired. + require.Truef(t, atomic.CompareAndSwapInt32(&sum, 0, 1), "idx: %d, sum: %d", idx, atomic.LoadInt32(&sum)) + // marks there's no thread is acquired in advance. + require.True(t, atomic.CompareAndSwapInt32(&sum, 1, 0)) + + free() + }(i) + } + + wg.Wait() +} diff --git a/internal/utils/system.go b/internal/utils/system.go new file mode 100644 index 0000000..acb50e4 --- /dev/null +++ b/internal/utils/system.go @@ -0,0 +1,45 @@ +package utils + +import ( + "context" + "fmt" + "os" + + "github.com/projecteru2/core/log" + "github.com/shirou/gopsutil/process" +) + +func EnforceRoot() { + // Make sure the command is run with super user priviladges + if os.Getuid() != 0 { + fmt.Println("Need super user privileges: Operation not permitted") + os.Exit(1) + } +} +func PSContains(proc []string, procList []*process.Process) bool { + logger := log.WithFunc("psContains") + for _, p := range procList { + cmds, err := p.CmdlineSlice() + if err != nil { + // Failed to get CLI arguments for this process. + // Maybe it doesn't exist any more - move on to the next one. + logger.Debugf(context.TODO(), "Error getting CLI arguments: %s", err) + continue + } + var match bool + for i, p := range proc { + if i >= len(cmds) { + break + } else if cmds[i] == p { + match = true + } + } + + // If we got a match, return true. Otherwise, try the next + // process in the list. 
+ if match { + return true + } + } + return false +} diff --git a/internal/utils/util.go b/internal/utils/util.go new file mode 100644 index 0000000..23b1c6a --- /dev/null +++ b/internal/utils/util.go @@ -0,0 +1,61 @@ +package utils + +import ( + "context" + "crypto/rand" + "fmt" + "io" + "math" + "path/filepath" + "time" + + "github.com/projecteru2/core/log" + "github.com/projecteru2/libyavirt/types" + "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/pkg/utils" +) + +func VirtID(id string) string { + req := types.GuestReq{ID: id} + return req.VirtID() +} + +func NewCreateSessionFlock(id string) *utils.Flock { + var fn = fmt.Sprintf("guest_create_session_%s.flock", id) + var fpth = filepath.Join(configs.Conf.VirtFlockDir, fn) + return utils.NewFlock(fpth) +} + +// EnsureReaderClosed As the name says, +// blocks until the stream is empty, until we meet EOF +func EnsureReaderClosed(stream io.ReadCloser) { + if stream == nil { + return + } + if _, err := io.Copy(io.Discard, stream); err != nil { + log.Errorf(context.TODO(), err, "Empty stream failed") + } + _ = stream.Close() +} + +func RandomString(length int) string { + b := make([]byte, length+2) + _, _ = rand.Read(b) + return fmt.Sprintf("%x", b)[2 : length+2] +} + +// WithTimeout runs a function with given timeout +func WithTimeout(ctx context.Context, timeout time.Duration, f func(ctx2 context.Context)) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + f(ctx) +} + +// GetMaxAttemptsByTTL . +func GetMaxAttemptsByTTL(ttl int64) int { + // if selfmon is enabled, retry 5 times + if ttl < 1 { + return 5 + } + return int(math.Floor(math.Log2(float64(ttl)+1))) + 1 +} diff --git a/internal/virt/guest/manager/watcher.go b/internal/utils/watcher.go similarity index 63% rename from internal/virt/guest/manager/watcher.go rename to internal/utils/watcher.go index 84315f8..9d18d06 100644 --- a/internal/virt/guest/manager/watcher.go +++ b/internal/utils/watcher.go @@ -1,21 +1,21 @@ -package manager +package utils import ( + "context" "sync" - "github.com/projecteru2/yavirt/internal/virt/types" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/log" + "github.com/alphadose/haxmap" + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" + "github.com/projecteru2/yavirt/internal/types" "github.com/projecteru2/yavirt/pkg/utils" ) var ErrTooManyWatchers = errors.New("too many watchers") type Watchers struct { - sync.Mutex - index utils.AtomicInt64 - ws map[int64]*Watcher + wchMap *haxmap.Map[int64, *Watcher] events chan types.Event done struct { @@ -27,35 +27,38 @@ type Watchers struct { func NewWatchers() *Watchers { ws := &Watchers{ events: make(chan types.Event), - ws: map[int64]*Watcher{}, + wchMap: haxmap.New[int64, *Watcher](), } ws.done.C = make(chan struct{}) return ws } +func (ws *Watchers) Len() int { + return int(ws.wchMap.Len()) +} + func (ws *Watchers) Stop() { defer ws.done.Do(func() { close(ws.done.C) }) - ws.Lock() - defer ws.Unlock() - - for _, w := range ws.ws { - w.Stop() - } + ws.wchMap.ForEach(func(_ int64, v *Watcher) bool { + v.Stop() + return true + }) } -func (ws *Watchers) Run() { - defer log.Infof("watchers loop has done") +func (ws *Watchers) Run(ctx context.Context) { + defer log.Infof(ctx, "watchers loop has done") for { select { case event := <-ws.events: ws.Notify(event) - case <-ws.Done(): return + case <-ctx.Done(): + return } } } @@ -63,39 +66,37 @@ func (ws *Watchers) Run() { func (ws *Watchers) Watched(event types.Event) { select 
{ case ws.events <- event: - log.Infof("marks the event %v as watched", event) + log.Infof(context.TODO(), "marks the event %v as watched", event) case <-ws.Done(): - log.Infof("marks the event %v failed as the Watchers has done", event) + log.Infof(context.TODO(), "marks the event %v failed as the Watchers has done", event) } } func (ws *Watchers) Notify(event types.Event) { - defer log.Infof("watchers notification has done") + defer log.Infof(context.TODO(), "watchers notification has done") stopped := []int64{} - ws.Lock() - defer ws.Unlock() - - for _, w := range ws.ws { + ws.wchMap.ForEach(func(_ int64, wch *Watcher) bool { select { - case w.events <- event: + case wch.events <- event: // notified successfully. case <-ws.Done(): // It's really rare as there isn't an explicitly ws.Stop() calling now. - stopped = append(stopped, w.id) + stopped = append(stopped, wch.id) - case <-w.Done(): + case <-wch.Done(): // The Watcher has been stopped. - stopped = append(stopped, w.id) + stopped = append(stopped, wch.id) } - } + return true + }) // Reaps the watchers which have been stopped. for _, k := range stopped { - delete(ws.ws, k) + ws.wchMap.Del(k) } } @@ -104,13 +105,10 @@ func (ws *Watchers) Done() <-chan struct{} { } func (ws *Watchers) Get() (*Watcher, error) { - ws.Lock() - defer ws.Unlock() - w := NewWatcher() w.id = ws.index.Incr() - ws.ws[w.id] = w + ws.wchMap.Set(w.id, w) return w, nil } diff --git a/internal/virt/guest/manager/watcher_test.go b/internal/utils/watcher_test.go similarity index 92% rename from internal/virt/guest/manager/watcher_test.go rename to internal/utils/watcher_test.go index 1be7bb7..9f0162f 100644 --- a/internal/virt/guest/manager/watcher_test.go +++ b/internal/utils/watcher_test.go @@ -1,12 +1,13 @@ -package manager +package utils import ( + "context" "strconv" "sync" "testing" "time" - "github.com/projecteru2/yavirt/internal/virt/types" + "github.com/projecteru2/yavirt/internal/types" "github.com/projecteru2/yavirt/pkg/test/assert" "github.com/projecteru2/yavirt/pkg/utils" ) @@ -15,7 +16,7 @@ func TestWatchers_StopSingleOne(t *testing.T) { ws := NewWatchers() defer ws.Stop() - go ws.Run() + go ws.Run(context.Background()) var wg sync.WaitGroup defer wg.Wait() @@ -49,7 +50,7 @@ func TestWatchers_StopAll(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - ws.Run() + ws.Run(context.Background()) }() var sum utils.AtomicInt64 @@ -102,7 +103,7 @@ func TestWatchers_WatchedEvent(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - ws.Run() + ws.Run(context.Background()) }() var sum, recv utils.AtomicInt64 @@ -131,7 +132,7 @@ func TestWatchers_WatchedEvent(t *testing.T) { for { select { case event := <-watcher.Events(): - v, err := strconv.Atoi(event.Action) + v, err := strconv.Atoi(event.Op.String()) assert.NilErr(t, err) recv.Add(int64(v)) @@ -158,7 +159,7 @@ func TestWatchers_WatchedEvent(t *testing.T) { go func(action string) { defer nwg.Done() ws.Watched(types.Event{ - Action: action, + Op: types.Operator(action), }) }(strconv.Itoa(i)) } diff --git a/internal/ver/ver.go b/internal/ver/ver.go index 601b2e4..0ee4966 100644 --- a/internal/ver/ver.go +++ b/internal/ver/ver.go @@ -1,19 +1,28 @@ package ver -import "fmt" +import ( + "fmt" + "runtime" +) var ( - // Git commit - Git string - // Compile info. of golang itself. - Compile string - // Date of compiled - Date string + // NAME . + NAME = "Eru-agent" + // VERSION . + VERSION = "unknown" + // REVISION . + REVISION = "HEAD" + // BUILTAT . + BUILTAT = "now" ) -// Version . +// String . 
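+// It reports the version, git revision, build time, Go runtime version, and OS/Arch.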
func Version() string { - return fmt.Sprintf(`Git: %s -Compile: %s -Built: %s`, Git, Compile, Date) + version := "" + version += fmt.Sprintf("Version: %s\n", VERSION) + version += fmt.Sprintf("Git hash: %s\n", REVISION) + version += fmt.Sprintf("Built: %s\n", BUILTAT) + version += fmt.Sprintf("Golang version: %s\n", runtime.Version()) + version += fmt.Sprintf("OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) + return version } diff --git a/internal/virt/agent/agent.go b/internal/virt/agent/agent.go index 1e8242d..e30436f 100644 --- a/internal/virt/agent/agent.go +++ b/internal/virt/agent/agent.go @@ -6,6 +6,7 @@ import ( "github.com/projecteru2/yavirt/configs" "github.com/projecteru2/yavirt/internal/virt/agent/types" + "github.com/projecteru2/yavirt/pkg/libvirt" ) // Interface . @@ -20,15 +21,18 @@ type Interface interface { //nolint IsFolder(ctx context.Context, path string) (bool, error) RemoveAll(ctx context.Context, path string) error Grep(ctx context.Context, keyword, filepath string) (bool, error) - OpenFile(path, mode string) (handle int, err error) - CloseFile(handle int) error - FlushFile(handle int) error - ReadFile(handle int, p []byte) (int, bool, error) - WriteFile(handle int, buf []byte) error - SeekFile(handle int, offset int, whence int) (position int, eof bool, err error) - AppendLine(filepath string, p []byte) error + OpenFile(ctx context.Context, path, mode string) (handle int, err error) + CloseFile(ctx context.Context, handle int) error + FlushFile(ctx context.Context, handle int) error + ReadFile(ctx context.Context, handle int, p []byte) (int, bool, error) + WriteFile(ctx context.Context, handle int, buf []byte) error + SeekFile(ctx context.Context, handle int, offset int, whence int) (position int, eof bool, err error) + AppendLine(ctx context.Context, filepath string, p []byte) error Blkid(ctx context.Context, dev string) (string, error) GetDiskfree(ctx context.Context, mnt string) (*types.Diskfree, error) + FSFreezeAll(ctx context.Context) (int, error) + FSThawAll(ctx context.Context) (int, error) + FSFreezeStatus(ctx context.Context) (string, error) } // Agent . @@ -37,9 +41,9 @@ type Agent struct { } // New . 
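+// New builds an Agent that talks to the guest through the QEMU guest agent channel.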
-func New(sockfile string) *Agent { +func New(name string, virt libvirt.Libvirt) *Agent { return &Agent{ - qmp: newQmp(sockfile, true), + qmp: newQmp(name, virt, true), } } diff --git a/internal/virt/agent/agent_test.go b/internal/virt/agent/agent_test.go index 8c2434e..fd80dae 100644 --- a/internal/virt/agent/agent_test.go +++ b/internal/virt/agent/agent_test.go @@ -6,6 +6,7 @@ import ( "encoding/json" "os" "testing" + "time" "github.com/projecteru2/yavirt/internal/virt/agent/mocks" "github.com/projecteru2/yavirt/internal/virt/agent/types" @@ -14,7 +15,7 @@ import ( ) func TestAgent(t *testing.T) { - var agent = New("/tmp/virt/sock/guest-000001.sock") + var agent = New("00000000001", nil) var in = "ping" var out = []byte("pong") @@ -28,8 +29,8 @@ func TestAgent(t *testing.T) { var qmp = &mocks.Qmp{} defer qmp.AssertExpectations(t) - qmp.On("Exec", mock.Anything, mock.Anything, mock.Anything).Return([]byte(`{"pid":6735}`), nil).Once() - qmp.On("ExecStatus", 6735).Return(enc, nil).Once() + qmp.On("Exec", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte(`{"pid":6735}`), nil).Once() + qmp.On("ExecStatus", mock.Anything, 6735).Return(enc, nil).Once() agent.qmp = qmp var st = <-agent.ExecOutput(context.Background(), in) @@ -48,24 +49,26 @@ func TestFileReader(t *testing.T) { return } - var agent = New("/opt/yavirtd/sock/00000000010160627254733565100003.sock") - rd, err := OpenFile(agent, "/tmp/snmpss.cache", "r") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + var agent = New("000000001", nil) + rd, err := OpenFile(ctx, agent, "/tmp/snmpss.cache", "r") assert.NilErr(t, err) - defer rd.Close() + defer rd.Close(ctx) p := make([]byte, 10) - n, err := rd.Read(p) + n, err := rd.Read(ctx, p) assert.NilErr(t, err) assert.Equal(t, 10, n) t.Logf(" read /tmp/snmpss.cache: %s ", string(p)) - n, err = rd.Read(p) + n, err = rd.Read(ctx, p) assert.NilErr(t, err) assert.Equal(t, 10, n) t.Logf(" read /tmp/snmpss.cache: %s ", string(p)) - n, err = rd.Read(p) + n, err = rd.Read(ctx, p) assert.NilErr(t, err) assert.Equal(t, 9, n) t.Logf(" read /tmp/snmpss.cache: %s ", string(p)) diff --git a/internal/virt/agent/blk.go b/internal/virt/agent/blk.go index ea335fa..b5ea267 100644 --- a/internal/virt/agent/blk.go +++ b/internal/virt/agent/blk.go @@ -4,7 +4,8 @@ import ( "context" "regexp" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/pkg/terrors" ) var blkidRegex = regexp.MustCompile(`(?i)uuid="([-a-f0-9]{36})"`) @@ -14,12 +15,12 @@ func (a *Agent) Blkid(ctx context.Context, dev string) (string, error) { var st = <-a.ExecOutput(ctx, "blkid", dev) so, _, err := st.Stdio() if err != nil { - return "", errors.Trace(err) + return "", errors.Wrap(err, "") } var mat = blkidRegex.FindSubmatch(so) if mat == nil { - return "", errors.Annotatef(errors.ErrInvalidValue, "invalid blkid: %s", so) + return "", errors.Wrapf(terrors.ErrInvalidValue, "invalid blkid: %s", so) } return string(mat[1]), nil diff --git a/internal/virt/agent/df.go b/internal/virt/agent/df.go index e7dfcc0..5722c3c 100644 --- a/internal/virt/agent/df.go +++ b/internal/virt/agent/df.go @@ -6,8 +6,9 @@ import ( "strconv" "strings" + "github.com/cockroachdb/errors" "github.com/projecteru2/yavirt/internal/virt/agent/types" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" ) @@ -19,7 +20,7 @@ func (a *Agent) GetDiskfree(ctx context.Context, mnt 
string) (*types.Diskfree, e st := <-a.ExecOutput(ctx, "df", "-k", mnt) so, _, err := st.Stdio() if err != nil { - return nil, errors.Annotatef(err, "df %s failed", mnt) + return nil, errors.Wrapf(err, "df %s failed", mnt) } return a.parseDiskfree(string(so)) } @@ -30,7 +31,7 @@ func (a *Agent) parseDiskfree(so string) (*types.Diskfree, error) { fields := dfRegex.FindStringSubmatch(line) if len(fields) != 7 { - return nil, errors.Annotatef(errors.ErrInvalidValue, "invalid df: %s", so) + return nil, errors.Wrapf(terrors.ErrInvalidValue, "invalid df: %s", so) } df := &types.Diskfree{ @@ -38,10 +39,10 @@ func (a *Agent) parseDiskfree(so string) (*types.Diskfree, error) { Filesystem: fields[1], Mount: fields[6], } - df.Blocks, _ = utils.Atoi64(fields[2]) //nolint - df.UsedBlocks, _ = utils.Atoi64(fields[3]) //nolint - df.AvailableBlocks, _ = utils.Atoi64(fields[4]) //nolint - df.UsedPercent, _ = strconv.Atoi(fields[5]) //nolint + df.Blocks, _ = utils.Atoi64(fields[2]) + df.UsedBlocks, _ = utils.Atoi64(fields[3]) + df.AvailableBlocks, _ = utils.Atoi64(fields[4]) + df.UsedPercent, _ = strconv.Atoi(fields[5]) return df, nil } diff --git a/internal/virt/agent/exec.go b/internal/virt/agent/exec.go index eaa7810..53d55d4 100644 --- a/internal/virt/agent/exec.go +++ b/internal/virt/agent/exec.go @@ -4,10 +4,10 @@ import ( "context" "time" + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" "github.com/projecteru2/yavirt/configs" "github.com/projecteru2/yavirt/internal/virt/agent/types" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/log" ) // Grep . @@ -54,7 +54,8 @@ func (a *Agent) touch(ctx context.Context, filepath string) error { // Ping . func (a *Agent) Ping(ctx context.Context) error { - var st = <-a.exec(ctx, "echo", nil, true) + // linux and windows both have whoami. 
+ var st = <-a.exec(ctx, "whoami", nil, true) return st.Error() } @@ -62,7 +63,7 @@ func (a *Agent) Ping(ctx context.Context) error { func (a *Agent) ExecBatch(bat *configs.Batch) error { var ctx = context.Background() var cancel context.CancelFunc - if timeout := bat.Timeout.Duration(); timeout > 0 { + if timeout := bat.Timeout; timeout > 0 { ctx, cancel = context.WithTimeout(ctx, timeout) defer cancel() } @@ -71,7 +72,7 @@ func (a *Agent) ExecBatch(bat *configs.Batch) error { if runOnce { switch ran, err := a.isFile(ctx, bat.FlagFile); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "") case ran: return nil } @@ -79,7 +80,7 @@ func (a *Agent) ExecBatch(bat *configs.Batch) error { switch err := a.execBatch(ctx, bat); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "") case runOnce: return a.touch(ctx, bat.FlagFile) default: @@ -100,12 +101,12 @@ func (a *Agent) isFolder(ctx context.Context, path string) (bool, error) { func (a *Agent) execBatch(ctx context.Context, bat *configs.Batch) error { var cmds, err = bat.GetCommands() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } for prog, args := range cmds { if err := a.execRetry(ctx, prog, args, bat); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } } @@ -123,16 +124,16 @@ func (a *Agent) execRetry(ctx context.Context, prog string, args []string, bat * select { case <-ctx.Done(): - return errors.Annotatef(err, "run %s %s timeout", prog, args) + return errors.Wrapf(err, "run %s %s timeout", prog, args) default: } if !bat.Retry { - return errors.Annotatef(err, "run %s %s error", prog, args) + return errors.Wrapf(err, "run %s %s error", prog, args) } - log.ErrorStackf(err, "run %s %s error, retry it", prog, args) - time.Sleep(bat.Interval.Duration()) + log.WithFunc("execRetry").Errorf(ctx, err, "run %s %s error, retry it", prog, args) + time.Sleep(bat.Interval) } } @@ -151,7 +152,7 @@ func (a *Agent) exec(ctx context.Context, prog string, args []string, stdio bool var st types.ExecStatus var data []byte - data, st.Err = a.qmp.Exec(prog, args, stdio) + data, st.Err = a.qmp.Exec(ctx, prog, args, stdio) if st.Err != nil { done <- st return done @@ -180,11 +181,11 @@ func (a *Agent) exec(ctx context.Context, prog string, args []string, stdio bool select { case <-ctx.Done(): - st.Err = errors.Annotatef(ctx.Err(), "exec %s error", prog) + st.Err = errors.Wrapf(ctx.Err(), "exec %s error", prog) return case <-next.C: - if st = a.execStatus(ret.Pid, stdio); st.Err != nil || st.Exited { + if st = a.execStatus(ctx, ret.Pid, stdio); st.Err != nil || st.Exited { return } } @@ -194,15 +195,15 @@ func (a *Agent) exec(ctx context.Context, prog string, args []string, stdio bool return done } -func (a *Agent) execStatus(pid int, _ bool) (st types.ExecStatus) { - var data, err = a.qmp.ExecStatus(pid) +func (a *Agent) execStatus(ctx context.Context, pid int, _ bool) (st types.ExecStatus) { + var data, err = a.qmp.ExecStatus(ctx, pid) if err != nil { - st.Err = errors.Trace(err) + st.Err = errors.Wrap(err, "") return } if err := a.decode(data, &st); err != nil { - st.Err = errors.Trace(err) + st.Err = errors.Wrap(err, "") } st.Pid = pid diff --git a/internal/virt/agent/file.go b/internal/virt/agent/file.go index 55e1636..7ca76df 100644 --- a/internal/virt/agent/file.go +++ b/internal/virt/agent/file.go @@ -2,16 +2,18 @@ package agent import ( "bytes" + "context" "io" - "github.com/juju/errors" + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" ) // 
OpenFile . -func (a *Agent) OpenFile(path, mode string) (handle int, err error) { - buf, err := a.qmp.OpenFile(path, mode) +func (a *Agent) OpenFile(ctx context.Context, path, mode string) (handle int, err error) { + buf, err := a.qmp.OpenFile(ctx, path, mode) if err != nil { - return 0, errors.Trace(err) + return 0, errors.Wrap(err, "") } err = a.decode(buf, &handle) @@ -20,55 +22,56 @@ func (a *Agent) OpenFile(path, mode string) (handle int, err error) { } // CloseFile . -func (a *Agent) CloseFile(handle int) error { - return a.qmp.CloseFile(handle) +func (a *Agent) CloseFile(ctx context.Context, handle int) error { + return a.qmp.CloseFile(ctx, handle) } // FlushFile . -func (a *Agent) FlushFile(handle int) error { - return a.qmp.FlushFile(handle) +func (a *Agent) FlushFile(ctx context.Context, handle int) error { + return a.qmp.FlushFile(ctx, handle) } // ReadFile . -func (a *Agent) ReadFile(handle int, p []byte) (int, bool, error) { - return a.qmp.ReadFile(handle, p) +func (a *Agent) ReadFile(ctx context.Context, handle int, p []byte) (int, bool, error) { + return a.qmp.ReadFile(ctx, handle, p) } // SeekFile . -func (a *Agent) SeekFile(handle int, offset int, whence int) (position int, eof bool, err error) { - return a.qmp.SeekFile(handle, offset, whence) +func (a *Agent) SeekFile(ctx context.Context, handle int, offset int, whence int) (position int, eof bool, err error) { + return a.qmp.SeekFile(ctx, handle, offset, whence) } // WriteFile . -func (a *Agent) WriteFile(handle int, buf []byte) error { - return a.qmp.WriteFile(handle, buf) +func (a *Agent) WriteFile(ctx context.Context, handle int, buf []byte) error { + return a.qmp.WriteFile(ctx, handle, buf) } // AppendLine . -func (a *Agent) AppendLine(filepath string, p []byte) error { - file, err := OpenFile(a, filepath, "a") +func (a *Agent) AppendLine(ctx context.Context, filepath string, p []byte) error { + file, err := OpenFile(ctx, a, filepath, "a") if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - defer file.Close() + defer file.Close(ctx) - if _, err := file.WriteLine(p); err != nil { - return errors.Trace(err) + if _, err := file.WriteLine(ctx, p); err != nil { + return errors.Wrap(err, "") } - return file.Flush() + return file.Flush(ctx) } type File interface { - Open() (err error) - Flush() error - Close() error - Read(p []byte) (n int, err error) - WriteLine(p []byte) (int, error) - Write(p []byte) (n int, err error) - Seek(offset, whence int) (pos int, err error) - ReadAt(dest []byte, pos int) (n int, err error) - Tail(n int) ([]byte, error) + Open(ctx context.Context) (err error) + Flush(ctx context.Context) error + Close(ctx context.Context) error + Read(ctx context.Context, p []byte) (n int, err error) + WriteLine(ctx context.Context, p []byte) (int, error) + Write(ctx context.Context, p []byte) (n int, err error) + Seek(ctx context.Context, offset, whence int) (pos int, err error) + ReadAt(ctx context.Context, dest []byte, pos int) (n int, err error) + Tail(ctx context.Context, n int) ([]byte, error) + CopyTo(ctx context.Context, dst io.Writer) (int, error) } // file . @@ -81,73 +84,73 @@ type file struct { } // OpenFile . 
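+// It opens path inside the guest with the given mode and returns a File wrapper bound to the agent.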
-func OpenFile(agent *Agent, path, mode string) (File, error) { +func OpenFile(ctx context.Context, agent *Agent, path, mode string) (File, error) { var wr = &file{ agent: agent, path: path, mode: mode, } - if err := wr.Open(); err != nil { - return nil, errors.Trace(err) + if err := wr.Open(ctx); err != nil { + return nil, errors.Wrap(err, "") } return wr, nil } // Open . -func (w *file) Open() (err error) { - w.handle, err = w.agent.OpenFile(w.path, w.mode) +func (w *file) Open(ctx context.Context) (err error) { + w.handle, err = w.agent.OpenFile(ctx, w.path, w.mode) return } // Flush . -func (w *file) Flush() error { - return w.agent.FlushFile(w.handle) +func (w *file) Flush(ctx context.Context) error { + return w.agent.FlushFile(ctx, w.handle) } // Close . -func (w *file) Close() error { - return w.agent.CloseFile(w.handle) +func (w *file) Close(ctx context.Context) error { + return w.agent.CloseFile(ctx, w.handle) } // Read . -func (w *file) Read(p []byte) (n int, err error) { +func (w *file) Read(ctx context.Context, p []byte) (n int, err error) { if w.eof { return 0, io.EOF } - n, w.eof, err = w.agent.ReadFile(w.handle, p) + n, w.eof, err = w.agent.ReadFile(ctx, w.handle, p) return } // WriteLine . -func (w *file) WriteLine(p []byte) (int, error) { - return w.Write(append(p, '\n')) +func (w *file) WriteLine(ctx context.Context, p []byte) (int, error) { + return w.Write(ctx, append(p, '\n')) } -func (w *file) Write(p []byte) (n int, err error) { +func (w *file) Write(ctx context.Context, p []byte) (n int, err error) { if len(p) < 1 { return } - if err := w.agent.WriteFile(w.handle, p); err != nil { - return 0, errors.Trace(err) + if err := w.agent.WriteFile(ctx, w.handle, p); err != nil { + return 0, errors.Wrap(err, "") } return } // Seek . -func (w *file) Seek(offset, whence int) (pos int, err error) { - pos, w.eof, err = w.agent.SeekFile(w.handle, offset, whence) +func (w *file) Seek(ctx context.Context, offset, whence int) (pos int, err error) { + pos, w.eof, err = w.agent.SeekFile(ctx, w.handle, offset, whence) return } // ReadAt . -func (w *file) ReadAt(dest []byte, pos int) (n int, err error) { - _, err = w.Seek(pos, io.SeekStart) +func (w *file) ReadAt(ctx context.Context, dest []byte, pos int) (n int, err error) { + _, err = w.Seek(ctx, pos, io.SeekStart) if err != nil { return } @@ -155,23 +158,23 @@ func (w *file) ReadAt(dest []byte, pos int) (n int, err error) { return 0, io.EOF } - return w.Read(dest) + return w.Read(ctx, dest) } // Tail . 
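+// It reads backwards from the end of the file and returns at most the last n lines.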
-func (w *file) Tail(n int) ([]byte, error) { +func (w *file) Tail(ctx context.Context, n int) ([]byte, error) { if n < 1 { return nil, errors.New("not valid tail") } - size, err := w.Seek(0, io.SeekEnd) + size, err := w.Seek(ctx, 0, io.SeekEnd) if err != nil { return nil, err } if size == 1 { tmp := make([]byte, 1) - _, err = w.Read(tmp) + _, err = w.Read(ctx, tmp) return tmp, err } @@ -181,7 +184,7 @@ func (w *file) Tail(n int) ([]byte, error) { lineEnd := size cursor := make([]byte, 1) for i := size - 2; i >= 0; i-- { - if _, err = w.ReadAt(cursor, i); err != nil { + if _, err = w.ReadAt(ctx, cursor, i); err != nil { return nil, err } @@ -199,12 +202,12 @@ func (w *file) Tail(n int) ([]byte, error) { lineStart = 0 } - if _, err = w.Seek(lineStart, io.SeekStart); err != nil { + if _, err = w.Seek(ctx, lineStart, io.SeekStart); err != nil { return nil, err } newLine := make([]byte, lineEnd-lineStart) - if _, err = w.Read(newLine); err != nil { + if _, err = w.Read(ctx, newLine); err != nil { return nil, err } tmp = append(tmp, newLine) @@ -220,3 +223,30 @@ func (w *file) Tail(n int) ([]byte, error) { return buff.Bytes(), nil } + +func (w *file) CopyTo(ctx context.Context, dst io.Writer) (int, error) { + var total int + for { + buf := make([]byte, 65536) + nRead, err := w.Read(ctx, buf) + + if err != nil && err != io.EOF { + return total, errors.Wrap(err, "") + } + if nRead > 0 { + if bytes.Contains(buf[:nRead], []byte("^]")) { + log.WithFunc("CopyTo").Warnf(ctx, "[io.Scan] reader exited: %v", w) + return total, errors.New("[CopyTo] reader got ^]") + } + nWrite, err := dst.Write(buf[:nRead]) + if err != nil { + return total, errors.Wrap(err, "") + } + total += nWrite + } + if err == io.EOF { + break + } + } + return total, nil +} diff --git a/internal/virt/agent/freeze.go b/internal/virt/agent/freeze.go new file mode 100644 index 0000000..d66de11 --- /dev/null +++ b/internal/virt/agent/freeze.go @@ -0,0 +1,31 @@ +package agent + +import ( + "context" + + "github.com/cockroachdb/errors" +) + +func (a *Agent) FSFreezeAll(ctx context.Context) (int, error) { + nFS, err := a.qmp.FSFreezeAll(ctx) + if err != nil { + return 0, errors.Wrap(err, "") + } + return nFS, nil +} + +func (a *Agent) FSThawAll(ctx context.Context) (int, error) { + nFS, err := a.qmp.FSThawAll(ctx) + if err != nil { + return 0, errors.Wrap(err, "") + } + return nFS, nil +} + +func (a *Agent) FSFreezeStatus(ctx context.Context) (string, error) { + status, err := a.qmp.FSFreezeStatus(ctx) + if err != nil { + return "", errors.Wrap(err, "") + } + return status, nil +} diff --git a/internal/virt/agent/freeze_test.go b/internal/virt/agent/freeze_test.go new file mode 100644 index 0000000..eccb6e8 --- /dev/null +++ b/internal/virt/agent/freeze_test.go @@ -0,0 +1,31 @@ +package agent + +import ( + "context" + "testing" + + "github.com/projecteru2/yavirt/internal/virt/agent/mocks" + "github.com/projecteru2/yavirt/pkg/test/assert" +) + +func TestFreeze(t *testing.T) { + mockQmp := mocks.NewQmp(t) + ag := Agent{ + qmp: mockQmp, + } + + mockQmp.On("FSFreezeAll", context.Background()).Return(1, nil).Once() + nFS, err := ag.FSFreezeAll(context.Background()) + assert.Nil(t, err) + assert.Equal(t, 1, nFS) + + mockQmp.On("FSThawAll", context.Background()).Return(1, nil).Once() + nFS, err = ag.FSThawAll(context.Background()) + assert.Nil(t, err) + assert.Equal(t, 1, nFS) + + mockQmp.On("FSFreezeStatus", context.Background()).Return("freezed", nil).Once() + status, err := ag.FSFreezeStatus(context.Background()) + assert.Nil(t, err) + 
assert.Equal(t, "freezed", status) +} diff --git a/internal/virt/agent/mocks/File.go b/internal/virt/agent/mocks/File.go index c806f67..d9b643e 100644 --- a/internal/virt/agent/mocks/File.go +++ b/internal/virt/agent/mocks/File.go @@ -1,21 +1,26 @@ -// Code generated by mockery v2.26.1. DO NOT EDIT. +// Code generated by mockery v2.33.2. DO NOT EDIT. package mocks -import mock "github.com/stretchr/testify/mock" +import ( + context "context" + io "io" + + mock "github.com/stretchr/testify/mock" +) // File is an autogenerated mock type for the File type type File struct { mock.Mock } -// Close provides a mock function with given fields: -func (_m *File) Close() error { - ret := _m.Called() +// Close provides a mock function with given fields: ctx +func (_m *File) Close(ctx context.Context) error { + ret := _m.Called(ctx) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) } else { r0 = ret.Error(0) } @@ -23,13 +28,37 @@ func (_m *File) Close() error { return r0 } -// Flush provides a mock function with given fields: -func (_m *File) Flush() error { - ret := _m.Called() +// CopyTo provides a mock function with given fields: ctx, dst +func (_m *File) CopyTo(ctx context.Context, dst io.Writer) (int, error) { + ret := _m.Called(ctx, dst) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, io.Writer) (int, error)); ok { + return rf(ctx, dst) + } + if rf, ok := ret.Get(0).(func(context.Context, io.Writer) int); ok { + r0 = rf(ctx, dst) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, io.Writer) error); ok { + r1 = rf(ctx, dst) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Flush provides a mock function with given fields: ctx +func (_m *File) Flush(ctx context.Context) error { + ret := _m.Called(ctx) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) } else { r0 = ret.Error(0) } @@ -37,13 +66,13 @@ func (_m *File) Flush() error { return r0 } -// Open provides a mock function with given fields: -func (_m *File) Open() error { - ret := _m.Called() +// Open provides a mock function with given fields: ctx +func (_m *File) Open(ctx context.Context) error { + ret := _m.Called(ctx) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) } else { r0 = ret.Error(0) } @@ -51,23 +80,23 @@ func (_m *File) Open() error { return r0 } -// Read provides a mock function with given fields: p -func (_m *File) Read(p []byte) (int, error) { - ret := _m.Called(p) +// Read provides a mock function with given fields: ctx, p +func (_m *File) Read(ctx context.Context, p []byte) (int, error) { + ret := _m.Called(ctx, p) var r0 int var r1 error - if rf, ok := ret.Get(0).(func([]byte) (int, error)); ok { - return rf(p) + if rf, ok := ret.Get(0).(func(context.Context, []byte) (int, error)); ok { + return rf(ctx, p) } - if rf, ok := ret.Get(0).(func([]byte) int); ok { - r0 = rf(p) + if rf, ok := ret.Get(0).(func(context.Context, []byte) int); ok { + r0 = rf(ctx, p) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(p) + if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { + r1 = rf(ctx, p) } else { r1 = ret.Error(1) } @@ -75,23 +104,23 @@ func (_m *File) Read(p []byte) (int, error) { return r0, r1 } -// 
ReadAt provides a mock function with given fields: dest, pos -func (_m *File) ReadAt(dest []byte, pos int) (int, error) { - ret := _m.Called(dest, pos) +// ReadAt provides a mock function with given fields: ctx, dest, pos +func (_m *File) ReadAt(ctx context.Context, dest []byte, pos int) (int, error) { + ret := _m.Called(ctx, dest, pos) var r0 int var r1 error - if rf, ok := ret.Get(0).(func([]byte, int) (int, error)); ok { - return rf(dest, pos) + if rf, ok := ret.Get(0).(func(context.Context, []byte, int) (int, error)); ok { + return rf(ctx, dest, pos) } - if rf, ok := ret.Get(0).(func([]byte, int) int); ok { - r0 = rf(dest, pos) + if rf, ok := ret.Get(0).(func(context.Context, []byte, int) int); ok { + r0 = rf(ctx, dest, pos) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func([]byte, int) error); ok { - r1 = rf(dest, pos) + if rf, ok := ret.Get(1).(func(context.Context, []byte, int) error); ok { + r1 = rf(ctx, dest, pos) } else { r1 = ret.Error(1) } @@ -99,23 +128,23 @@ func (_m *File) ReadAt(dest []byte, pos int) (int, error) { return r0, r1 } -// Seek provides a mock function with given fields: offset, whence -func (_m *File) Seek(offset int, whence int) (int, error) { - ret := _m.Called(offset, whence) +// Seek provides a mock function with given fields: ctx, offset, whence +func (_m *File) Seek(ctx context.Context, offset int, whence int) (int, error) { + ret := _m.Called(ctx, offset, whence) var r0 int var r1 error - if rf, ok := ret.Get(0).(func(int, int) (int, error)); ok { - return rf(offset, whence) + if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok { + return rf(ctx, offset, whence) } - if rf, ok := ret.Get(0).(func(int, int) int); ok { - r0 = rf(offset, whence) + if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok { + r0 = rf(ctx, offset, whence) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func(int, int) error); ok { - r1 = rf(offset, whence) + if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok { + r1 = rf(ctx, offset, whence) } else { r1 = ret.Error(1) } @@ -123,25 +152,25 @@ func (_m *File) Seek(offset int, whence int) (int, error) { return r0, r1 } -// Tail provides a mock function with given fields: n -func (_m *File) Tail(n int) ([]byte, error) { - ret := _m.Called(n) +// Tail provides a mock function with given fields: ctx, n +func (_m *File) Tail(ctx context.Context, n int) ([]byte, error) { + ret := _m.Called(ctx, n) var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func(int) ([]byte, error)); ok { - return rf(n) + if rf, ok := ret.Get(0).(func(context.Context, int) ([]byte, error)); ok { + return rf(ctx, n) } - if rf, ok := ret.Get(0).(func(int) []byte); ok { - r0 = rf(n) + if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok { + r0 = rf(ctx, n) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) } } - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(n) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, n) } else { r1 = ret.Error(1) } @@ -149,23 +178,23 @@ func (_m *File) Tail(n int) ([]byte, error) { return r0, r1 } -// Write provides a mock function with given fields: p -func (_m *File) Write(p []byte) (int, error) { - ret := _m.Called(p) +// Write provides a mock function with given fields: ctx, p +func (_m *File) Write(ctx context.Context, p []byte) (int, error) { + ret := _m.Called(ctx, p) var r0 int var r1 error - if rf, ok := ret.Get(0).(func([]byte) (int, error)); ok { - return rf(p) + if rf, ok := 
ret.Get(0).(func(context.Context, []byte) (int, error)); ok { + return rf(ctx, p) } - if rf, ok := ret.Get(0).(func([]byte) int); ok { - r0 = rf(p) + if rf, ok := ret.Get(0).(func(context.Context, []byte) int); ok { + r0 = rf(ctx, p) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(p) + if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { + r1 = rf(ctx, p) } else { r1 = ret.Error(1) } @@ -173,23 +202,23 @@ func (_m *File) Write(p []byte) (int, error) { return r0, r1 } -// WriteLine provides a mock function with given fields: p -func (_m *File) WriteLine(p []byte) (int, error) { - ret := _m.Called(p) +// WriteLine provides a mock function with given fields: ctx, p +func (_m *File) WriteLine(ctx context.Context, p []byte) (int, error) { + ret := _m.Called(ctx, p) var r0 int var r1 error - if rf, ok := ret.Get(0).(func([]byte) (int, error)); ok { - return rf(p) + if rf, ok := ret.Get(0).(func(context.Context, []byte) (int, error)); ok { + return rf(ctx, p) } - if rf, ok := ret.Get(0).(func([]byte) int); ok { - r0 = rf(p) + if rf, ok := ret.Get(0).(func(context.Context, []byte) int); ok { + r0 = rf(ctx, p) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(p) + if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { + r1 = rf(ctx, p) } else { r1 = ret.Error(1) } @@ -197,13 +226,12 @@ func (_m *File) WriteLine(p []byte) (int, error) { return r0, r1 } -type mockConstructorTestingTNewFile interface { +// NewFile creates a new instance of File. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFile(t interface { mock.TestingT Cleanup(func()) -} - -// NewFile creates a new instance of File. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewFile(t mockConstructorTestingTNewFile) *File { +}) *File { mock := &File{} mock.Mock.Test(t) diff --git a/internal/virt/agent/mocks/Interface.go b/internal/virt/agent/mocks/Interface.go index 41e3766..a4e670c 100644 --- a/internal/virt/agent/mocks/Interface.go +++ b/internal/virt/agent/mocks/Interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.26.1. DO NOT EDIT. +// Code generated by mockery v2.33.2. DO NOT EDIT. 
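
Editor's sketch for the new CopyTo method shown earlier in this patch: a stdlib-only version of the same chunked read loop, stopping cleanly on io.EOF and bailing out when the "^]" escape sequence appears. The plain io.Reader stands in for the context-aware file.Read; it is an illustration, not the project's code.

// copy_sketch.go - minimal stand-in for file.CopyTo, stdlib only.
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"
)

func copyUntilEscape(dst io.Writer, src io.Reader) (int, error) {
	var total int
	buf := make([]byte, 64*1024)
	for {
		n, err := src.Read(buf)
		if n > 0 {
			// Same guard as CopyTo: treat "^]" in the stream as a console exit.
			if bytes.Contains(buf[:n], []byte("^]")) {
				return total, errors.New("reader got ^]")
			}
			w, werr := dst.Write(buf[:n])
			total += w
			if werr != nil {
				return total, werr
			}
		}
		if err == io.EOF {
			return total, nil
		}
		if err != nil {
			return total, err
		}
	}
}

func main() {
	var out bytes.Buffer
	n, err := copyUntilEscape(&out, strings.NewReader("console output\n"))
	fmt.Println(n, err, out.String())
}
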
package mocks @@ -17,13 +17,13 @@ type Interface struct { mock.Mock } -// AppendLine provides a mock function with given fields: filepath, p -func (_m *Interface) AppendLine(filepath string, p []byte) error { - ret := _m.Called(filepath, p) +// AppendLine provides a mock function with given fields: ctx, filepath, p +func (_m *Interface) AppendLine(ctx context.Context, filepath string, p []byte) error { + ret := _m.Called(ctx, filepath, p) var r0 error - if rf, ok := ret.Get(0).(func(string, []byte) error); ok { - r0 = rf(filepath, p) + if rf, ok := ret.Get(0).(func(context.Context, string, []byte) error); ok { + r0 = rf(ctx, filepath, p) } else { r0 = ret.Error(0) } @@ -69,13 +69,13 @@ func (_m *Interface) Close() error { return r0 } -// CloseFile provides a mock function with given fields: handle -func (_m *Interface) CloseFile(handle int) error { - ret := _m.Called(handle) +// CloseFile provides a mock function with given fields: ctx, handle +func (_m *Interface) CloseFile(ctx context.Context, handle int) error { + ret := _m.Called(ctx, handle) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(handle) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, handle) } else { r0 = ret.Error(0) } @@ -143,13 +143,85 @@ func (_m *Interface) ExecOutput(ctx context.Context, prog string, args ...string return r0 } -// FlushFile provides a mock function with given fields: handle -func (_m *Interface) FlushFile(handle int) error { - ret := _m.Called(handle) +// FSFreezeAll provides a mock function with given fields: ctx +func (_m *Interface) FSFreezeAll(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FSFreezeStatus provides a mock function with given fields: ctx +func (_m *Interface) FSFreezeStatus(ctx context.Context) (string, error) { + ret := _m.Called(ctx) + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) string); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FSThawAll provides a mock function with given fields: ctx +func (_m *Interface) FSThawAll(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FlushFile provides a mock function with given fields: ctx, handle +func (_m *Interface) FlushFile(ctx context.Context, handle int) error { + ret := _m.Called(ctx, handle) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(handle) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, handle) } else { r0 = ret.Error(0) } @@ -255,23 +327,23 @@ func (_m *Interface) 
IsFolder(ctx context.Context, path string) (bool, error) { return r0, r1 } -// OpenFile provides a mock function with given fields: path, mode -func (_m *Interface) OpenFile(path string, mode string) (int, error) { - ret := _m.Called(path, mode) +// OpenFile provides a mock function with given fields: ctx, path, mode +func (_m *Interface) OpenFile(ctx context.Context, path string, mode string) (int, error) { + ret := _m.Called(ctx, path, mode) var r0 int var r1 error - if rf, ok := ret.Get(0).(func(string, string) (int, error)); ok { - return rf(path, mode) + if rf, ok := ret.Get(0).(func(context.Context, string, string) (int, error)); ok { + return rf(ctx, path, mode) } - if rf, ok := ret.Get(0).(func(string, string) int); ok { - r0 = rf(path, mode) + if rf, ok := ret.Get(0).(func(context.Context, string, string) int); ok { + r0 = rf(ctx, path, mode) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(path, mode) + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, path, mode) } else { r1 = ret.Error(1) } @@ -293,30 +365,30 @@ func (_m *Interface) Ping(ctx context.Context) error { return r0 } -// ReadFile provides a mock function with given fields: handle, p -func (_m *Interface) ReadFile(handle int, p []byte) (int, bool, error) { - ret := _m.Called(handle, p) +// ReadFile provides a mock function with given fields: ctx, handle, p +func (_m *Interface) ReadFile(ctx context.Context, handle int, p []byte) (int, bool, error) { + ret := _m.Called(ctx, handle, p) var r0 int var r1 bool var r2 error - if rf, ok := ret.Get(0).(func(int, []byte) (int, bool, error)); ok { - return rf(handle, p) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) (int, bool, error)); ok { + return rf(ctx, handle, p) } - if rf, ok := ret.Get(0).(func(int, []byte) int); ok { - r0 = rf(handle, p) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) int); ok { + r0 = rf(ctx, handle, p) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func(int, []byte) bool); ok { - r1 = rf(handle, p) + if rf, ok := ret.Get(1).(func(context.Context, int, []byte) bool); ok { + r1 = rf(ctx, handle, p) } else { r1 = ret.Get(1).(bool) } - if rf, ok := ret.Get(2).(func(int, []byte) error); ok { - r2 = rf(handle, p) + if rf, ok := ret.Get(2).(func(context.Context, int, []byte) error); ok { + r2 = rf(ctx, handle, p) } else { r2 = ret.Error(2) } @@ -338,30 +410,30 @@ func (_m *Interface) RemoveAll(ctx context.Context, path string) error { return r0 } -// SeekFile provides a mock function with given fields: handle, offset, whence -func (_m *Interface) SeekFile(handle int, offset int, whence int) (int, bool, error) { - ret := _m.Called(handle, offset, whence) +// SeekFile provides a mock function with given fields: ctx, handle, offset, whence +func (_m *Interface) SeekFile(ctx context.Context, handle int, offset int, whence int) (int, bool, error) { + ret := _m.Called(ctx, handle, offset, whence) var r0 int var r1 bool var r2 error - if rf, ok := ret.Get(0).(func(int, int, int) (int, bool, error)); ok { - return rf(handle, offset, whence) + if rf, ok := ret.Get(0).(func(context.Context, int, int, int) (int, bool, error)); ok { + return rf(ctx, handle, offset, whence) } - if rf, ok := ret.Get(0).(func(int, int, int) int); ok { - r0 = rf(handle, offset, whence) + if rf, ok := ret.Get(0).(func(context.Context, int, int, int) int); ok { + r0 = rf(ctx, handle, offset, whence) } else { r0 = ret.Get(0).(int) } - if rf, ok := 
ret.Get(1).(func(int, int, int) bool); ok { - r1 = rf(handle, offset, whence) + if rf, ok := ret.Get(1).(func(context.Context, int, int, int) bool); ok { + r1 = rf(ctx, handle, offset, whence) } else { r1 = ret.Get(1).(bool) } - if rf, ok := ret.Get(2).(func(int, int, int) error); ok { - r2 = rf(handle, offset, whence) + if rf, ok := ret.Get(2).(func(context.Context, int, int, int) error); ok { + r2 = rf(ctx, handle, offset, whence) } else { r2 = ret.Error(2) } @@ -383,13 +455,13 @@ func (_m *Interface) Touch(ctx context.Context, filepath string) error { return r0 } -// WriteFile provides a mock function with given fields: handle, buf -func (_m *Interface) WriteFile(handle int, buf []byte) error { - ret := _m.Called(handle, buf) +// WriteFile provides a mock function with given fields: ctx, handle, buf +func (_m *Interface) WriteFile(ctx context.Context, handle int, buf []byte) error { + ret := _m.Called(ctx, handle, buf) var r0 error - if rf, ok := ret.Get(0).(func(int, []byte) error); ok { - r0 = rf(handle, buf) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok { + r0 = rf(ctx, handle, buf) } else { r0 = ret.Error(0) } @@ -397,13 +469,12 @@ func (_m *Interface) WriteFile(handle int, buf []byte) error { return r0 } -type mockConstructorTestingTNewInterface interface { +// NewInterface creates a new instance of Interface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewInterface(t interface { mock.TestingT Cleanup(func()) -} - -// NewInterface creates a new instance of Interface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewInterface(t mockConstructorTestingTNewInterface) *Interface { +}) *Interface { mock := &Interface{} mock.Mock.Test(t) diff --git a/internal/virt/agent/mocks/Qmp.go b/internal/virt/agent/mocks/Qmp.go index cdb8bce..47447cd 100644 --- a/internal/virt/agent/mocks/Qmp.go +++ b/internal/virt/agent/mocks/Qmp.go @@ -1,8 +1,12 @@ -// Code generated by mockery v2.26.1. DO NOT EDIT. +// Code generated by mockery v2.33.2. DO NOT EDIT. 
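
Editor's sketch of the bracket the new FSFreezeAll/FSThawAll agent calls are intended for: quiesce guest filesystems, do the snapshot work, and always thaw again. The local freezer interface and fakeGA type are illustrative stand-ins for the agent Interface above, not part of the patch.

// freeze_sketch.go - freeze, work, thaw-with-defer pattern.
package main

import (
	"context"
	"fmt"
)

type freezer interface {
	FSFreezeAll(ctx context.Context) (int, error)
	FSThawAll(ctx context.Context) (int, error)
}

func withFrozenFS(ctx context.Context, ga freezer, work func(context.Context) error) error {
	n, err := ga.FSFreezeAll(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("froze %d filesystems\n", n)
	// Thaw no matter what happens below; a guest left frozen blocks on writes.
	defer func() {
		if _, err := ga.FSThawAll(ctx); err != nil {
			fmt.Println("thaw failed:", err)
		}
	}()
	return work(ctx)
}

type fakeGA struct{}

func (fakeGA) FSFreezeAll(context.Context) (int, error) { return 2, nil }
func (fakeGA) FSThawAll(context.Context) (int, error)   { return 2, nil }

func main() {
	_ = withFrozenFS(context.Background(), fakeGA{}, func(context.Context) error {
		fmt.Println("snapshot disks here")
		return nil
	})
}
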
package mocks -import mock "github.com/stretchr/testify/mock" +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) // Qmp is an autogenerated mock type for the Qmp type type Qmp struct { @@ -23,13 +27,13 @@ func (_m *Qmp) Close() error { return r0 } -// CloseFile provides a mock function with given fields: handle -func (_m *Qmp) CloseFile(handle int) error { - ret := _m.Called(handle) +// CloseFile provides a mock function with given fields: ctx, handle +func (_m *Qmp) CloseFile(ctx context.Context, handle int) error { + ret := _m.Called(ctx, handle) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(handle) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, handle) } else { r0 = ret.Error(0) } @@ -37,25 +41,25 @@ func (_m *Qmp) CloseFile(handle int) error { return r0 } -// Exec provides a mock function with given fields: cmd, args, stdio -func (_m *Qmp) Exec(cmd string, args []string, stdio bool) ([]byte, error) { - ret := _m.Called(cmd, args, stdio) +// Exec provides a mock function with given fields: ctx, cmd, args, stdio +func (_m *Qmp) Exec(ctx context.Context, cmd string, args []string, stdio bool) ([]byte, error) { + ret := _m.Called(ctx, cmd, args, stdio) var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func(string, []string, bool) ([]byte, error)); ok { - return rf(cmd, args, stdio) + if rf, ok := ret.Get(0).(func(context.Context, string, []string, bool) ([]byte, error)); ok { + return rf(ctx, cmd, args, stdio) } - if rf, ok := ret.Get(0).(func(string, []string, bool) []byte); ok { - r0 = rf(cmd, args, stdio) + if rf, ok := ret.Get(0).(func(context.Context, string, []string, bool) []byte); ok { + r0 = rf(ctx, cmd, args, stdio) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) } } - if rf, ok := ret.Get(1).(func(string, []string, bool) error); ok { - r1 = rf(cmd, args, stdio) + if rf, ok := ret.Get(1).(func(context.Context, string, []string, bool) error); ok { + r1 = rf(ctx, cmd, args, stdio) } else { r1 = ret.Error(1) } @@ -63,25 +67,121 @@ func (_m *Qmp) Exec(cmd string, args []string, stdio bool) ([]byte, error) { return r0, r1 } -// ExecStatus provides a mock function with given fields: pid -func (_m *Qmp) ExecStatus(pid int) ([]byte, error) { - ret := _m.Called(pid) +// ExecStatus provides a mock function with given fields: ctx, pid +func (_m *Qmp) ExecStatus(ctx context.Context, pid int) ([]byte, error) { + ret := _m.Called(ctx, pid) var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func(int) ([]byte, error)); ok { - return rf(pid) + if rf, ok := ret.Get(0).(func(context.Context, int) ([]byte, error)); ok { + return rf(ctx, pid) } - if rf, ok := ret.Get(0).(func(int) []byte); ok { - r0 = rf(pid) + if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok { + r0 = rf(ctx, pid) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) } } - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(pid) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, pid) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FSFreezeAll provides a mock function with given fields: ctx +func (_m *Qmp) FSFreezeAll(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); 
ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FSFreezeList provides a mock function with given fields: ctx, mountpoints +func (_m *Qmp) FSFreezeList(ctx context.Context, mountpoints []string) (int, error) { + ret := _m.Called(ctx, mountpoints) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []string) (int, error)); ok { + return rf(ctx, mountpoints) + } + if rf, ok := ret.Get(0).(func(context.Context, []string) int); ok { + r0 = rf(ctx, mountpoints) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { + r1 = rf(ctx, mountpoints) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FSFreezeStatus provides a mock function with given fields: ctx +func (_m *Qmp) FSFreezeStatus(ctx context.Context) (string, error) { + ret := _m.Called(ctx) + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) string); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FSThawAll provides a mock function with given fields: ctx +func (_m *Qmp) FSThawAll(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -89,13 +189,13 @@ func (_m *Qmp) ExecStatus(pid int) ([]byte, error) { return r0, r1 } -// FlushFile provides a mock function with given fields: handle -func (_m *Qmp) FlushFile(handle int) error { - ret := _m.Called(handle) +// FlushFile provides a mock function with given fields: ctx, handle +func (_m *Qmp) FlushFile(ctx context.Context, handle int) error { + ret := _m.Called(ctx, handle) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(handle) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, handle) } else { r0 = ret.Error(0) } @@ -103,25 +203,39 @@ func (_m *Qmp) FlushFile(handle int) error { return r0 } -// OpenFile provides a mock function with given fields: path, mode -func (_m *Qmp) OpenFile(path string, mode string) ([]byte, error) { - ret := _m.Called(path, mode) +// GetName provides a mock function with given fields: +func (_m *Qmp) GetName() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// OpenFile provides a mock function with given fields: ctx, path, mode +func (_m *Qmp) OpenFile(ctx context.Context, path string, mode string) ([]byte, error) { + ret := _m.Called(ctx, path, mode) var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { - return rf(path, mode) + if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]byte, error)); ok { + return rf(ctx, path, mode) } - if rf, ok := ret.Get(0).(func(string, string) []byte); ok { - r0 = rf(path, mode) + if rf, ok := ret.Get(0).(func(context.Context, string, string) []byte); ok { + r0 = rf(ctx, path, mode) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) } } - if rf, ok := 
ret.Get(1).(func(string, string) error); ok { - r1 = rf(path, mode) + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, path, mode) } else { r1 = ret.Error(1) } @@ -129,30 +243,30 @@ func (_m *Qmp) OpenFile(path string, mode string) ([]byte, error) { return r0, r1 } -// ReadFile provides a mock function with given fields: handle, p -func (_m *Qmp) ReadFile(handle int, p []byte) (int, bool, error) { - ret := _m.Called(handle, p) +// ReadFile provides a mock function with given fields: ctx, handle, p +func (_m *Qmp) ReadFile(ctx context.Context, handle int, p []byte) (int, bool, error) { + ret := _m.Called(ctx, handle, p) var r0 int var r1 bool var r2 error - if rf, ok := ret.Get(0).(func(int, []byte) (int, bool, error)); ok { - return rf(handle, p) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) (int, bool, error)); ok { + return rf(ctx, handle, p) } - if rf, ok := ret.Get(0).(func(int, []byte) int); ok { - r0 = rf(handle, p) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) int); ok { + r0 = rf(ctx, handle, p) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func(int, []byte) bool); ok { - r1 = rf(handle, p) + if rf, ok := ret.Get(1).(func(context.Context, int, []byte) bool); ok { + r1 = rf(ctx, handle, p) } else { r1 = ret.Get(1).(bool) } - if rf, ok := ret.Get(2).(func(int, []byte) error); ok { - r2 = rf(handle, p) + if rf, ok := ret.Get(2).(func(context.Context, int, []byte) error); ok { + r2 = rf(ctx, handle, p) } else { r2 = ret.Error(2) } @@ -160,30 +274,30 @@ func (_m *Qmp) ReadFile(handle int, p []byte) (int, bool, error) { return r0, r1, r2 } -// SeekFile provides a mock function with given fields: handle, offset, whence -func (_m *Qmp) SeekFile(handle int, offset int, whence int) (int, bool, error) { - ret := _m.Called(handle, offset, whence) +// SeekFile provides a mock function with given fields: ctx, handle, offset, whence +func (_m *Qmp) SeekFile(ctx context.Context, handle int, offset int, whence int) (int, bool, error) { + ret := _m.Called(ctx, handle, offset, whence) var r0 int var r1 bool var r2 error - if rf, ok := ret.Get(0).(func(int, int, int) (int, bool, error)); ok { - return rf(handle, offset, whence) + if rf, ok := ret.Get(0).(func(context.Context, int, int, int) (int, bool, error)); ok { + return rf(ctx, handle, offset, whence) } - if rf, ok := ret.Get(0).(func(int, int, int) int); ok { - r0 = rf(handle, offset, whence) + if rf, ok := ret.Get(0).(func(context.Context, int, int, int) int); ok { + r0 = rf(ctx, handle, offset, whence) } else { r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func(int, int, int) bool); ok { - r1 = rf(handle, offset, whence) + if rf, ok := ret.Get(1).(func(context.Context, int, int, int) bool); ok { + r1 = rf(ctx, handle, offset, whence) } else { r1 = ret.Get(1).(bool) } - if rf, ok := ret.Get(2).(func(int, int, int) error); ok { - r2 = rf(handle, offset, whence) + if rf, ok := ret.Get(2).(func(context.Context, int, int, int) error); ok { + r2 = rf(ctx, handle, offset, whence) } else { r2 = ret.Error(2) } @@ -191,13 +305,13 @@ func (_m *Qmp) SeekFile(handle int, offset int, whence int) (int, bool, error) { return r0, r1, r2 } -// WriteFile provides a mock function with given fields: handle, buf -func (_m *Qmp) WriteFile(handle int, buf []byte) error { - ret := _m.Called(handle, buf) +// WriteFile provides a mock function with given fields: ctx, handle, buf +func (_m *Qmp) WriteFile(ctx context.Context, handle int, buf []byte) error { + ret := 
_m.Called(ctx, handle, buf) var r0 error - if rf, ok := ret.Get(0).(func(int, []byte) error); ok { - r0 = rf(handle, buf) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok { + r0 = rf(ctx, handle, buf) } else { r0 = ret.Error(0) } @@ -205,13 +319,12 @@ func (_m *Qmp) WriteFile(handle int, buf []byte) error { return r0 } -type mockConstructorTestingTNewQmp interface { +// NewQmp creates a new instance of Qmp. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewQmp(t interface { mock.TestingT Cleanup(func()) -} - -// NewQmp creates a new instance of Qmp. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewQmp(t mockConstructorTestingTNewQmp) *Qmp { +}) *Qmp { mock := &Qmp{} mock.Mock.Test(t) diff --git a/internal/virt/agent/parted.go b/internal/virt/agent/parted.go index a257af8..3bcc3d4 100644 --- a/internal/virt/agent/parted.go +++ b/internal/virt/agent/parted.go @@ -4,7 +4,8 @@ import ( "context" "regexp" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" ) @@ -30,7 +31,7 @@ func (p Parted) GetSize(ctx context.Context) (int64, error) { st := <-p.ga.ExecOutput(ctx, "parted", "-s", p.dev, "unit", "B", "p") so, _, err := st.Stdio() if err != nil { - return 0, errors.Annotatef(err, "parted %s print failed", p.dev) + return 0, errors.Wrapf(err, "parted %s print failed", p.dev) } return p.getSize(string(so)) } @@ -38,7 +39,7 @@ func (p Parted) GetSize(ctx context.Context) (int64, error) { func (p Parted) getSize(so string) (int64, error) { mat := printSizeRegex.FindStringSubmatch(so) if len(mat) != 2 { - return 0, errors.Annotatef(errors.ErrInvalidValue, "invalid parted: %s", so) + return 0, errors.Wrapf(terrors.ErrInvalidValue, "invalid parted: %s", so) } return utils.Atoi64(mat[1]) diff --git a/internal/virt/agent/qmp.go b/internal/virt/agent/qmp.go index f815bfb..cb40fff 100644 --- a/internal/virt/agent/qmp.go +++ b/internal/virt/agent/qmp.go @@ -1,34 +1,41 @@ package agent import ( - "bufio" + "context" "encoding/base64" "encoding/json" "fmt" "net" + "strconv" "sync" - "github.com/projecteru2/yavirt/configs" - "github.com/projecteru2/yavirt/pkg/errors" - "github.com/projecteru2/yavirt/pkg/log" + "github.com/cockroachdb/errors" + "github.com/projecteru2/core/log" "github.com/projecteru2/yavirt/pkg/utils" + + "github.com/projecteru2/yavirt/pkg/libvirt" ) const maxBytesPerRead = 32 * utils.MB // ref https://www.qemu.org/docs/master/interop/qemu-ga-ref.html // Qmp . 
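
Editor's sketch of the error-handling convention this patch migrates to, as seen in the parted.go hunk above: github.com/cockroachdb/errors for wrapping (errors.Wrap/Wrapf replacing errors.Trace/Annotatef) and sentinel checks via errors.Is against the new terrors package (replacing errors.Contain). The local sentinel below only mirrors the role of terrors.ErrInvalidValue.

// errors_sketch.go
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

var errInvalidValue = errors.New("invalid value")

func parseSize(out string) (int64, error) {
	if out == "" {
		// Wrapping keeps the sentinel in the chain, so errors.Is still matches,
		// and %+v prints the attached stack trace.
		return 0, errors.Wrapf(errInvalidValue, "invalid parted output: %q", out)
	}
	return int64(len(out)), nil
}

func main() {
	_, err := parseSize("")
	fmt.Println(errors.Is(err, errInvalidValue)) // true
	fmt.Printf("%+v\n", err)
}
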
-type Qmp interface { +type Qmp interface { //nolint:interfacebloat Close() error - Exec(cmd string, args []string, stdio bool) ([]byte, error) - ExecStatus(pid int) ([]byte, error) - - OpenFile(path, mode string) ([]byte, error) - FlushFile(handle int) error - WriteFile(handle int, buf []byte) error - ReadFile(handle int, p []byte) (read int, eof bool, err error) - CloseFile(handle int) error - SeekFile(handle int, offset int, whence int) (position int, eof bool, err error) + Exec(ctx context.Context, cmd string, args []string, stdio bool) ([]byte, error) + ExecStatus(ctx context.Context, pid int) ([]byte, error) + + OpenFile(ctx context.Context, path, mode string) ([]byte, error) + FlushFile(ctx context.Context, handle int) error + WriteFile(ctx context.Context, handle int, buf []byte) error + ReadFile(ctx context.Context, handle int, p []byte) (read int, eof bool, err error) + CloseFile(ctx context.Context, handle int) error + SeekFile(ctx context.Context, handle int, offset int, whence int) (position int, eof bool, err error) + FSFreezeAll(ctx context.Context) (nFS int, err error) + FSFreezeList(ctx context.Context, mountpoints []string) (nFS int, err error) + FSThawAll(ctx context.Context) (nFS int, err error) + FSFreezeStatus(ctx context.Context) (status string, err error) + GetName() string } type qmp struct { @@ -38,12 +45,11 @@ type qmp struct { // the false value indicates virsh qemu-monitor-command. ga bool - sockfile string - sock net.Conn - reader *bufio.Reader - writer *bufio.Writer - - greeting *json.RawMessage + // sockfile string + name string + sock net.Conn + virt libvirt.Libvirt + dom libvirt.Domain } type qmpResp struct { @@ -62,14 +68,27 @@ func (e *qmpError) Error() string { return fmt.Sprintf("QMP error %s: %s", e.Class, e.Desc) } -func newQmp(sockfile string, ga bool) *qmp { +func newQmp(name string, virt libvirt.Libvirt, ga bool) *qmp { return &qmp{ - sockfile: sockfile, - ga: ga, + name: name, + virt: virt, + ga: ga, } } -func (q *qmp) Exec(path string, args []string, output bool) ([]byte, error) { +func (q *qmp) initIfNecessary() error { + if q.dom != nil { + return nil + } + dom, err := q.virt.LookupDomain(q.name) + if err != nil { + return err + } + q.dom = dom + return nil +} + +func (q *qmp) Exec(ctx context.Context, path string, args []string, output bool) ([]byte, error) { q.Lock() defer q.Unlock() @@ -81,39 +100,39 @@ func (q *qmp) Exec(path string, args []string, output bool) ([]byte, error) { exArg["arg"] = args } - log.Debugf("exec %s with %v", path, args) + log.WithFunc("qmp.Exec").Debugf(ctx, "exec %s with %v", path, args) - return q.exec("guest-exec", exArg) + return q.exec(ctx, "guest-exec", exArg) } -func (q *qmp) ExecStatus(pid int) ([]byte, error) { +func (q *qmp) ExecStatus(ctx context.Context, pid int) ([]byte, error) { q.Lock() defer q.Unlock() - return q.exec("guest-exec-status", map[string]any{"pid": pid}) + return q.exec(ctx, "guest-exec-status", map[string]any{"pid": pid}) } -func (q *qmp) OpenFile(path, mode string) ([]byte, error) { +func (q *qmp) OpenFile(ctx context.Context, path, mode string) ([]byte, error) { q.Lock() defer q.Unlock() - return q.exec("guest-file-open", map[string]any{"path": path, "mode": mode}) + return q.exec(ctx, "guest-file-open", map[string]any{"path": path, "mode": mode}) } -func (q *qmp) CloseFile(handle int) (err error) { +func (q *qmp) CloseFile(ctx context.Context, handle int) (err error) { q.Lock() defer q.Unlock() - _, err = q.exec("guest-file-close", map[string]any{"handle": handle}) + _, err = 
q.exec(ctx, "guest-file-close", map[string]any{"handle": handle}) return } -func (q *qmp) FlushFile(handle int) (err error) { +func (q *qmp) FlushFile(ctx context.Context, handle int) (err error) { q.Lock() defer q.Unlock() - _, err = q.exec("guest-file-flush", map[string]any{"handle": handle}) + _, err = q.exec(ctx, "guest-file-flush", map[string]any{"handle": handle}) return } // ReadFile . -func (q *qmp) ReadFile(handle int, p []byte) (read int, eof bool, err error) { +func (q *qmp) ReadFile(ctx context.Context, handle int, p []byte) (read int, eof bool, err error) { pcap := int64(cap(p)) args := map[string]any{ "handle": handle, @@ -125,7 +144,7 @@ func (q *qmp) ReadFile(handle int, p []byte) (read int, eof bool, err error) { for { var buf []byte - if buf, err = q.exec("guest-file-read", args); err != nil { + if buf, err = q.exec(ctx, "guest-file-read", args); err != nil { return } @@ -157,32 +176,83 @@ func (q *qmp) ReadFile(handle int, p []byte) (read int, eof bool, err error) { } } -func (q *qmp) WriteFile(handle int, buf []byte) (err error) { +func (q *qmp) WriteFile(ctx context.Context, handle int, buf []byte) (err error) { q.Lock() defer q.Unlock() var b64 = base64.StdEncoding.EncodeToString(buf) - _, err = q.exec("guest-file-write", map[string]any{"handle": handle, "buf-b64": b64}) + _, err = q.exec(ctx, "guest-file-write", map[string]any{"handle": handle, "buf-b64": b64}) return } -func (q *qmp) exec(cmd string, args map[string]any) ([]byte, error) { - var buf, err = newQmpCmd(cmd, args).bytes() - if err != nil { - return nil, errors.Trace(err) +func (q *qmp) FSFreezeAll(ctx context.Context) (nFS int, err error) { + q.Lock() + defer q.Unlock() + + var bs []byte + if bs, err = q.exec(ctx, "guest-fsfreeze-freeze", nil); err != nil { + return } + nFS, err = strconv.Atoi(string(bs)) - if err := q.connect(); err != nil { - return nil, errors.Trace(err) + return +} +func (q *qmp) FSFreezeList(ctx context.Context, mountpoints []string) (nFS int, err error) { + q.Lock() + defer q.Unlock() + var args map[string]any + if len(mountpoints) > 0 { + args = map[string]any{"mountpoints": mountpoints} } + var bs []byte + if bs, err = q.exec(ctx, "guest-fsfreeze-freeze-list", args); err != nil { + return + } + nFS, err = strconv.Atoi(string(bs)) + return +} - switch resp, err := q.req(buf); { +func (q *qmp) FSThawAll(ctx context.Context) (nFS int, err error) { + q.Lock() + defer q.Unlock() + + var bs []byte + if bs, err = q.exec(ctx, "guest-fsfreeze-thaw", nil); err != nil { + return + } + nFS, err = strconv.Atoi(string(bs)) + return +} + +func (q *qmp) FSFreezeStatus(ctx context.Context) (status string, err error) { + q.Lock() + defer q.Unlock() + + var bs []byte + if bs, err = q.exec(ctx, "guest-fsfreeze-status", nil); err != nil { + return + } + status = string(bs[1 : len(bs)-1]) + return +} + +func (q *qmp) exec(ctx context.Context, cmd string, args map[string]any) ([]byte, error) { + if err := q.initIfNecessary(); err != nil { + return nil, err + } + + var buf, err = newQmpCmd(cmd, args).bytes() + if err != nil { + return nil, errors.Wrap(err, "") + } + + switch resp, err := q.req(ctx, buf); { case err != nil: - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") case resp.Error != nil: - return nil, errors.Trace(resp.Error) + return nil, errors.Wrapf(resp.Error, "failed to exec %s", cmd) default: return []byte(*resp.Return), nil @@ -190,7 +260,7 @@ func (q *qmp) exec(cmd string, args map[string]any) ([]byte, error) { } // SeekFile . 
-func (q *qmp) SeekFile(handle int, offset int, whence int) (position int, eof bool, err error) { +func (q *qmp) SeekFile(ctx context.Context, handle int, offset int, whence int) (position int, eof bool, err error) { args := map[string]any{ "handle": handle, "offset": offset, @@ -201,7 +271,7 @@ func (q *qmp) SeekFile(handle int, offset int, whence int) (position int, eof bo defer q.Unlock() var buf []byte - if buf, err = q.exec("guest-file-seek", args); err != nil { + if buf, err = q.exec(ctx, "guest-file-seek", args); err != nil { return } @@ -216,75 +286,6 @@ func (q *qmp) SeekFile(handle int, offset int, whence int) (position int, eof bo return resp.Position, resp.EOF, nil } -func (q *qmp) connect() error { - if q.sock != nil { - return nil - } - - var sock, err = net.DialTimeout("unix", q.sockfile, configs.Conf.QMPConnectTimeout.Duration()) - if err != nil { - return errors.Trace(err) - } - - q.sock = sock - q.reader = bufio.NewReader(q.sock) - q.writer = bufio.NewWriter(q.sock) - - if !q.ga { - if err := q.handshake(); err != nil { - q.Close() - return errors.Trace(err) - } - } - - return nil -} - -func (q *qmp) handshake() error { - return utils.Invoke([]func() error{ - q.greet, - q.capabilities, - }) -} - -func (q *qmp) capabilities() error { - var cmd, err = newQmpCmd("qmp_capabilities", nil).bytes() - if err != nil { - return errors.Trace(err) - } - - switch resp, err := q.req(cmd); { - case err != nil: - return errors.Trace(err) - - case resp.Return == nil: - return errors.Errorf("QMP negotiation error") - - default: - return nil - } -} - -func (q *qmp) greet() error { - var buf, err = q.read() - if err != nil { - return errors.Trace(err) - } - - var resp qmpResp - - switch err := json.Unmarshal(buf, &resp.Greeting); { - case err != nil: - return errors.Trace(err) - case resp.Greeting == nil: - return errors.Errorf("QMP greeting error") - } - - q.greeting = resp.Greeting - - return nil -} - func (q *qmp) Close() (err error) { if q.sock != nil { err = q.sock.Close() @@ -292,56 +293,23 @@ func (q *qmp) Close() (err error) { return } -func (q *qmp) req(cmd []byte) (qmpResp, error) { +func (q *qmp) req(ctx context.Context, cmd []byte) (qmpResp, error) { var resp qmpResp - if err := q.write(cmd); err != nil { - return resp, errors.Trace(err) - } - - var buf, err = q.read() + rs, err := q.dom.QemuAgentCommand(ctx, string(cmd)) if err != nil { - return resp, errors.Trace(err) + return resp, errors.Wrap(err, "") } - if err := json.Unmarshal(buf, &resp); err != nil { - return resp, errors.Trace(err) + if err := json.Unmarshal([]byte(rs), &resp); err != nil { + return resp, errors.Wrap(err, "") } return resp, nil } -func (q *qmp) write(buf []byte) error { - if _, err := q.writer.Write(append(buf, '\x0a')); err != nil { - return errors.Trace(err) - } - - if err := q.writer.Flush(); err != nil { - return errors.Trace(err) - } - - return nil -} - -func (q *qmp) read() ([]byte, error) { - for { - var buf, err = q.reader.ReadBytes('\n') - if err != nil { - return nil, errors.Trace(err) - } - - var resp qmpResp - if err := json.Unmarshal(buf, &resp); err != nil { - return nil, errors.Trace(err) - } - - if resp.Event != nil { - log.Infof("recv event: %v", resp.Event) - continue - } - - return buf, nil - } +func (q *qmp) GetName() string { + return q.name } type qmpCmd struct { diff --git a/internal/virt/agent/qmp_test.go b/internal/virt/agent/qmp_test.go new file mode 100644 index 0000000..66cd418 --- /dev/null +++ b/internal/virt/agent/qmp_test.go @@ -0,0 +1,55 @@ +package agent + +import ( + 
"context" + "testing" + + "github.com/projecteru2/yavirt/pkg/libvirt/mocks" + "github.com/projecteru2/yavirt/pkg/test/assert" +) + +func newMockQmp(dom *mocks.Domain) *qmp { + return &qmp{ + name: "mock", + virt: nil, + ga: true, + dom: dom, + } +} + +func TestFsFreezeAll(t *testing.T) { + dom := &mocks.Domain{} + q := newMockQmp(dom) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cmd := `{"execute":"guest-fsfreeze-freeze"}` + dom.On("QemuAgentCommand", ctx, cmd).Return(`{"return": 3}`, nil) + nFs, err := q.FSFreezeAll(ctx) + assert.Nil(t, err) + assert.Equal(t, 3, nFs) +} + +func TestFSThawAll(t *testing.T) { + dom := &mocks.Domain{} + q := newMockQmp(dom) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cmd := `{"execute":"guest-fsfreeze-thaw"}` + dom.On("QemuAgentCommand", ctx, cmd).Return(`{"return": 3}`, nil) + nFs, err := q.FSThawAll(ctx) + assert.Nil(t, err) + assert.Equal(t, 3, nFs) +} + +func TestFsFreezeStatus(t *testing.T) { + dom := &mocks.Domain{} + q := newMockQmp(dom) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cmd := `{"execute":"guest-fsfreeze-status"}` + dom.On("QemuAgentCommand", ctx, cmd).Return(`{"return": "freezed"}`, nil) + status, err := q.FSFreezeStatus(ctx) + assert.Nil(t, err) + assert.Equal(t, "freezed", status) +} diff --git a/internal/virt/agent/types/types.go b/internal/virt/agent/types/types.go index 1edd131..f281a13 100644 --- a/internal/virt/agent/types/types.go +++ b/internal/virt/agent/types/types.go @@ -3,7 +3,8 @@ package types import ( "encoding/base64" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/cockroachdb/errors" + "github.com/projecteru2/yavirt/pkg/terrors" ) // Diskfree . @@ -50,11 +51,11 @@ func (s ExecStatus) Stdio() (so, se []byte, err error) { var xe error if so, xe = s.stdout(); xe != nil { - return nil, nil, errors.Wrap(err, xe) + return nil, nil, errors.CombineErrors(err, xe) } if se, xe = s.stderr(); xe != nil { - return nil, nil, errors.Wrap(err, xe) + return nil, nil, errors.CombineErrors(err, xe) } return @@ -70,8 +71,8 @@ func (s ExecStatus) stderr() ([]byte, error) { // CheckReturnCode . func (s ExecStatus) CheckReturnCode() (bool, error) { - if err := s.Error(); err != nil && !errors.Contain(err, errors.ErrExecNonZeroReturn) { - return false, errors.Trace(err) + if err := s.Error(); err != nil && !errors.Is(err, terrors.ErrExecNonZeroReturn) { + return false, errors.Wrap(err, "") } return s.Code == 0, nil } @@ -79,13 +80,13 @@ func (s ExecStatus) CheckReturnCode() (bool, error) { func (s ExecStatus) Error() error { switch { case s.Err != nil: - return errors.Trace(s.Err) + return errors.Wrap(s.Err, "") case !s.Exited: - return errors.ErrExecIsRunning + return terrors.ErrExecIsRunning case s.Code != 0: - return errors.Annotatef(errors.ErrExecNonZeroReturn, + return errors.Wrapf(terrors.ErrExecNonZeroReturn, "return %d; stdout: %s; stderr: %s", s.Code, decodeToString(s.Base64Out), decodeToString(s.Base64Err)) diff --git a/internal/virt/ctx.go b/internal/virt/ctx.go deleted file mode 100644 index e591716..0000000 --- a/internal/virt/ctx.go +++ /dev/null @@ -1,36 +0,0 @@ -package virt - -import ( - "context" - - calihandler "github.com/projecteru2/yavirt/internal/vnet/handler/calico" - "github.com/projecteru2/yavirt/pkg/errors" -) - -type key string - -const calicoHandlerKey key = "CalicoHandler" - -// Context . -type Context struct { - context.Context -} - -// NewContext . 
-func NewContext(ctx context.Context, caliHandler *calihandler.Handler) Context { - ctx = context.WithValue(ctx, calicoHandlerKey, caliHandler) - return Context{Context: ctx} -} - -// CalicoHandler . -func (c Context) CalicoHandler() (*calihandler.Handler, error) { - switch hand, ok := c.Value(calicoHandlerKey).(*calihandler.Handler); { - case !ok: - fallthrough - case hand == nil: - return nil, errors.Annotatef(errors.ErrInvalidValue, "nil *calihandler.Handler") - - default: - return hand, nil - } -} diff --git a/internal/virt/domain/domain.go b/internal/virt/domain/domain.go index b275cfc..6f1f761 100644 --- a/internal/virt/domain/domain.go +++ b/internal/virt/domain/domain.go @@ -1,18 +1,33 @@ package domain import ( + "context" + "encoding/json" "encoding/xml" + "fmt" "path/filepath" + "strings" "time" + _ "embed" + + "github.com/antchfx/xmlquery" + "github.com/cockroachdb/errors" + pciaddr "github.com/jaypipes/ghw/pkg/pci/address" + "github.com/projecteru2/core/log" "github.com/projecteru2/yavirt/configs" + "github.com/projecteru2/yavirt/internal/eru/resources" "github.com/projecteru2/yavirt/internal/models" + "github.com/projecteru2/yavirt/internal/network" + "github.com/projecteru2/yavirt/internal/types" "github.com/projecteru2/yavirt/internal/virt/template" - "github.com/projecteru2/yavirt/internal/virt/types" - "github.com/projecteru2/yavirt/internal/vnet" - "github.com/projecteru2/yavirt/pkg/errors" + "github.com/projecteru2/yavirt/internal/vmcache" "github.com/projecteru2/yavirt/pkg/libvirt" + "github.com/projecteru2/yavirt/pkg/terrors" "github.com/projecteru2/yavirt/pkg/utils" + "github.com/samber/lo" + gputypes "github.com/yuyang0/resource-gpu/gpu/types" + "libvirt.org/go/libvirtxml" ) const ( @@ -22,17 +37,31 @@ const ( InterfaceBridge = "bridge" ) +var ( + //go:embed templates/guest.xml + guestXML string + //go:embed templates/hostdev.xml + hostdevXML string +) + // Domain . type Domain interface { //nolint + Lookup() (libvirt.Domain, error) CheckShutoff() error + CheckRunning() error GetUUID() (string, error) GetConsoleTtyname() (string, error) - AttachVolume(filepath, devName string) (st libvirt.DomainState, err error) + OpenConsole(devname string, flages types.OpenConsoleFlags) (*libvirt.Console, error) + ReplaceSysVolume(diskXML string) error + AttachVolume(buf []byte) (st libvirt.DomainState, err error) + DetachVolume(dev string) (st libvirt.DomainState, err error) + AttachGPU(prod string, count int) (st libvirt.DomainState, err error) + DetachGPU(prod string, count int) (st libvirt.DomainState, err error) AmplifyVolume(filepath string, cap uint64) error Define() error Undefine() error - Shutdown(force bool) error - Boot() error + Shutdown(ctx context.Context, force bool) error + Boot(ctx context.Context) error Suspend() error Resume() error SetSpec(cpu int, mem int64) error @@ -53,37 +82,45 @@ func New(guest *models.Guest, virt libvirt.Libvirt) *VirtDomain { } } -// XML . -type XML struct { - Name string `xml:"name"` - Devices struct { - Channel []struct { - Source struct { - Path string `xml:"path,attr"` - } `xml:"source"` - Alias struct { - Name string `xml:"name,attr"` - } `xml:"alias"` - } `xml:"channel"` - } `xml:"devices"` -} - // Define . 
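
Editor's sketch of the pattern the new //go:embed directives support: ship a default template inside the binary and let an on-disk copy override it. This only mimics the apparent shape of template.Render(path, embedded fallback, args) used in render(); it is not the project's template package, and it assumes a templates/guest.xml file exists beside this source file at build time.

// embed_sketch.go
package main

import (
	"bytes"
	_ "embed"
	"fmt"
	"os"
	"text/template"
)

//go:embed templates/guest.xml
var guestXML string

func render(overridePath, fallback string, args any) ([]byte, error) {
	content := fallback
	if buf, err := os.ReadFile(overridePath); err == nil {
		content = string(buf) // an operator-provided template wins over the embedded copy
	}
	tmpl, err := template.New("guest").Parse(content)
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	if err := tmpl.Execute(&out, args); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

func main() {
	// The override path and args here are illustrative only.
	buf, err := render("./guest.override.xml", guestXML, map[string]any{"name": "guest-0001"})
	fmt.Println(string(buf), err)
}
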
func (d *VirtDomain) Define() error { + ctx := context.TODO() + logger := log.WithFunc("VirtDomain.Define").WithField("guest", d.guest.ID) + + logger.Debugf(ctx, "GPU engine params: %v", d.guest.GPUEngineParams) + // if gpu resource is needed, we need to lock gpu resources here + // and unlock after the domain is defined + if d.guest.GPUEngineParams.Count() > 0 { + resources.GetManager().LockGPU() + defer resources.GetManager().UnlockGPU() + + // Updating the domain cache is necessary in this context. Consider the following scenario: + // We do not update the vmcache here because the event-driven update of vmcache may experience delays. + // Consequently, after unlocking the GPU locker, the vmcache may not have been updated. + // In such cases, the next GPU allocation may inadvertently select GPUs that are already in use by this VM. + // While this scenario is rare, it can occur. + defer func() { + logger.Debugf(ctx, " -------------- %s GPU addresses: %v", d.guest.ID, vmcache.FetchGPUAddrs()) + if err := vmcache.UpdateDomain(d.guest.ID); err != nil { + log.Errorf(ctx, err, "[Define] failed to update domain cache") + } + logger.Debugf(ctx, " +++++++++++++ %s GPU addresses: %v", d.guest.ID, vmcache.FetchGPUAddrs()) + }() + } + buf, err := d.render() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } dom, err := d.virt.DefineDomain(string(buf)) if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - defer dom.Free() switch st, err := dom.GetState(); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "") case st == libvirt.DomainShutoff: return nil default: @@ -92,45 +129,57 @@ func (d *VirtDomain) Define() error { } // Boot . -func (d *VirtDomain) Boot() error { - dom, err := d.lookup() +func (d *VirtDomain) Boot(ctx context.Context) error { + logger := log.WithFunc("VirtDomain.Boot") + dom, err := d.Lookup() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - defer dom.Free() + defer func() { + _ = dom.SetAutostart(true) + if err := dom.SetMemoryStatsPeriod(configs.Conf.MemStatsPeriod, false, false); err != nil { + logger.Warnf(ctx, "failed to set memory stats period: %v", err) + } + }() + domName, _ := dom.GetName() var expState = libvirt.DomainShutoff for i := 0; ; i++ { - time.Sleep(time.Second * time.Duration(i)) - i %= 5 - - switch st, err := dom.GetState(); { - case err != nil: - return errors.Trace(err) - - case st == libvirt.DomainRunning: - return nil - - case st == expState: - // Actually, dom.Create() means launch a defined domain. - if err := dom.Create(); err != nil { - return errors.Trace(err) + timeout := time.Duration(i%5) * time.Second + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(timeout): + switch st, err := dom.GetState(); { + case err != nil: + return errors.Wrap(err, "") + + case st == libvirt.DomainRunning: + return nil + + case st == expState: + // Actually, dom.Create() means launch a defined domain. + if err := dom.Create(); err != nil { + logger.Debugf(ctx, "create domain failed,dom name : %s , err: %s", domName, err.Error()) + return errors.Wrap(err, "") + } + logger.Infof(ctx, "create domain success, dom name : %s", domName) + continue + + default: + return types.NewDomainStatesErr(st, expState) } - continue - - default: - return types.NewDomainStatesErr(st, expState) } } } // Shutdown . 
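
Editor's sketch of the context-aware polling loop that Boot (and Shutdown below) now uses: back off up to four seconds between state checks and abort as soon as the caller's context is cancelled. The poll function is a stand-in for dom.GetState().

// wait_sketch.go
package main

import (
	"context"
	"fmt"
	"time"
)

func waitUntil(ctx context.Context, poll func() (done bool, err error)) error {
	for i := 0; ; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Duration(i%5) * time.Second):
			done, err := poll()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	calls := 0
	err := waitUntil(ctx, func() (bool, error) {
		calls++
		// Stand-in for dom.GetState(): report the expected state on the third check.
		return calls >= 3, nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
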
-func (d *VirtDomain) Shutdown(force bool) error { - dom, err := d.lookup() +func (d *VirtDomain) Shutdown(ctx context.Context, force bool) error { + dom, err := d.Lookup() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - defer dom.Free() var expState = libvirt.DomainRunning @@ -140,36 +189,41 @@ func (d *VirtDomain) Shutdown(force bool) error { } for i := 0; ; i++ { - time.Sleep(time.Second * time.Duration(i)) - i %= 5 - - switch st, err := dom.GetState(); { - case err != nil: - return errors.Trace(err) - - case st == libvirt.DomainShutoff: - return nil - - case st == libvirt.DomainShutting: - // It's shutting now, waiting to be shutoff. - continue - - case st == libvirt.DomainPaused: - fallthrough - case st == expState: - if err := shut(dom); err != nil { - return errors.Trace(err) - } - continue - + select { + case <-ctx.Done(): + return ctx.Err() default: - return types.NewDomainStatesErr(st, expState) + time.Sleep(time.Second * time.Duration(i)) + i %= 5 + + switch st, err := dom.GetState(); { + case err != nil: + return errors.Wrap(err, "") + + case st == libvirt.DomainShutoff: + return nil + + case st == libvirt.DomainShutting: + // It's shutting now, waiting to be shutoff. + continue + + case st == libvirt.DomainPaused: + fallthrough + case st == expState: + if err := shut(dom); err != nil { + return errors.Wrap(err, "") + } + continue + + default: + return types.NewDomainStatesErr(st, expState) + } } } } func (d *VirtDomain) graceShutdown(dom libvirt.Domain) error { - return dom.ShutdownFlags(libvirt.DomainShutdownDefault) + return dom.ShutdownFlags(libvirt.DomainShutdownFlags(libvirt.DomainShutdownDefault)) } func (d *VirtDomain) forceShutdown(dom libvirt.Domain) error { @@ -178,15 +232,14 @@ func (d *VirtDomain) forceShutdown(dom libvirt.Domain) error { // CheckShutoff . func (d *VirtDomain) CheckShutoff() error { - dom, err := d.lookup() + dom, err := d.Lookup() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - defer dom.Free() switch st, err := dom.GetState(); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "") case st != libvirt.DomainShutoff: return types.NewDomainStatesErr(st, libvirt.DomainShutoff) default: @@ -194,13 +247,29 @@ func (d *VirtDomain) CheckShutoff() error { } } +// CheckRunning . +func (d *VirtDomain) CheckRunning() error { + dom, err := d.Lookup() + if err != nil { + return errors.Wrap(err, "") + } + + switch st, err := dom.GetState(); { + case err != nil: + return errors.Wrap(err, "") + case st != libvirt.DomainRunning: + return types.NewDomainStatesErr(st, libvirt.DomainRunning) + default: + return nil + } +} + // Suspend . func (d *VirtDomain) Suspend() error { - dom, err := d.lookup() + dom, err := d.Lookup() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - defer dom.Free() var expState = libvirt.DomainRunning for i := 0; ; i++ { @@ -209,14 +278,14 @@ func (d *VirtDomain) Suspend() error { switch st, err := dom.GetState(); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "") case st == libvirt.DomainPaused: return nil case st == expState: if err := dom.Suspend(); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } continue @@ -228,11 +297,10 @@ func (d *VirtDomain) Suspend() error { // Resume . 
func (d *VirtDomain) Resume() error { - dom, err := d.lookup() + dom, err := d.Lookup() if err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } - defer dom.Free() var expState = libvirt.DomainPaused for i := 0; ; i++ { @@ -241,14 +309,14 @@ func (d *VirtDomain) Resume() error { switch st, err := dom.GetState(); { case err != nil: - return errors.Trace(err) + return errors.Wrap(err, "") case st == libvirt.DomainRunning: return nil case st == expState: if err := dom.Resume(); err != nil { - return errors.Trace(err) + return errors.Wrap(err, "") } continue @@ -260,22 +328,21 @@ func (d *VirtDomain) Resume() error { // Undefine . func (d *VirtDomain) Undefine() error { - dom, err := d.lookup() + dom, err := d.Lookup() if err != nil { - if errors.IsDomainNotExistsErr(err) { + if terrors.IsDomainNotExistsErr(err) { return nil } - return errors.Trace(err) + return errors.Wrap(err, "") } - defer dom.Free() var expState = libvirt.DomainShutoff switch st, err := dom.GetState(); { case err != nil: - if errors.IsDomainNotExistsErr(err) { + if terrors.IsDomainNotExistsErr(err) { return nil } - return errors.Trace(err) + return errors.Wrap(err, "") case st == libvirt.DomainPaused: fallthrough @@ -289,40 +356,144 @@ func (d *VirtDomain) Undefine() error { // GetUUID . func (d *VirtDomain) GetUUID() (string, error) { - dom, err := d.lookup() + dom, err := d.Lookup() if err != nil { - return "", errors.Trace(err) + return "", errors.Wrap(err, "") } - defer dom.Free() return dom.GetUUIDString() } func (d *VirtDomain) render() ([]byte, error) { uuid, err := d.checkUUID(d.guest.DmiUUID) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") } sysVol, err := d.guest.SysVolume() if err != nil { - return nil, errors.Trace(err) + return nil, errors.Wrap(err, "") + } + + sysVolXML, err := sysVol.GenerateXML() + if err != nil { + return nil, errors.Wrap(err, "") + } + dataVols, err := d.dataVols() + if err != nil { + return nil, err + } + metadataXML, err := d.metadataXML() + if err != nil { + return nil, err + } + ciXML, cdromSrcXML, err := d.cloudInitXML() + if err != nil { + return nil, err + } + vncXML, err := d.vncConfig() + if err != nil { + return nil, err } + var gpus []map[string]string + if d.guest.GPUEngineParams.Count() > 0 { + gpus, err = d.gpus() + if err != nil { + return nil, err + } + } var args = map[string]any{ "name": d.guest.ID, "uuid": uuid, "memory": d.guest.MemoryInMiB(), "cpu": d.guest.CPU, - "sysvol": sysVol.Filepath(), - "gasock": d.guest.SocketFilepath(), - "datavols": d.dataVols(d.guest.Vols), + "gpus": gpus, + "sysvol": string(sysVolXML), + "datavols": dataVols, "interface": d.getInterfaceType(), "pair": d.guest.NetworkPairName(), "mac": d.guest.MAC, + "bandwidth": d.networkBandwidth(), "cache_passthrough": configs.Conf.VirtCPUCachePassthrough, + "metadata_xml": metadataXML, + "cloud_init_xml": ciXML, + "cdrom_src_xml": cdromSrcXML, + "vnc": vncXML, + } + + return template.Render(d.guestTemplateFilepath(), guestXML, args) +} + +type AppMetadata struct { + ID int64 `json:"id"` + SID string `json:"sid"` + Name string `json:"name"` + From string `json:"from"` + UserID int64 `json:"user_id"` + UserName string `json:"user_name"` +} + +func (d *VirtDomain) metadataXML() (string, error) { + bs, ok := d.guest.JSONLabels["instance/metadata"] + if !ok { + return "", nil + } + obj := AppMetadata{} + if err := json.Unmarshal([]byte(bs), &obj); err != nil { + return "", errors.Wrap(err, "") + } + meta := types.CustomDomainMetadata{ + App: types.App{ + 
NS: "https://eru.org/v1", + From: obj.From, + Owner: types.AppOwner{ + UserID: fmt.Sprintf("%d", obj.UserID), + UserName: obj.UserName, + }, + Name: types.AppName{ + Name: obj.Name, + }, + ID: types.AppID{ + SID: obj.SID, + ID: fmt.Sprintf("%d", obj.ID), + }, + }, + } + if len(d.guest.IPNets) > 0 { + meta.App.IP.IP = d.guest.IPNets[0].IPv4() + } + xmlBS, err := xml.Marshal(meta) + if err != nil { + return "", errors.Wrap(err, "") } + return string(xmlBS), nil +} - return template.Render(d.guestTemplateFilepath(), args) +func (d *VirtDomain) cloudInitXML() (string, string, error) { + // for network + obj, err := d.guest.GenCloudInit() + if err != nil { + return "", "", errors.Wrap(err, "") + } + log.Debugf(context.TODO(), "cloud-init: %v", obj) + var ( + ciXML string + cdromSrcXML string + ) + switch { + case obj.URL != "": + ciXML = fmt.Sprintf("ds=nocloud-net;s=%s", obj.URL) + case obj.Username != "" || obj.Password != "": + output := filepath.Join(configs.Conf.VirtCloudInitDir, fmt.Sprintf("%s.iso", d.guest.ID)) + if err := obj.GenerateISO(output); err != nil { + return "", "", err + } + ciXML = "ds=nocloud" + cdromSrcXML = fmt.Sprintf("", output) + default: + return "", "", errors.New("invalid cloud-init config") + } + return ciXML, cdromSrcXML, nil } func (d *VirtDomain) checkUUID(raw string) (string, error) { @@ -331,7 +502,7 @@ func (d *VirtDomain) checkUUID(raw string) (string, error) { } if err := utils.CheckUUID(raw); err != nil { - return "", errors.Trace(err) + return "", errors.Wrap(err, "") } return raw, nil @@ -339,37 +510,112 @@ func (d *VirtDomain) checkUUID(raw string) (string, error) { func (d *VirtDomain) getInterfaceType() string { switch d.guest.NetworkMode { - case vnet.NetworkCalico: + case network.CalicoMode: return InterfaceEthernet default: return InterfaceBridge } } -func (d *VirtDomain) dataVols(vols models.Volumes) []map[string]string { - var dat = []map[string]string{} +func (d *VirtDomain) dataVols() ([]string, error) { + vols := d.guest.Vols + var dat = []string{} - for i, v := range vols { + for _, v := range vols { if v.IsSys() { continue } + buf, err := v.GenerateXML() + if err != nil { + return nil, errors.Wrap(err, "") + } + dat = append(dat, string(buf)) + } + return dat, nil +} + +func allocGPUs(eParams *gputypes.EngineParams) ([]map[string]string, error) { + infos, err := resources.GetManager().AllocGPU(eParams) + if err != nil { + return nil, err + } + res := lo.Map(infos, func(info types.GPUInfo, _ int) map[string]string { + addr := pciaddr.FromString(info.Address) + r := map[string]string{ + "domain": addr.Domain, + "bus": addr.Bus, + "slot": addr.Device, + "function": addr.Function, + } + return r + }) + return res, nil +} +func (d *VirtDomain) gpus() ([]map[string]string, error) { + return allocGPUs(d.guest.GPUEngineParams) +} - dat = append(dat, map[string]string{ - "path": v.Filepath(), - "dev": v.GetDeviceName(i), - }) +type vncConfig struct { + Port int `json:"port"` + Password string `json:"password"` +} + +func (d *VirtDomain) vncConfig() (string, error) { + bs, ok := d.guest.JSONLabels["instance/vnc"] + if !ok { + return "", nil + } + obj := vncConfig{} + if err := json.Unmarshal([]byte(bs), &obj); err != nil { + return "", errors.Wrap(err, "") + } + portCfg := "port='-1' autoport='yes'" + if obj.Port > 0 { + portCfg = fmt.Sprintf("port='%d'", obj.Port) + } + passwdCfg := "" + if obj.Password != "" { + passwdCfg = fmt.Sprintf("passwd='%s'", obj.Password) + } + vncXML := fmt.Sprintf(``, portCfg, passwdCfg) + return vncXML, nil +} + +func 
+func (d *VirtDomain) networkBandwidth() map[string]string {
+	// libvirt expects bandwidth values in kbyte/s.
+	// The defaults below are avg: 2Gbps, peak: 3Gbps.
+	// 1Gbps = 1000Mbps = 1000000Kbps = 1000000000 bit/s
+	ans := map[string]string{
+		"average": fmt.Sprintf("%d", 2000000/8),
+		"peak":    fmt.Sprintf("%d", 3000000/8),
+	}
+	ss, ok := d.guest.JSONLabels["instance/nic-bandwidth"]
+	if !ok {
+		return ans
 	}
-	return dat
+	bandwidth := map[string]int64{}
+	err := json.Unmarshal([]byte(ss), &bandwidth)
+	if err != nil {
+		// just log a warning and fall back to the default values.
+		log.Warnf(context.TODO(), "Invalid bandwidth label: %s", ss)
+	} else {
+		if v, ok := bandwidth["average"]; ok {
+			ans["average"] = fmt.Sprintf("%d", v/8000)
+		}
+		if v, ok := bandwidth["peak"]; ok {
+			ans["peak"] = fmt.Sprintf("%d", v/8000)
+		}
+	}
+	return ans
 }
 
 // GetXMLString .
 func (d *VirtDomain) GetXMLString() (xml string, err error) {
-	dom, err := d.lookup()
+	dom, err := d.Lookup()
 	if err != nil {
 		return
 	}
-	defer dom.Free()
 
 	var flags libvirt.DomainXMLFlags
 	return dom.GetXMLDesc(flags)
@@ -377,47 +623,38 @@ func (d *VirtDomain) GetXMLString() (xml string, err error) {
 
 // GetConsoleTtyname .
 func (d *VirtDomain) GetConsoleTtyname() (devname string, err error) {
-	var dom libvirt.Domain
-	if dom, err = d.lookup(); err != nil {
-		return
-	}
-	defer dom.Free()
-
-	expState := libvirt.DomainRunning
-	switch st, err := dom.GetState(); {
-	case err != nil:
-		return "", errors.Trace(err)
-
-	case st != expState:
-		return "", types.NewDomainStatesErr(st, expState)
-	}
-
 	x, err := d.GetXMLString()
 	if err != nil {
 		return
 	}
-	domainXML := &XML{}
-	if err = xml.Unmarshal([]byte(x), domainXML); err != nil {
+	doc, err := xmlquery.Parse(strings.NewReader(x))
+	if err != nil {
 		return
 	}
-	for _, c := range domainXML.Devices.Channel {
-		if c.Alias.Name == "channel0" {
-			return c.Source.Path, nil
-		}
+	aliasNode := xmlquery.FindOne(doc, "//devices/console[2]/alias")
+	if aliasNode != nil {
+		return aliasNode.SelectAttr("name"), nil
+	}
+	return "", nil
+}
+
+func (d *VirtDomain) OpenConsole(devname string, flags types.OpenConsoleFlags) (*libvirt.Console, error) {
+	dom, err := d.Lookup()
+	if err != nil {
+		return nil, err
 	}
-	return "", errors.Errorf("channel0 not found")
+	return dom.OpenConsole(devname, &flags.ConsoleFlags)
 }
 
 // SetSpec .
 func (d *VirtDomain) SetSpec(cpu int, mem int64) error {
-	dom, err := d.lookup()
+	dom, err := d.Lookup()
 	if err != nil {
-		return errors.Trace(err)
+		return errors.Wrap(err, "")
 	}
-	defer dom.Free()
 
 	if err := d.setCPU(cpu, dom); err != nil {
-		return errors.Trace(err)
+		return errors.Wrap(err, "")
 	}
 
 	return d.setMemory(mem, dom)
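For reference, a minimal sketch of the arithmetic behind setMemory in the next hunk: the byte count is checked against configured bounds and then shifted right by 10 to get the kilobyte value libvirt expects. The bound values below are made up; the real ones come from configs.Conf.Resource.

package main

import "fmt"

func main() {
	const GiB = int64(1) << 30

	// setMemory passes mem>>10 to libvirt, i.e. bytes -> kilobytes:
	fmt.Println(GiB >> 10) // 1048576, the same value the unit test uses as utils.GB>>10

	// The bounds check compares raw byte counts against configured limits
	// (example limits only):
	minMemory, maxMemory := int64(512)<<20, 64*GiB
	mem := 4 * GiB
	fmt.Println(mem >= minMemory && mem <= maxMemory) // true
}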
@@ -426,7 +663,7 @@ func (d *VirtDomain) SetSpec(cpu int, mem int64) error {
 func (d *VirtDomain) setCPU(cpu int, dom libvirt.Domain) error {
 	switch {
 	case cpu < 0:
-		return errors.Annotatef(errors.ErrInvalidValue, "invalid CPU num: %d", cpu)
+		return errors.Wrapf(terrors.ErrInvalidValue, "invalid CPU num: %d", cpu)
 	case cpu == 0:
 		return nil
 	}
 
 	flag := libvirt.DomainVcpuConfig
 	// Don't set both Maximum and Current simultaneously.
 	if err := dom.SetVcpusFlags(uint(cpu), flag|libvirt.DomainVcpuMaximum); err != nil {
-		return errors.Trace(err)
+		return errors.Wrap(err, "")
 	}
 
 	return dom.SetVcpusFlags(uint(cpu), flag|libvirt.DomainVcpuCurrent)
 }
 
 func (d *VirtDomain) setMemory(mem int64, dom libvirt.Domain) error {
-	if mem < configs.Conf.MinMemory || mem > configs.Conf.MaxMemory {
-		return errors.Annotatef(errors.ErrInvalidValue,
+	if mem < configs.Conf.Resource.MinMemory || mem > configs.Conf.Resource.MaxMemory {
+		return errors.Wrapf(terrors.ErrInvalidValue,
 			"invalid memory: %d, it should be [%d, %d]",
-			mem, configs.Conf.MinMemory, configs.Conf.MaxMemory)
+			mem, configs.Conf.Resource.MinMemory, configs.Conf.Resource.MaxMemory)
 	}
 
 	// converts bytes unit to kilobytes
@@ -451,73 +688,212 @@ func (d *VirtDomain) setMemory(mem int64, dom libvirt.Domain) error {
 	flag := libvirt.DomainMemConfig
 	if err := dom.SetMemoryFlags(uint64(mem), flag|libvirt.DomainMemMaximum); err != nil {
-		return errors.Trace(err)
+		return errors.Wrap(err, "")
 	}
 
 	return dom.SetMemoryFlags(uint64(mem), flag|libvirt.DomainMemCurrent)
 }
 
+func (d *VirtDomain) ReplaceSysVolume(diskXML string) error {
+	xmldoc, err := d.GetXMLString()
+	if err != nil {
+		return errors.Wrapf(err, "failed to get domain xml of guest %s", d.guest.ID)
+	}
+	domcfg := &libvirtxml.Domain{}
+	if err = domcfg.Unmarshal(xmldoc); err != nil {
+		return errors.Wrapf(err, "failed to unmarshal domain xml of guest %s", d.guest.ID)
+	}
+	sysDisk := &libvirtxml.DomainDisk{}
+	if err := sysDisk.Unmarshal(diskXML); err != nil {
+		return errors.Wrapf(err, "failed to unmarshal disk xml")
+	}
+	domcfg.Devices.Disks[0] = *sysDisk
+	newXMLDoc, err := domcfg.Marshal()
+	if err != nil {
+		return errors.Wrapf(err, "failed to marshal new domain xml for guest %s", d.guest.ID)
+	}
+	if _, err := d.virt.DefineDomain(newXMLDoc); err != nil {
+		return errors.Wrapf(err, "failed to define domain for guest %s", d.guest.ID)
+	}
+
+	return nil
+}
+
 // AttachVolume .
-func (d *VirtDomain) AttachVolume(filepath, devName string) (st libvirt.DomainState, err error) {
+func (d *VirtDomain) AttachVolume(buf []byte) (st libvirt.DomainState, err error) {
 	var dom libvirt.Domain
-	if dom, err = d.lookup(); err != nil {
+	if dom, err = d.Lookup(); err != nil {
 		return
 	}
-	defer dom.Free()
+	return dom.AttachDevice(string(buf))
+}
 
-	var buf []byte
-	if buf, err = d.renderAttachVolumeXML(filepath, devName); err != nil {
+func (d *VirtDomain) DetachVolume(devPath string) (st libvirt.DomainState, err error) {
+	x, err := d.GetXMLString()
+	if err != nil {
+		return
+	}
+	dev := filepath.Base(devPath)
+	doc, err := xmlquery.Parse(strings.NewReader(x))
+	if err != nil {
+		return
+	}
+	node := xmlquery.FindOne(doc, fmt.Sprintf("//devices/disk[target[@dev='%s']]", dev))
+	if node == nil {
+		err = errors.New("can't find device")
 		return
 	}
 
-	return dom.AttachVolume(string(buf))
+	xml := node.OutputXML(true)
+	log.Infof(context.TODO(), "Detach volume, device(%s) xml: %s", devPath, xml)
+	var dom libvirt.Domain
+	if dom, err = d.Lookup(); err != nil {
+		return
+	}
+	return dom.DetachDevice(xml)
+}
+
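DetachVolume above locates the target disk element with an xmlquery XPath lookup, and the same pattern is reused for hostdev elements further down. A self-contained illustration against a made-up, stripped-down domain document, using only the xmlquery calls already present in the patch:

package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/xmlquery"
)

func main() {
	// Minimal, invented domain XML with two disks.
	x := `<domain><devices>
	  <disk type='file' device='disk'><target dev='vda' bus='virtio'/></disk>
	  <disk type='file' device='disk'><target dev='vdb' bus='virtio'/></disk>
	</devices></domain>`

	doc, err := xmlquery.Parse(strings.NewReader(x))
	if err != nil {
		panic(err)
	}
	// Same XPath shape as DetachVolume: select the disk whose <target dev=...> matches.
	node := xmlquery.FindOne(doc, fmt.Sprintf("//devices/disk[target[@dev='%s']]", "vdb"))
	if node == nil {
		panic("disk not found")
	}
	fmt.Println(node.OutputXML(true)) // prints the second <disk> element
}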
+// AttachGPU attaches new GPUs to guest.
+func (d *VirtDomain) AttachGPU(prod string, count int) (st libvirt.DomainState, err error) {
+	logger := log.WithFunc("AttachGPU")
+	var dom libvirt.Domain
+	if dom, err = d.Lookup(); err != nil {
+		return
+	}
+
+	resources.GetManager().LockGPU()
+	defer resources.GetManager().UnlockGPU()
+
+	// Updating the domain cache here is necessary. Consider the following scenario:
+	// if we relied only on the event-driven vmcache update, which may lag, the vmcache might still be
+	// stale after the GPU lock is released. In that case, the next GPU allocation could inadvertently
+	// select GPUs that are already in use by this VM. While this scenario is rare, it can occur.
+	defer func() {
+		if err := vmcache.UpdateDomain(d.guest.ID); err != nil {
+			logger.Errorf(context.TODO(), err, "failed to update domain cache")
+		}
+	}()
+	eParams := &gputypes.EngineParams{
+		ProdCountMap: map[string]int{
+			prod: count,
+		},
+	}
+	infos, err := allocGPUs(eParams)
+	if err != nil {
+		return
+	}
+	var buf []byte
+	for _, info := range infos {
+		buf, err = template.Render(d.hostdevTemplateFilepath(), hostdevXML, info)
+		if err != nil {
+			return 0, err
+		}
+
+		if st, err = dom.AttachDevice(string(buf)); err != nil {
+			return st, err
+		}
+	}
+	return
 }
 
-func (d *VirtDomain) renderAttachVolumeXML(filepath, devName string) ([]byte, error) {
-	args := map[string]any{
-		"path": filepath,
-		"dev":  devName,
+func extractHostdevXML(doc *xmlquery.Node, gaddr string) (string, error) {
+	ctx := context.TODO()
+	logger := log.WithFunc("extractHostdevXML")
+
+	addr := pciaddr.FromString(gaddr)
+	xpathFmt := "//devices/hostdev[source[address[@domain='0x%s' and @bus='0x%s' and @slot='0x%s' and @function='0x%s']]]"
+	node := xmlquery.FindOne(doc, fmt.Sprintf(xpathFmt, addr.Domain, addr.Bus, addr.Device, addr.Function))
+	if node == nil {
+		return "", errors.Errorf("can't find device, pciaddr: %s", gaddr)
 	}
-	return template.Render(d.diskTemplateFilepath(), args)
+
+	xml := node.OutputXML(true)
+	logger.Infof(ctx, "Detach gpu, device(%s) xml: %s", gaddr, xml)
+	return xml, nil
+}
+
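extractHostdevXML above assumes pciaddr.FromString decomposes a full PCI address such as "0000:81:00.0" into domain, bus, slot (device) and function without the 0x prefix, which the XPath then re-adds. A standard-library-only stand-in (not the real pciaddr API) showing that decomposition:

package main

import (
	"fmt"
	"strings"
)

// splitPCIAddr is an illustrative stand-in for pciaddr.FromString.
func splitPCIAddr(s string) (domain, bus, slot, function string) {
	parts := strings.SplitN(s, ":", 3)     // ["0000", "81", "00.0"]
	df := strings.SplitN(parts[2], ".", 2) // ["00", "0"]
	return parts[0], parts[1], df[0], df[1]
}

func main() {
	d, b, s, f := splitPCIAddr("0000:81:00.0")
	// Matches the attributes queried by extractHostdevXML: 0x0000 / 0x81 / 0x00 / 0x0.
	fmt.Println(d, b, s, f) // 0000 81 00 0
}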
+// An attached GPU shows up in the domain XML roughly as the following <hostdev> element,
+// which is what extractHostdevXML matches and DetachGPU removes:
+//
+// <hostdev mode='subsystem' type='pci' managed='yes'>
+//   <source>
+//     <address domain='0x0000' bus='0x81' slot='0x00' function='0x0'/>
+//   </source>
+// </hostdev>
+func (d *VirtDomain) DetachGPU(_ string, count int) (st libvirt.DomainState, err error) {
+	defer func() {
+		if err := vmcache.UpdateDomain(d.guest.ID); err != nil {
+			log.Errorf(context.TODO(), err, "[DetachGPU] failed to update domain cache")
+		}
+	}()
+	var dom libvirt.Domain
+	if dom, err = d.Lookup(); err != nil {
+		return
+	}
+
+	var flags libvirt.DomainXMLFlags
+	x, err := dom.GetXMLDesc(flags)
+	if err != nil {
+		return
+	}
+	doc, err := xmlquery.Parse(strings.NewReader(x))
+	if err != nil {
+		return
+	}
+	entry := vmcache.FetchDomainEntry(d.guest.ID)
+	if count > len(entry.GPUAddrs) {
+		count = len(entry.GPUAddrs)
+	}
+	for i := 0; i < count; i++ {
+		gaddr := entry.GPUAddrs[i]
+		// TODO: verify that the GPU at gaddr matches the requested product.
+		xml, err := extractHostdevXML(doc, gaddr)
+		if err != nil {
+			return 0, err
+		}
+		if st, err = dom.DetachDevice(xml); err != nil {
+			return st, err
+		}
+	}
+	return
 }
 
 // GetState .
 func (d *VirtDomain) GetState() (libvirt.DomainState, error) {
-	dom, err := d.lookup()
+	dom, err := d.Lookup()
 	if err != nil {
-		return libvirt.DomainNoState, errors.Trace(err)
+		return libvirt.DomainNoState, errors.Wrap(err, "")
 	}
-	defer dom.Free()
 
 	return dom.GetState()
 }
 
 // AmplifyVolume .
 func (d *VirtDomain) AmplifyVolume(filepath string, cap uint64) error {
-	dom, err := d.lookup()
+	dom, err := d.Lookup()
 	if err != nil {
-		return errors.Trace(err)
+		return errors.Wrap(err, "")
 	}
-	defer dom.Free()
 
 	return dom.AmplifyVolume(filepath, cap)
 }
 
-func (d *VirtDomain) lookup() (libvirt.Domain, error) {
+func (d *VirtDomain) Lookup() (libvirt.Domain, error) {
 	return d.virt.LookupDomain(d.guest.ID)
 }
 
-func (d *VirtDomain) diskTemplateFilepath() string {
-	return filepath.Join(configs.Conf.VirtTmplDir, "disk.xml")
-}
-
 func (d *VirtDomain) guestTemplateFilepath() string {
 	return filepath.Join(configs.Conf.VirtTmplDir, "guest.xml")
 }
 
+func (d *VirtDomain) hostdevTemplateFilepath() string {
+	return filepath.Join(configs.Conf.VirtTmplDir, "hostdev.xml")
+}
+
 // GetState .
func GetState(name string, virt libvirt.Libvirt) (libvirt.DomainState, error) { dom, err := virt.LookupDomain(name) if err != nil { - return libvirt.DomainNoState, errors.Trace(err) + return libvirt.DomainNoState, errors.Wrap(err, "") } - defer dom.Free() return dom.GetState() } diff --git a/internal/virt/domain/domain_test.go b/internal/virt/domain/domain_test.go index 7cc4072..efd3054 100644 --- a/internal/virt/domain/domain_test.go +++ b/internal/virt/domain/domain_test.go @@ -1,8 +1,11 @@ package domain import ( + "fmt" + "strings" "testing" + "github.com/antchfx/xmlquery" "github.com/projecteru2/yavirt/internal/models" "github.com/projecteru2/yavirt/pkg/libvirt" libmocks "github.com/projecteru2/yavirt/pkg/libvirt/mocks" @@ -11,10 +14,6 @@ import ( "github.com/projecteru2/yavirt/pkg/utils" ) -func init() { - models.Setup() -} - func TestSetSpec(t *testing.T) { libdom := &libmocks.Domain{} defer libdom.AssertExpectations(t) @@ -23,7 +22,6 @@ func TestSetSpec(t *testing.T) { dom.virt.(*libmocks.Libvirt).On("LookupDomain", mock.Anything).Return(libdom, nil).Once() defer func() { dom.virt.(*libmocks.Libvirt).AssertExpectations(t) }() - libdom.On("Free").Return().Once() libdom.On("SetVcpusFlags", uint(1), libvirt.DomainVcpuConfig|libvirt.DomainVcpuMaximum).Return(nil).Once() libdom.On("SetVcpusFlags", uint(1), libvirt.DomainVcpuConfig|libvirt.DomainVcpuCurrent).Return(nil).Once() libdom.On("SetMemoryFlags", uint64(utils.GB>>10), libvirt.DomainMemConfig|libvirt.DomainMemMaximum).Return(nil).Once() @@ -32,6 +30,63 @@ func TestSetSpec(t *testing.T) { assert.NilErr(t, dom.SetSpec(1, utils.GB)) } +// func TestAttachGPU(t *testing.T) { +// libdom := &libmocks.Domain{} +// defer libdom.AssertExpectations(t) + +// dom := newMockedDomain(t) +// dom.virt.(*libmocks.Libvirt).On("LookupDomain", mock.Anything).Return(libdom, nil).Once() +// defer func() { dom.virt.(*libmocks.Libvirt).AssertExpectations(t) }() +// libdom.On("GetXMLDesc", mock.Anything).Return("", nil).Once() +// } + +func TestExtractHostdevXML(t *testing.T) { + x := ` + + haha + bbb + + + YAVIRT + + + + hvm + + + + + + + destroy + restart + restart + + + + + + + +
+ + + + +
+ + + + + + ` + doc, err := xmlquery.Parse(strings.NewReader(x)) + assert.Nil(t, err) + xml, err := extractHostdevXML(doc, "0000:81:00.0") + assert.Nil(t, err) + assert.Equal(t, `
`, xml, "xml is incorrect") + fmt.Printf("%s\n", xml) +} func newMockedDomain(t *testing.T) *VirtDomain { gmod, err := models.NewGuest(nil, nil) assert.NilErr(t, err) diff --git a/internal/virt/domain/mocks/Domain.go b/internal/virt/domain/mocks/Domain.go index e8a6e28..0a68aba 100644 --- a/internal/virt/domain/mocks/Domain.go +++ b/internal/virt/domain/mocks/Domain.go @@ -1,10 +1,17 @@ -// Code generated by mockery v2.26.1. DO NOT EDIT. +// Code generated by mockery v2.42.0. DO NOT EDIT. package mocks import ( - libvirt "github.com/libvirt/libvirt-go" + context "context" + + libvirt "github.com/projecteru2/yavirt/third_party/libvirt" + mock "github.com/stretchr/testify/mock" + + pkglibvirt "github.com/projecteru2/yavirt/pkg/libvirt" + + types "github.com/projecteru2/yavirt/internal/types" ) // Domain is an autogenerated mock type for the Domain type @@ -16,6 +23,10 @@ type Domain struct { func (_m *Domain) AmplifyVolume(filepath string, cap uint64) error { ret := _m.Called(filepath, cap) + if len(ret) == 0 { + panic("no return value specified for AmplifyVolume") + } + var r0 error if rf, ok := ret.Get(0).(func(string, uint64) error); ok { r0 = rf(filepath, cap) @@ -26,23 +37,55 @@ func (_m *Domain) AmplifyVolume(filepath string, cap uint64) error { return r0 } -// AttachVolume provides a mock function with given fields: filepath, devName -func (_m *Domain) AttachVolume(filepath string, devName string) (libvirt.DomainState, error) { - ret := _m.Called(filepath, devName) +// AttachGPU provides a mock function with given fields: prod, count +func (_m *Domain) AttachGPU(prod string, count int) (libvirt.DomainState, error) { + ret := _m.Called(prod, count) + + if len(ret) == 0 { + panic("no return value specified for AttachGPU") + } + + var r0 libvirt.DomainState + var r1 error + if rf, ok := ret.Get(0).(func(string, int) (libvirt.DomainState, error)); ok { + return rf(prod, count) + } + if rf, ok := ret.Get(0).(func(string, int) libvirt.DomainState); ok { + r0 = rf(prod, count) + } else { + r0 = ret.Get(0).(libvirt.DomainState) + } + + if rf, ok := ret.Get(1).(func(string, int) error); ok { + r1 = rf(prod, count) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AttachVolume provides a mock function with given fields: buf +func (_m *Domain) AttachVolume(buf []byte) (libvirt.DomainState, error) { + ret := _m.Called(buf) + + if len(ret) == 0 { + panic("no return value specified for AttachVolume") + } var r0 libvirt.DomainState var r1 error - if rf, ok := ret.Get(0).(func(string, string) (libvirt.DomainState, error)); ok { - return rf(filepath, devName) + if rf, ok := ret.Get(0).(func([]byte) (libvirt.DomainState, error)); ok { + return rf(buf) } - if rf, ok := ret.Get(0).(func(string, string) libvirt.DomainState); ok { - r0 = rf(filepath, devName) + if rf, ok := ret.Get(0).(func([]byte) libvirt.DomainState); ok { + r0 = rf(buf) } else { r0 = ret.Get(0).(libvirt.DomainState) } - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(filepath, devName) + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(buf) } else { r1 = ret.Error(1) } @@ -50,10 +93,32 @@ func (_m *Domain) AttachVolume(filepath string, devName string) (libvirt.DomainS return r0, r1 } -// Boot provides a mock function with given fields: -func (_m *Domain) Boot() error { +// Boot provides a mock function with given fields: ctx +func (_m *Domain) Boot(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Boot") + } + + var r0 error + 
if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CheckRunning provides a mock function with given fields: +func (_m *Domain) CheckRunning() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for CheckRunning") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -68,6 +133,10 @@ func (_m *Domain) Boot() error { func (_m *Domain) CheckShutoff() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for CheckShutoff") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -82,6 +151,10 @@ func (_m *Domain) CheckShutoff() error { func (_m *Domain) Define() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Define") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -92,10 +165,70 @@ func (_m *Domain) Define() error { return r0 } +// DetachGPU provides a mock function with given fields: prod, count +func (_m *Domain) DetachGPU(prod string, count int) (libvirt.DomainState, error) { + ret := _m.Called(prod, count) + + if len(ret) == 0 { + panic("no return value specified for DetachGPU") + } + + var r0 libvirt.DomainState + var r1 error + if rf, ok := ret.Get(0).(func(string, int) (libvirt.DomainState, error)); ok { + return rf(prod, count) + } + if rf, ok := ret.Get(0).(func(string, int) libvirt.DomainState); ok { + r0 = rf(prod, count) + } else { + r0 = ret.Get(0).(libvirt.DomainState) + } + + if rf, ok := ret.Get(1).(func(string, int) error); ok { + r1 = rf(prod, count) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DetachVolume provides a mock function with given fields: dev +func (_m *Domain) DetachVolume(dev string) (libvirt.DomainState, error) { + ret := _m.Called(dev) + + if len(ret) == 0 { + panic("no return value specified for DetachVolume") + } + + var r0 libvirt.DomainState + var r1 error + if rf, ok := ret.Get(0).(func(string) (libvirt.DomainState, error)); ok { + return rf(dev) + } + if rf, ok := ret.Get(0).(func(string) libvirt.DomainState); ok { + r0 = rf(dev) + } else { + r0 = ret.Get(0).(libvirt.DomainState) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(dev) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetConsoleTtyname provides a mock function with given fields: func (_m *Domain) GetConsoleTtyname() (string, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetConsoleTtyname") + } + var r0 string var r1 error if rf, ok := ret.Get(0).(func() (string, error)); ok { @@ -120,6 +253,10 @@ func (_m *Domain) GetConsoleTtyname() (string, error) { func (_m *Domain) GetState() (libvirt.DomainState, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetState") + } + var r0 libvirt.DomainState var r1 error if rf, ok := ret.Get(0).(func() (libvirt.DomainState, error)); ok { @@ -144,6 +281,10 @@ func (_m *Domain) GetState() (libvirt.DomainState, error) { func (_m *Domain) GetUUID() (string, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetUUID") + } + var r0 string var r1 error if rf, ok := ret.Get(0).(func() (string, error)); ok { @@ -164,10 +305,92 @@ func (_m *Domain) GetUUID() (string, error) { return r0, r1 } +// Lookup provides a mock function with given fields: +func (_m *Domain) Lookup() (pkglibvirt.Domain, error) { + ret := _m.Called() + + if len(ret) == 0 { + 
panic("no return value specified for Lookup") + } + + var r0 pkglibvirt.Domain + var r1 error + if rf, ok := ret.Get(0).(func() (pkglibvirt.Domain, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() pkglibvirt.Domain); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pkglibvirt.Domain) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OpenConsole provides a mock function with given fields: devname, flages +func (_m *Domain) OpenConsole(devname string, flages types.OpenConsoleFlags) (*pkglibvirt.Console, error) { + ret := _m.Called(devname, flages) + + if len(ret) == 0 { + panic("no return value specified for OpenConsole") + } + + var r0 *pkglibvirt.Console + var r1 error + if rf, ok := ret.Get(0).(func(string, types.OpenConsoleFlags) (*pkglibvirt.Console, error)); ok { + return rf(devname, flages) + } + if rf, ok := ret.Get(0).(func(string, types.OpenConsoleFlags) *pkglibvirt.Console); ok { + r0 = rf(devname, flages) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pkglibvirt.Console) + } + } + + if rf, ok := ret.Get(1).(func(string, types.OpenConsoleFlags) error); ok { + r1 = rf(devname, flages) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReplaceSysVolume provides a mock function with given fields: diskXML +func (_m *Domain) ReplaceSysVolume(diskXML string) error { + ret := _m.Called(diskXML) + + if len(ret) == 0 { + panic("no return value specified for ReplaceSysVolume") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(diskXML) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Resume provides a mock function with given fields: func (_m *Domain) Resume() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Resume") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -182,6 +405,10 @@ func (_m *Domain) Resume() error { func (_m *Domain) SetSpec(cpu int, mem int64) error { ret := _m.Called(cpu, mem) + if len(ret) == 0 { + panic("no return value specified for SetSpec") + } + var r0 error if rf, ok := ret.Get(0).(func(int, int64) error); ok { r0 = rf(cpu, mem) @@ -192,13 +419,17 @@ func (_m *Domain) SetSpec(cpu int, mem int64) error { return r0 } -// Shutdown provides a mock function with given fields: force -func (_m *Domain) Shutdown(force bool) error { - ret := _m.Called(force) +// Shutdown provides a mock function with given fields: ctx, force +func (_m *Domain) Shutdown(ctx context.Context, force bool) error { + ret := _m.Called(ctx, force) + + if len(ret) == 0 { + panic("no return value specified for Shutdown") + } var r0 error - if rf, ok := ret.Get(0).(func(bool) error); ok { - r0 = rf(force) + if rf, ok := ret.Get(0).(func(context.Context, bool) error); ok { + r0 = rf(ctx, force) } else { r0 = ret.Error(0) } @@ -210,6 +441,10 @@ func (_m *Domain) Shutdown(force bool) error { func (_m *Domain) Suspend() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Suspend") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -224,6 +459,10 @@ func (_m *Domain) Suspend() error { func (_m *Domain) Undefine() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Undefine") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -234,13 +473,12 @@ func (_m *Domain) Undefine() error { return r0 } -type mockConstructorTestingTNewDomain interface { 
+// NewDomain creates a new instance of Domain. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDomain(t interface { mock.TestingT Cleanup(func()) -} - -// NewDomain creates a new instance of Domain. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDomain(t mockConstructorTestingTNewDomain) *Domain { +}) *Domain { mock := &Domain{} mock.Mock.Test(t) diff --git a/internal/virt/domain/templates/disk.xml b/internal/virt/domain/templates/disk.xml new file mode 100644 index 0000000..7982766 --- /dev/null +++ b/internal/virt/domain/templates/disk.xml @@ -0,0 +1,11 @@ + + + + + + {{ .read_iops }} + {{ .write_iops }} + {{ .read_bps }} + {{ .write_bps }} + + diff --git a/internal/virt/template/guest.xml b/internal/virt/domain/templates/guest.xml similarity index 72% rename from internal/virt/template/guest.xml rename to internal/virt/domain/templates/guest.xml index 6fea8fb..fbe3960 100644 --- a/internal/virt/template/guest.xml +++ b/internal/virt/domain/templates/guest.xml @@ -1,6 +1,7 @@ {{.name}} {{.uuid}} + {{ .metadata_xml }} {{.memory}} {{.memory}} {{.cpu}} @@ -9,6 +10,7 @@ YAVIRT + {{ .cloud_init_xml }} yavirt virtd 0.0.1 @@ -41,20 +43,15 @@ - - - - - + {{ .sysvol }} + {{range .datavols}} - - - - - + {{ . }} {{end}} + + {{ .cdrom_src_xml }}
@@ -82,33 +79,65 @@
- + {{if (eq .interface "bridge")}} - + + + {{else if (eq .interface "ethernet")}}