From 0e371eee816118e7a52fb4d4fbf2b9bb9bdc79f2 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Tue, 23 Nov 2021 19:55:29 +0000 Subject: [PATCH 01/15] add image manager to manage container image pulling Signed-off-by: Amory Hoste --- ctriface/bench_test.go | 2 +- ctriface/iface.go | 78 +---------------- ctriface/iface_test.go | 4 +- ctriface/manual_cleanup_test.go | 4 +- ctriface/orch.go | 9 +- ctrimages/imageManager.go | 143 ++++++++++++++++++++++++++++++++ 6 files changed, 156 insertions(+), 84 deletions(-) create mode 100644 ctrimages/imageManager.go diff --git a/ctriface/bench_test.go b/ctriface/bench_test.go index 04540fe94..4bca84d6f 100644 --- a/ctriface/bench_test.go +++ b/ctriface/bench_test.go @@ -69,7 +69,7 @@ func TestBenchmarkStart(t *testing.T) { startMetrics := make([]*metrics.Metric, benchCount) // Pull image - _, err := orch.getImage(ctx, imageName) + _, err := orch.imageManager.GetImage(ctx, imageName) require.NoError(t, err, "Failed to pull image "+imageName) for i := 0; i < benchCount; i++ { diff --git a/ctriface/iface.go b/ctriface/iface.go index 63db8374e..9de93feb4 100644 --- a/ctriface/iface.go +++ b/ctriface/iface.go @@ -24,10 +24,6 @@ package ctriface import ( "context" - "fmt" - "net" - "net/http" - "net/url" "os" "os/exec" "strings" @@ -41,8 +37,6 @@ import ( "github.com/containerd/containerd/cio" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" - "github.com/containerd/containerd/remotes/docker" - "github.com/firecracker-microvm/firecracker-containerd/proto" // note: from the original repo "github.com/firecracker-microvm/firecracker-containerd/runtime/firecrackeroci" "github.com/pkg/errors" @@ -96,7 +90,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string) (_ * ctx = namespaces.WithNamespace(ctx, namespaceName) tStart = time.Now() - if vm.Image, err = o.getImage(ctx, imageName); err != nil { + if vm.Image, err = o.imageManager.GetImage(ctx, imageName); err != nil { return 
nil, nil, errors.Wrapf(err, "Failed to get/pull image") } startVMMetric.MetricMap[metrics.GetImage] = metrics.ToUS(time.Since(tStart)) @@ -280,76 +274,6 @@ func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string) error { return nil } -// Checks whether a URL has a .local domain -func isLocalDomain(s string) (bool, error) { - if !strings.Contains(s, "://") { - s = "dummy://" + s - } - - u, err := url.Parse(s) - if err != nil { - return false, err - } - - host, _, err := net.SplitHostPort(u.Host) - if err != nil { - host = u.Host - } - - i := strings.LastIndex(host, ".") - tld := host[i+1:] - - return tld == "local", nil -} - -// Converts an image name to a url if it is not a URL -func getImageURL(image string) string { - // Pull from dockerhub by default if not specified (default k8s behavior) - if strings.Contains(image, ".") { - return image - } - return "docker.io/" + image - -} - -func (o *Orchestrator) getImage(ctx context.Context, imageName string) (*containerd.Image, error) { - image, found := o.cachedImages[imageName] - if !found { - var err error - log.Debug(fmt.Sprintf("Pulling image %s", imageName)) - - imageURL := getImageURL(imageName) - local, _ := isLocalDomain(imageURL) - if local { - // Pull local image using HTTP - resolver := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, - Hosts: docker.ConfigureDefaultRegistries( - docker.WithPlainHTTP(docker.MatchAllHosts), - ), - }) - image, err = o.client.Pull(ctx, imageURL, - containerd.WithPullUnpack, - containerd.WithPullSnapshotter(o.snapshotter), - containerd.WithResolver(resolver), - ) - } else { - // Pull remote image - image, err = o.client.Pull(ctx, imageURL, - containerd.WithPullUnpack, - containerd.WithPullSnapshotter(o.snapshotter), - ) - } - - if err != nil { - return &image, err - } - o.cachedImages[imageName] = image - } - - return &image, nil -} - func getK8sDNS() []string { //using googleDNS as a backup dnsIPs := []string{"8.8.8.8"} diff --git 
a/ctriface/iface_test.go b/ctriface/iface_test.go index cda2e97a0..97883c696 100644 --- a/ctriface/iface_test.go +++ b/ctriface/iface_test.go @@ -186,7 +186,7 @@ func TestStartStopParallel(t *testing.T) { ) // Pull image - _, err := orch.getImage(ctx, testImageName) + _, err := orch.imageManager.GetImage(ctx, testImageName) require.NoError(t, err, "Failed to pull image "+testImageName) { @@ -245,7 +245,7 @@ func TestPauseResumeParallel(t *testing.T) { ) // Pull image - _, err := orch.getImage(ctx, testImageName) + _, err := orch.imageManager.GetImage(ctx, testImageName) require.NoError(t, err, "Failed to pull image "+testImageName) { diff --git a/ctriface/manual_cleanup_test.go b/ctriface/manual_cleanup_test.go index c4c4fd33a..d0eab54c1 100644 --- a/ctriface/manual_cleanup_test.go +++ b/ctriface/manual_cleanup_test.go @@ -174,7 +174,7 @@ func TestParallelSnapLoad(t *testing.T) { ) // Pull image - _, err := orch.getImage(ctx, testImageName) + _, err := orch.imageManager.GetImage(ctx, testImageName) require.NoError(t, err, "Failed to pull image "+testImageName) var vmGroup sync.WaitGroup @@ -236,7 +236,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { ) // Pull image - _, err := orch.getImage(ctx, testImageName) + _, err := orch.imageManager.GetImage(ctx, testImageName) require.NoError(t, err, "Failed to pull image "+testImageName) { diff --git a/ctriface/orch.go b/ctriface/orch.go index e01b1c431..de7a80a7e 100644 --- a/ctriface/orch.go +++ b/ctriface/orch.go @@ -23,13 +23,14 @@ package ctriface import ( + "github.com/ease-lab/vhive/ctrimages" "os" "os/signal" "path/filepath" - "syscall" - "time" "strings" "sync" + "syscall" + "time" log "github.com/sirupsen/logrus" @@ -79,6 +80,7 @@ type Orchestrator struct { snapshotter string client *containerd.Client fcClient *fcclient.Client + imageManager *ctrimages.ImageManager // store *skv.KVStore snapshotsEnabled bool isUPFEnabled bool @@ -135,6 +137,9 @@ func NewOrchestrator(snapshotter, hostIface string, opts 
...OrchestratorOption) log.Fatal("Failed to start firecracker client", err) } log.Info("Created firecracker client") + + o.imageManager = ctrimages.NewImageManager(o.client, o.snapshotter) + return o } diff --git a/ctrimages/imageManager.go b/ctrimages/imageManager.go new file mode 100644 index 000000000..3bec848fb --- /dev/null +++ b/ctrimages/imageManager.go @@ -0,0 +1,143 @@ +// Package ctrimages provides an image manager that manages and caches container images. +package ctrimages + +import ( + "context" + "github.com/containerd/containerd" + "github.com/containerd/containerd/remotes/docker" + log "github.com/sirupsen/logrus" + "net" + "net/http" + "net/url" + "strings" + "sync" +) + +// ImageState is used to synchronize image pulling to avoid pulling the same image multiple times concurrently. +type ImageState struct { + sync.Mutex + pulled bool +} + +// NewImageState creates a new ImageState object that can be used to synchronize image pulling. +func NewImageState() *ImageState { + state := new(ImageState) + state.pulled = false + return state +} + +// ImageManager manages the images that have been pulled to the node. +type ImageManager struct { + sync.Mutex + snapshotter string // image snapshotter + cachedImages map[string]containerd.Image // Cached container images + imageStates map[string]*ImageState + client *containerd.Client +} + +// NewImageManager creates a new imagemanager that can be used to fetch container images. 
+func NewImageManager(client *containerd.Client, snapshotter string) *ImageManager { + log.Info("Creating image manager") + manager := new(ImageManager) + manager.snapshotter = snapshotter + manager.cachedImages = make(map[string]containerd.Image) + manager.imageStates = make(map[string]*ImageState) + manager.client = client + return manager +} + +// pullImage fetches an image and adds it to the cached image list +func (mgr *ImageManager) pullImage(ctx context.Context, imageName string) error { + var err error + var image containerd.Image + + imageURL := getImageURL(imageName) + local, _ := isLocalDomain(imageURL) + if local { + // Pull local image using HTTP + resolver := docker.NewResolver(docker.ResolverOptions{ + Client: http.DefaultClient, + Hosts: docker.ConfigureDefaultRegistries( + docker.WithPlainHTTP(docker.MatchAllHosts), + ), + }) + image, err = mgr.client.Pull(ctx, imageURL, + containerd.WithPullUnpack, + containerd.WithPullSnapshotter(mgr.snapshotter), + containerd.WithResolver(resolver), + ) + } else { + // Pull remote image + image, err = mgr.client.Pull(ctx, imageURL, + containerd.WithPullUnpack, + containerd.WithPullSnapshotter(mgr.snapshotter), + ) + } + if err != nil { + return err + } + mgr.Lock() + mgr.cachedImages[imageName] = image + mgr.Unlock() + return nil +} + +// GetImage fetches an image that can be used to create a container using containerd +func (mgr *ImageManager) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { + mgr.Lock() + imgState, found := mgr.imageStates[imageName] + if !found { + imgState = NewImageState() + mgr.imageStates[imageName] = imgState + } + mgr.Unlock() + + // Pull image if necessary + imgState.Lock() + if !imgState.pulled { + if err := mgr.pullImage(ctx, imageName); err != nil { + imgState.Unlock() + return nil, err + } + imgState.pulled = true + } + imgState.Unlock() + + mgr.Lock() + image := mgr.cachedImages[imageName] + mgr.Unlock() + + return &image, nil +} + +// Converts an image 
name to a url if it is not a URL +func getImageURL(image string) string { + // Pull from dockerhub by default if not specified (default k8s behavior) + if strings.Contains(image, ".") { + return image + } + return "docker.io/" + image + +} + +// Checks whether a URL has a .local domain +func isLocalDomain(s string) (bool, error) { + if ! strings.Contains(s, "://") { + s = "dummy://" + s + } + + u, err := url.Parse(s) + if err != nil { + return false, err + } + + host, _, err := net.SplitHostPort(u.Host) + if err != nil { + host = u.Host + } + + i := strings.LastIndex(host, ".") + tld := host[i+1:] + + return tld == "local", nil +} \ No newline at end of file From d37fd9ff5a4f0a73a88265946dbd8117c7a3fdf4 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Tue, 23 Nov 2021 20:18:46 +0000 Subject: [PATCH 02/15] disable failing pmu tools essential files download Signed-off-by: Amory Hoste --- scripts/install_pmutools.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/install_pmutools.sh b/scripts/install_pmutools.sh index 62e2061af..8fc03537d 100755 --- a/scripts/install_pmutools.sh +++ b/scripts/install_pmutools.sh @@ -32,4 +32,4 @@ sudo git clone https://github.com/ease-lab/pmu-tools -b master /usr/local/pmu-to sudo sysctl -w kernel.perf_event_paranoid=-1 # first run, download essential files -/usr/local/pmu-tools/toplev --print > /dev/null +#/usr/local/pmu-tools/toplev --print > /dev/null From 93ccbad9ee33cfd51a3186396581352062308bdf Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Sun, 21 Nov 2021 20:33:29 +0000 Subject: [PATCH 03/15] add networkmanager to more efficiently manage networking Signed-off-by: Amory Hoste --- ctriface/iface.go | 17 +- ctriface/orch.go | 6 +- ctriface/orch_options.go | 8 - go.mod | 1 + misc/misc_test.go | 42 +-- misc/types.go | 4 +- misc/vm_pool.go | 53 ++-- networking/networkManager.go | 202 ++++++++++++++ networking/networkconfig.go | 292 ++++++++++++++++++++ networking/networking.go | 507 
+++++++++++++++++++++++++++++++++++ taps/Makefile | 33 --- taps/tapManager.go | 392 --------------------------- taps/taps_test.go | 105 -------- taps/types.go | 54 ---- vhive.go | 2 + 15 files changed, 1060 insertions(+), 658 deletions(-) create mode 100644 networking/networkManager.go create mode 100644 networking/networkconfig.go create mode 100644 networking/networking.go delete mode 100644 taps/Makefile delete mode 100644 taps/tapManager.go delete mode 100644 taps/taps_test.go delete mode 100644 taps/types.go diff --git a/ctriface/iface.go b/ctriface/iface.go index 9de93feb4..59fdc3271 100644 --- a/ctriface/iface.go +++ b/ctriface/iface.go @@ -73,7 +73,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string) (_ * logger := log.WithFields(log.Fields{"vmID": vmID, "image": imageName}) logger.Debug("StartVM: Received StartVM") - vm, err := o.vmPool.Allocate(vmID, o.hostIface) + vm, err := o.vmPool.Allocate(vmID) if err != nil { logger.Error("failed to allocate VM in VM pool") return nil, nil, err @@ -215,7 +215,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string) (_ * logger.Debug("Successfully started a VM") - return &StartVMResponse{GuestIP: vm.Ni.PrimaryAddress}, startVMMetric, nil + return &StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, startVMMetric, nil } // StopSingleVM Shuts down a VM @@ -305,15 +305,16 @@ func (o *Orchestrator) getVMConfig(vm *misc.VM) *proto.CreateVMRequest { }, NetworkInterfaces: []*proto.FirecrackerNetworkInterface{{ StaticConfig: &proto.StaticNetworkConfiguration{ - MacAddress: vm.Ni.MacAddress, - HostDevName: vm.Ni.HostDevName, + MacAddress: vm.NetConfig.GetMacAddress(), + HostDevName: vm.NetConfig.GetHostDevName(), IPConfig: &proto.IPConfiguration{ - PrimaryAddr: vm.Ni.PrimaryAddress + vm.Ni.Subnet, - GatewayAddr: vm.Ni.GatewayAddress, + PrimaryAddr: vm.NetConfig.GetContainerCIDR(), + GatewayAddr: vm.NetConfig.GetGatewayIP(), Nameservers: getK8sDNS(), }, }, }}, + // 
NetworkNamespace: vm.NetConfig.GetNamespacePath(), // TODO } } @@ -485,8 +486,8 @@ func (o *Orchestrator) Offload(ctx context.Context, vmID string) error { return err } - if err := o.vmPool.RecreateTap(vmID, o.hostIface); err != nil { - logger.Error("Failed to recreate tap upon offloading") + if err := o.vmPool.Free(vmID); err != nil { + logger.Error("failed to free VM from VM pool") return err } diff --git a/ctriface/orch.go b/ctriface/orch.go index de7a80a7e..9909f58a0 100644 --- a/ctriface/orch.go +++ b/ctriface/orch.go @@ -87,21 +87,19 @@ type Orchestrator struct { isLazyMode bool snapshotsDir string isMetricsMode bool - hostIface string memoryManager *manager.MemoryManager } // NewOrchestrator Initializes a new orchestrator -func NewOrchestrator(snapshotter, hostIface string, opts ...OrchestratorOption) *Orchestrator { +func NewOrchestrator(snapshotter, hostIface string, netPoolSize int, opts ...OrchestratorOption) *Orchestrator { var err error o := new(Orchestrator) - o.vmPool = misc.NewVMPool() + o.vmPool = misc.NewVMPool(hostIface, netPoolSize) o.cachedImages = make(map[string]containerd.Image) o.snapshotter = snapshotter o.snapshotsDir = "/fccd/snapshots" - o.hostIface = hostIface for _, opt := range opts { opt(o) diff --git a/ctriface/orch_options.go b/ctriface/orch_options.go index 8b941896f..83fb17be9 100644 --- a/ctriface/orch_options.go +++ b/ctriface/orch_options.go @@ -72,11 +72,3 @@ func WithMetricsMode(isMetricsMode bool) OrchestratorOption { o.isMetricsMode = isMetricsMode } } - -// WithCustomHostIface Sets the custom host net interface -// for the VMs to link to -func WithCustomHostIface(hostIface string) OrchestratorOption { - return func(o *Orchestrator) { - o.hostIface = hostIface - } -} diff --git a/go.mod b/go.mod index 5945b4216..7802ed9b1 100644 --- a/go.mod +++ b/go.mod @@ -61,6 +61,7 @@ require ( github.com/sirupsen/logrus v1.8.0 github.com/stretchr/testify v1.7.0 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 + 
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae github.com/wcharczuk/go-chart v2.0.1+incompatible golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb // indirect golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 diff --git a/misc/misc_test.go b/misc/misc_test.go index db512c257..bf3e918ee 100644 --- a/misc/misc_test.go +++ b/misc/misc_test.go @@ -48,12 +48,12 @@ func TestMain(m *testing.M) { } func TestAllocateFreeVMs(t *testing.T) { - vmPool := NewVMPool() + vmPool := NewVMPool("", 10) vmIDs := [2]string{"test1", "test2"} for _, vmID := range vmIDs { - _, err := vmPool.Allocate(vmID, "") + _, err := vmPool.Allocate(vmID) require.NoError(t, err, "Failed to allocate VM") } @@ -62,13 +62,13 @@ func TestAllocateFreeVMs(t *testing.T) { require.NoError(t, err, "Failed to free a VM") } - vmPool.RemoveBridges() + vmPool.CleanupNetwork() } func TestAllocateFreeVMsParallel(t *testing.T) { vmNum := 100 - vmPool := NewVMPool() + vmPool := NewVMPool("", 10) var vmGroup sync.WaitGroup for i := 0; i < vmNum; i++ { @@ -76,7 +76,7 @@ func TestAllocateFreeVMsParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("test_%d", i) - _, err := vmPool.Allocate(vmID, "") + _, err := vmPool.Allocate(vmID) require.NoError(t, err, "Failed to allocate VM") }(i) } @@ -94,13 +94,13 @@ func TestAllocateFreeVMsParallel(t *testing.T) { } vmGroupFree.Wait() - vmPool.RemoveBridges() + vmPool.CleanupNetwork() } -func TestRecreateParallel(t *testing.T) { +func TestReuseTaps(t *testing.T) { vmNum := 100 - vmPool := NewVMPool() + vmPool := NewVMPool("", 10) var vmGroup sync.WaitGroup for i := 0; i < vmNum; i++ { @@ -108,12 +108,24 @@ func TestRecreateParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("test_%d", i) - _, err := vmPool.Allocate(vmID, "") + _, err := vmPool.Allocate(vmID) require.NoError(t, err, "Failed to allocate VM") }(i) } vmGroup.Wait() + var vmGroupFree sync.WaitGroup + for i := 0; i < vmNum; i++ { + 
vmGroupFree.Add(1) + go func(i int) { + defer vmGroupFree.Done() + vmID := fmt.Sprintf("test_%d", i) + err := vmPool.Free(vmID) + require.NoError(t, err, "Failed to free a VM") + }(i) + } + vmGroupFree.Wait() + var vmGroupRecreate sync.WaitGroup tStart := time.Now() @@ -123,7 +135,7 @@ func TestRecreateParallel(t *testing.T) { go func(i int) { defer vmGroupRecreate.Done() vmID := fmt.Sprintf("test_%d", i) - err := vmPool.RecreateTap(vmID, "") + err := vmPool.Free(vmID) require.NoError(t, err, "Failed to recreate tap") }(i) } @@ -132,17 +144,17 @@ func TestRecreateParallel(t *testing.T) { tElapsed := time.Since(tStart) log.Infof("Recreated %d taps in %d ms", vmNum, tElapsed.Milliseconds()) - var vmGroupFree sync.WaitGroup + var vmGroupCleanup sync.WaitGroup for i := 0; i < vmNum; i++ { - vmGroupFree.Add(1) + vmGroupCleanup.Add(1) go func(i int) { - defer vmGroupFree.Done() + defer vmGroupCleanup.Done() vmID := fmt.Sprintf("test_%d", i) err := vmPool.Free(vmID) require.NoError(t, err, "Failed to free a VM") }(i) } - vmGroupFree.Wait() + vmGroupCleanup.Wait() - vmPool.RemoveBridges() + vmPool.CleanupNetwork() } diff --git a/misc/types.go b/misc/types.go index 37d1036f0..b4b01bc35 100644 --- a/misc/types.go +++ b/misc/types.go @@ -23,6 +23,7 @@ package misc import ( + "github.com/ease-lab/vhive/networking" "sync" "github.com/containerd/containerd" @@ -38,12 +39,13 @@ type VM struct { Task *containerd.Task TaskCh <-chan containerd.ExitStatus Ni *taps.NetworkInterface + NetConfig *networking.NetworkConfig } // VMPool Pool of active VMs (can be in several states though) type VMPool struct { vmMap sync.Map - tapManager *taps.TapManager + networkManager *networking.NetworkManager } // NewVM Initialize a VM diff --git a/misc/vm_pool.go b/misc/vm_pool.go index 33a04b7af..b83696601 100644 --- a/misc/vm_pool.go +++ b/misc/vm_pool.go @@ -23,21 +23,24 @@ package misc import ( + "github.com/ease-lab/vhive/networking" log "github.com/sirupsen/logrus" - - 
"github.com/ease-lab/vhive/taps" ) // NewVMPool Initializes a pool of VMs -func NewVMPool() *VMPool { +func NewVMPool(hostIface string, netPoolSize int) *VMPool { p := new(VMPool) - p.tapManager = taps.NewTapManager() + mgr, err := networking.NewNetworkManager(hostIface, netPoolSize) + if err != nil { + log.Println(err) + } + p.networkManager = mgr return p } // Allocate Initializes a VM, activates it and then adds it to VM map -func (p *VMPool) Allocate(vmID, hostIface string) (*VM, error) { +func (p *VMPool) Allocate(vmID string) (*VM, error) { logger := log.WithFields(log.Fields{"vmID": vmID}) @@ -50,9 +53,9 @@ func (p *VMPool) Allocate(vmID, hostIface string) (*VM, error) { vm := NewVM(vmID) var err error - vm.Ni, err = p.tapManager.AddTap(vmID+"_tap", hostIface) + vm.NetConfig, err = p.networkManager.CreateNetwork(vmID) if err != nil { - logger.Warn("Ni allocation failed") + logger.Warn("VM network creation failed") return nil, err } @@ -73,8 +76,8 @@ func (p *VMPool) Free(vmID string) error { return nil } - if err := p.tapManager.RemoveTap(vmID + "_tap"); err != nil { - logger.Error("Could not delete tap") + if err := p.networkManager.RemoveNetwork(vmID); err != nil { + logger.Error("Could not remove network config") return err } @@ -83,32 +86,6 @@ func (p *VMPool) Free(vmID string) error { return nil } -// RecreateTap Deletes and creates the tap for a VM -func (p *VMPool) RecreateTap(vmID, hostIface string) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - - logger.Debug("Recreating tap") - - _, isPresent := p.vmMap.Load(vmID) - if !isPresent { - log.WithFields(log.Fields{"vmID": vmID}).Panic("RecreateTap: VM does not exist in the map") - return NonExistErr("RecreateTap: VM does not exist when recreating its tap") - } - - if err := p.tapManager.RemoveTap(vmID + "_tap"); err != nil { - logger.Error("Failed to delete tap") - return err - } - - _, err := p.tapManager.AddTap(vmID+"_tap", hostIface) - if err != nil { - logger.Error("Failed to add 
tap") - return err - } - - return nil -} - // GetVMMap Returns a copy of vmMap as a regular concurrency-unsafe map func (p *VMPool) GetVMMap() map[string]*VM { m := make(map[string]*VM) @@ -131,7 +108,7 @@ func (p *VMPool) GetVM(vmID string) (*VM, error) { return vm.(*VM), nil } -// RemoveBridges Removes the bridges created by the tap manager -func (p *VMPool) RemoveBridges() { - p.tapManager.RemoveBridges() +// CleanupNetwork removes and deallocates all network configurations +func (p *VMPool) CleanupNetwork() { + _ = p.networkManager.Cleanup() } diff --git a/networking/networkManager.go b/networking/networkManager.go new file mode 100644 index 000000000..a86757148 --- /dev/null +++ b/networking/networkManager.go @@ -0,0 +1,202 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Package networking provides primitives to connect function instances to the network. 
+package networking + +import ( + log "github.com/sirupsen/logrus" + "sync" +) + +// NetworkManager manages the in use network configurations along with a pool of free network configurations +// that can be used to connect a function instance to the network. +type NetworkManager struct { + sync.Mutex + nextID int + hostIfaceName string + + // Pool of free network configs + networkPool []*NetworkConfig + poolCond *sync.Cond + poolSize int + + // Mapping of function instance IDs to their network config + netConfigs map[string]*NetworkConfig +} + +// NewNetworkManager creates and returns a new network manager that connects function instances to the network +// using the supplied interface. If no interface is supplied, the default interface is used. To take the network +// setup of the critical path of a function creation, the network manager tries to maintain a pool of ready to use +// network configurations of size at least poolSize. +func NewNetworkManager(hostIfaceName string, poolSize int) (*NetworkManager, error) { + manager := new(NetworkManager) + + if hostIfaceName == "" { + hostIface, err := getHostIfaceName() + if err != nil { + return nil, err + } else { + manager.hostIfaceName = hostIface + } + } + + manager.netConfigs = make(map[string]*NetworkConfig) + manager.networkPool = make([]*NetworkConfig, 0) + + startId, err := getNetworkStartID() + if err == nil { + manager.nextID = startId + } else { + manager.nextID = 0 + } + + manager.poolCond = sync.NewCond(new(sync.Mutex)) + manager.initConfigPool(poolSize) + manager.poolSize = poolSize + + return manager, nil +} + +// initConfigPool fills an empty network pool up to the given poolSize +func (mgr *NetworkManager) initConfigPool(poolSize int) { + var wg sync.WaitGroup + wg.Add(poolSize) + + // Concurrently create poolSize network configs + for i := 0; i < poolSize; i++ { + go func() { + mgr.addNetConfig() + wg.Done() + }() + } + wg.Wait() +} + +// addNetConfig creates and initializes a new network config 
+func (mgr *NetworkManager) addNetConfig() { + mgr.Lock() + id := mgr.nextID + mgr.nextID += 1 + mgr.Unlock() + + netCfg := NewNetworkConfig(id, mgr.hostIfaceName) + if err := netCfg.CreateNetwork(); err != nil { + log.Errorf("failed to create network %s:", err) + } + + mgr.poolCond.L.Lock() + mgr.networkPool = append(mgr.networkPool, netCfg) + // Signal in case someone is waiting for a new config to become available in the pool + mgr.poolCond.Signal() + mgr.poolCond.L.Unlock() +} + +// allocNetConfig allocates a new network config from the pool to a function instance identified by funcID +func (mgr *NetworkManager) allocNetConfig(funcID string) *NetworkConfig { + // Add netconfig to pool to keep pool to configured size + go mgr.addNetConfig() + + // Pop a network config from the pool and allocate it to the function instance + mgr.poolCond.L.Lock() + if len(mgr.networkPool) == 0 { + // Wait until a new network config has been created + mgr.poolCond.Wait() + } + + config := mgr.networkPool[len(mgr.networkPool)-1] + mgr.networkPool = mgr.networkPool[:len(mgr.networkPool)-1] + mgr.poolCond.L.Unlock() + + mgr.Lock() + mgr.netConfigs[funcID] = config + mgr.Unlock() + return config +} + +// releaseNetConfig releases the network config of a given function instance with id funcID back to the pool +func (mgr *NetworkManager) releaseNetConfig(funcID string) { + mgr.Lock() + config := mgr.netConfigs[funcID] + delete(mgr.netConfigs, funcID) + mgr.Unlock() + + // Add network config back to the pool. We allow the pool to grow over it's configured size here since the + // overhead of keeping a network config in the pool is low compared to the cost of creating a new config. 
+ mgr.poolCond.L.Lock() + mgr.networkPool = append(mgr.networkPool, config) + mgr.poolCond.Signal() + mgr.poolCond.L.Unlock() +} + +// CreateNetwork creates the networking for a function instance identified by funcID +func (mgr *NetworkManager) CreateNetwork(funcID string) (*NetworkConfig, error) { + netCfg := mgr.allocNetConfig(funcID) + return netCfg, nil +} + +// GetConfig returns the network config assigned to a function instance identified by funcID +func (mgr *NetworkManager) GetConfig(funcID string) *NetworkConfig { + mgr.Lock() + defer mgr.Unlock() + + cfg := mgr.netConfigs[funcID] + return cfg +} + +// RemoveNetwork removes the network config of a function instance identified by funcID. The allocated network devices +// for the given function instance must not be in use anymore when calling this function. +func (mgr *NetworkManager) RemoveNetwork(funcID string) error { + mgr.releaseNetConfig(funcID) + return nil +} + +// Cleanup removes and deallocates all network configurations that are in use or in the network pool. 
+func (mgr *NetworkManager) Cleanup() error { + log.Info("Cleaning up network manager") + mgr.Lock() + defer mgr.Unlock() + + // Release network configs still in use + for funcID := range mgr.netConfigs { + mgr.releaseNetConfig(funcID) + } + + // Cleanup network pool + mgr.poolCond.L.Lock() + var wg sync.WaitGroup + wg.Add(len(mgr.networkPool)) + + for _, config := range mgr.networkPool { + go func() { + if err := config.RemoveNetwork(); err != nil { + log.Errorf("failed to remove network %s:", err) + } + wg.Done() + }() + } + wg.Wait() + mgr.networkPool = make([]*NetworkConfig, 0) + mgr.poolCond.L.Unlock() + + return nil +} \ No newline at end of file diff --git a/networking/networkconfig.go b/networking/networkconfig.go new file mode 100644 index 000000000..256489cf1 --- /dev/null +++ b/networking/networkconfig.go @@ -0,0 +1,292 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package networking + +import ( + "fmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/vishvananda/netns" + "net" + "runtime" +) + +const ( + defaultContainerCIDR = "172.16.0.2/24" + defaultGatewayCIDR = "172.16.0.1/24" + defaultContainerTap = "tap0" + defaultContainerMac = "AA:FC:00:00:00:01" +) + +// NetworkConfig represents the network devices, IPs, namespaces, routes and filter rules to connect a uVM +// to the network. Note that due to the current allocation of IPs at most 2^14 VMs can be simultaneously be +// available on a single host. +type NetworkConfig struct { + id int + containerCIDR string // Container IP address (CIDR notation) + gatewayCIDR string // Container gateway IP address + containerTap string // Container tap name + containerMac string // Container Mac address + hostIfaceName string // Host network interface name +} + +// NewNetworkConfig creates a new network config with a given id and default host interface +func NewNetworkConfig(id int, hostIfaceName string) *NetworkConfig { + return &NetworkConfig{ + id: id, + containerCIDR: defaultContainerCIDR, + gatewayCIDR: defaultGatewayCIDR, + containerTap: defaultContainerTap, + containerMac: defaultContainerMac, + hostIfaceName: hostIfaceName, + } +} + +// GetMacAddress returns the mac address used for the uVM +func (cfg *NetworkConfig) GetMacAddress() string { + return cfg.containerMac +} + +// GetHostDevName returns the device connecting the uVM to the host +func (cfg *NetworkConfig) GetHostDevName() string { + return cfg.containerTap +} + +// getVeth0Name returns the name for the veth device at the side of the uVM +func (cfg *NetworkConfig) getVeth0Name() string { + return 
fmt.Sprintf("veth%d-0", cfg.id) +} + +// getVeth0CIDR returns the IP address for the veth device at the side of the uVM in CIDR notation +func (cfg *NetworkConfig) getVeth0CIDR() string { + return fmt.Sprintf("172.17.%d.%d/30", (4 * cfg.id) / 256, ((4 * cfg.id) + 2) % 256) +} + +// getVeth1Name returns the name for the veth device at the side of the host +func (cfg *NetworkConfig) getVeth1Name() string { + return fmt.Sprintf("veth%d-1", cfg.id) +} + +// getVeth1Name returns the IP address for the veth device at the side of the host in CIDR notation +func (cfg *NetworkConfig) getVeth1CIDR() string { + return fmt.Sprintf("172.17.%d.%d/30", (4 * cfg.id) / 256, ((4 * cfg.id) + 1) % 256) +} + +// GetCloneIP returns the IP address the uVM is reachable at from the host +func (cfg *NetworkConfig) GetCloneIP() string { + return fmt.Sprintf("172.18.%d.%d", cfg.id / 254, 1 + (cfg.id % 254)) +} + +// GetContainerCIDR returns the internal IP of the uVM in CIDR notation +func (cfg *NetworkConfig) GetContainerCIDR() string { + return cfg.containerCIDR +} + +// getNamespaceName returns the network namespace name for the uVM +func (cfg *NetworkConfig) getNamespaceName() string { + return fmt.Sprintf("uvmns%d", cfg.id) +} + +// GetNamespacePath returns the full path to the network namespace for the uVM +func (cfg *NetworkConfig) GetNamespacePath() string { + return fmt.Sprintf("/var/run/netns/%s", cfg.getNamespaceName()) +} + +// getContainerIP returns the internal IP of the uVM +func (cfg *NetworkConfig) getContainerIP() string { + ip, _, _ := net.ParseCIDR(cfg.containerCIDR) + return ip.String() +} + +// GetGatewayIP returns the IP address of the tap device associated with the uVM +func (cfg *NetworkConfig) GetGatewayIP() string { + ip, _, _ := net.ParseCIDR(cfg.gatewayCIDR) + return ip.String() +} + +// createVmNetwork creates network devices, namespaces, routes and filter rules for the uVM at the +// uVM side +func (cfg *NetworkConfig) createVmNetwork(hostNsHandle 
netns.NsHandle) error { + // A. In uVM netns + // A.1. Create network namespace for uVM & join network namespace + vmNsHandle, err := netns.NewNamed(cfg.getNamespaceName()) // Switches namespace + if err != nil { + log.Println(err) + return err + } + defer vmNsHandle.Close() + + // A.2. Create tap device for uVM + if err := createTap(cfg.containerTap, cfg.gatewayCIDR, cfg.getNamespaceName()); err != nil { + return err + } + + // A.3. Create veth pair for uVM + // A.3.1 Create veth pair + if err := createVethPair(cfg.getVeth0Name(), cfg.getVeth1Name(), vmNsHandle, hostNsHandle); err != nil { + return err + } + + // A.3.2 Configure uVM side veth pair + if err := configVeth(cfg.getVeth0Name(), cfg.getVeth0CIDR()); err != nil { + return err + } + + // A.3.3 Designate host side as default gateway for packets leaving namespace + if err := setDefaultGateway(cfg.getVeth1CIDR()); err != nil { + return err + } + + // A.4. Setup NAT rules + if err := setupNatRules(cfg.getVeth0Name(), cfg.getContainerIP(), cfg.GetCloneIP(), vmNsHandle); err != nil { + return err + } + + return nil +} + +// createHostNetwork creates network devices, namespaces, routes and filter rules for the uVM at the host +// side +func (cfg *NetworkConfig) createHostNetwork() error { + // B. In host netns + // B.1 Configure host side veth pair + if err := configVeth(cfg.getVeth1Name(), cfg.getVeth1CIDR()); err != nil { + return err + } + + // B.2 Add a route on the host for the clone address + if err := addRoute(cfg.GetCloneIP(), cfg.getVeth0CIDR()); err != nil { + return err + } + + // B.3 Setup nat to route traffic out of veth device + if err := setupForwardRules(cfg.getVeth1Name(), cfg.hostIfaceName); err != nil { + return err + } + return nil +} + +// CreateNetwork creates the necessary network devices, namespaces, routes and filter rules to connect the uVM to the +// network. 
The networking is created as described in the Firecracker documentation on providing networking for clones +// (https://github.com/firecracker-microvm/firecracker/blob/main/docs/snapshotting/network-for-clones.md) +func (cfg *NetworkConfig) CreateNetwork() error { + // 1. Lock the OS Thread so we don't accidentally switch namespaces + runtime.LockOSThread() + + // 2. Get host network namespace + hostNsHandle, err := netns.Get() + defer hostNsHandle.Close() + if err != nil { + log.Printf("Failed to get host ns, %s\n", err) + return err + } + + // 3. Setup networking in instance namespace + if err := cfg.createVmNetwork(hostNsHandle); err != nil { + netns.Set(hostNsHandle) + runtime.UnlockOSThread() + return err + } + + // 4. Go back to host namespace + err = netns.Set(hostNsHandle) + if err != nil { + return err + } + + runtime.UnlockOSThread() + + // 5. Setup networking in host namespace + if err := cfg.createHostNetwork(); err != nil { + return err + } + + return nil +} + +// RemoveNetwork removes the necessary network devices, namespaces, routes and filter rules to connect the +// function instance to the network +func (cfg *NetworkConfig) RemoveNetwork() error { + // Delete nat to route traffic out of veth device + if err := deleteForwardRules(cfg.getVeth1Name()); err != nil { + return err + } + + // Delete route on the host for the clone address + if err := deleteRoute(cfg.GetCloneIP(), cfg.getVeth0CIDR()); err != nil { + return err + } + + runtime.LockOSThread() + + hostNsHandle, err := netns.Get() + defer hostNsHandle.Close() + if err != nil { + log.Printf("Failed to get host ns, %s\n", err) + return err + } + + // Get uVM namespace handle + vmNsHandle, err := netns.GetFromName(cfg.getNamespaceName()) + defer vmNsHandle.Close() + if err != nil { + return err + } + err = netns.Set(vmNsHandle) + if err != nil { + return err + } + + // Delete NAT rules + if err := deleteNatRules(vmNsHandle); err != nil { + return err + } + + // Delete default gateway for packets
leaving namespace + if err := deleteDefaultGateway(cfg.getVeth1CIDR()); err != nil { + return err + } + + // Delete uVM side veth pair + if err := deleteVethPair(cfg.getVeth0Name(), cfg.getVeth1Name(), vmNsHandle, hostNsHandle); err != nil { + return err + } + + // Delete tap device for uVM + if err := deleteTap(cfg.containerTap); err != nil { + return err + } + + // Delete namespace + if err := netns.DeleteNamed(cfg.getNamespaceName()); err != nil { + return errors.Wrapf(err, "deleting network namespace") + } + + err = netns.Set(hostNsHandle) + if err != nil { + return err + } + runtime.UnlockOSThread() + + return nil +} \ No newline at end of file diff --git a/networking/networking.go b/networking/networking.go new file mode 100644 index 000000000..23c72ca5d --- /dev/null +++ b/networking/networking.go @@ -0,0 +1,507 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package networking + +import ( + "bufio" + "bytes" + "fmt" + "github.com/google/nftables" + "github.com/google/nftables/expr" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" + "io/ioutil" + "net" + "os/exec" + "regexp" + "strconv" + "strings" +) + +// getHostIfaceName returns the default host network interface name. +func getHostIfaceName() (string, error) { + out, err := exec.Command( + "route", + ).Output() + if err != nil { + log.Warnf("Failed to fetch host net interfaces %v\n%s\n", err, out) + return "", err + } + + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "default") { + return line[strings.LastIndex(line, " ")+1:], nil + } + } + return "", errors.New("Failed to fetch host net interface") +} + +// createTap creates a TAP device with name tapName, IP gatewayIP in the network namespace with name netnsName +func createTap(tapName, gatewayIP, netnsName string) error { + // 1. Create tap device + la := netlink.NewLinkAttrs() + la.Name = tapName + la.Namespace = netnsName + tap0 := &netlink.Tuntap{LinkAttrs: la, Mode: netlink.TUNTAP_MODE_TAP} + if err := netlink.LinkAdd(tap0); err != nil { + return errors.Wrapf(err, "creating tap") + } + + // 2. Give tap device ip address + addr, _ := netlink.ParseAddr(gatewayIP) + addr.Broadcast = net.IPv4(0, 0, 0, 0) + if err := netlink.AddrAdd(tap0, addr); err != nil { + return errors.Wrapf(err, "adding tap ip address") + } + + // 3. 
Enable tap network interface + if err := netlink.LinkSetUp(tap0); err != nil { + return errors.Wrapf(err, "enabling tap") + } + + return nil +} + +// deleteTap deletes the tap device identified by name tapName +func deleteTap(tapName string) error { + if err := netlink.LinkDel(&netlink.Tuntap{LinkAttrs: netlink.LinkAttrs{Name: tapName}}); err != nil { + return errors.Wrapf(err, "deleting tap %s", tapName) + } + + return nil +} + +// createVethPair creates a virtual ethernet pair connecting the supplied namespaces +func createVethPair(veth0Name, veth1Name string, veth0NsHandle, veth1NsHandle netns.NsHandle) error { + veth := &netlink.Veth{netlink.LinkAttrs{Name: veth0Name, Namespace: netlink.NsFd(veth0NsHandle), TxQLen: 1000}, veth1Name, nil, netlink.NsFd(veth1NsHandle)} + if err := netlink.LinkAdd(veth); err != nil { + return errors.Wrapf(err, "creating veth pair") + } + + return nil +} + +// deleteVethPair deletes the virtual ethernet pair connecting the supplied namespaces +func deleteVethPair(veth0Name, veth1Name string, veth0NsHandle, veth1NsHandle netns.NsHandle) error { + if err := netlink.LinkDel(&netlink.Veth{LinkAttrs: netlink.LinkAttrs{Name: veth0Name, Namespace: netlink.NsFd(veth0NsHandle)}, PeerName: veth1Name, PeerNamespace: netlink.NsFd(veth1NsHandle)}); err != nil { + return errors.Wrapf(err, "deleting veth %s", veth0Name) + } + return nil +} + +// configVeth configures the IP address of a veth device and enables the device +func configVeth(linkName, vethIp string) error { + // 1. Get link + veth, err := netlink.LinkByName(linkName) + if err != nil { + return errors.Wrapf(err, "Finding veth link") + } + + // 2. Set IP address + addr, _ := netlink.ParseAddr(vethIp) + addr.Broadcast = net.IPv4(0, 0, 0, 0) + if err := netlink.AddrAdd(veth, addr); err != nil { + return errors.Wrapf(err, "adding veth link ip address") + } + + // 3. 
Enable link + if err := netlink.LinkSetUp(veth); err != nil { + return errors.Wrapf(err, "enabling veth link") + } + + return nil +} + +// setDefaultGateway creates a default routing rule to the supplied gatewayIP +func setDefaultGateway(gatewayIp string) error { + gw, _, err := net.ParseCIDR(gatewayIp) + if err != nil { + return errors.Wrapf(err, "parsing ip") + } + + defaultRoute := &netlink.Route{ + Dst: nil, + Gw: gw, + } + + if err := netlink.RouteAdd(defaultRoute); err != nil { + return errors.Wrapf(err, "adding default route") + } + + return nil +} + +// deleteDefaultGateway deletes the default routing rule to the supplied gatewayIP +func deleteDefaultGateway(gatewayIp string) error { + gw, _, err := net.ParseCIDR(gatewayIp) + if err != nil { + return errors.Wrapf(err, "parsing ip") + } + + defaultRoute := &netlink.Route{ + Dst: nil, + Gw: gw, + } + + if err := netlink.RouteDel(defaultRoute); err != nil { + return errors.Wrapf(err, "deleting default route") + } + + return nil +} + +// setupNatRules configures the NAT rules. Each uVMs address is translated to an external clone address to avoid +// conflicts (see https://github.com/firecracker-microvm/firecracker/blob/main/docs/snapshotting/network-for-clones.md) +func setupNatRules(vethVmName, hostIp, cloneIp string, vmNsHandle netns.NsHandle) error { + conn := nftables.Conn{NetNS: int(vmNsHandle)} + + // 1. add table ip nat + natTable := &nftables.Table{ + Name: "nat", + Family: nftables.TableFamilyIPv4, + } + + // 2. 
Iptables: -t nat -A POSTROUTING -o veth1-0 -s 172.16.0.2 -j SNAT --to 192.168.0.1 + // 2.1 add chain ip nat POSTROUTING { type nat hook postrouting priority 0; policy accept; } + polAccept := nftables.ChainPolicyAccept + postRouteCh := &nftables.Chain{ + Name: "POSTROUTING", + Table: natTable, + Type: nftables.ChainTypeNAT, + Priority: 0, + Hooknum: nftables.ChainHookPostrouting, + Policy: &polAccept, + } + + // 2.2 add rule ip nat POSTROUTING oifname veth1-0 ip saddr 172.16.0.2 counter snat to 192.168.0.1 + snatRule := &nftables.Rule{ + Table: natTable, + Chain: postRouteCh, + Exprs: []expr.Any{ + // Load iffname in register 1 + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + // Check iifname == veth1-0 + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", vethVmName)), + }, + // Load source IP address (offset 12 bytes network header) in register 1 + &expr.Payload{ + DestRegister: 1, + Base: expr.PayloadBaseNetworkHeader, + Offset: 12, + Len: 4, + }, + // Check source ip address == 172.16.0.2 + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: net.ParseIP(hostIp).To4(), + }, + // Load snatted address (192.168.0.1) in register 1 + &expr.Immediate{ + Register: 1, + Data: net.ParseIP(cloneIp).To4(), + }, + &expr.NAT{ + Type: expr.NATTypeSourceNAT, // Snat + Family: unix.NFPROTO_IPV4, + RegAddrMin: 1, + }, + }, + } + + // 3. 
Iptables: -t nat -A PREROUTING -i veth1-0 -d 192.168.0.1 -j DNAT --to 172.16.0.2 + // 3.1 add chain ip nat PREROUTING { type nat hook prerouting priority 0; policy accept; } + preRouteCh := &nftables.Chain{ + Name: "PREROUTING", + Table: natTable, + Type: nftables.ChainTypeNAT, + Priority: 0, + Hooknum: nftables.ChainHookPrerouting, + Policy: &polAccept, + } + + // 3.2 add rule ip nat PREROUTING iifname veth1-0 ip daddr 192.168.0.1 counter dnat to 172.16.0.2 + dnatRule := &nftables.Rule{ + Table: natTable, + Chain: preRouteCh, + Exprs: []expr.Any{ + // Load iffname in register 1 + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + // Check iifname == veth1-0 + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", vethVmName)), + }, + // Load destination IP address (offset 16 bytes network header) in register 1 + &expr.Payload{ + DestRegister: 1, + Base: expr.PayloadBaseNetworkHeader, + Offset: 16, + Len: 4, + }, + // Check destination ip address == 192.168.0.1 + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: net.ParseIP(cloneIp).To4(), + }, + // Load dnatted address (172.16.0.2) in register 1 + &expr.Immediate{ + Register: 1, + Data: net.ParseIP(hostIp).To4(), + }, + &expr.NAT{ + Type: expr.NATTypeDestNAT, // Dnat + Family: unix.NFPROTO_IPV4, + RegAddrMin: 1, + }, + }, + } + + // Apply rules + conn.AddTable(natTable) + conn.AddChain(postRouteCh) + conn.AddRule(snatRule) + conn.AddChain(preRouteCh) + conn.AddRule(dnatRule) + if err := conn.Flush(); err != nil { + return errors.Wrapf(err, "creating nat rules") + } + return nil +} + +// deleteNatRules deletes the NAT rules to give each uVM a clone address. 
+func deleteNatRules(vmNsHandle netns.NsHandle) error { + conn := nftables.Conn{NetNS: int(vmNsHandle)} + + natTable := &nftables.Table{ + Name: "nat", + Family: nftables.TableFamilyIPv4, + } + + // Apply + conn.DelTable(natTable) + if err := conn.Flush(); err != nil { + return errors.Wrapf(err, "deleting nat rules") + } + return nil +} + +// setupForwardRules creates forwarding rules to allow traffic from the end of the veth pair to the default host interface. +func setupForwardRules(vethHostName, hostIface string) error { + conn := nftables.Conn{} + + // 1. add table ip filter + filterTable := &nftables.Table{ + Name: "filter", + Family: nftables.TableFamilyIPv4, + } + + // 2. add chain ip filter FORWARD { type filter hook forward priority 0; policy accept; } + polAccept := nftables.ChainPolicyAccept + fwdCh := &nftables.Chain{ + Name: fmt.Sprintf("FORWARD%s", vethHostName), + Table: filterTable, + Type: nftables.ChainTypeFilter, + Priority: 0, + Hooknum: nftables.ChainHookForward, + Policy: &polAccept, + } + + // 3. Iptables: -A FORWARD -i veth1-1 -o eno49 -j ACCEPT + // 3.1 add rule ip filter FORWARD iifname veth1-1 oifname eno49 counter accept + outRule := &nftables.Rule{ + Table: filterTable, + Chain: fwdCh, + Exprs: []expr.Any{ + // Load iffname in register 1 + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + // Check iifname == veth1-0 + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", vethHostName)), + }, + // Load oif in register 1 + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + // Check iifname == veth1-0 + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", hostIface)), + }, + &expr.Verdict{ + Kind: expr.VerdictAccept, + }, + }, + } + + // 4. 
Iptables: -A FORWARD -o veth1-1 -i eno49 -j ACCEPT + // 4.1 add rule ip filter FORWARD iifname eno49 oifname veth1-1 counter accept + inRule := &nftables.Rule{ + Table: filterTable, + Chain: fwdCh, + Exprs: []expr.Any{ + // Load oifname in register 1 + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + // Check oifname == veth1-1 + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", vethHostName)), + }, + // Load iifname in register 1 + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + // Check iifname == eno49 + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", hostIface)), + }, + &expr.Verdict{ + Kind: expr.VerdictAccept, + }, + }, + } + conn.AddTable(filterTable) + conn.AddChain(fwdCh) + conn.AddRule(outRule) + conn.AddRule(inRule) + + if err := conn.Flush(); err != nil { + return errors.Wrapf(err, "creating forward rules") + } + return nil +} + +// deleteForwardRules deletes the forward rules to allow traffic to the default host interface. +func deleteForwardRules(vethHostName string) error { + conn := nftables.Conn{} + + // 1. add table ip filter + filterTable := &nftables.Table{ + Name: "filter", + Family: nftables.TableFamilyIPv4, + } + + // 2. add chain ip filter FORWARD { type filter hook forward priority 0; policy accept; } + polAccept := nftables.ChainPolicyAccept + fwdCh := &nftables.Chain{ + Name: fmt.Sprintf("FORWARD%s", vethHostName), + Table: filterTable, + Type: nftables.ChainTypeFilter, + Priority: 0, + Hooknum: nftables.ChainHookForward, + Policy: &polAccept, + } + + // Apply + conn.FlushChain(fwdCh) + conn.DelChain(fwdCh) + if err := conn.Flush(); err != nil { + return errors.Wrapf(err, "deleting forward rules") + } + return nil +} + +// addRoute adds a routing table entry to destIp with gateway gatewayIp. 
+func addRoute(destIp, gatewayIp string) error { + _, dstNet, err := net.ParseCIDR(fmt.Sprintf("%s/32", destIp)) + if err != nil { + return errors.Wrapf(err, "parsing route destination ip") + } + + gwAddr, _, err := net.ParseCIDR(gatewayIp) + if err != nil { + return errors.Wrapf(err, "parsing route gateway ip") + } + + route := &netlink.Route{ + Dst: dstNet, + Gw: gwAddr, + } + + if err := netlink.RouteAdd(route); err != nil { + return errors.Wrapf(err, "adding route") + } + return nil +} + +// deleteRoute deletes the routing table entry to destIp with gateway gatewayIp. +func deleteRoute(destIp, gatewayIp string) error { + _, dstNet, err := net.ParseCIDR(fmt.Sprintf("%s/32", destIp)) + if err != nil { + return errors.Wrapf(err, "parsing route destination ip") + } + + gwAddr, _, err := net.ParseCIDR(gatewayIp) + if err != nil { + return errors.Wrapf(err, "parsing route gateway ip") + } + + route := &netlink.Route{ + Dst: dstNet, + Gw: gwAddr, + } + + if err := netlink.RouteDel(route); err != nil { + return errors.Wrapf(err, "deleting route") + } + return nil +} + +// getNetworkStartID returns the first network ID that is not yet in use by an existing uVM network namespace +func getNetworkStartID() (int, error) { + files, err := ioutil.ReadDir("/run/netns") + if err != nil { + return 0, errors.Wrapf(err,"Couldn't read network namespace dir") + } + + maxId := 0 + for _, f := range files { + if !
f.IsDir() { + netnsName := f.Name() + + re := regexp.MustCompile(`^uvmns([0-9]+)$`) + regres := re.FindStringSubmatch(netnsName) + + if len(regres) > 1 { + id, err := strconv.Atoi(regres[1]) + if err == nil && id > maxId { + maxId = id + } + } + } + } + + return maxId + 1, nil +} \ No newline at end of file diff --git a/taps/Makefile b/taps/Makefile deleted file mode 100644 index b3fbde9c2..000000000 --- a/taps/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# MIT License -# -# Copyright (c) 2020 Dmitrii Ustiugov, Plamen Petrov and EASE lab -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -EXTRAGOARGS:=-v -race -cover - -test: - # Need to pass GOROOT because GitHub-hosted runners may have several - # go versions installed so that calling go from root may fail - sudo env "PATH=$(PATH)" "GOROOT=$(GOROOT)" go test ./ $(EXTRAGOARGS) - -test-man: - echo "Nothing to test manually" - -.PHONY: test test-man diff --git a/taps/tapManager.go b/taps/tapManager.go deleted file mode 100644 index d079d1e79..000000000 --- a/taps/tapManager.go +++ /dev/null @@ -1,392 +0,0 @@ -// MIT License -// -// Copyright (c) 2021 Plamen Petrov, Amory Hoste and EASE lab -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
- -package taps - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "github.com/google/nftables" - "github.com/google/nftables/expr" - "os/exec" - "strings" - "sync/atomic" - - log "github.com/sirupsen/logrus" - - "net" - - "github.com/vishvananda/netlink" -) - -// getGatewayAddr Creates the gateway address (first address in pool) -func getGatewayAddr(bridgeID int) string { - return fmt.Sprintf("19%d.128.0.1", bridgeID) -} - -// getBridgeName Create bridge name -func getBridgeName(id int) string { - return fmt.Sprintf("br%d", id) -} - -// getPrimaryAddress Creates the primary address for a tap -func getPrimaryAddress(curTaps, bridgeID int) string { - return fmt.Sprintf("19%d.128.%d.%d", bridgeID, (curTaps+2)/256, (curTaps+2)%256) -} - -// NewTapManager Creates a new tap manager -func NewTapManager() *TapManager { - tm := new(TapManager) - - tm.numBridges = NumBridges - tm.TapCountsPerBridge = make([]int64, NumBridges) - tm.createdTaps = make(map[string]*NetworkInterface) - - log.Info("Registering bridges for tap manager") - - for i := 0; i < NumBridges; i++ { - brName := getBridgeName(i) - gatewayAddr := getGatewayAddr(i) - - createBridge(brName, gatewayAddr) - } - - return tm -} - -// Creates the bridge, add a gateway to it, and enables it -func createBridge(bridgeName, gatewayAddr string) { - logger := log.WithFields(log.Fields{"bridge": bridgeName}) - - logger.Debug("Creating bridge") - - la := netlink.NewLinkAttrs() - la.Name = bridgeName - - br := &netlink.Bridge{LinkAttrs: la} - - if err := netlink.LinkAdd(br); err != nil { - logger.Panic("Bridge could not be created") - } - - if err := netlink.LinkSetUp(br); err != nil { - logger.Panic("Bridge could not be enabled") - } - - bridgeAddress := gatewayAddr + Subnet - - addr, err := netlink.ParseAddr(bridgeAddress) - if err != nil { - log.Panic(fmt.Sprintf("could not parse bridge address %s", bridgeAddress)) - } - - if err := netlink.AddrAdd(br, addr); err != nil { - logger.Panic(fmt.Sprintf("could not add %s to 
bridge", bridgeAddress)) - } -} - -// setupForwardRules sets up forwarding rules to enable internet access inside the vm -func setupForwardRules(tapName, hostIface string) error { - // Fetch host default interface if not specified - if hostIface == "" { - out, err := exec.Command( - "route", - ).Output() - if err != nil { - log.Warnf("Failed to fetch host net interfaces %v\n%s\n", err, out) - return err - } - scanner := bufio.NewScanner(bytes.NewReader(out)) - for scanner.Scan() { - line := scanner.Text() - if strings.Contains(line, "default") { - hostIface = line[strings.LastIndex(line, " ")+1:] - } - } - } - - - conn := nftables.Conn{} - - // 1. nft add table ip filter - filterTable := &nftables.Table{ - Name: "filter", - Family: nftables.TableFamilyIPv4, - } - - // 2. nft add chain ip filter FORWARD { type filter hook forward priority 0; policy accept; } - polAccept := nftables.ChainPolicyAccept - fwdCh := &nftables.Chain{ - Name: fmt.Sprintf("FORWARD%s", tapName), - Table: filterTable, - Type: nftables.ChainTypeFilter, - Priority: 0, - Hooknum: nftables.ChainHookForward, - Policy: &polAccept, - } - - // 3. iptables -A FORWARD -i tapName -o hostIface -j ACCEPT - // 3.1 nft add rule ip filter FORWARD iifname tapName oifname hostIface counter accept - outRule := &nftables.Rule{ - Table: filterTable, - Chain: fwdCh, - Exprs: []expr.Any{ - // Load iffname in register 1 - &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, - // Check iifname == tapName - &expr.Cmp{ - Op: expr.CmpOpEq, - Register: 1, - Data: []byte(fmt.Sprintf("%s\x00", tapName)), - }, - // Load oifname in register 1 - &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, - // Check oifname == hostIface - &expr.Cmp{ - Op: expr.CmpOpEq, - Register: 1, - Data: []byte(fmt.Sprintf("%s\x00", hostIface)), - }, - &expr.Verdict{ - Kind: expr.VerdictAccept, - }, - }, - } - - // 4. 
iptables -A FORWARD -o tapName -i hostIface -j ACCEPT - // 4.1 nft add rule ip filter FORWARD iifname hostIface oifname tapName counter accept - inRule := &nftables.Rule{ - Table: filterTable, - Chain: fwdCh, - Exprs: []expr.Any{ - // Load oifname in register 1 - &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, - // Check oifname == tapName - &expr.Cmp{ - Op: expr.CmpOpEq, - Register: 1, - Data: []byte(fmt.Sprintf("%s\x00", tapName)), - }, - // Load iifname in register 1 - &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, - // Check iifname == hostIface - &expr.Cmp{ - Op: expr.CmpOpEq, - Register: 1, - Data: []byte(fmt.Sprintf("%s\x00", hostIface)), - }, - &expr.Verdict{ - Kind: expr.VerdictAccept, - }, - }, - } - conn.AddTable(filterTable) - conn.AddChain(fwdCh) - conn.AddRule(outRule) - conn.AddRule(inRule) - - if err := conn.Flush(); err != nil { - log.Warnf("Failed to setup forwarding out from tap %v\n%s\n", tapName, err) - return err - } - return nil -} - -// AddTap Creates a new tap and returns the corresponding network interface -func (tm *TapManager) AddTap(tapName, hostIface string) (*NetworkInterface, error) { - tm.Lock() - - if ni, ok := tm.createdTaps[tapName]; ok { - tm.Unlock() - return ni, tm.reconnectTap(tapName, ni) - } - - tm.Unlock() - - for i := 0; i < tm.numBridges; i++ { - tapsInBridge := atomic.AddInt64(&tm.TapCountsPerBridge[i], 1) - if tapsInBridge-1 < TapsPerBridge { - // Create a tap with this bridge - ni, err := tm.addTap(tapName, i, int(tapsInBridge-1)) - if err == nil { - tm.Lock() - tm.createdTaps[tapName] = ni - tm.Unlock() - err := setupForwardRules(tapName, hostIface) - if err != nil { - return nil, err - } - } - - return ni, err - } - } - log.Error("No space for creating taps") - return nil, errors.New("No space for creating taps") -} - -// Reconnects a single tap with the same network interface that it was -// create with previously -func (tm *TapManager) reconnectTap(tapName string, ni *NetworkInterface) error { - logger := 
log.WithFields(log.Fields{"tap": tapName, "bridge": ni.BridgeName}) - - la := netlink.NewLinkAttrs() - la.Name = tapName - - logger.Debug("Reconnecting tap") - - tap := &netlink.Tuntap{LinkAttrs: la, Mode: netlink.TUNTAP_MODE_TAP} - - if err := netlink.LinkAdd(tap); err != nil { - logger.Error("Tap could not be reconnected") - return err - } - - br, err := netlink.LinkByName(ni.BridgeName) - if err != nil { - logger.Error("Could not reconnect tap, because corresponding bridge does not exist") - return err - } - - hwAddr, err := net.ParseMAC(ni.MacAddress) - if err != nil { - logger.Error("Could not parse MAC") - return err - } - - if err := netlink.LinkSetHardwareAddr(tap, hwAddr); err != nil { - logger.Error("Could not set MAC address") - return err - } - - if err := netlink.LinkSetMaster(tap, br); err != nil { - logger.Error("Master could not be set") - return err - } - - if err := netlink.LinkSetUp(tap); err != nil { - logger.Error("Tap could not be enabled") - return err - } - - return nil -} - -// Creates a single tap and connects it to the corresponding bridge -func (tm *TapManager) addTap(tapName string, bridgeID, currentNumTaps int) (*NetworkInterface, error) { - bridgeName := getBridgeName(bridgeID) - - logger := log.WithFields(log.Fields{"tap": tapName, "bridge": bridgeName}) - - la := netlink.NewLinkAttrs() - la.Name = tapName - - logger.Debug("Creating tap") - - tap := &netlink.Tuntap{LinkAttrs: la, Mode: netlink.TUNTAP_MODE_TAP} - - if err := netlink.LinkAdd(tap); err != nil { - logger.Error("Tap could not be created") - return nil, err - } - - br, err := netlink.LinkByName(bridgeName) - if err != nil { - logger.Error("Could not create tap, because corresponding bridge does not exist") - return nil, err - } - - if err := netlink.LinkSetMaster(tap, br); err != nil { - logger.Error("Master could not be set") - return nil, err - } - - macIndex := bridgeID*TapsPerBridge + currentNumTaps - macAddress := fmt.Sprintf("02:FC:00:00:%02X:%02X", macIndex/256, 
macIndex%256) - - hwAddr, err := net.ParseMAC(macAddress) - if err != nil { - logger.Error("Could not parse MAC") - return nil, err - } - - if err := netlink.LinkSetHardwareAddr(tap, hwAddr); err != nil { - logger.Error("Could not set MAC address") - return nil, err - } - - if err := netlink.LinkSetUp(tap); err != nil { - logger.Error("Tap could not be enabled") - return nil, err - } - - return &NetworkInterface{ - BridgeName: bridgeName, - MacAddress: macAddress, - PrimaryAddress: getPrimaryAddress(currentNumTaps, bridgeID), - HostDevName: tapName, - Subnet: Subnet, - GatewayAddress: getGatewayAddr(bridgeID), - }, nil -} - -// RemoveTap Removes the tap -func (tm *TapManager) RemoveTap(tapName string) error { - logger := log.WithFields(log.Fields{"tap": tapName}) - - logger.Debug("Removing tap") - - tap, err := netlink.LinkByName(tapName) - if err != nil { - logger.Warn("Could not find tap") - return nil - } - - if err := netlink.LinkDel(tap); err != nil { - logger.Error("Tap could not be removed") - return err - } - - return nil -} - -// RemoveBridges Removes the bridges created by the tap manager -func (tm *TapManager) RemoveBridges() { - log.Info("Removing bridges") - for i := 0; i < tm.numBridges; i++ { - bridgeName := getBridgeName(i) - - logger := log.WithFields(log.Fields{"bridge": bridgeName}) - - br, err := netlink.LinkByName(bridgeName) - if err != nil { - logger.Warn("Could not find bridge") - continue - } - - if err := netlink.LinkDel(br); err != nil { - logger.WithFields(log.Fields{"bridge": bridgeName}).Panic("Bridge could not be deleted") - } - } -} diff --git a/taps/taps_test.go b/taps/taps_test.go deleted file mode 100644 index cbfbf00f1..000000000 --- a/taps/taps_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// MIT License -// -// Copyright (c) 2020 Plamen Petrov and EASE lab -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the 
Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package taps - -import ( - "fmt" - "os" - "sync" - "testing" - - ctrdlog "github.com/containerd/containerd/log" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" -) - -func TestMain(m *testing.M) { - // call flag.Parse() here if TestMain uses flags - - log.SetFormatter(&log.TextFormatter{ - TimestampFormat: ctrdlog.RFC3339NanoFixed, - FullTimestamp: true, - }) - //log.SetReportCaller(true) // FIXME: make sure it's false unless debugging - - log.SetOutput(os.Stdout) - - log.SetLevel(log.InfoLevel) - - os.Exit(m.Run()) -} - -func TestCreateCleanBridges(t *testing.T) { - tm := NewTapManager() - tm.RemoveBridges() -} - -func TestCreateRemoveTaps(t *testing.T) { - tapsNum := []int{100, 1100} - - tm := NewTapManager() - defer tm.RemoveBridges() - - for _, n := range tapsNum { - var wg sync.WaitGroup - for i := 0; i < n; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - _, _ = tm.AddTap(fmt.Sprintf("tap_%d", i), "") - }(i) - } - wg.Wait() - for i := 0; i < n; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - _ = 
tm.RemoveTap(fmt.Sprintf("tap_%d", i)) - }(i) - } - wg.Wait() - } -} - -func TestCreateRemoveExtra(t *testing.T) { - - t.Skip("Test disabled due to execution failure in GitHub Actions and it doesn't seem essential for the test coverage") - - tapsNum := 2001 - - tm := NewTapManager() - defer tm.RemoveBridges() - - for i := 0; i < tapsNum; i++ { - _, err := tm.AddTap(fmt.Sprintf("tap_%d", i), "") - if i < tm.numBridges*TapsPerBridge { - require.NoError(t, err, "Failed to create tap") - } else { - require.Error(t, err, "Did not fail to create extra taps") - } - } - - for i := 0; i < tapsNum; i++ { - _ = tm.RemoveTap(fmt.Sprintf("tap_%d", i)) - } -} diff --git a/taps/types.go b/taps/types.go deleted file mode 100644 index d3e9a2a6d..000000000 --- a/taps/types.go +++ /dev/null @@ -1,54 +0,0 @@ -// MIT License -// -// Copyright (c) 2020 Plamen Petrov and EASE lab -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
- -package taps - -import ( - "sync" -) - -const ( - // Subnet Number of bits in the subnet mask - Subnet = "/10" - // TapsPerBridge Number of taps per bridge - TapsPerBridge = 1000 - // NumBridges is the number of bridges for the TapManager - NumBridges = 2 -) - -// TapManager A Tap Manager -type TapManager struct { - sync.Mutex - numBridges int - TapCountsPerBridge []int64 - createdTaps map[string]*NetworkInterface -} - -// NetworkInterface Network interface type, NI names are generated based on expected tap names -type NetworkInterface struct { - BridgeName string - MacAddress string - HostDevName string - PrimaryAddress string - Subnet string - GatewayAddress string -} diff --git a/vhive.go b/vhive.go index 7567bd1ec..31d4ff821 100644 --- a/vhive.go +++ b/vhive.go @@ -84,6 +84,7 @@ func main() { criSock = flag.String("criSock", "/etc/vhive-cri/vhive-cri.sock", "Socket address for CRI service") hostIface = flag.String("hostIface", "", "Host net-interface for the VMs to bind to for internet access") sandbox := flag.String("sandbox", "firecracker", "Sandbox tech to use, valid options: firecracker, gvisor") + netPoolSize := flag.Int("netpoolsize", 50, "Amount of network configs to preallocate in a pool") flag.Parse() if *sandbox != "firecracker" && *sandbox != "gvisor" { @@ -137,6 +138,7 @@ func main() { orch = ctriface.NewOrchestrator( *snapshotter, *hostIface, + *netPoolSize, ctriface.WithTestModeOn(testModeOn), ctriface.WithSnapshots(*isSnapshotsEnabled), ctriface.WithUPF(*isUPFEnabled), From 7687152611bc338f05404176ad1751a03c0ada60 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Tue, 4 Jan 2022 20:05:22 +0100 Subject: [PATCH 04/15] Add device mapper functionality Signed-off-by: Amory Hoste --- devmapper/deviceSnapshot.go | 176 ++++++++++++++++ devmapper/devicemapper.go | 334 ++++++++++++++++++++++++++++++ devmapper/thindelta/blockDelta.go | 131 ++++++++++++ devmapper/thindelta/thinDelta.go | 115 ++++++++++ go.mod | 10 +- go.sum | 9 + 6 files changed, 774 
insertions(+), 1 deletion(-) create mode 100644 devmapper/deviceSnapshot.go create mode 100644 devmapper/devicemapper.go create mode 100644 devmapper/thindelta/blockDelta.go create mode 100644 devmapper/thindelta/thinDelta.go diff --git a/devmapper/deviceSnapshot.go b/devmapper/deviceSnapshot.go new file mode 100644 index 000000000..25beecfd6 --- /dev/null +++ b/devmapper/deviceSnapshot.go @@ -0,0 +1,176 @@ +package devmapper + +import ( + "fmt" + "github.com/pkg/errors" + "io/ioutil" + "os" + "os/exec" + "strings" + "sync" + "syscall" +) + +// DeviceSnapshot represents a device mapper snapshot +type DeviceSnapshot struct { + sync.Mutex + poolName string + deviceName string + deviceId string + mountDir string + mountedReadonly bool + numMounted int + numActivated int +} + +// GetDevicePath returns the path to the snapshot device. +func (dsnp *DeviceSnapshot) GetDevicePath() string { + return fmt.Sprintf("/dev/mapper/%s", dsnp.deviceName) +} + +// getPoolpath returns the path of the thin pool used by the snapshot. +func (dsnp *DeviceSnapshot) getPoolPath() string { + return fmt.Sprintf("/dev/mapper/%s", dsnp.poolName) +} + +// NewDeviceSnapshot initializes a new device mapper snapshot. +func NewDeviceSnapshot(poolName, deviceName, deviceId string) *DeviceSnapshot { + dsnp := new(DeviceSnapshot) + dsnp.poolName = poolName + dsnp.deviceName = deviceName + dsnp.deviceId = deviceId + dsnp.mountDir = "" + dsnp.mountedReadonly = false + dsnp.numMounted = 0 + dsnp.numActivated = 0 + return dsnp +} + +// Activate creates a snapshot. 
+func (dsnp *DeviceSnapshot) Activate() error { + dsnp.Lock() + defer dsnp.Unlock() + + if dsnp.numActivated == 0 { + tableEntry := fmt.Sprintf("0 20971520 thin %s %s", dsnp.getPoolPath(), dsnp.deviceId) + + cmd := exec.Command("sudo", "dmsetup", "create", dsnp.deviceName, "--table", fmt.Sprintf("%s", tableEntry)) + err := cmd.Run() + if err != nil { + return errors.Wrapf(err, "activating snapshot %s", dsnp.deviceName) + } + + } + + dsnp.numActivated += 1 + + return nil +} + +// Deactivate removes a snapshot. +func (dsnp *DeviceSnapshot) Deactivate() error { + dsnp.Lock() + defer dsnp.Unlock() + + if dsnp.numActivated == 1 { + cmd := exec.Command("sudo", "dmsetup", "remove", dsnp.deviceName) + err := cmd.Run() + if err != nil { + return errors.Wrapf(err, "deactivating snapshot %s", dsnp.deviceName) + } + } + + dsnp.numActivated -= 1 + return nil +} + +// Mount mounts a snapshot device and returns the path where it is mounted. For better performance and efficiency, +// a snapshot is only mounted once and shared if it is already mounted. +func (dsnp *DeviceSnapshot) Mount(readOnly bool) (string, error) { + dsnp.Lock() + defer dsnp.Unlock() + + if dsnp.numActivated == 0 { + return "", errors.New("failed to mount: snapshot not activated") + } + + if dsnp.numMounted != 0 && (!dsnp.mountedReadonly || dsnp.mountedReadonly && !readOnly) { + return "", errors.New("failed to mount: can't mount snapshot for both reading and writing") + } + + if dsnp.numMounted == 0 { + mountDir, err := ioutil.TempDir("", dsnp.deviceName) + if err != nil { + return "", err + } + mountDir = removeTrailingSlash(mountDir) + + err = mountExt4(dsnp.GetDevicePath(), mountDir, readOnly) + if err != nil { + return "", errors.Wrapf(err, "mounting %s at %s", dsnp.GetDevicePath(), mountDir) + } + dsnp.mountDir = mountDir + dsnp.mountedReadonly = readOnly + } + + dsnp.numMounted += 1 + + return dsnp.mountDir, nil +} + +// UnMounts a device snapshot. 
Due to mounted snapshot being shared, a snapshot is only actually unmounted if it is not +// in use by anyone else. +func (dsnp *DeviceSnapshot) UnMount() error { + dsnp.Lock() + defer dsnp.Unlock() + + if dsnp.numMounted == 1 { + err := unMountExt4(dsnp.mountDir) + if err != nil { + return errors.Wrapf(err, "unmounting %s", dsnp.mountDir) + } + + err = os.RemoveAll(dsnp.mountDir) + if err != nil { + return errors.Wrapf(err, "removing %s", dsnp.mountDir) + } + dsnp.mountDir = "" + } + + dsnp.numMounted -= 1 + return nil +} + +// mountExt4 mounts a snapshot device available at devicePath at the specified mountPath. +func mountExt4(devicePath, mountPath string, readOnly bool) error { + // Specify flags for faster mounting and performance: + // * Do not update access times for (all types of) files on this filesystem. + // * Do not allow access to devices (special files) on this filesystem. + // * Do not allow programs to be executed from this filesystem. + // * Do not honor set-user-ID and set-group-ID bits or file capabilities when executing programs from this filesystem. + // * Suppress the display of certain (printk()) warning messages in the kernel log. + var flags uintptr = syscall.MS_NOATIME | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_SILENT + options := make([]string, 0) + + if readOnly { + // Mount filesystem read-only. + flags |= syscall.MS_RDONLY + options = append(options, "noload") + } + + return syscall.Mount(devicePath, mountPath, "ext4", flags, strings.Join(options, ",")) +} + +// unMountExt4 unmounts a snapshot device mounted at mountPath. +func unMountExt4(mountPath string) error { + return syscall.Unmount(mountPath, syscall.MNT_DETACH) +} + +// removeTrailingSlash returns a path with the trailing slash removed. 
+func removeTrailingSlash(path string) string { + if strings.HasSuffix(path, "/") { + return path[:len(path)-1] + } else { + return path + } +} \ No newline at end of file diff --git a/devmapper/devicemapper.go b/devmapper/devicemapper.go new file mode 100644 index 000000000..60c983c11 --- /dev/null +++ b/devmapper/devicemapper.go @@ -0,0 +1,334 @@ +package devmapper + +import ( + "context" + "fmt" + "github.com/containerd/containerd" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/snapshots" + "github.com/ease-lab/vhive/devmapper/thindelta" + "github.com/opencontainers/image-spec/identity" + "github.com/pkg/errors" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +// DeviceMapper creates and manages device snapshots used to store container images. +type DeviceMapper struct { + sync.Mutex + poolName string + snapDevices map[string]*DeviceSnapshot // maps revision snapkey to snapshot device + snapshotService snapshots.Snapshotter // used to interact with the device mapper through containerd + thinDelta *thindelta.ThinDelta + + // Manage leases to avoid garbage collection of manually created snapshots. Done automatically for snapshots + // created directly through containerd (eg. 
container.create) + leaseManager leases.Manager + leases map[string]*leases.Lease +} + +func NewDeviceMapper(client *containerd.Client, poolName, metadataDev string) *DeviceMapper { + devMapper := new(DeviceMapper) + devMapper.poolName = poolName + devMapper.thinDelta = thindelta.NewThinDelta(poolName, metadataDev) + devMapper.snapDevices = make(map[string]*DeviceSnapshot) + devMapper.snapshotService = client.SnapshotService("devmapper") + devMapper.leaseManager = client.LeasesService() + devMapper.leases = make(map[string]*leases.Lease) + return devMapper +} + +// getImageKeys returns the key used in containerd to identify the snapshot of the given image +func getImageKey(image containerd.Image, ctx context.Context) (string, error) { + diffIDs, err := image.RootFS(ctx) + if err != nil { + return "", err + } + return identity.ChainID(diffIDs).String(), nil +} + +// CreateDeviceSnapshotFromImage creates a new device mapper snapshot based on the given image. +func (dmpr *DeviceMapper) CreateDeviceSnapshotFromImage(ctx context.Context, snapshotKey string, image containerd.Image) error { + parent, err := getImageKey(image, ctx) + if err != nil { + return err + } + + return dmpr.CreateDeviceSnapshot(ctx, snapshotKey, parent) +} + +// CreateDeviceSnapshot creates a new device mapper snapshot from the given parent snapshot. 
+func (dmpr *DeviceMapper) CreateDeviceSnapshot(ctx context.Context, snapKey, parentKey string) error { + // Create lease to avoid garbage collection + lease, err := dmpr.leaseManager.Create(ctx, leases.WithID(snapKey)) + if err != nil { + return err + } + + // Create snapshot from parent + leasedCtx := leases.WithLease(ctx, lease.ID) + mounts, err := dmpr.snapshotService.Prepare(leasedCtx, snapKey, parentKey) + if err != nil { + return err + } + + // Retrieve snapshot info + deviceName := filepath.Base(mounts[0].Source) + info, err := dmpr.snapshotService.Stat(ctx, snapKey) + if err != nil { + return err + } + + dmpr.Lock() + dsnp := NewDeviceSnapshot(dmpr.poolName, deviceName, info.SnapshotDev) + dsnp.numActivated = 1 // Newly created snapshots through containerd are always activated + dmpr.snapDevices[snapKey] = dsnp + dmpr.leases[snapKey] = &lease + dmpr.Unlock() + return nil +} + +// CommitDeviceSnapshot commits the changes made on a newly created snapshot (see containerd docs). +func (dmpr *DeviceMapper) CommitDeviceSnapshot(ctx context.Context, snapName, snapKey string) error { + lease := dmpr.leases[snapKey] + leasedCtx := leases.WithLease(ctx, lease.ID) + + if err := dmpr.snapshotService.Commit(leasedCtx, snapName, snapKey); err != nil { + return err + } + + dmpr.Lock() + dmpr.snapDevices[snapKey].numActivated = 0 + dmpr.Unlock() + return nil +} + +// RemoveDeviceSnapshot removes the device mapper snapshot identified by the given snapKey. This is only necessary for +// snapshots created through CreateDeviceSnapshot since other snapshots are managed by containerd. The locking here +// also assumes this function is only used to remove snapshots that are a child and are only used by a single container. +func (dmpr *DeviceMapper) RemoveDeviceSnapshot(ctx context.Context, snapKey string) error { + dmpr.Lock() + + lease, present := dmpr.leases[snapKey] + if ! 
present { + dmpr.Unlock() + return errors.New(fmt.Sprintf("Delete device snapshot: lease for key %s does not exist", snapKey)) + } + + if _, present := dmpr.snapDevices[snapKey]; !present { + dmpr.Unlock() + return errors.New(fmt.Sprintf("Delete device snapshot: device for key %s does not exist", snapKey)) + } + delete(dmpr.snapDevices, snapKey) + delete(dmpr.leases, snapKey) + dmpr.Unlock() + + // Not only deactivates but also deletes device + err := dmpr.snapshotService.Remove(ctx, snapKey) + if err != nil { + return err + } + + if err := dmpr.leaseManager.Delete(ctx, *lease); err != nil { + return err + } + + return nil +} + +// GetImageSnapshot retrieves the device mapper snapshot for a given image. +func (dmpr *DeviceMapper) GetImageSnapshot(ctx context.Context, image containerd.Image) (*DeviceSnapshot, error) { + imageSnapKey, err := getImageKey(image, ctx) + if err != nil { + return nil, err + } + + return dmpr.GetDeviceSnapshot(ctx, imageSnapKey) +} + +// GetDeviceSnapshot returns the device mapper snapshot identified by the given snapKey. +func (dmpr *DeviceMapper) GetDeviceSnapshot(ctx context.Context, snapKey string) (*DeviceSnapshot, error) { + dmpr.Lock() + defer dmpr.Unlock() + _, present := dmpr.snapDevices[snapKey] + + if !present { + info, err := dmpr.snapshotService.Stat(ctx, snapKey) + if err != nil { + return nil, err + } + deviceName := getDeviceName(dmpr.poolName, info.SnapshotId) + + dsnp := NewDeviceSnapshot(dmpr.poolName, deviceName, info.SnapshotDev) + if _, err := os.Stat(dsnp.GetDevicePath()); err == nil { + // Snapshot already activated + dsnp.numActivated = 1 + } + + dmpr.snapDevices[snapKey] = dsnp + } + + return dmpr.snapDevices[snapKey], nil +} + +// addTrailingSlash adds a trailing slash to a path if it is not present yet. 
+func addTrailingSlash(path string) string {
+	if strings.HasSuffix(path, "/") {
+		return path
+	} else {
+		return path + "/"
+	}
+}
+
+// getDeviceName returns the device name of a snapshot with the specified id made on the given poolName
+func getDeviceName(poolName, snapshotId string) string {
+	return fmt.Sprintf("%s-snap-%s", poolName, snapshotId)
+}
+
+// CreatePatch creates a patch file storing the difference between an image and the container filesystem
+// CreatePatch creates a patch file storing the file differences between an image and the changes applied
+// by the container using rsync. Note that this is a different approach than using thin_delta which is able to
+// extract blocks directly by leveraging the metadata stored by the device mapper.
+func (dmpr *DeviceMapper) CreatePatch(ctx context.Context, patchPath, containerSnapKey string, image containerd.Image) error {
+
+	containerSnap, err := dmpr.GetDeviceSnapshot(ctx, containerSnapKey)
+	if err != nil {
+		return err
+	}
+
+	imageSnap, err := dmpr.GetImageSnapshot(ctx, image)
+	if err != nil {
+		return err
+	}
+
+	// 1. Activate image snapshot
+	err = imageSnap.Activate()
+	if err != nil {
+		return errors.Wrapf(err, "failed to activate image snapshot")
+	}
+	defer imageSnap.Deactivate()
+
+	// 2. Mount original and snapshot image
+	imageMountPath, err := imageSnap.Mount(true)
+	if err != nil {
+		return err
+	}
+	defer imageSnap.UnMount()
+
+	containerMountPath, err := containerSnap.Mount(true)
+	if err != nil {
+		return err
+	}
+	defer containerSnap.UnMount()
+
+	// 3. Save changes to file
+	result := extractPatch(imageMountPath, containerMountPath, patchPath)
+
+	return result
+}
+
+// extractPatch extracts the file differences between the file systems mounted at the supplied paths using rsync and
+// writes the differences to the supplied patchPath.
+func extractPatch(imageMountPath, containerMountPath, patchPath string) error { + patchArg := fmt.Sprintf("--only-write-batch=%s", patchPath) + cmd := exec.Command("sudo", "rsync", "-ar", patchArg, addTrailingSlash(imageMountPath), addTrailingSlash(containerMountPath)) + err := cmd.Run() + if err != nil { + return errors.Wrapf(err, "creating patch between %s and %s at %s", imageMountPath, containerMountPath, patchPath) + } + + err = os.Remove(patchPath + ".sh") // Remove unnecessary script output + if err!= nil { + return errors.Wrapf(err, "removing %s", patchPath + ".sh") + } + return nil +} + +// RestorePatch applies the file changes stored in the supplied patch file on top of the given container snapshot. +func (dmpr *DeviceMapper) RestorePatch(ctx context.Context, containerSnapKey, patchPath string) error { + containerSnap, err := dmpr.GetDeviceSnapshot(ctx, containerSnapKey) + if err != nil { + return err + } + + // 1. Mount container snapshot device + containerMountPath, err := containerSnap.Mount(false) + if err != nil { + return err + } + defer containerSnap.UnMount() + + // 2. Apply changes to container mounted file system + return applyPatch(containerMountPath, patchPath) +} + +// applyPatch applies the file changes stored in the supplied patch file to the filesystem mounted at the supplied path +func applyPatch(containerMountPath, patchPath string) error { + patchArg := fmt.Sprintf("--read-batch=%s", patchPath) + cmd := exec.Command("sudo", "rsync", "-ar", patchArg, addTrailingSlash(containerMountPath)) + err := cmd.Run() + if err!= nil { + return errors.Wrapf(err, "applying %s at %s", patchPath, containerMountPath) + } + return nil +} + +/**************************************************************************************** + * Below functions are legacy but useful for a first implementation of remote snapshotting. 
+ * They are not in use anymore due to thin_delta only supporting one metadata snapshot at + * a time, which reduces the amount of snapshots we can make concurrently. Although this + * might be easy to fix. + ****************************************************************************************/ + +// ForkContainerSnap duplicates the snapshot with key oldContainerSnapKey into a new snapshot with name +// newContainerSnapName which can be used to boot a new container. The new snapshot is created by extracting the +// changes applied by oldContainerSnap on top of the image using thin_delta and writing these on a new snapshot created +// from the same image. +func (dmpr *DeviceMapper) ForkContainerSnap(ctx context.Context, oldContainerSnapKey, newContainerSnapName string, image containerd.Image) error { + oldContainerSnap, err := dmpr.GetDeviceSnapshot(ctx, oldContainerSnapKey) + if err != nil { + return err + } + + imageSnap, err := dmpr.GetImageSnapshot(ctx, image) + if err != nil { + return err + } + + // 1. Get block difference of the old container snapshot from thinpool metadata + blockDelta, err := dmpr.thinDelta.GetBlocksDelta(imageSnap.deviceId, oldContainerSnap.deviceId) + if err != nil { + return errors.Wrapf(err, "getting block delta") + } + + // 2. Read the calculated block difference from the old container snapshot + if err := blockDelta.ReadBlocks(oldContainerSnap.GetDevicePath()); err != nil { + return errors.Wrapf(err, "reading block delta") + } + + // 3. Create the new container snapshot + newContainerSnapKey := newContainerSnapName + "active" + if err := dmpr.CreateDeviceSnapshotFromImage(ctx, newContainerSnapKey, image); err != nil { + return errors.Wrapf(err, "creating forked container snapshot") + } + newContainerSnap, err := dmpr.GetDeviceSnapshot(ctx, newContainerSnapKey) + if err != nil { + return errors.Wrapf(err, "previously created forked container device does not exist") + } + + // 4. 
Write calculated block difference to new container snapshot + if err := blockDelta.WriteBlocks(newContainerSnap.GetDevicePath()); err != nil { + return errors.Wrapf(err, "writing block delta") + } + + // 5. Commit the new container snapshot + if err := dmpr.CommitDeviceSnapshot(ctx, newContainerSnapName, newContainerSnapKey); err != nil { + return errors.Wrapf(err, "committing container snapshot") + } + + return nil +} + diff --git a/devmapper/thindelta/blockDelta.go b/devmapper/thindelta/blockDelta.go new file mode 100644 index 000000000..e8f88d32a --- /dev/null +++ b/devmapper/thindelta/blockDelta.go @@ -0,0 +1,131 @@ +package thindelta + +import ( + "encoding/gob" + "github.com/pkg/errors" + "os" +) + +// BlockDelta Stores the block difference between two snapshot devices. +type BlockDelta struct { + DiffBlocks *[]DiffBlock + BlockSizeBytes int64 +} + +// DiffBlock represent a contiguous set of Length physical blocks starting at block Begin on disk that differ between +// two devices. If blocks have not been deleted in the second device, the bytes contained in the block are stored in +// the Bytes array. +type DiffBlock struct { + Begin int64 + Length int64 + Delete bool + Bytes []byte +} + +// NewBlockDelta initializes a new BlockDelta to store the block difference between two snapshot devices. +func NewBlockDelta(diffBlocks *[]DiffBlock, blockSizeBytes int64) *BlockDelta { + blockDelta := new(BlockDelta) + blockDelta.DiffBlocks = diffBlocks + blockDelta.BlockSizeBytes = blockSizeBytes + return blockDelta +} + +// Serialize serializes the difference between two snapshots to disk. This could be used to implement remote +// snapshotting if snapshots of the same image are deterministically flattened into a file system. 
+func (bld *BlockDelta) Serialize(storePath string) error { + file, err := os.Create(storePath) + if err != nil { + return errors.Wrapf(err, "creating block delta file") + } + defer file.Close() + + encoder := gob.NewEncoder(file) + + err = encoder.Encode(*bld.DiffBlocks) + if err != nil { + return errors.Wrapf(err, "encoding blocks delta") + } + return nil +} + +// DeserializeDiffBlocks deserializes the difference between two snapshots from disk. BlockDelta can be initialized +// as an empty array before using. +func (bld *BlockDelta) DeserializeDiffBlocks(storePath string) error { + file, err := os.Open(storePath) + if err != nil { + return errors.Wrapf(err, "opening block delta file") + } + defer file.Close() + + encoder := gob.NewDecoder(file) + + err = encoder.Decode(bld.DiffBlocks) + if err != nil { + return errors.Wrapf(err, "decoding block delta") + } + return nil +} + +// ReadBlocks directly reads the computed differing blocks from the specified data device. +func (bld *BlockDelta) ReadBlocks(dataDevPath string) error { + file, err := os.Open(dataDevPath) + defer file.Close() + + if err != nil { + return errors.Wrapf(err, "opening data device for reading") + } + + for idx, diffBlock := range *bld.DiffBlocks { + if ! diffBlock.Delete { + toRead := diffBlock.Length * bld.BlockSizeBytes + + buf := make([]byte, toRead) + offset := diffBlock.Begin * bld.BlockSizeBytes + + bytesRead, err := file.ReadAt(buf, offset) + if err != nil { + return errors.Wrapf(err, "reading snapshot blocks") + } + + if bytesRead != int(toRead) { + return errors.New("Read less bytes than requested. This should not happen") + } + (*bld.DiffBlocks)[idx].Bytes = buf + } + } + return nil +} + +// WriteBlocks directly writes the differing blocks to the specified destination data device. 
+func (bld *BlockDelta) WriteBlocks(dataDevPath string) error { + file, err := os.OpenFile(dataDevPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + defer file.Close() + + if err != nil { + return errors.Wrapf(err, "opening data device for writing") + } + + for _, diffBlock := range *bld.DiffBlocks { + toWrite := diffBlock.Length * bld.BlockSizeBytes + + var buf []byte + if ! diffBlock.Delete { + buf = diffBlock.Bytes + } else { + // If delete, write 0 bytes. Could be done more optimally + buf = make([]byte, toWrite) + } + + offset := diffBlock.Begin * bld.BlockSizeBytes + + bytesWritten, err := file.WriteAt(buf, offset) + if err != nil { + return errors.Wrapf(err, "writing snapshot blocks") + } + + if bytesWritten != int(toWrite) { + return errors.New("Wrote less bytes than requested. This should not happen") + } + } + return nil +} \ No newline at end of file diff --git a/devmapper/thindelta/thinDelta.go b/devmapper/thindelta/thinDelta.go new file mode 100644 index 000000000..b2ef4aab9 --- /dev/null +++ b/devmapper/thindelta/thinDelta.go @@ -0,0 +1,115 @@ +package thindelta + +import ( + "bufio" + "bytes" + "fmt" + "github.com/pkg/errors" + xmlparser "github.com/tamerh/xml-stream-parser" + "os/exec" + "strconv" + "sync" +) + +const ( + blockSizeSectors = 128 + sectorSizeBytes = 512 + blockSizeBytes = blockSizeSectors * sectorSizeBytes +) + +// ThinDelta is used to compute the block difference between device mapper snapshots using the thin_delta command line +// tool of the thin provisioning tools suite (https://github.com/jthornber/thin-provisioning-tools). +type ThinDelta struct { + sync.Mutex + poolName string + metaDataDev string +} + +func NewThinDelta(poolName string, metaDataDev string) *ThinDelta { + thinDelta := new(ThinDelta) + thinDelta.poolName = poolName + thinDelta.metaDataDev = metaDataDev + return thinDelta +} + +// getPoolPath returns the path of the devicemapper thinpool. 
+func (thd *ThinDelta) getPoolPath() string { + return fmt.Sprintf("/dev/mapper/%s", thd.poolName) +} + +// reserveMetadataSnap creates a snapshot of the thinpool metadata to avoid concurrency conflicts when accessing the +// thinpool metadata. Note that dmsetup only supports a single thinpool metadata snapshot to exist. +func (thd *ThinDelta) reserveMetadataSnap() error { + thd.Lock() // Can only have one snap at a time + cmd := exec.Command("sudo", "dmsetup", "message", thd.getPoolPath(), "0", "reserve_metadata_snap") + err := cmd.Run() + if err != nil { + thd.Unlock() + } + return err +} + +// releaseMetadataSnap releases the currently existing thinpool metadata snapshot. +func (thd *ThinDelta) releaseMetadataSnap() error { + cmd := exec.Command("sudo", "dmsetup", "message", thd.getPoolPath(), "0", "release_metadata_snap") + err := cmd.Run() + thd.Unlock() + return err +} + +// getBlocksRawDelta computes the block difference between the two specified snapshot devices using the thin_delta +// command line utility. +func (thd *ThinDelta) getBlocksRawDelta(snap1DeviceId, snap2DeviceId string) (*bytes.Buffer, error) { + // Reserve metadata snapshot + err := thd.reserveMetadataSnap() + + if err != nil { + return nil, errors.Wrapf(err, "failed to reserve metadata snapshot") + } + defer func() { + thd.releaseMetadataSnap() + }() + + cmd := exec.Command("sudo", "thin_delta", "-m", thd.metaDataDev, "--snap1", snap1DeviceId, "--snap2", snap2DeviceId) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err = cmd.Run() + if err != nil { + return nil, errors.Wrapf(err, "getting snapshot delta: %s", stderr.String()) + } + return &stdout, nil +} + +// GetBlocksDelta computes the block difference between the two specified snapshot devices. 
+func (thd *ThinDelta) GetBlocksDelta(snap1DeviceId, snap2DeviceId string) (*BlockDelta, error) { + // Retrieve block delta using thin_delta utility as XML + stdout, err := thd.getBlocksRawDelta(snap1DeviceId, snap2DeviceId) + if err != nil { + return nil, errors.Wrapf(err, "getting block delta") + } + + // Parse XML output into DiffBlocks + diffBlocks := make([]DiffBlock, 0) + + br := bufio.NewReaderSize(stdout,65536) + parser := xmlparser.NewXMLParser(br, "different", "right_only", "left_only").ParseAttributesOnly("different", "right_only", "left_only") + + for xml := range parser.Stream() { + begin, err := strconv.ParseInt(xml.Attrs["begin"], 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "parsing xml begin attribute") + } + + length, err := strconv.ParseInt(xml.Attrs["length"], 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "parsing xml length attribute") + } + + diffBlocks = append(diffBlocks, DiffBlock{Begin: begin, Length: length, Delete: xml.Name == "left_only"}) + } + + return NewBlockDelta(&diffBlocks, blockSizeBytes), nil +} + diff --git a/go.mod b/go.mod index 7802ed9b1..9f53be215 100644 --- a/go.mod +++ b/go.mod @@ -42,24 +42,32 @@ replace ( replace ( github.com/ease-lab/vhive/examples/protobuf/helloworld => ./examples/protobuf/helloworld - github.com/firecracker-microvm/firecracker-containerd => github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4 + // github.com/firecracker-microvm/firecracker-containerd => github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4 + github.com/containerd/containerd => github.com/amohoste/containerd v1.5.5-ids // TODO + github.com/firecracker-microvm/firecracker-containerd => github.com/amohoste/firecracker-containerd v1.0.0-sparse // TODO ) + require ( + github.com/antchfx/xpath v1.2.0 // indirect github.com/blend/go-sdk v1.20211025.3 // indirect github.com/containerd/containerd v1.5.2 github.com/containerd/go-cni v1.1.4 github.com/davecgh/go-spew 
v1.1.1 github.com/ease-lab/vhive/examples/protobuf/helloworld v0.0.0-00010101000000-000000000000 + github.com/ease-lab/vhive/taps v0.0.0-20210607161503-ce9e244976f7 github.com/firecracker-microvm/firecracker-containerd v0.0.0-00010101000000-000000000000 github.com/ftrvxmtrx/fd v0.0.0-20150925145434-c6d800382fff github.com/go-multierror/multierror v1.0.2 github.com/golang/protobuf v1.4.3 github.com/google/nftables v0.0.0-20210916140115-16a134723a96 github.com/montanaflynn/stats v0.6.5 + github.com/opencontainers/image-spec v1.0.1 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.8.0 github.com/stretchr/testify v1.7.0 + github.com/tamerh/xml-stream-parser v1.4.0 + github.com/tamerh/xpath v1.0.0 // indirect github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae github.com/wcharczuk/go-chart v2.0.1+incompatible diff --git a/go.sum b/go.sum index 7e8bfddcd..53b687162 100644 --- a/go.sum +++ b/go.sum @@ -96,6 +96,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8= +github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -187,6 +189,7 @@ 
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go. github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.6/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -313,6 +316,8 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4 h1:aSZHGMFJMcS47URi6xu7n1anJfEHagZjGf6HR8ZHggg= github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4/go.mod h1:Uon4eMMkFBsj2aYWnk1wz3xebaKgR3+CCjmc62cCcvo= +github.com/ease-lab/vhive/taps v0.0.0-20210607161503-ce9e244976f7 h1:c8DqqFtlBn7ivZ37PDJShZSfDy65F7ELu0FJ+cS/wbI= +github.com/ease-lab/vhive/taps v0.0.0-20210607161503-ce9e244976f7/go.mod h1:74sgHZg376wVFR0xATx3aA8vQZ2jGp7aF/1CUl2Om2Y= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -1020,6 +1025,10 @@ github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4/go.mod h1:hkRG github.com/syndtr/gocapability 
v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tamerh/xml-stream-parser v1.4.0 h1:Vb1ZqshlXi53vvUBzZUdEsEJBvnKVWhfrGEJhfQABfc= +github.com/tamerh/xml-stream-parser v1.4.0/go.mod h1:lrpNpthn/iYpnyICCe4KwJSANxywFIfSvsqokQOV9q0= +github.com/tamerh/xpath v1.0.0 h1:NccMES/Ej8slPCFDff73Kf6V1xu9hdbuKf2RyDsxf5Q= +github.com/tamerh/xpath v1.0.0/go.mod h1:t0wnh72FQlOVEO20f2Dl3EoVxso9GnLREh1WTpvNmJQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= From c47a81b78876e8bd6671d2b88d151faa38ed6cce Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Tue, 4 Jan 2022 20:40:03 +0100 Subject: [PATCH 05/15] make VM cpu and memory configurable Signed-off-by: Amory Hoste --- cri/firecracker/coordinator.go | 8 ++-- cri/firecracker/coordinator_test.go | 4 +- cri/firecracker/service.go | 62 +++++++++++++++++++++++++---- ctriface/bench_test.go | 2 +- ctriface/failing_test.go | 2 +- ctriface/iface.go | 20 ++++++++-- ctriface/iface_test.go | 10 ++--- ctriface/manual_cleanup_test.go | 8 ++-- functions.go | 2 +- misc/types.go | 2 + 10 files changed, 92 insertions(+), 28 deletions(-) diff --git a/cri/firecracker/coordinator.go b/cri/firecracker/coordinator.go index c123b95df..36f034f52 100644 --- a/cri/firecracker/coordinator.go +++ b/cri/firecracker/coordinator.go @@ -98,13 +98,13 @@ func (c *coordinator) setIdleInstance(fi *funcInstance) { c.idleInstances[fi.Image] = append(c.idleInstances[fi.Image], fi) } -func (c *coordinator) startVM(ctx 
context.Context, image string) (*funcInstance, error) { +func (c *coordinator) startVM(ctx context.Context, image string, memSizeMib, vCPUCount uint32) (*funcInstance, error) { if fi := c.getIdleInstance(image); c.orch != nil && c.orch.GetSnapshotsEnabled() && fi != nil { err := c.orchLoadInstance(ctx, fi) return fi, err } - return c.orchStartVM(ctx, image) + return c.orchStartVM(ctx, image, memSizeMib, vCPUCount) } func (c *coordinator) stopVM(ctx context.Context, containerID string) error { @@ -150,7 +150,7 @@ func (c *coordinator) insertActive(containerID string, fi *funcInstance) error { return nil } -func (c *coordinator) orchStartVM(ctx context.Context, image string) (*funcInstance, error) { +func (c *coordinator) orchStartVM(ctx context.Context, image string, memSizeMib, vCPUCount uint32) (*funcInstance, error) { vmID := strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1))) logger := log.WithFields( log.Fields{ @@ -170,7 +170,7 @@ func (c *coordinator) orchStartVM(ctx context.Context, image string) (*funcInsta defer cancel() if !c.withoutOrchestrator { - resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image) + resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount) if err != nil { logger.WithError(err).Error("coordinator failed to start VM") } diff --git a/cri/firecracker/coordinator_test.go b/cri/firecracker/coordinator_test.go index d51529bb5..b9ec4867d 100644 --- a/cri/firecracker/coordinator_test.go +++ b/cri/firecracker/coordinator_test.go @@ -45,7 +45,7 @@ func TestMain(m *testing.M) { func TestStartStop(t *testing.T) { containerID := "1" - fi, err := coord.startVM(context.Background(), containerID) + fi, err := coord.startVM(context.Background(), containerID, 0, 0) require.NoError(t, err, "could not start VM") err = coord.insertActive(containerID, fi) @@ -72,7 +72,7 @@ func TestParallelStartStop(t *testing.T) { defer wg.Done() containerID := strconv.Itoa(i) - fi, err := coord.startVM(context.Background(), containerID) + fi, err := 
coord.startVM(context.Background(), containerID, 0, 0) require.NoError(t, err, "could not start VM") err = coord.insertActive(containerID, fi) diff --git a/cri/firecracker/service.go b/cri/firecracker/service.go index fa73fc226..01a33ba17 100644 --- a/cri/firecracker/service.go +++ b/cri/firecracker/service.go @@ -25,6 +25,7 @@ package firecracker import ( "context" "errors" + "strconv" "sync" "github.com/ease-lab/vhive/cri" @@ -34,11 +35,13 @@ import ( ) const ( - userContainerName = "user-container" - queueProxyName = "queue-proxy" - guestIPEnv = "GUEST_ADDR" - guestPortEnv = "GUEST_PORT" - guestImageEnv = "GUEST_IMAGE" + userContainerName = "user-container" + queueProxyName = "queue-proxy" + guestIPEnv = "GUEST_ADDR" + guestPortEnv = "GUEST_PORT" + guestImageEnv = "GUEST_IMAGE" + guestMemorySizeMibEnv = "MEM_SIZE_MB" + guestvCPUCount = "VCPU_COUNT" ) type FirecrackerService struct { @@ -110,7 +113,19 @@ func (fs *FirecrackerService) createUserContainer(ctx context.Context, r *criapi return nil, err } - funcInst, err := fs.coordinator.startVM(context.Background(), guestImage) + memSizeMib, err := getMemorySize(config) + if err != nil { + log.WithError(err).Error() + return nil, err + } + + vCPUCount, err := getvCPUCount(config) + if err != nil { + log.WithError(err).Error() + return nil, err + } + + funcInst, err := fs.coordinator.startVM(context.Background(), guestImage, memSizeMib, vCPUCount) if err != nil { log.WithError(err).Error("failed to start VM") return nil, err @@ -215,6 +230,39 @@ func getEnvVal(key string, config *criapi.ContainerConfig) (string, error) { } - return "", errors.New("failed to provide non empty guest image in user container config") + return "", errors.New("failed to retrieve environment variable from user container config") +} + +func getMemorySize(config *criapi.ContainerConfig) (uint32, error) { + envs := config.GetEnvs() + for _, kv := range envs { + if kv.GetKey() == guestMemorySizeMibEnv { + memSize, err := 
strconv.Atoi(kv.GetValue()) + if err == nil { + return uint32(memSize), nil + } else { + return 0, err + } + } + } + + return uint32(256), nil } + +func getvCPUCount(config *criapi.ContainerConfig) (uint32, error) { + envs := config.GetEnvs() + for _, kv := range envs { + if kv.GetKey() == guestvCPUCount { + vCPUCount, err := strconv.Atoi(kv.GetValue()) + if err == nil { + return uint32(vCPUCount), nil + } else { + return 0, err + } + } + + } + + return uint32(1), nil +} \ No newline at end of file diff --git a/ctriface/bench_test.go b/ctriface/bench_test.go index 4bca84d6f..e1af170fd 100644 --- a/ctriface/bench_test.go +++ b/ctriface/bench_test.go @@ -75,7 +75,7 @@ func TestBenchmarkStart(t *testing.T) { for i := 0; i < benchCount; i++ { dropPageCache() - _, metric, err := orch.StartVM(ctx, vmIDString, imageName) + _, metric, err := orch.StartVM(ctx, vmIDString, imageName, 0, 0) require.NoError(t, err, "Failed to start VM") startMetrics[i] = metric diff --git a/ctriface/failing_test.go b/ctriface/failing_test.go index 0f07caecb..6b600f842 100644 --- a/ctriface/failing_test.go +++ b/ctriface/failing_test.go @@ -55,7 +55,7 @@ func TestStartSnapStop(t *testing.T) { vmID := "2" - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) diff --git a/ctriface/iface.go b/ctriface/iface.go index 59fdc3271..725e0fe96 100644 --- a/ctriface/iface.go +++ b/ctriface/iface.go @@ -61,10 +61,12 @@ type StartVMResponse struct { const ( testImageName = "ghcr.io/ease-lab/helloworld:var_workload" + defaultVcpuCount = 1 + defaultMemsizeMib = 256 ) // StartVM Boots a VM if it does not exist -func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { +func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32) (_ *StartVMResponse, _ 
*metrics.Metric, retErr error) { var ( startVMMetric *metrics.Metric = metrics.NewMetric() tStart time.Time @@ -79,6 +81,18 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string) (_ * return nil, nil, err } + // Set VM vCPU and Memory + if memSizeMib != 0 { + vm.MemSizeMib = memSizeMib + } else { + vm.MemSizeMib = defaultMemsizeMib + } + if vCPUCount != 0 { + vm.VCPUCount = vCPUCount + } else { + vm.VCPUCount = defaultVcpuCount + } + defer func() { // Free the VM from the pool if function returns error if retErr != nil { @@ -300,8 +314,8 @@ func (o *Orchestrator) getVMConfig(vm *misc.VM) *proto.CreateVMRequest { TimeoutSeconds: 100, KernelArgs: kernelArgs, MachineCfg: &proto.FirecrackerMachineConfiguration{ - VcpuCount: 1, - MemSizeMib: 256, + VcpuCount: vm.VCPUCount, + MemSizeMib: vm.MemSizeMib, }, NetworkInterfaces: []*proto.FirecrackerNetworkInterface{{ StaticConfig: &proto.StaticNetworkConfiguration{ diff --git a/ctriface/iface_test.go b/ctriface/iface_test.go index 97883c696..4484a2a81 100644 --- a/ctriface/iface_test.go +++ b/ctriface/iface_test.go @@ -69,7 +69,7 @@ func TestPauseSnapResume(t *testing.T) { vmID := "4" - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) @@ -112,7 +112,7 @@ func TestStartStopSerial(t *testing.T) { vmID := "5" - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM") err = orch.StopSingleVM(ctx, vmID) @@ -146,7 +146,7 @@ func TestPauseResumeSerial(t *testing.T) { vmID := "6" - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) @@ -196,7 +196,7 @@ func TestStartStopParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := 
fmt.Sprintf("%d", i) - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM "+vmID) }(i) } @@ -255,7 +255,7 @@ func TestPauseResumeParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM") }(i) } diff --git a/ctriface/manual_cleanup_test.go b/ctriface/manual_cleanup_test.go index d0eab54c1..cfd85e608 100644 --- a/ctriface/manual_cleanup_test.go +++ b/ctriface/manual_cleanup_test.go @@ -63,7 +63,7 @@ func TestSnapLoad(t *testing.T) { vmID := "1" - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) @@ -113,7 +113,7 @@ func TestSnapLoadMultiple(t *testing.T) { vmID := "3" - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) @@ -184,7 +184,7 @@ func TestParallelSnapLoad(t *testing.T) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM, "+vmID) err = orch.PauseVM(ctx, vmID) @@ -246,7 +246,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, testImageName) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) require.NoError(t, err, "Failed to start VM, "+vmID) }(i) } diff --git a/functions.go b/functions.go index 3783a258e..4ded99161 100644 --- a/functions.go +++ b/functions.go @@ -356,7 +356,7 @@ func (f *Function) 
AddInstance() *metrics.Metric { if f.isSnapshotReady { metr = f.LoadInstance() } else { - resp, _, err := orch.StartVM(ctx, f.getVMID(), f.imageName) + resp, _, err := orch.StartVM(ctx, f.getVMID(), f.imageName, 0, 0) if err != nil { log.Panic(err) } diff --git a/misc/types.go b/misc/types.go index b4b01bc35..d8b17032b 100644 --- a/misc/types.go +++ b/misc/types.go @@ -40,6 +40,8 @@ type VM struct { TaskCh <-chan containerd.ExitStatus Ni *taps.NetworkInterface NetConfig *networking.NetworkConfig + VCPUCount uint32 + MemSizeMib uint32 } // VMPool Pool of active VMs (can be in several states though) From ea44fe20be3a873eb953400caa56b0ebe64f3f44 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Sun, 9 Jan 2022 10:48:59 +0000 Subject: [PATCH 06/15] add netpoolsize to tests Signed-off-by: Amory Hoste --- ctriface/bench_test.go | 2 +- ctriface/failing_test.go | 2 +- ctriface/iface_test.go | 5 +++++ ctriface/manual_cleanup_test.go | 4 ++++ ctriface/orch.go | 2 +- 5 files changed, 12 insertions(+), 3 deletions(-) diff --git a/ctriface/bench_test.go b/ctriface/bench_test.go index e1af170fd..3452b7495 100644 --- a/ctriface/bench_test.go +++ b/ctriface/bench_test.go @@ -56,7 +56,7 @@ func TestBenchmarkStart(t *testing.T) { ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) defer cancel() - orch := NewOrchestrator("devmapper", "", WithTestModeOn(true), WithUPF(*isUPFEnabled)) + orch := NewOrchestrator("devmapper", "", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled)) images := getAllImages() benchCount := 10 diff --git a/ctriface/failing_test.go b/ctriface/failing_test.go index 6b600f842..c74b9b05b 100644 --- a/ctriface/failing_test.go +++ b/ctriface/failing_test.go @@ -51,7 +51,7 @@ func TestStartSnapStop(t *testing.T) { ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) defer cancel() - orch := NewOrchestrator("devmapper", "", WithTestModeOn(true)) + orch := 
NewOrchestrator("devmapper", "", 10, WithTestModeOn(true)) vmID := "2" diff --git a/ctriface/iface_test.go b/ctriface/iface_test.go index 4484a2a81..36a154187 100644 --- a/ctriface/iface_test.go +++ b/ctriface/iface_test.go @@ -62,6 +62,7 @@ func TestPauseSnapResume(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), @@ -105,6 +106,7 @@ func TestStartStopSerial(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), @@ -139,6 +141,7 @@ func TestPauseResumeSerial(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), @@ -180,6 +183,7 @@ func TestStartStopParallel(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), @@ -239,6 +243,7 @@ func TestPauseResumeParallel(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), diff --git a/ctriface/manual_cleanup_test.go b/ctriface/manual_cleanup_test.go index cfd85e608..0a883aeaa 100644 --- a/ctriface/manual_cleanup_test.go +++ b/ctriface/manual_cleanup_test.go @@ -56,6 +56,7 @@ func TestSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), @@ -106,6 +107,7 @@ func TestSnapLoadMultiple(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), @@ -168,6 +170,7 @@ func TestParallelSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), @@ -230,6 +233,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + 10, 
WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), diff --git a/ctriface/orch.go b/ctriface/orch.go index 9909f58a0..67a9365b7 100644 --- a/ctriface/orch.go +++ b/ctriface/orch.go @@ -156,7 +156,7 @@ func (o *Orchestrator) setupCloseHandler() { // Cleanup Removes the bridges created by the VM pool's tap manager // Cleans up snapshots directory func (o *Orchestrator) Cleanup() { - o.vmPool.RemoveBridges() + o.vmPool.CleanupNetwork() if err := os.RemoveAll(o.snapshotsDir); err != nil { log.Panic("failed to delete snapshots dir", err) } From 974e3c5e410a2cd967addf03b463f7aebd639381 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Sat, 15 Jan 2022 17:19:35 +0000 Subject: [PATCH 07/15] add improved snapshotting functionality Signed-off-by: Amory Hoste --- bin/containerd-shim-aws-firecracker | 4 +- bin/firecracker | 4 +- bin/firecracker-containerd | 4 +- bin/firecracker-ctr | 4 +- bin/jailer | 4 +- .../firecracker-runtime.json | 10 +- cri/firecracker/coordinator.go | 214 ++++---- cri/firecracker/coordinator_test.go | 13 +- cri/firecracker/funcInstance.go | 39 +- cri/firecracker/service.go | 20 +- ctriface/bench_test.go | 4 +- ctriface/failing_test.go | 14 +- ctriface/iface.go | 203 +++++--- ctriface/iface_test.go | 25 +- ctriface/manual_cleanup_test.go | 74 ++- ctriface/orch.go | 14 +- ctrimages/imageManager.go | 22 + devmapper/deviceSnapshot.go | 22 + devmapper/devicemapper.go | 22 + devmapper/thindelta/blockDelta.go | 22 + devmapper/thindelta/thinDelta.go | 46 ++ functions.go | 32 +- go.mod | 14 +- go.sum | 483 +----------------- metrics/metrics.go | 5 + misc/types.go | 29 +- networking/networkManager.go | 3 +- scripts/setup_system.sh | 8 +- snapshotting/snapHeap.go | 54 ++ snapshotting/snapshot.go | 166 ++++++ snapshotting/snapshotmanager.go | 230 +++++++++ vhive.go | 33 +- vhive_test.go | 3 + 33 files changed, 1073 insertions(+), 771 deletions(-) create mode 100644 snapshotting/snapHeap.go create mode 100644 snapshotting/snapshot.go create mode 
100644 snapshotting/snapshotmanager.go diff --git a/bin/containerd-shim-aws-firecracker b/bin/containerd-shim-aws-firecracker index f81210219..532957497 100755 --- a/bin/containerd-shim-aws-firecracker +++ b/bin/containerd-shim-aws-firecracker @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73f90aa2f7016202b3cc81077989137e39ceaccd1f9cb820fc442a876b8f5f7e -size 23718874 +oid sha256:e290046f2e24c117ef450a3bef6c8f8e3b1ec387decc76ccc936e1f54c827327 +size 26355405 diff --git a/bin/firecracker b/bin/firecracker index de3692802..5ba3cbf31 100755 --- a/bin/firecracker +++ b/bin/firecracker @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc78ce90c66c599af63bd32c883202fc73fe6ec08f8db8ba688b6218d2eb5d1c -size 3678288 +oid sha256:561cff75b2e1d768d2a4e7dad01cffb3eaff194e1b1696ad3ede5284c404fb0c +size 4010736 diff --git a/bin/firecracker-containerd b/bin/firecracker-containerd index 500283533..d486a278f 100755 --- a/bin/firecracker-containerd +++ b/bin/firecracker-containerd @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f724d742b66ecb6e1523b211fb5f4917ebdc500f08a0fc869c8184bc1d14f92 -size 43283896 +oid sha256:89c20c096978dafa7f3ba3b1d66a9e574f2fd89f3781ee0537da30120aea6455 +size 46999272 diff --git a/bin/firecracker-ctr b/bin/firecracker-ctr index 8f5f199ea..4b992a70e 100755 --- a/bin/firecracker-ctr +++ b/bin/firecracker-ctr @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3892fdae25a2f53274a0e597b28e03418898273b1fba06e6b87cd58da87e6e9 -size 31858944 +oid sha256:1b0bab69371a224e9eaed86edb26dd57e2a0b04eaa7e9b4da7e3e8c7c38e0016 +size 34476496 diff --git a/bin/jailer b/bin/jailer index 090c8c588..e8be92549 100755 --- a/bin/jailer +++ b/bin/jailer @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:88b8d09513ee03a872096fc5671a9dd82c218dd85754f4890e5ad021ee494b2b -size 2828176 +oid sha256:375abd369c55ad8057ec6cd39ee77e8f68933fd7a97e1d1901881805f22815f8 +size 3060760 diff 
--git a/configs/firecracker-containerd/firecracker-runtime.json b/configs/firecracker-containerd/firecracker-runtime.json index 0986eb94d..1fac6a89c 100644 --- a/configs/firecracker-containerd/firecracker-runtime.json +++ b/configs/firecracker-containerd/firecracker-runtime.json @@ -1,8 +1,14 @@ { "firecracker_binary_path": "/usr/local/bin/firecracker", "kernel_image_path": "/var/lib/firecracker-containerd/runtime/hello-vmlinux.bin", - "kernel_args": "console=ttyS0 noapic reboot=k panic=1 pci=off nomodules ro systemd.journald.forward_to_console systemd.unit=firecracker.target init=/sbin/overlay-init", "root_drive": "/var/lib/firecracker-containerd/runtime/default-rootfs.img", + "cpu_count": 1, "cpu_template": "T2", - "log_levels": ["info"] + "log_fifo": "fc-logs.fifo", + "log_levels": ["info"], + "metrics_fifo": "fc-metrics.fifo", + "kernel_args": "console=ttyS0 noapic reboot=k panic=1 pci=off nomodules ro systemd.journald.forward_to_console systemd.unit=firecracker.target init=/sbin/overlay-init", + "jailer": { + "runc_binary_path": "/usr/bin/runc" + } } \ No newline at end of file diff --git a/cri/firecracker/coordinator.go b/cri/firecracker/coordinator.go index 36f034f52..a118dbd71 100644 --- a/cri/firecracker/coordinator.go +++ b/cri/firecracker/coordinator.go @@ -24,7 +24,10 @@ package firecracker import ( "context" - "errors" + "fmt" + "github.com/ease-lab/vhive/metrics" + "github.com/ease-lab/vhive/snapshotting" + "github.com/pkg/errors" "strconv" "sync" "sync/atomic" @@ -34,13 +37,16 @@ import ( log "github.com/sirupsen/logrus" ) +const snapshotsDir = "/fccd/snapshots" + type coordinator struct { sync.Mutex orch *ctriface.Orchestrator nextID uint64 + isSparseSnaps bool - activeInstances map[string]*funcInstance - idleInstances map[string][]*funcInstance + activeInstances map[string]*FuncInstance + snapshotManager *snapshotting.SnapshotManager withoutOrchestrator bool } @@ -53,11 +59,12 @@ func withoutOrchestrator() coordinatorOption { } } -func 
newFirecrackerCoordinator(orch *ctriface.Orchestrator, opts ...coordinatorOption) *coordinator { +func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool, opts ...coordinatorOption) *coordinator { c := &coordinator{ - activeInstances: make(map[string]*funcInstance), - idleInstances: make(map[string][]*funcInstance), + activeInstances: make(map[string]*FuncInstance), orch: orch, + snapshotManager: snapshotting.NewSnapshotManager(snapshotsDir, snapsCapacityMiB), + isSparseSnaps: isSparseSnaps, } for _, opt := range opts { @@ -67,60 +74,46 @@ func newFirecrackerCoordinator(orch *ctriface.Orchestrator, opts ...coordinatorO return c } -func (c *coordinator) getIdleInstance(image string) *funcInstance { - c.Lock() - defer c.Unlock() - - idles, ok := c.idleInstances[image] - if !ok { - c.idleInstances[image] = []*funcInstance{} - return nil - } - - if len(idles) != 0 { - fi := idles[0] - c.idleInstances[image] = idles[1:] - return fi - } - - return nil -} - -func (c *coordinator) setIdleInstance(fi *funcInstance) { - c.Lock() - defer c.Unlock() - - _, ok := c.idleInstances[fi.Image] - if !ok { - c.idleInstances[fi.Image] = []*funcInstance{} - } - - c.idleInstances[fi.Image] = append(c.idleInstances[fi.Image], fi) -} - -func (c *coordinator) startVM(ctx context.Context, image string, memSizeMib, vCPUCount uint32) (*funcInstance, error) { - if fi := c.getIdleInstance(image); c.orch != nil && c.orch.GetSnapshotsEnabled() && fi != nil { - err := c.orchLoadInstance(ctx, fi) - return fi, err +func (c *coordinator) startVM(ctx context.Context, image string, revision string, memSizeMib, vCPUCount uint32) (*FuncInstance, error) { + if c.orch != nil && c.orch.GetSnapshotsEnabled() { + // Check if snapshot is available + if snap, err := c.snapshotManager.AcquireSnapshot(revision); err == nil { + if snap.MemSizeMib != memSizeMib || snap.VCPUCount != vCPUCount { + return nil, errors.New("Please create a new revision when updating uVM 
memory size or vCPU count") + } else { + return c.orchStartVMSnapshot(ctx, snap, memSizeMib, vCPUCount) + } + } else { + return c.orchStartVM(ctx, image, revision, memSizeMib, vCPUCount) + } } - return c.orchStartVM(ctx, image, memSizeMib, vCPUCount) + return c.orchStartVM(ctx, image, revision, memSizeMib, vCPUCount) } func (c *coordinator) stopVM(ctx context.Context, containerID string) error { c.Lock() - fi, ok := c.activeInstances[containerID] - delete(c.activeInstances, containerID) + fi, present := c.activeInstances[containerID] + if present { + delete(c.activeInstances, containerID) + } c.Unlock() - if !ok { + // Not a request to remove vm container + if !present { return nil } - if c.orch != nil && c.orch.GetSnapshotsEnabled() { - return c.orchOffloadInstance(ctx, fi) + if fi.snapBooted { + defer c.snapshotManager.ReleaseSnapshot(fi.revisionId) + } else if c.orch != nil && c.orch.GetSnapshotsEnabled() { + // Create snapshot + err := c.orchCreateSnapshot(ctx, fi) + if err != nil { + log.Printf("Err creating snapshot %s\n", err) + } } return c.orchStopVM(ctx, fi) @@ -135,14 +128,14 @@ func (c *coordinator) isActive(containerID string) bool { return ok } -func (c *coordinator) insertActive(containerID string, fi *funcInstance) error { +func (c *coordinator) insertActive(containerID string, fi *FuncInstance) error { c.Lock() defer c.Unlock() - logger := log.WithFields(log.Fields{"containerID": containerID, "vmID": fi.VmID}) + logger := log.WithFields(log.Fields{"containerID": containerID, "vmID": fi.vmID}) if fi, present := c.activeInstances[containerID]; present { - logger.Errorf("entry for container already exists with vmID %s" + fi.VmID) + logger.Errorf("entry for container already exists with vmID %s" + fi.vmID) return errors.New("entry for container already exists") } @@ -150,7 +143,8 @@ func (c *coordinator) insertActive(containerID string, fi *funcInstance) error { return nil } -func (c *coordinator) orchStartVM(ctx context.Context, image string, 
memSizeMib, vCPUCount uint32) (*funcInstance, error) { +func (c *coordinator) orchStartVM(ctx context.Context, image, revision string, memSizeMib, vCPUCount uint32) (*FuncInstance, error) { + tStartCold := time.Now() vmID := strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1))) logger := log.WithFields( log.Fields{ @@ -170,90 +164,114 @@ func (c *coordinator) orchStartVM(ctx context.Context, image string, memSizeMib, defer cancel() if !c.withoutOrchestrator { - resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount) + trackDirtyPages := c.isSparseSnaps + resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount, trackDirtyPages) if err != nil { logger.WithError(err).Error("coordinator failed to start VM") } } - fi := newFuncInstance(vmID, image, resp) + coldStartTimeMs := metrics.ToMs(time.Since(tStartCold)) + + fi := NewFuncInstance(vmID, image, revision, resp, false, memSizeMib, vCPUCount, coldStartTimeMs) logger.Debug("successfully created fresh instance") return fi, err } -func (c *coordinator) orchLoadInstance(ctx context.Context, fi *funcInstance) error { - fi.Logger.Debug("found idle instance to load") +func (c *coordinator) orchStartVMSnapshot(ctx context.Context, snap *snapshotting.Snapshot, memSizeMib, vCPUCount uint32) (*FuncInstance, error) { + tStartCold := time.Now() + vmID := strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1))) + logger := log.WithFields( + log.Fields{ + "vmID": vmID, + "image": snap.GetImage(), + }, + ) + + logger.Debug("loading instance from snapshot") + + var ( + resp *ctriface.StartVMResponse + err error + ) ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*30) defer cancel() - if _, err := c.orch.LoadSnapshot(ctxTimeout, fi.VmID); err != nil { - fi.Logger.WithError(err).Error("failed to load VM") - return err + resp, _, err = c.orch.LoadSnapshot(ctxTimeout, vmID, snap) + if err != nil { + logger.WithError(err).Error("failed to load VM") + return nil, err } - if _, err := 
c.orch.ResumeVM(ctxTimeout, fi.VmID); err != nil { - fi.Logger.WithError(err).Error("failed to load VM") - return err + if _, err := c.orch.ResumeVM(ctxTimeout, vmID); err != nil { + logger.WithError(err).Error("failed to load VM") + return nil, err } - fi.Logger.Debug("successfully loaded idle instance") - return nil -} - -func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *funcInstance) error { - var err error - - fi.OnceCreateSnapInstance.Do( - func() { - ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*60) - defer cancel() + coldStartTimeMs := metrics.ToMs(time.Since(tStartCold)) + fi := NewFuncInstance(vmID, snap.GetImage(), snap.GetRevisionId(), resp, true, memSizeMib, vCPUCount, coldStartTimeMs) + logger.Debug("successfully loaded instance from snapshot") - fi.Logger.Debug("creating instance snapshot on first time offloading") - - err = c.orch.PauseVM(ctxTimeout, fi.VmID) - if err != nil { - fi.Logger.WithError(err).Error("failed to pause VM") - return - } + return fi, err +} - err = c.orch.CreateSnapshot(ctxTimeout, fi.VmID) - if err != nil { - fi.Logger.WithError(err).Error("failed to create snapshot") - return - } +func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) error { + logger := log.WithFields( + log.Fields{ + "vmID": fi.vmID, + "image": fi.image, }, ) - return err -} - -func (c *coordinator) orchOffloadInstance(ctx context.Context, fi *funcInstance) error { - fi.Logger.Debug("offloading instance") + removeContainerSnaps, snap, err := c.snapshotManager.InitSnapshot(fi.revisionId, fi.image, fi.coldStartTimeMs, fi.memSizeMib, fi.vCPUCount, c.isSparseSnaps) + if err != nil { + if fmt.Sprint(err) == "There is not enough free space available" { + fi.logger.Info(fmt.Sprintf("There is not enough space available for snapshots of %s", fi.revisionId)) + } + return nil + } - if err := c.orchCreateSnapshot(ctx, fi); err != nil { - return err + if removeContainerSnaps != nil { + for _, cleanupSnapId := range 
*removeContainerSnaps { + if err := c.orch.CleanupRevisionSnapshot(ctx, cleanupSnapId); err != nil { + return errors.Wrap(err, "removing devmapper revision snapshot") + } + } } - ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*10) + ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*60) defer cancel() - if err := c.orch.Offload(ctxTimeout, fi.VmID); err != nil { - fi.Logger.WithError(err).Error("failed to offload instance") + logger.Debug("creating instance snapshot before stopping") + + err = c.orch.PauseVM(ctxTimeout, fi.vmID) + if err != nil { + logger.WithError(err).Error("failed to pause VM") + return nil + } + + err = c.orch.CreateSnapshot(ctxTimeout, fi.vmID, snap) + if err != nil { + fi.logger.WithError(err).Error("failed to create snapshot") + return nil } - c.setIdleInstance(fi) + if err := c.snapshotManager.CommitSnapshot(fi.revisionId); err != nil { + fi.logger.WithError(err).Error("failed to commit snapshot") + return err + } return nil } -func (c *coordinator) orchStopVM(ctx context.Context, fi *funcInstance) error { +func (c *coordinator) orchStopVM(ctx context.Context, fi *FuncInstance) error { if c.withoutOrchestrator { return nil } - if err := c.orch.StopSingleVM(ctx, fi.VmID); err != nil { - fi.Logger.WithError(err).Error("failed to stop VM for instance") + if err := c.orch.StopSingleVM(ctx, fi.vmID); err != nil { + fi.logger.WithError(err).Error("failed to stop VM for instance") return err } diff --git a/cri/firecracker/coordinator_test.go b/cri/firecracker/coordinator_test.go index b9ec4867d..3022655ca 100644 --- a/cri/firecracker/coordinator_test.go +++ b/cri/firecracker/coordinator_test.go @@ -24,6 +24,7 @@ package firecracker import ( "context" + "fmt" "os" "strconv" "sync" @@ -32,12 +33,16 @@ import ( "github.com/stretchr/testify/require" ) +const ( + testImageName = "ghcr.io/ease-lab/helloworld:var_workload" +) + var ( coord *coordinator ) func TestMain(m *testing.M) { - coord = newFirecrackerCoordinator(nil, 
withoutOrchestrator()) + coord = newFirecrackerCoordinator(nil, 10240, false, withoutOrchestrator()) ret := m.Run() os.Exit(ret) @@ -45,7 +50,8 @@ func TestMain(m *testing.M) { func TestStartStop(t *testing.T) { containerID := "1" - fi, err := coord.startVM(context.Background(), containerID, 0, 0) + revisionID := "myrev-1" + fi, err := coord.startVM(context.Background(), testImageName, revisionID,0, 0) require.NoError(t, err, "could not start VM") err = coord.insertActive(containerID, fi) @@ -72,7 +78,8 @@ func TestParallelStartStop(t *testing.T) { defer wg.Done() containerID := strconv.Itoa(i) - fi, err := coord.startVM(context.Background(), containerID, 0, 0) + revisionID := fmt.Sprintf("myrev-%d", i) + fi, err := coord.startVM(context.Background(), testImageName, revisionID, 0, 0) require.NoError(t, err, "could not start VM") err = coord.insertActive(containerID, fi) diff --git a/cri/firecracker/funcInstance.go b/cri/firecracker/funcInstance.go index a7e007a8c..ca5c2136c 100644 --- a/cri/firecracker/funcInstance.go +++ b/cri/firecracker/funcInstance.go @@ -23,34 +23,41 @@ package firecracker import ( - "sync" - "github.com/ease-lab/vhive/ctriface" log "github.com/sirupsen/logrus" ) -type funcInstance struct { - VmID string - Image string - Logger *log.Entry - OnceCreateSnapInstance *sync.Once - StartVMResponse *ctriface.StartVMResponse +type FuncInstance struct { + vmID string + image string + revisionId string + snapBooted bool + coldStartTimeMs int64 + memSizeMib uint32 + vCPUCount uint32 + logger *log.Entry + StartVMResponse *ctriface.StartVMResponse } -func newFuncInstance(vmID, image string, startVMResponse *ctriface.StartVMResponse) *funcInstance { - f := &funcInstance{ - VmID: vmID, - Image: image, - OnceCreateSnapInstance: new(sync.Once), - StartVMResponse: startVMResponse, +func NewFuncInstance(vmID, image, revisionId string, startVMResponse *ctriface.StartVMResponse, snapBooted bool, memSizeMib, vCPUCount uint32, coldstartTimeMs int64) *FuncInstance { 
+ f := &FuncInstance{ + vmID: vmID, + image: image, + revisionId: revisionId, + StartVMResponse: startVMResponse, + snapBooted: snapBooted, + memSizeMib: memSizeMib, + vCPUCount: vCPUCount, + coldStartTimeMs: coldstartTimeMs, } - f.Logger = log.WithFields( + f.logger = log.WithFields( log.Fields{ "vmID": vmID, "image": image, + "revision": revisionId, }, ) return f -} +} \ No newline at end of file diff --git a/cri/firecracker/service.go b/cri/firecracker/service.go index 01a33ba17..4fb5a1a4b 100644 --- a/cri/firecracker/service.go +++ b/cri/firecracker/service.go @@ -37,6 +37,7 @@ import ( const ( userContainerName = "user-container" queueProxyName = "queue-proxy" + revisionEnv = "K_REVISION" guestIPEnv = "GUEST_ADDR" guestPortEnv = "GUEST_PORT" guestImageEnv = "GUEST_IMAGE" @@ -60,7 +61,7 @@ type VMConfig struct { guestPort string } -func NewFirecrackerService(orch *ctriface.Orchestrator) (*FirecrackerService, error) { +func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool) (*FirecrackerService, error) { fs := new(FirecrackerService) stockRuntimeClient, err := cri.NewStockRuntimeServiceClient() if err != nil { @@ -68,7 +69,7 @@ func NewFirecrackerService(orch *ctriface.Orchestrator) (*FirecrackerService, er return nil, err } fs.stockRuntimeClient = stockRuntimeClient - fs.coordinator = newFirecrackerCoordinator(orch) + fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps) fs.vmConfigs = make(map[string]*VMConfig) return fs, nil } @@ -113,6 +114,12 @@ func (fs *FirecrackerService) createUserContainer(ctx context.Context, r *criapi return nil, err } + revision, err := getEnvVal(revisionEnv, config) + if err != nil { + log.WithError(err).Error() + return nil, err + } + memSizeMib, err := getMemorySize(config) if err != nil { log.WithError(err).Error() @@ -125,7 +132,7 @@ func (fs *FirecrackerService) createUserContainer(ctx context.Context, r *criapi return nil, err } - funcInst, err := 
fs.coordinator.startVM(context.Background(), guestImage, memSizeMib, vCPUCount) + funcInst, err := fs.coordinator.startVM(context.Background(), guestImage, revision, memSizeMib, vCPUCount) if err != nil { log.WithError(err).Error("failed to start VM") return nil, err @@ -137,6 +144,7 @@ func (fs *FirecrackerService) createUserContainer(ctx context.Context, r *criapi return nil, err } + // Temporarily store vm config so we can access this info when creating the queue-proxy container vmConfig := &VMConfig{guestIP: funcInst.StartVMResponse.GuestIP, guestPort: guestPort} fs.insertVMConfig(r.GetPodSandboxId(), vmConfig) @@ -149,6 +157,12 @@ func (fs *FirecrackerService) createUserContainer(ctx context.Context, r *criapi return nil, stockErr } + // Check for error from container creation + if stockErr != nil { + log.WithError(stockErr).Error("failed to create container") + return nil, stockErr + } + containerdID := stockResp.ContainerId err = fs.coordinator.insertActive(containerdID, funcInst) if err != nil { diff --git a/ctriface/bench_test.go b/ctriface/bench_test.go index 3452b7495..bd5926432 100644 --- a/ctriface/bench_test.go +++ b/ctriface/bench_test.go @@ -56,7 +56,7 @@ func TestBenchmarkStart(t *testing.T) { ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) defer cancel() - orch := NewOrchestrator("devmapper", "", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled)) + orch := NewOrchestrator("devmapper", "", "fc-dev-thinpool","",10, WithTestModeOn(true), WithUPF(*isUPFEnabled)) images := getAllImages() benchCount := 10 @@ -75,7 +75,7 @@ func TestBenchmarkStart(t *testing.T) { for i := 0; i < benchCount; i++ { dropPageCache() - _, metric, err := orch.StartVM(ctx, vmIDString, imageName, 0, 0) + _, metric, err := orch.StartVM(ctx, vmIDString, imageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") startMetrics[i] = metric diff --git a/ctriface/failing_test.go b/ctriface/failing_test.go index 
c74b9b05b..e65fb1a63 100644 --- a/ctriface/failing_test.go +++ b/ctriface/failing_test.go @@ -24,6 +24,7 @@ package ctriface import ( "context" + "github.com/ease-lab/vhive/snapshotting" "os" "testing" "time" @@ -51,23 +52,22 @@ func TestStartSnapStop(t *testing.T) { ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) defer cancel() - orch := NewOrchestrator("devmapper", "", 10, WithTestModeOn(true)) + orch := NewOrchestrator("devmapper", "", "fc-dev-thinpool","",10, WithTestModeOn(true)) vmID := "2" + revisionID := "myrev-2" - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - err = orch.CreateSnapshot(ctx, vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") - err = orch.Offload(ctx, vmID) - require.NoError(t, err, "Failed to offload VM") - - _, err = orch.LoadSnapshot(ctx, vmID) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) diff --git a/ctriface/iface.go b/ctriface/iface.go index 725e0fe96..9a104b654 100644 --- a/ctriface/iface.go +++ b/ctriface/iface.go @@ -24,6 +24,7 @@ package ctriface import ( "context" + "github.com/ease-lab/vhive/snapshotting" "os" "os/exec" "strings" @@ -53,7 +54,6 @@ import ( ) // StartVMResponse is the response returned by StartVM -// TODO: Integrate response with non-cri API type StartVMResponse struct { // GuestIP is the IP of the guest MicroVM GuestIP string @@ -61,12 +61,10 @@ type StartVMResponse struct { const ( testImageName = "ghcr.io/ease-lab/helloworld:var_workload" - defaultVcpuCount = 1 - defaultMemsizeMib = 256 ) // 
StartVM Boots a VM if it does not exist -func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { +func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32, trackDirtyPages bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { var ( startVMMetric *metrics.Metric = metrics.NewMetric() tStart time.Time @@ -75,6 +73,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS logger := log.WithFields(log.Fields{"vmID": vmID, "image": imageName}) logger.Debug("StartVM: Received StartVM") + // 1. Allocate VM metadata & create vm network vm, err := o.vmPool.Allocate(vmID) if err != nil { logger.Error("failed to allocate VM in VM pool") @@ -84,13 +83,9 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS // Set VM vCPU and Memory if memSizeMib != 0 { vm.MemSizeMib = memSizeMib - } else { - vm.MemSizeMib = defaultMemsizeMib } if vCPUCount != 0 { vm.VCPUCount = vCPUCount - } else { - vm.VCPUCount = defaultVcpuCount } defer func() { @@ -103,14 +98,17 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS }() ctx = namespaces.WithNamespace(ctx, namespaceName) + + // 2. Fetch VM image tStart = time.Now() if vm.Image, err = o.imageManager.GetImage(ctx, imageName); err != nil { return nil, nil, errors.Wrapf(err, "Failed to get/pull image") } startVMMetric.MetricMap[metrics.GetImage] = metrics.ToUS(time.Since(tStart)) + // 3. Create VM tStart = time.Now() - conf := o.getVMConfig(vm) + conf := o.getVMConfig(vm, trackDirtyPages) resp, err := o.fcClient.CreateVM(ctx, conf) startVMMetric.MetricMap[metrics.FcCreateVM] = metrics.ToUS(time.Since(tStart)) if err != nil { @@ -125,13 +123,14 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS } }() + // 4. 
Create container logger.Debug("StartVM: Creating a new container") tStart = time.Now() container, err := o.client.NewContainer( ctx, - vmID, + vm.ContainerSnapKey, containerd.WithSnapshotter(o.snapshotter), - containerd.WithNewSnapshot(vmID, *vm.Image), + containerd.WithNewSnapshot(vm.ContainerSnapKey, *vm.Image), containerd.WithNewSpec( oci.WithImageConfig(*vm.Image), firecrackeroci.WithVMID(vmID), @@ -153,6 +152,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS } }() + // 5. Turn container into runnable process iologger := NewWorkloadIoWriter(vmID) o.workloadIo.Store(vmID, &iologger) logger.Debug("StartVM: Creating a new task") @@ -172,6 +172,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS } }() + // 6. Wait for task to get ready logger.Debug("StartVM: Waiting for the task to get ready") tStart = time.Now() ch, err := task.Wait(ctx) @@ -189,6 +190,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS } }() + // 7. Start process inside container logger.Debug("StartVM: Starting the task") tStart = time.Now() if err := task.Start(ctx); err != nil { @@ -250,32 +252,37 @@ func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string) error { logger = log.WithFields(log.Fields{"vmID": vmID}) - task := *vm.Task - if err := task.Kill(ctx, syscall.SIGKILL); err != nil { - logger.WithError(err).Error("Failed to kill the task") - return err - } + // Cleanup and remove container if VM not booted from snapshot + if ! 
vm.SnapBooted { + task := *vm.Task + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + logger.WithError(err).Error("Failed to kill the task") + return err + } - <-vm.TaskCh - //FIXME: Seems like some tasks need some extra time to die Issue#15, lr_training - time.Sleep(500 * time.Millisecond) + <-vm.TaskCh + //FIXME: Seems like some tasks need some extra time to die Issue#15, lr_training + time.Sleep(500 * time.Millisecond) - if _, err := task.Delete(ctx); err != nil { - logger.WithError(err).Error("failed to delete task") - return err - } + if _, err := task.Delete(ctx); err != nil { + logger.WithError(err).Error("failed to delete task") + return err + } - container := *vm.Container - if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { - logger.WithError(err).Error("failed to delete container") - return err + container := *vm.Container + if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { + logger.WithError(err).Error("failed to delete container") + return err + } } + // Stop VM if _, err := o.fcClient.StopVM(ctx, &proto.StopVMRequest{VMID: vmID}); err != nil { logger.WithError(err).Error("failed to stop firecracker-containerd VM") return err } + // Free VM metadata and clean up network if err := o.vmPool.Free(vmID); err != nil { logger.Error("failed to free VM from VM pool") return err @@ -283,6 +290,21 @@ func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string) error { o.workloadIo.Delete(vmID) + // Cleanup VM devmapper container snapshot if booted from snapshot + if vm.SnapBooted { + if err := o.devMapper.RemoveDeviceSnapshot(ctx, vm.ContainerSnapKey); err != nil { + logger.Error("failed to deactivate container snapshot") + return err + } + + if o.GetUPFEnabled() { + if err := o.memoryManager.Deactivate(vmID); err != nil { + logger.Error("Failed to deactivate VM in the memory manager") + return err + } + } + } + logger.Debug("Stopped VM successfully") return nil @@ -306,7 +328,7 @@ func 
getK8sDNS() []string { return dnsIPs } -func (o *Orchestrator) getVMConfig(vm *misc.VM) *proto.CreateVMRequest { +func (o *Orchestrator) getVMConfig(vm *misc.VM, trackDirtyPages bool) *proto.CreateVMRequest { kernelArgs := "ro noapic reboot=k panic=1 pci=off nomodules systemd.log_color=false systemd.unit=firecracker.target init=/sbin/overlay-init tsc=reliable quiet 8250.nr_uarts=0 ipv6.disable=1" return &proto.CreateVMRequest{ @@ -316,6 +338,7 @@ func (o *Orchestrator) getVMConfig(vm *misc.VM) *proto.CreateVMRequest { MachineCfg: &proto.FirecrackerMachineConfiguration{ VcpuCount: vm.VCPUCount, MemSizeMib: vm.MemSizeMib, + TrackDirtyPages: trackDirtyPages, }, NetworkInterfaces: []*proto.FirecrackerNetworkInterface{{ StaticConfig: &proto.StaticNetworkConfiguration{ @@ -328,7 +351,7 @@ func (o *Orchestrator) getVMConfig(vm *misc.VM) *proto.CreateVMRequest { }, }, }}, - // NetworkNamespace: vm.NetConfig.GetNamespacePath(), // TODO + NetworkNamespace: vm.NetConfig.GetNamespacePath(), } } @@ -397,16 +420,24 @@ func (o *Orchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metr } // CreateSnapshot Creates a snapshot of a VM -func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string) error { +func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { logger := log.WithFields(log.Fields{"vmID": vmID}) logger.Debug("Orchestrator received CreateSnapshot") ctx = namespaces.WithNamespace(ctx, namespaceName) + // 1. Get VM metadata + vm, err := o.vmPool.GetVM(vmID) + if err != nil { + return err + } + + // 2. 
Create VM & VM memory state snapshot req := &proto.CreateSnapshotRequest{ VMID: vmID, - SnapshotFilePath: o.getSnapshotFile(vmID), - MemFilePath: o.getMemoryFile(vmID), + SnapshotFilePath: snap.GetSnapFilePath(), + MemFilePath: snap.GetMemFilePath(), + SnapshotType: snap.GetSnapType(), } if _, err := o.fcClient.CreateSnapshot(ctx, req); err != nil { @@ -414,11 +445,30 @@ func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string) error { return err } + // 3. Backup disk state difference. + // 3.B Alternatively could also do ForkContainerSnap(ctx, vm.ContainerSnapKey, snap.GetContainerSnapName(), *vm.Image, forkMetric) + if err := o.devMapper.CreatePatch(ctx, snap.GetPatchFilePath(), vm.ContainerSnapKey, *vm.Image); err != nil { + logger.WithError(err).Error("failed to create container patch file") + return err + } + + // 4. Serialize snapshot info + if err := snap.SerializeSnapInfo(); err != nil { + logger.WithError(err).Error("failed to serialize snapshot info") + return err + } + + // 5. Resume + if _, err := o.fcClient.ResumeVM(ctx, &proto.ResumeVMRequest{VMID: vmID}); err != nil { + log.Printf("failed to resume the VM") + return err + } + return nil } // LoadSnapshot Loads a snapshot of a VM -func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string) (*metrics.Metric, error) { +func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { var ( loadSnapshotMetric *metrics.Metric = metrics.NewMetric() tStart time.Time @@ -431,16 +481,56 @@ func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string) (*metrics. ctx = namespaces.WithNamespace(ctx, namespaceName) + // 1. 
Allocate VM metadata & create vm network + vm, err := o.vmPool.Allocate(vmID) + if err != nil { + logger.Error("failed to allocate VM in VM pool") + return nil, nil, err + } + + defer func() { + // Free the VM from the pool if function returns error + if retErr != nil { + if err := o.vmPool.Free(vmID); err != nil { + logger.WithError(err).Errorf("failed to free VM from pool after failure") + } + } + }() + + // 2. Fetch image for VM + if vm.Image, err = o.imageManager.GetImage(ctx, snap.GetImage()); err != nil { + return nil, nil, errors.Wrapf(err, "Failed to get/pull image") + } + + // 3. Create snapshot for container to run + // 3.B Alternatively could also do CreateDeviceSnapshot(ctx, vm.ContainerSnapKey, snap.GetContainerSnapName()) + if err := o.devMapper.CreateDeviceSnapshotFromImage(ctx, vm.ContainerSnapKey, *vm.Image); err != nil { + return nil, nil, errors.Wrapf(err, "creating container snapshot") + } + + containerSnap, err := o.devMapper.GetDeviceSnapshot(ctx, vm.ContainerSnapKey) + if err != nil { + return nil, nil, errors.Wrapf(err, "previously created container device does not exist") + } + + // 4. Unpack patch into container snapshot + if err := o.devMapper.RestorePatch(ctx, vm.ContainerSnapKey, snap.GetPatchFilePath()); err != nil { + return nil, nil, errors.Wrapf(err, "unpacking patch into container snapshot") + } + + // 5. Load VM from snapshot req := &proto.LoadSnapshotRequest{ VMID: vmID, - SnapshotFilePath: o.getSnapshotFile(vmID), - MemFilePath: o.getMemoryFile(vmID), + SnapshotFilePath: snap.GetSnapFilePath(), + MemFilePath: snap.GetMemFilePath(), EnableUserPF: o.GetUPFEnabled(), + NetworkNamespace: vm.NetConfig.GetNamespacePath(), + NewSnapshotPath: containerSnap.GetDevicePath(), } if o.GetUPFEnabled() { if err := o.memoryManager.FetchState(vmID); err != nil { - return nil, err + return nil, nil, err } } @@ -466,44 +556,17 @@ func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string) (*metrics. 
if loadErr != nil || activateErr != nil { multierr := multierror.Of(loadErr, activateErr) - return nil, multierr - } - - return loadSnapshotMetric, nil -} - -// Offload Shuts down the VM but leaves shim and other resources running. -func (o *Orchestrator) Offload(ctx context.Context, vmID string) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received Offload") - - ctx = namespaces.WithNamespace(ctx, namespaceName) - - _, err := o.vmPool.GetVM(vmID) - if err != nil { - if _, ok := err.(*misc.NonExistErr); ok { - logger.Panic("Offload: VM does not exist") - } - logger.Panic("Offload: GetVM() failed for an unknown reason") - + return nil, nil, multierr } - if o.GetUPFEnabled() { - if err := o.memoryManager.Deactivate(vmID); err != nil { - logger.Error("Failed to deactivate VM in the memory manager") - return err - } - } + vm.SnapBooted = true - if _, err := o.fcClient.Offload(ctx, &proto.OffloadRequest{VMID: vmID}); err != nil { - logger.WithError(err).Error("failed to offload the VM") - return err - } + return &StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, nil, nil +} - if err := o.vmPool.Free(vmID); err != nil { - logger.Error("failed to free VM from VM pool") - return err +func (o *Orchestrator) CleanupRevisionSnapshot(ctx context.Context, revisionID string) error { + if err := o.devMapper.RemoveDeviceSnapshot(ctx, revisionID); err != nil { + return errors.Wrapf(err, "removing revision snapshot") } - return nil } diff --git a/ctriface/iface_test.go b/ctriface/iface_test.go index 36a154187..acb73aa9e 100644 --- a/ctriface/iface_test.go +++ b/ctriface/iface_test.go @@ -25,6 +25,7 @@ import ( "context" "flag" "fmt" + "github.com/ease-lab/vhive/snapshotting" "os" "sync" "testing" @@ -62,6 +63,8 @@ func TestPauseSnapResume(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + "fc-dev-thinpool", + "", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), @@ -69,14 +72,16 @@ func TestPauseSnapResume(t *testing.T) { ) 
vmID := "4" + revisionID := "myrev-4" - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - err = orch.CreateSnapshot(ctx, vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) @@ -105,6 +110,8 @@ func TestStartStopSerial(t *testing.T) { orch := NewOrchestrator( "devmapper", + "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), @@ -114,7 +121,7 @@ func TestStartStopSerial(t *testing.T) { vmID := "5" - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.StopSingleVM(ctx, vmID) @@ -140,6 +147,8 @@ func TestPauseResumeSerial(t *testing.T) { orch := NewOrchestrator( "devmapper", + "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), @@ -149,7 +158,7 @@ func TestPauseResumeSerial(t *testing.T) { vmID := "6" - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) @@ -183,6 +192,8 @@ func TestStartStopParallel(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + "fc-dev-thinpool", + "", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), @@ -200,7 +211,7 @@ func TestStartStopParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM "+vmID) }(i) } @@ -243,6 +254,8 @@ func TestPauseResumeParallel(t 
*testing.T) { orch := NewOrchestrator( "devmapper", "", + "fc-dev-thinpool", + "", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), @@ -260,7 +273,7 @@ func TestPauseResumeParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") }(i) } diff --git a/ctriface/manual_cleanup_test.go b/ctriface/manual_cleanup_test.go index 0a883aeaa..4b0241648 100644 --- a/ctriface/manual_cleanup_test.go +++ b/ctriface/manual_cleanup_test.go @@ -25,6 +25,7 @@ package ctriface import ( "context" "fmt" + "github.com/ease-lab/vhive/snapshotting" "os" "sync" "testing" @@ -56,6 +57,8 @@ func TestSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + "fc-dev-thinpool", + "", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), @@ -63,23 +66,22 @@ func TestSnapLoad(t *testing.T) { ) vmID := "1" + revisionID := "myrev-1" - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - err = orch.CreateSnapshot(ctx, vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - err = orch.Offload(ctx, vmID) - require.NoError(t, err, "Failed to offload VM") - - _, err = orch.LoadSnapshot(ctx, vmID) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) @@ -107,6 +109,8 @@ func TestSnapLoadMultiple(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + "fc-dev-thinpool", + 
"", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), @@ -114,37 +118,30 @@ func TestSnapLoadMultiple(t *testing.T) { ) vmID := "3" + revisionID := "myrev-3" - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - err = orch.CreateSnapshot(ctx, vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") - err = orch.Offload(ctx, vmID) - require.NoError(t, err, "Failed to offload VM") - - _, err = orch.LoadSnapshot(ctx, vmID) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - err = orch.Offload(ctx, vmID) - require.NoError(t, err, "Failed to offload VM") - - _, err = orch.LoadSnapshot(ctx, vmID) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM, ") - err = orch.Offload(ctx, vmID) - require.NoError(t, err, "Failed to offload VM") - orch.Cleanup() } @@ -170,6 +167,8 @@ func TestParallelSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + "fc-dev-thinpool", + "", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), @@ -186,20 +185,19 @@ func TestParallelSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) + revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM, "+vmID) err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed 
to pause VM, "+vmID) - err = orch.CreateSnapshot(ctx, vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM, "+vmID) - err = orch.Offload(ctx, vmID) - require.NoError(t, err, "Failed to offload VM, "+vmID) - - _, err = orch.LoadSnapshot(ctx, vmID) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM, "+vmID) _, err = orch.ResumeVM(ctx, vmID) @@ -233,6 +231,8 @@ func TestParallelPhasedSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", + "fc-dev-thinpool", + "", 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), @@ -250,7 +250,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, testImageName, 0, 0) + _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM, "+vmID) }(i) } @@ -278,7 +278,9 @@ func TestParallelPhasedSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - err := orch.CreateSnapshot(ctx, vmID) + revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM, "+vmID) }(i) } @@ -292,21 +294,9 @@ func TestParallelPhasedSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - err := orch.Offload(ctx, vmID) - require.NoError(t, err, "Failed to offload VM, "+vmID) - }(i) - } - vmGroup.Wait() - } - - { - var vmGroup sync.WaitGroup - for i := 0; i < vmNum; i++ { - vmGroup.Add(1) - go func(i int) { - defer vmGroup.Done() - vmID := fmt.Sprintf("%d", i+vmIDBase) - _, err := orch.LoadSnapshot(ctx, 
vmID) + revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + _, _, err := orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM, "+vmID) }(i) } diff --git a/ctriface/orch.go b/ctriface/orch.go index 67a9365b7..792f48009 100644 --- a/ctriface/orch.go +++ b/ctriface/orch.go @@ -24,6 +24,7 @@ package ctriface import ( "github.com/ease-lab/vhive/ctrimages" + "github.com/ease-lab/vhive/devmapper" "os" "os/signal" "path/filepath" @@ -75,11 +76,11 @@ func (wio WorkloadIoWriter) Write(p []byte) (n int, err error) { // Orchestrator Drives all VMs type Orchestrator struct { vmPool *misc.VMPool - cachedImages map[string]containerd.Image workloadIo sync.Map // vmID string -> WorkloadIoWriter snapshotter string client *containerd.Client fcClient *fcclient.Client + devMapper *devmapper.DeviceMapper imageManager *ctrimages.ImageManager // store *skv.KVStore snapshotsEnabled bool @@ -87,19 +88,20 @@ type Orchestrator struct { isLazyMode bool snapshotsDir string isMetricsMode bool + hostIface string memoryManager *manager.MemoryManager } // NewOrchestrator Initializes a new orchestrator -func NewOrchestrator(snapshotter, hostIface string, netPoolSize int, opts ...OrchestratorOption) *Orchestrator { +func NewOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPoolSize int, opts ...OrchestratorOption) *Orchestrator { // TODO: args var err error o := new(Orchestrator) o.vmPool = misc.NewVMPool(hostIface, netPoolSize) - o.cachedImages = make(map[string]containerd.Image) o.snapshotter = snapshotter o.snapshotsDir = "/fccd/snapshots" + o.hostIface = hostIface for _, opt := range opts { opt(o) @@ -136,6 +138,8 @@ func NewOrchestrator(snapshotter, hostIface string, netPoolSize int, opts ...Orc } log.Info("Created firecracker client") + o.devMapper = devmapper.NewDeviceMapper(o.client, poolName, metadataDev) + o.imageManager = 
ctrimages.NewImageManager(o.client, o.snapshotter) return o @@ -197,11 +201,11 @@ func (o *Orchestrator) GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error return o.memoryManager.GetUPFLatencyStats(vmID) } -func (o *Orchestrator) getSnapshotFile(vmID string) string { +func (o *Orchestrator) getSnapshotFile(vmID string) string { // TODO: remove return filepath.Join(o.getVMBaseDir(vmID), "snap_file") } -func (o *Orchestrator) getMemoryFile(vmID string) string { +func (o *Orchestrator) getMemoryFile(vmID string) string { // TODO: remove return filepath.Join(o.getVMBaseDir(vmID), "mem_file") } diff --git a/ctrimages/imageManager.go b/ctrimages/imageManager.go index 3bec848fb..04bda27f1 100644 --- a/ctrimages/imageManager.go +++ b/ctrimages/imageManager.go @@ -1,3 +1,25 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ // Package ctrimages provides an image manager that manages and caches container images. package ctrimages diff --git a/devmapper/deviceSnapshot.go b/devmapper/deviceSnapshot.go index 25beecfd6..b3cc5af80 100644 --- a/devmapper/deviceSnapshot.go +++ b/devmapper/deviceSnapshot.go @@ -1,3 +1,25 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ package devmapper import ( diff --git a/devmapper/devicemapper.go b/devmapper/devicemapper.go index 60c983c11..7142b1350 100644 --- a/devmapper/devicemapper.go +++ b/devmapper/devicemapper.go @@ -1,3 +1,25 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ package devmapper import ( diff --git a/devmapper/thindelta/blockDelta.go b/devmapper/thindelta/blockDelta.go index e8f88d32a..3470fd781 100644 --- a/devmapper/thindelta/blockDelta.go +++ b/devmapper/thindelta/blockDelta.go @@ -1,3 +1,25 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ package thindelta import ( diff --git a/devmapper/thindelta/thinDelta.go b/devmapper/thindelta/thinDelta.go index b2ef4aab9..ef25ebe14 100644 --- a/devmapper/thindelta/thinDelta.go +++ b/devmapper/thindelta/thinDelta.go @@ -1,3 +1,25 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ package thindelta import ( @@ -5,9 +27,11 @@ import ( "bytes" "fmt" "github.com/pkg/errors" + log "github.com/sirupsen/logrus" xmlparser "github.com/tamerh/xml-stream-parser" "os/exec" "strconv" + "strings" "sync" ) @@ -29,9 +53,31 @@ func NewThinDelta(poolName string, metaDataDev string) *ThinDelta { thinDelta := new(ThinDelta) thinDelta.poolName = poolName thinDelta.metaDataDev = metaDataDev + if thinDelta.metaDataDev == "" { + metaDev, _ := getMetadataDev() + thinDelta.metaDataDev = metaDev + } return thinDelta } +// getMetadataDev returns the metadata device used by the device mapper +func getMetadataDev() (string, error) { + out, err := exec.Command("sudo", "losetup").Output() + if err != nil { + log.Warnf("Failed to fetch devmapper metadata device, %v\n", err) + return "", err + } + + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "devmapper/metadata") { + return line[:strings.Index(line, " ")], nil + } + } + return "", errors.New("Failed to fetch devmapper metadata device") +} + // getPoolPath returns the path of the devicemapper thinpool. 
func (thd *ThinDelta) getPoolPath() string { return fmt.Sprintf("/dev/mapper/%s", thd.poolName) diff --git a/functions.go b/functions.go index 4ded99161..480e277c5 100644 --- a/functions.go +++ b/functions.go @@ -25,6 +25,7 @@ package main import ( "context" "fmt" + "github.com/ease-lab/vhive/snapshotting" "math/rand" "net" "os" @@ -356,7 +357,7 @@ func (f *Function) AddInstance() *metrics.Metric { if f.isSnapshotReady { metr = f.LoadInstance() } else { - resp, _, err := orch.StartVM(ctx, f.getVMID(), f.imageName, 0, 0) + resp, _, err := orch.StartVM(ctx, f.getVMID(), f.imageName, 256, 1, false) if err != nil { log.Panic(err) } @@ -410,10 +411,7 @@ func (f *Function) RemoveInstance(isSync bool) (string, error) { f.OnceAddInstance = new(sync.Once) - if orch.GetSnapshotsEnabled() { - f.OffloadInstance() - r = "Successfully offloaded instance " + f.vmID - } else { + if ! orch.GetSnapshotsEnabled() { if isSync { err = orch.StopSingleVM(context.Background(), f.vmID) } else { @@ -450,7 +448,9 @@ func (f *Function) CreateInstanceSnapshot() { log.Panic(err) } - err = orch.CreateSnapshot(ctx, f.vmID) + revisionID := fmt.Sprintf("myrev-%d", f.vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, 0, 256, 1, false) + err = orch.CreateSnapshot(ctx, f.vmID, snap) if err != nil { log.Panic(err) } @@ -461,22 +461,6 @@ func (f *Function) CreateInstanceSnapshot() { } } -// OffloadInstance Offloads the instance -func (f *Function) OffloadInstance() { - logger := log.WithFields(log.Fields{"fID": f.fID}) - - logger.Debug("Offloading instance") - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - defer cancel() - - err := orch.Offload(ctx, f.vmID) - if err != nil { - log.Panic(err) - } - f.conn.Close() -} - // LoadInstance Loads a new instance of the function from its snapshot and resumes it // The tap, the shim and the vmID remain the same func (f *Function) LoadInstance() *metrics.Metric { @@ -487,7 +471,9 @@ func (f 
*Function) LoadInstance() *metrics.Metric { ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) defer cancel() - loadMetr, err := orch.LoadSnapshot(ctx, f.vmID) + revisionID := fmt.Sprintf("myrev-%d", f.vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, 0, 256, 1, false) + _, loadMetr, err := orch.LoadSnapshot(ctx, f.vmID, snap) if err != nil { log.Panic(err) } diff --git a/go.mod b/go.mod index 9f53be215..bf2a107c9 100644 --- a/go.mod +++ b/go.mod @@ -41,13 +41,12 @@ replace ( ) replace ( - github.com/ease-lab/vhive/examples/protobuf/helloworld => ./examples/protobuf/helloworld // github.com/firecracker-microvm/firecracker-containerd => github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4 - github.com/containerd/containerd => github.com/amohoste/containerd v1.5.5-ids // TODO - github.com/firecracker-microvm/firecracker-containerd => github.com/amohoste/firecracker-containerd v1.0.0-sparse // TODO + github.com/containerd/containerd => github.com/amohoste/containerd v1.5.5-ids // TODO: change to vhive + github.com/ease-lab/vhive/examples/protobuf/helloworld => ./examples/protobuf/helloworld + github.com/firecracker-microvm/firecracker-containerd => github.com/amohoste/firecracker-containerd v1.0.0-sparse // TODO: change to vhive ) - require ( github.com/antchfx/xpath v1.2.0 // indirect github.com/blend/go-sdk v1.20211025.3 // indirect @@ -55,16 +54,15 @@ require ( github.com/containerd/go-cni v1.1.4 github.com/davecgh/go-spew v1.1.1 github.com/ease-lab/vhive/examples/protobuf/helloworld v0.0.0-00010101000000-000000000000 - github.com/ease-lab/vhive/taps v0.0.0-20210607161503-ce9e244976f7 github.com/firecracker-microvm/firecracker-containerd v0.0.0-00010101000000-000000000000 github.com/ftrvxmtrx/fd v0.0.0-20150925145434-c6d800382fff github.com/go-multierror/multierror v1.0.2 - github.com/golang/protobuf v1.4.3 + github.com/golang/protobuf v1.5.0 github.com/google/nftables 
v0.0.0-20210916140115-16a134723a96 github.com/montanaflynn/stats v0.6.5 github.com/opencontainers/image-spec v1.0.1 github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.8.0 + github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.7.0 github.com/tamerh/xml-stream-parser v1.4.0 github.com/tamerh/xpath v1.0.0 // indirect @@ -74,7 +72,7 @@ require ( golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb // indirect golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210324051608-47abb6519492 + golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 gonum.org/v1/gonum v0.9.0 gonum.org/v1/plot v0.9.0 google.golang.org/grpc v1.34.0 diff --git a/go.sum b/go.sum index 53b687162..f590b9411 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,4 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -34,57 +33,33 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm 
v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= -github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v4.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= -github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod 
h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/Microsoft/hcsshim v0.8.18 
h1:cYnKADiM1869gvBpos3YCteeT6sZLB48lB5dmMMs8Tg= +github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v0.0.0-20181212234831-e0a55b97c705/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ= @@ -94,8 +69,11 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/amohoste/containerd v1.5.5-ids h1:ewus7bzwx6j8ZlKqNjoctyQ2EOKAK+9nYBqC+D4XKfg= +github.com/amohoste/containerd v1.5.5-ids/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo= +github.com/amohoste/firecracker-containerd v1.0.0-sparse h1:cGDp1kcB5gBxtWdgW1VYw8nroSYq751at07q5L2OBZY= +github.com/amohoste/firecracker-containerd v1.0.0-sparse/go.mod h1:+/08aD580irjp7X1+gyIIgRf4IbsxhgFhDQ15m+SFyY= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8= github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -106,147 +84,58 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go 
v1.38.41/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/awslabs/tc-redirect-tap v0.0.0-20200708224642-a0300978797d/go.mod h1:kQGbgU5sye2xV5J0ruPiscqz3Cj30VtoWRrhBeIo7dM= -github.com/bazelbuild/bazel-gazelle v0.18.2/go.mod h1:D0ehMSbS+vesFsLGiD6JXu3mVEzOlfUl8wNnq+x/9p0= -github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A= -github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= 
github.com/blend/go-sdk v1.20211025.3 h1:3f8hYTMb9ufP8IkBtsflNohAqoKo4hEBbeR0s4bfBqI= github.com/blend/go-sdk v1.20211025.3/go.mod h1:nbmX7cdPm66JOqg6M3cKMtuqj6RzkE72sHZue61T5c0= github.com/blend/sentry-go v1.0.1/go.mod h1:hgyX3WXen2YBiA0NitlfsXsvS+9ly2YlEBmmmYDgrWY= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= 
-github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= -github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= 
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod 
h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.6/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.2 h1:MG/Bg1pbmMb61j3wHCFWPxESXHieiKr2xG64px/k8zQ= -github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod 
h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= github.com/containerd/go-cni v1.1.4 h1:Mv3XkOjVsjTJHMpSi+dKZQPQGXEMpmXWs8oYZDaCK+s= 
github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U= github.com/containerd/ttrpc v1.0.2/go.mod 
h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= @@ -256,28 +145,22 @@ github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtr github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJRZVOhrCQj91YyC92sxg= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod 
h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U= 
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -291,46 +174,24 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.3.0/go.mod 
h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4 h1:aSZHGMFJMcS47URi6xu7n1anJfEHagZjGf6HR8ZHggg= -github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4/go.mod h1:Uon4eMMkFBsj2aYWnk1wz3xebaKgR3+CCjmc62cCcvo= -github.com/ease-lab/vhive/taps v0.0.0-20210607161503-ce9e244976f7 h1:c8DqqFtlBn7ivZ37PDJShZSfDy65F7ELu0FJ+cS/wbI= -github.com/ease-lab/vhive/taps v0.0.0-20210607161503-ce9e244976f7/go.mod h1:74sgHZg376wVFR0xATx3aA8vQZ2jGp7aF/1CUl2Om2Y= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= 
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/firecracker-microvm/firecracker-go-sdk v0.22.1-0.20210520223842-abd0815b8bf9/go.mod h1:Dbh2OFp/p0Obqp7An+3ktnfC6/a5DKUkMQ5zA/Qmb+0= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -340,17 +201,10 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ftrvxmtrx/fd v0.0.0-20150925145434-c6d800382fff h1:zk1wwii7uXmI0znwU+lqg+wFL9G5+vm5I+9rv2let60= github.com/ftrvxmtrx/fd v0.0.0-20150925145434-c6d800382fff/go.mod 
h1:yUhRXHewUVJ1k89wHKP68xfzk7kwXUx/DV1nx4EBMbw= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= -github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs= github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= @@ -361,19 +215,16 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini 
v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07 h1:OTlfMvwR1rLyf9goVmXfuS5AJn80+Vmj4rTf4n46SOs= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-multierror/multierror v1.0.2 h1:AwsKbEXkmf49ajdFJgcFXqSG0aLo0HEyAE9zk9JguJo= github.com/go-multierror/multierror v1.0.2/go.mod h1:U7SZR/D9jHgt2nkSj8XcbCWdmVM2igraCHQ3HC1HiKY= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -439,22 +290,8 @@ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= 
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -479,12 +316,9 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe 
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.3.2 h1:kX1es4djPJrsDhY7aZKJy7aZasdcB5oSOEphMjSB53c= @@ -492,7 +326,6 @@ github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4Oe github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -506,7 +339,6 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -517,30 +349,8 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71 github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod 
h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= -github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= -github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= 
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -550,10 +360,9 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -570,7 +379,6 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= 
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -578,14 +386,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -594,12 +396,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= @@ -608,22 +408,16 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= -github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= @@ -668,11 +462,6 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= 
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= @@ -690,54 +479,33 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.4.1/go.mod 
h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/koneu/natend v0.0.0-20150829182554-ec0926ea948d h1:MFX8DxRnKMY/2M3H61iSsVbo/n3h0MWGmWNN1UViOU0= github.com/koneu/natend v0.0.0-20150829182554-ec0926ea948d/go.mod h1:QHb4k4cr1fQikUahfcRVPcEXiUgFsdIstGqlurL0XL4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/magefile/mage v1.10.0 h1:3HiXzCUY12kh9bIuyXShaVe529fJfyqoVM42o/uom2g= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= -github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -749,8 +517,6 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -762,40 +528,27 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v0.0.0-20191009155606-de872b0d824b h1:W3er9pI7mt2gOqOWzwvx20iJ8Akiqz1mUMTxU6wdvl8= github.com/mdlayher/netlink v0.0.0-20191009155606-de872b0d824b/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/vsock v0.0.0-20190329173812-a92c53d5dcab/go.mod 
h1:D7ATxm5dbu8KgVaJHLbtcFfkt6/ERTpnCK7kVpGOqsk= github.com/mediocregopher/radix/v4 v4.0.0-beta.1/go.mod h1:Z74pilm773ghbGV4EEoPvi6XWgkAfr0VCNkfa8gI1PU= -github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= @@ -804,23 +557,13 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.6.5 h1:FhV+8hkLRa1fUu6E93WI5ru9FpccbVZYg1Cfefw0D2A= github.com/montanaflynn/stats v0.6.5/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 
-github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= -github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= @@ -830,53 +573,32 @@ github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.13.0 h1:M76yO2HkZASFjXL0HSoZJ1AYEmQxNJmY41Jx1zNUq1Y= github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs= +github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s= 
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= @@ -888,7 +610,6 @@ github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2 github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pingcap/errors v0.11.4/go.mod 
h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -897,43 +618,32 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod 
h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -941,78 +651,55 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod 
h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.5/go.mod 
h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.0 h1:nfhvjKcUMhBMVqbKHJlk5RPrrfYr/NMo3692g0dwfWU= github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/sparrc/go-ping v0.0.0-20190613174326-4e5b6552494c/go.mod h1:eMyUVp6f/5jnzM+3zahzl7q6UXLbgSc3MKg/+ow9QW0= -github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= 
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1021,69 +708,41 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability 
v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tamerh/xml-stream-parser v1.4.0 h1:Vb1ZqshlXi53vvUBzZUdEsEJBvnKVWhfrGEJhfQABfc= github.com/tamerh/xml-stream-parser v1.4.0/go.mod h1:lrpNpthn/iYpnyICCe4KwJSANxywFIfSvsqokQOV9q0= github.com/tamerh/xpath v1.0.0 h1:NccMES/Ej8slPCFDff73Kf6V1xu9hdbuKf2RyDsxf5Q= github.com/tamerh/xpath v1.0.0/go.mod h1:t0wnh72FQlOVEO20f2Dl3EoVxso9GnLREh1WTpvNmJQ= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tilinna/clock v1.0.2/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= -github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod 
h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= -github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vmware/govmomi 
v0.20.1/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/wcharczuk/go-chart v2.0.1+incompatible h1:0pz39ZAycJFF7ju/1mepnk26RLVLBCWz1STcD3doU0A= github.com/wcharczuk/go-chart v2.0.1+incompatible/go.mod h1:PF5tmL4EIx/7Wf+hEkpCqYi5He4u90sw+0+6FhrryuE= -github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1111,36 +770,25 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto 
v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1148,7 +796,6 @@ golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= @@ -1187,31 +834,25 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1244,12 +885,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxW golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20201203001011-0b49973bad19/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= 
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1263,7 +902,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1271,13 +909,9 @@ golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190329044733-9eb1bfa1ce65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1287,26 +921,19 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191029155521-f43be2a4598c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1333,10 +960,8 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1344,16 +969,15 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1363,23 +987,16 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1387,7 +1004,6 @@ golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1398,9 +1014,7 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1442,19 +1056,15 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/gonum v0.8.2/go.mod 
h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.0 h1:KSrriwTGNTqNqyR6ZWWvwtInD/kl89sTrKsDfmbk2HU= gonum.org/v1/gonum v0.9.0/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0 h1:3sEo36Uopv1/SA/dMFFaxXoL5XyikJ9Sf2Vll/k6+2E= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1475,37 +1085,33 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/DataDog/dd-trace-go.v1 v1.27.1/go.mod h1:Sp1lku8WJMvNV0kjDI4Ni/T7J/U3BO5ct5kEaoVU8+I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod 
h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1519,72 +1125,37 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum 
v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.16.6/go.mod h1:naJcEPKsa3oqutLPPMxA2oLSqV4KxGDLU6IgkqHqgFE= -k8s.io/apiextensions-apiserver v0.16.6/go.mod h1:WbwakFromAVhfvLITDk5nRf5UJJwazjeZRx+yKeDcY0= k8s.io/apimachinery v0.16.7-beta.0/go.mod h1:mhhO3hoLkWO+2eCvqjPtH2Ly92l9nJDwsswzWKpkN2w= k8s.io/apiserver v0.16.6/go.mod h1:JaDblfPzg2nbxaA0H3PsMgO72QAx2rBoSYwxLEKu5RE= -k8s.io/cli-runtime v0.16.6/go.mod h1:8N6G/UJmYvLXzpD1kjpuss6mFUeez+eg6Nu15VtBHvM= k8s.io/client-go v0.16.6/go.mod h1:xIQ44uaAH4SD1EHMtCHsB9By7D0qblbv1ADeGyXpZUQ= -k8s.io/cloud-provider v0.16.6/go.mod h1:rTwoMb7ogSqEAZWev8ds88EApSPC6vVAikKgpvjOxpE= -k8s.io/cluster-bootstrap v0.16.6/go.mod h1:cOnd4cgo8AthVSyH7rIWpUNUdJyuCthsZjA2MEsFipI= -k8s.io/code-generator v0.16.7-beta.0/go.mod h1:2aiDuxDU7RQK2PVypXAXHo6+YwOlF33iezHQbSmKSA4= k8s.io/component-base v0.16.6/go.mod 
h1:8+4lrSEgLQ9wqOzHVYx4GLSCU6sus8wqg8bfaTdXTwg= k8s.io/cri-api v0.16.16-rc.0 h1:ZxoaF9IFOdmX2bOqSrvjfoiso5SqtL2/fMX21iG2+DU= k8s.io/cri-api v0.16.16-rc.0/go.mod h1:W6aMMPN5fmxcRGaHnb6BEfoTeS82OsJcsUJyKf+EWYc= -k8s.io/csi-translation-lib v0.16.6/go.mod h1:T/bEjsu1sQn2qVi9FzsPqjvT31mSqpThoFwtnj327jg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-aggregator v0.16.6/go.mod h1:lRjo9e3xeyF8tjkIKEX+pErNOdE4yTazx9VPO6zzdcw= -k8s.io/kube-controller-manager v0.16.6/go.mod h1:7ovDaVMCHc4TBOQHzfb5w2XCib7rjx+QCMZTRVQteD4= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-proxy v0.16.6/go.mod h1:l7jgZcYyjERYxALU/EizkMx/JmIhN2Ff/f/aR/azFKg= -k8s.io/kube-scheduler v0.16.6/go.mod h1:ohT2kmuQnNex0cDUYvXBAdMKHlneruoD4KOacEDpPq4= -k8s.io/kubectl v0.16.6/go.mod h1:ybKdxxoYuQLRqsmBFylvgyFPeVmmRYUbxk134JCiNoM= -k8s.io/kubelet v0.16.6/go.mod h1:NAuB1uZwiOgUnJSgAnJIkWlueXFYkzxwv7xWEA/P35Y= -k8s.io/kubernetes v1.16.6/go.mod h1:rO6tSgbJjbo6lLkrq4jryUaXqZ2PdDJjzWXKZQmLfnQ= -k8s.io/legacy-cloud-providers v0.16.6/go.mod h1:trzyJ8vT+vD+FEP4NHDbJvOXYtksUbpD7PfR6Iwnhxk= -k8s.io/metrics v0.16.6/go.mod h1:de0nJbsn2wX/fapLW0Yi7k+GwXvEv4/g54agaDjzmQY= -k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= -k8s.io/sample-apiserver v0.16.6/go.mod 
h1:fyN8DaZXgtcQKCtb/x2mr4TDTUkaAdgWNU7BaLnlSqg= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/metrics/metrics.go b/metrics/metrics.go index 0b7e0ed5c..c31e8d7d6 100644 
--- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -189,3 +189,8 @@ func PrintMeanStd(resultsPath, funcName string, metricsList ...*Metric) error { func ToUS(dur time.Duration) float64 { return float64(dur.Microseconds()) } + +// ToUS Converts Duration to milliseconds +func ToMs(dur time.Duration) int64 { + return int64(dur.Milliseconds()) +} diff --git a/misc/types.go b/misc/types.go index d8b17032b..bfdb2f6c3 100644 --- a/misc/types.go +++ b/misc/types.go @@ -23,25 +23,30 @@ package misc import ( + "fmt" "github.com/ease-lab/vhive/networking" "sync" "github.com/containerd/containerd" +) - "github.com/ease-lab/vhive/taps" +const ( + defaultVcpuCount = 1 + defaultMemsizeMib = 256 ) // VM type type VM struct { - ID string - Image *containerd.Image - Container *containerd.Container - Task *containerd.Task - TaskCh <-chan containerd.ExitStatus - Ni *taps.NetworkInterface - NetConfig *networking.NetworkConfig - VCPUCount uint32 - MemSizeMib uint32 + ID string + ContainerSnapKey string + SnapBooted bool + Image *containerd.Image + Container *containerd.Container + Task *containerd.Task + TaskCh <-chan containerd.ExitStatus + NetConfig *networking.NetworkConfig + VCPUCount uint32 + MemSizeMib uint32 } // VMPool Pool of active VMs (can be in several states though) @@ -54,6 +59,10 @@ type VMPool struct { func NewVM(vmID string) *VM { vm := new(VM) vm.ID = vmID + vm.ContainerSnapKey = fmt.Sprintf("vm%s-containersnap", vmID) + vm.SnapBooted = false + vm.MemSizeMib = defaultMemsizeMib + vm.VCPUCount = defaultVcpuCount return vm } diff --git a/networking/networkManager.go b/networking/networkManager.go index a86757148..0dc4a103d 100644 --- a/networking/networkManager.go +++ b/networking/networkManager.go @@ -51,7 +51,8 @@ type NetworkManager struct { func NewNetworkManager(hostIfaceName string, poolSize int) (*NetworkManager, error) { manager := new(NetworkManager) - if hostIfaceName == "" { + manager.hostIfaceName = hostIfaceName + if manager.hostIfaceName == "" { 
hostIface, err := getHostIfaceName() if err != nil { return nil, err diff --git a/scripts/setup_system.sh b/scripts/setup_system.sh index 413d4fb2d..7b59d15af 100755 --- a/scripts/setup_system.sh +++ b/scripts/setup_system.sh @@ -29,6 +29,10 @@ sudo apt-get update >> /dev/null sudo apt-get -y install \ apt-transport-https \ + ca-certificates \ + curl \ + e2fsprogs \ + util-linux \ gcc \ g++ \ make \ @@ -43,7 +47,9 @@ sudo apt-get -y install \ software-properties-common \ iproute2 \ nftables \ - git-lfs >> /dev/null + git-lfs \ + thin-provisioning-tools \ + skopeo >> /dev/null # stack size, # of open files, # of pids sudo sh -c "echo \"* soft nofile 1000000\" >> /etc/security/limits.conf" diff --git a/snapshotting/snapHeap.go b/snapshotting/snapHeap.go new file mode 100644 index 000000000..9360d49d9 --- /dev/null +++ b/snapshotting/snapHeap.go @@ -0,0 +1,54 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package snapshotting + +type SnapHeap []*Snapshot + +func (h SnapHeap) Len() int { + return len(h) +} +func (h SnapHeap) Less(i, j int) bool { + return h[i].score < h[j].score +} +func (h SnapHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *SnapHeap) Push(x interface{}) { + *h = append(*h, x.(*Snapshot)) +} + +func (h *SnapHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +func (h *SnapHeap) Peek() interface{} { + old := *h + n := len(old) + x := old[n-1] + return x +} \ No newline at end of file diff --git a/snapshotting/snapshot.go b/snapshotting/snapshot.go new file mode 100644 index 000000000..bdac36f4f --- /dev/null +++ b/snapshotting/snapshot.go @@ -0,0 +1,166 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package snapshotting + +import ( + "encoding/gob" + "fmt" + "github.com/pkg/errors" + "golang.org/x/sys/unix" + "math" + "os" + "path/filepath" + "time" +) + +// Snapshot identified by revision +// Only capitalized fields are serialised / deserialised +type Snapshot struct { + revisionId string + containerSnapName string + snapDir string + Image string + MemSizeMib uint32 + VCPUCount uint32 + usable bool + sparse bool + + // Eviction + numUsing uint32 + TotalSizeMiB int64 + freq int64 + coldStartTimeMs int64 + lastUsedClock int64 + score int64 +} + +func NewSnapshot(revisionId, baseFolder, image string, sizeMiB, coldStartTimeMs, lastUsed int64, memSizeMib, vCPUCount uint32, sparse bool) *Snapshot { + s := &Snapshot{ + revisionId: revisionId, + snapDir: filepath.Join(baseFolder, revisionId), + containerSnapName: fmt.Sprintf("%s%s", revisionId, time.Now().Format("20060102150405")), + Image: image, + MemSizeMib: memSizeMib, + VCPUCount: vCPUCount, + usable: false, + numUsing: 0, + TotalSizeMiB: sizeMiB, + coldStartTimeMs: coldStartTimeMs, + lastUsedClock: lastUsed, // Initialize with used now to avoid immediately removing + sparse: sparse, + } + + return s +} + +// UpdateDiskSize Updates the estimated disk size to real disk size in use by snapshot +func (snp *Snapshot) UpdateDiskSize() { + snp.TotalSizeMiB = getRealSizeMib(snp.GetMemFilePath()) + getRealSizeMib(snp.GetSnapFilePath()) + getRealSizeMib(snp.GetInfoFilePath()) + getRealSizeMib(snp.GetPatchFilePath()) +} + +// getRealSizeMib returns the disk space used by a certain file +func getRealSizeMib(filePath string) int64 { + var st unix.Stat_t + if err := unix.Stat(filePath, &st); err != nil { + return 0 + } + return 
int64(math.Ceil((float64(st.Blocks) * 512) / (1024 * 1024))) +} + +// UpdateScore updates the score of the snapshot used by the keepalive policy +func (snp *Snapshot) UpdateScore() { + snp.score = snp.lastUsedClock + (snp.freq * snp.coldStartTimeMs) / snp.TotalSizeMiB +} + +func (snp *Snapshot) GetImage() string { + return snp.Image +} + +func (snp *Snapshot) GetRevisionId() string { + return snp.revisionId +} + +func (snp *Snapshot) GetContainerSnapName() string { + return snp.containerSnapName +} + +func (snp *Snapshot) GetSnapFilePath() string { + return filepath.Join(snp.snapDir, "snapfile") +} + +func (snp *Snapshot) GetSnapType() string { + var snapType string + if snp.sparse { + snapType = "Diff" + } else { + snapType = "Full" + } + return snapType +} + +func (snp *Snapshot) GetMemFilePath() string { + return filepath.Join(snp.snapDir, "memfile") +} + +func (snp *Snapshot) GetPatchFilePath() string { + return filepath.Join(snp.snapDir, "patchfile") +} + +func (snp *Snapshot) GetInfoFilePath() string { + return filepath.Join(snp.snapDir, "infofile") +} + +// SerializeSnapInfo serializes the snapshot info using gob. This can be useful for remote snapshots +func (snp *Snapshot) SerializeSnapInfo() error { + file, err := os.Create(snp.GetInfoFilePath()) + if err != nil { + return errors.Wrapf(err, "failed to create snapinfo file") + } + defer file.Close() + + encoder := gob.NewEncoder(file) + + err = encoder.Encode(*snp) + if err != nil { + return errors.Wrapf(err, "failed to encode snapinfo") + } + return nil +} + +// LoadSnapInfo loads the snapshot info from a file. This can be useful for remote snapshots. 
+func (snp *Snapshot) LoadSnapInfo(infoPath string) error { + file, err := os.Open(infoPath) + if err != nil { + return errors.Wrapf(err, "failed to open snapinfo file") + } + defer file.Close() + + encoder := gob.NewDecoder(file) + + err = encoder.Decode(snp) + if err != nil { + return errors.Wrapf(err, "failed to decode snapinfo") + } + + return nil +} \ No newline at end of file diff --git a/snapshotting/snapshotmanager.go b/snapshotting/snapshotmanager.go new file mode 100644 index 000000000..8402a05d4 --- /dev/null +++ b/snapshotting/snapshotmanager.go @@ -0,0 +1,230 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package snapshotting + +import ( + "container/heap" + "fmt" + "github.com/pkg/errors" + "math" + "os" + "sync" +) + +// SnapshotManager manages snapshots stored on the node. 
+type SnapshotManager struct { + sync.Mutex + snapshots map[string]*Snapshot + + // Heap of snapshots not in use sorted on score + freeSnaps SnapHeap + baseFolder string + + // Eviction + clock int64 // When container last used. Increased to priority terminated container on termination + capacityMib int64 + usedMib int64 +} + +func NewSnapshotManager(baseFolder string, capacityMib int64) *SnapshotManager { + manager := new(SnapshotManager) + manager.snapshots = make(map[string]*Snapshot) + heap.Init(&manager.freeSnaps) + manager.baseFolder = baseFolder + manager.clock = 0 + manager.capacityMib = capacityMib + manager.usedMib = 0 + + // Clean & init basefolder + os.RemoveAll(manager.baseFolder) + os.MkdirAll(manager.baseFolder, os.ModePerm) + + return manager +} + +// AcquireSnapshot returns a snapshot for the specified revision if it is available and increments the internal counter +// such that the snapshot can't get removed. Similar to how a RW lock works +func (mgr *SnapshotManager) AcquireSnapshot(revision string) (*Snapshot, error) { + mgr.Lock() + defer mgr.Unlock() + + // Check if a snapshot is available for the specified revision + snap, present := mgr.snapshots[revision] + if !present { + return nil, errors.New(fmt.Sprintf("Get: Snapshot for revision %s does not exist", revision)) + } + + // Snapshot registered in manager but creation not finished yet + if ! 
snap.usable { // Could also wait until snapshot usable (trade-off) + return nil, errors.New(fmt.Sprintf("Snapshot is not yet usable")) + } + + if snap.numUsing == 0 { + // Remove from free snaps so can't be deleted (could be done more efficiently) + heapIdx := 0 + for i, heapSnap := range mgr.freeSnaps { + if heapSnap.revisionId == revision { + heapIdx = i + break + } + } + heap.Remove(&mgr.freeSnaps, heapIdx) + } + + snap.numUsing += 1 + + // Update stats for keepalive policy + snap.freq += 1 + snap.lastUsedClock = mgr.clock + + return snap, nil +} + +// ReleaseSnapshot releases the snapshot with the given revision so that it can possibly get deleted if it is not in use +// by any other VMs. +func (mgr *SnapshotManager) ReleaseSnapshot(revision string) error { + mgr.Lock() + defer mgr.Unlock() + + snap, present := mgr.snapshots[revision] + if !present { + return errors.New(fmt.Sprintf("Get: Snapshot for revision %s does not exist", revision)) + } + + snap.numUsing -= 1 + + if snap.numUsing == 0 { + // Add to free snaps + snap.UpdateScore() + heap.Push(&mgr.freeSnaps, snap) + } + + return nil +} + +// InitSnapshot initializes a snapshot by adding its metadata to the SnapshotManager. 
Once the snapshot has been created, +// CommitSnapshot must be run to finalize the snapshot creation and make the snapshot available fo ruse +func (mgr *SnapshotManager) InitSnapshot(revision, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *Snapshot, error) { + mgr.Lock() + + if _, present := mgr.snapshots[revision]; present { + mgr.Unlock() + return nil, nil, errors.New(fmt.Sprintf("Add: Snapshot for revision %s already exists", revision)) + } + + var removeContainerSnaps *[]string + + // Calculate an estimate of the snapshot size + estimatedSnapSizeMibf := float64(memSizeMib) * 1.25 + var estimatedSnapSizeMib = int64(math.Ceil(estimatedSnapSizeMibf)) + + // Ensure enough space is available for snapshot to be created + availableMib := mgr.capacityMib - mgr.usedMib + if estimatedSnapSizeMib > availableMib { + var err error + spaceNeeded := estimatedSnapSizeMib - availableMib + removeContainerSnaps, err = mgr.freeSpace(spaceNeeded) + if err != nil { + mgr.Unlock() + return removeContainerSnaps, nil, err + } + } + mgr.usedMib += estimatedSnapSizeMib + + // Add snapshot metadata to manager + snap := NewSnapshot(revision, mgr.baseFolder, image, estimatedSnapSizeMib, coldStartTimeMs, mgr.clock, memSizeMib, vCPUCount, sparse) + mgr.snapshots[revision] = snap + mgr.Unlock() + + // Create directory to store snapshot data + err := os.Mkdir(snap.snapDir, 0755) + if err != nil { + return removeContainerSnaps, nil, errors.Wrapf(err, "creating snapDir for snapshots %s", revision) + } + + return removeContainerSnaps, snap, nil +} + +// CommitSnapshot finalizes the snapshot creation and makes it available for use. 
+func (mgr *SnapshotManager) CommitSnapshot(revision string) error { + mgr.Lock() + snap, present := mgr.snapshots[revision] + if !present { + mgr.Unlock() + return errors.New(fmt.Sprintf("Snapshot for revision %s to commit does not exist", revision)) + } + mgr.Unlock() + + // Calculate actual disk size used + var sizeIncrement int64 = 0 + oldSize := snap.TotalSizeMiB + snap.UpdateDiskSize() // Should always result in a decrease or equal! + sizeIncrement = snap.TotalSizeMiB - oldSize + + mgr.Lock() + defer mgr.Unlock() + mgr.usedMib += sizeIncrement + snap.usable = true + snap.UpdateScore() + heap.Push(&mgr.freeSnaps, snap) + + return nil +} + +// freeSpace makes sure neededMib of disk space is available by removing unused snapshots. Make sure to have a lock +// when calling this function. +func (mgr *SnapshotManager) freeSpace(neededMib int64) (*[]string, error) { + var toDelete []string + var freedMib int64 = 0 + var removeContainerSnaps []string + + // Get id of snapshot and name of devmapper snapshot to delete + for freedMib < neededMib && len(mgr.freeSnaps) > 0 { + snap := heap.Pop(&mgr.freeSnaps).(*Snapshot) + snap.usable = false + toDelete = append(toDelete, snap.revisionId) + removeContainerSnaps = append(removeContainerSnaps, snap.containerSnapName) + freedMib += snap.TotalSizeMiB + } + + // Delete snapshots resources, update clock & delete snapshot map entry + for _, revisionId := range toDelete { + snap := mgr.snapshots[revisionId] + if err := os.RemoveAll(snap.snapDir); err != nil { + return &removeContainerSnaps, errors.Wrapf(err, "removing snapshot snapDir %s", snap.snapDir) + } + snap.UpdateScore() // Update score (see Faascache policy) + if snap.score > mgr.clock { + mgr.clock = snap.score + } + delete(mgr.snapshots, revisionId) + } + + mgr.usedMib -= freedMib + + if freedMib < neededMib { + return nil, errors.New("There is not enough free space available") + } + + return &removeContainerSnaps, nil +} \ No newline at end of file diff --git 
a/vhive.go b/vhive.go index 31d4ff821..b10efa2eb 100644 --- a/vhive.go +++ b/vhive.go @@ -56,6 +56,8 @@ var ( funcPool *FuncPool isSaveMemory *bool + snapsCapacityMiB *int64 + isSparseSnaps *bool isSnapshotsEnabled *bool isUPFEnabled *bool isLazyMode *bool @@ -71,20 +73,33 @@ func main() { runtime.GOMAXPROCS(16) rand.Seed(42) - snapshotter := flag.String("ss", "devmapper", "snapshotter name") + debug := flag.Bool("dbg", false, "Enable debug logging") + isMetricsMode = flag.Bool("metrics", false, "Calculate UPF metrics") + criSock = flag.String("criSock", "/etc/vhive-cri/vhive-cri.sock", "Socket address for CRI service") + sandbox := flag.String("sandbox", "firecracker", "Sandbox tech to use, valid options: firecracker, gvisor") + // Funcpool isSaveMemory = flag.Bool("ms", false, "Enable memory saving") - isSnapshotsEnabled = flag.Bool("snapshots", false, "Use VM snapshots when adding function instances") - isUPFEnabled = flag.Bool("upf", false, "Enable user-level page faults guest memory management") - isMetricsMode = flag.Bool("metrics", false, "Calculate UPF metrics") servedThreshold = flag.Uint64("st", 1000*1000, "Functions serves X RPCs before it shuts down (if saveMemory=true)") pinnedFuncNum = flag.Int("hn", 0, "Number of functions pinned in memory (IDs from 0 to X)") + + // Snapshotting + isSnapshotsEnabled = flag.Bool("snapshots", false, "Use VM snapshots when adding function instances") + isSparseSnaps = flag.Bool("sparsesnaps", false, "Makes memory files sparse after storing to reduce disk utilization") + snapsCapacityMiB = flag.Int64("snapcapacity", 102400, "Capacity set aside for storing snapshots (Mib)") + isUPFEnabled = flag.Bool("upf", false, "Enable user-level page faults guest memory management") isLazyMode = flag.Bool("lazy", false, "Enable lazy serving mode when UPFs are enabled") - criSock = flag.String("criSock", "/etc/vhive-cri/vhive-cri.sock", "Socket address for CRI service") - hostIface = flag.String("hostIface", "", "Host net-interface for 
the VMs to bind to for internet access") - sandbox := flag.String("sandbox", "firecracker", "Sandbox tech to use, valid options: firecracker, gvisor") + + // Networking netPoolSize := flag.Int("netpoolsize", 50, "Amount of network configs to preallocate in a pool") + hostIface = flag.String("hostIface", "", "Host net-interface for the VMs to bind to for internet access") + + // Devicemapper + snapshotter := flag.String("ss", "devmapper", "snapshotter name") + poolName := flag.String("poolname", "fc-dev-thinpool", "Device mapper thinpool name") + metadataDev := flag.String("metadev", "", "Device used by devicemapper for metadata storage") + flag.Parse() if *sandbox != "firecracker" && *sandbox != "gvisor" { @@ -138,6 +153,8 @@ func main() { orch = ctriface.NewOrchestrator( *snapshotter, *hostIface, + *poolName, + *metadataDev, *netPoolSize, ctriface.WithTestModeOn(testModeOn), ctriface.WithSnapshots(*isSnapshotsEnabled), @@ -169,7 +186,7 @@ func setupFirecrackerCRI() { s := grpc.NewServer() - fcService, err := fccri.NewFirecrackerService(orch) + fcService, err := fccri.NewFirecrackerService(orch, *snapsCapacityMiB, *isSparseSnaps) if err != nil { log.Fatalf("failed to create firecracker service %v", err) } diff --git a/vhive_test.go b/vhive_test.go index 601a92d85..9777fc379 100644 --- a/vhive_test.go +++ b/vhive_test.go @@ -75,6 +75,9 @@ func TestMain(m *testing.M) { orch = ctriface.NewOrchestrator( "devmapper", "", + "fc-dev-thinpool", + "", + 10, ctriface.WithTestModeOn(true), ctriface.WithSnapshots(*isSnapshotsEnabledTest), ctriface.WithUPF(*isUPFEnabledTest), From 27e693c6c76f04795d9b7a159a28ab0d57aa1605 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Sun, 16 Jan 2022 20:28:32 +0000 Subject: [PATCH 08/15] add separate option for new snapshots Signed-off-by: Amory Hoste --- bin/containerd-shim-aws-firecracker | 4 +- bin/firecracker | 4 +- bin/firecracker-containerd | 4 +- bin/firecracker-ctr | 4 +- bin/jailer | 4 +- cri/firecracker/coordinator.go | 86 ++- 
cri/firecracker/coordinator_test.go | 2 +- cri/firecracker/service.go | 6 +- ctriface/bench_test.go | 25 +- ctriface/{ => deduplicated}/iface.go | 118 ++-- ctriface/deduplicated/orch.go | 198 +++++++ ctriface/deduplicated/orch_options.go | 74 +++ ctriface/failing_test.go | 28 +- ctriface/iface_test.go | 179 +++--- ctriface/manual_cleanup_test.go | 163 +++--- ctriface/orch.go | 214 ++----- ctriface/regular/iface.go | 539 ++++++++++++++++++ ctriface/regular/orch.go | 229 ++++++++ ctriface/{ => regular}/orch_options.go | 16 +- ctriface/types.go | 57 ++ functions.go | 8 +- go.mod | 16 +- go.sum | 76 ++- networking/networkManager.go | 4 +- .../manager.go} | 109 ++-- snapshotting/{ => deduplicated}/snapHeap.go | 6 +- snapshotting/deduplicated/snapStats.go | 38 ++ snapshotting/manager.go | 59 ++ snapshotting/regular/manager.go | 127 +++++ snapshotting/snapshot.go | 64 +-- snapshotting/types.go | 30 + vhive.go | 50 +- vhive_test.go | 25 +- 33 files changed, 1951 insertions(+), 615 deletions(-) rename ctriface/{ => deduplicated}/iface.go (80%) create mode 100644 ctriface/deduplicated/orch.go create mode 100644 ctriface/deduplicated/orch_options.go create mode 100644 ctriface/regular/iface.go create mode 100644 ctriface/regular/orch.go rename ctriface/{ => regular}/orch_options.go (89%) create mode 100644 ctriface/types.go rename snapshotting/{snapshotmanager.go => deduplicated/manager.go} (65%) rename snapshotting/{ => deduplicated}/snapHeap.go (94%) create mode 100644 snapshotting/deduplicated/snapStats.go create mode 100644 snapshotting/manager.go create mode 100644 snapshotting/regular/manager.go create mode 100644 snapshotting/types.go diff --git a/bin/containerd-shim-aws-firecracker b/bin/containerd-shim-aws-firecracker index 532957497..67ff4cdcc 100755 --- a/bin/containerd-shim-aws-firecracker +++ b/bin/containerd-shim-aws-firecracker @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e290046f2e24c117ef450a3bef6c8f8e3b1ec387decc76ccc936e1f54c827327 
-size 26355405 +oid sha256:5b48fcdff74c342e8b4f65659139056dea1c27fdb99a0c2f267070b6b3b97b0b +size 26530283 diff --git a/bin/firecracker b/bin/firecracker index 5ba3cbf31..6fbf61872 100755 --- a/bin/firecracker +++ b/bin/firecracker @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:561cff75b2e1d768d2a4e7dad01cffb3eaff194e1b1696ad3ede5284c404fb0c -size 4010736 +oid sha256:d42ddb2c3d970d6a234e0d3f92980e085fc04a9ae17e29e05bb4ca73debfe0b8 +size 4016240 diff --git a/bin/firecracker-containerd b/bin/firecracker-containerd index d486a278f..9887c4b33 100755 --- a/bin/firecracker-containerd +++ b/bin/firecracker-containerd @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89c20c096978dafa7f3ba3b1d66a9e574f2fd89f3781ee0537da30120aea6455 -size 46999272 +oid sha256:cc908873170a25ca713ca2e80323cf1496d5d9b7a3449778d0018a84825dd0f7 +size 47224352 diff --git a/bin/firecracker-ctr b/bin/firecracker-ctr index 4b992a70e..1acca2487 100755 --- a/bin/firecracker-ctr +++ b/bin/firecracker-ctr @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b0bab69371a224e9eaed86edb26dd57e2a0b04eaa7e9b4da7e3e8c7c38e0016 -size 34476496 +oid sha256:51a994f7cb2cd48087a4b5a27476577c60d9fd6ce34a470435de5f33c2fb3508 +size 34510472 diff --git a/bin/jailer b/bin/jailer index e8be92549..2422b132c 100755 --- a/bin/jailer +++ b/bin/jailer @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:375abd369c55ad8057ec6cd39ee77e8f68933fd7a97e1d1901881805f22815f8 -size 3060760 +oid sha256:80284269eb8b44483b84a527bf7f4012932d94410e32bd20e4d25d09303336ea +size 3072784 diff --git a/cri/firecracker/coordinator.go b/cri/firecracker/coordinator.go index a118dbd71..2e4b043f3 100644 --- a/cri/firecracker/coordinator.go +++ b/cri/firecracker/coordinator.go @@ -25,25 +25,30 @@ package firecracker import ( "context" "fmt" + "github.com/ease-lab/vhive/ctriface" "github.com/ease-lab/vhive/metrics" "github.com/ease-lab/vhive/snapshotting" + 
"github.com/ease-lab/vhive/snapshotting/deduplicated" + "github.com/ease-lab/vhive/snapshotting/regular" "github.com/pkg/errors" "strconv" "sync" "sync/atomic" "time" - "github.com/ease-lab/vhive/ctriface" log "github.com/sirupsen/logrus" ) const snapshotsDir = "/fccd/snapshots" +// TODO: interface for orchestrator + type coordinator struct { sync.Mutex orch *ctriface.Orchestrator nextID uint64 isSparseSnaps bool + isDeduplicatedSnaps bool activeInstances map[string]*FuncInstance snapshotManager *snapshotting.SnapshotManager @@ -59,12 +64,18 @@ func withoutOrchestrator() coordinatorOption { } } -func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool, opts ...coordinatorOption) *coordinator { +func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool, isDeduplicatedSnaps bool, opts ...coordinatorOption) *coordinator { c := &coordinator{ activeInstances: make(map[string]*FuncInstance), orch: orch, - snapshotManager: snapshotting.NewSnapshotManager(snapshotsDir, snapsCapacityMiB), - isSparseSnaps: isSparseSnaps, + isSparseSnaps: isSparseSnaps, + isDeduplicatedSnaps: isDeduplicatedSnaps, + } + + if isDeduplicatedSnaps { + c.snapshotManager = snapshotting.NewSnapshotManager(deduplicated.NewSnapshotManager(snapshotsDir, snapsCapacityMiB)) + } else { + c.snapshotManager = snapshotting.NewSnapshotManager(regular.NewRegularSnapshotManager(snapshotsDir)) } for _, opt := range opts { @@ -76,12 +87,24 @@ func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int func (c *coordinator) startVM(ctx context.Context, image string, revision string, memSizeMib, vCPUCount uint32) (*FuncInstance, error) { if c.orch != nil && c.orch.GetSnapshotsEnabled() { + id := image + if c.isDeduplicatedSnaps { + id = revision + } + // Check if snapshot is available - if snap, err := c.snapshotManager.AcquireSnapshot(revision); err == nil { + if snap, err := 
c.snapshotManager.AcquireSnapshot(id); err == nil { if snap.MemSizeMib != memSizeMib || snap.VCPUCount != vCPUCount { return nil, errors.New("Please create a new revision when updating uVM memory size or vCPU count") } else { - return c.orchStartVMSnapshot(ctx, snap, memSizeMib, vCPUCount) + vmID := "" + if c.isDeduplicatedSnaps { + vmID = strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1))) + } else { + vmID = snap.GetId() + } + + return c.orchStartVMSnapshot(ctx, snap, memSizeMib, vCPUCount, vmID) } } else { return c.orchStartVM(ctx, image, revision, memSizeMib, vCPUCount) @@ -106,9 +129,18 @@ func (c *coordinator) stopVM(ctx context.Context, containerID string) error { return nil } + if c.orch == nil || ! c.orch.GetSnapshotsEnabled() { + return c.orchStopVM(ctx, fi) + } + + id := fi.vmID + if c.isDeduplicatedSnaps { + id = fi.revisionId + } + if fi.snapBooted { - defer c.snapshotManager.ReleaseSnapshot(fi.revisionId) - } else if c.orch != nil && c.orch.GetSnapshotsEnabled() { + defer c.snapshotManager.ReleaseSnapshot(id) + } else { // Create snapshot err := c.orchCreateSnapshot(ctx, fi) if err != nil { @@ -116,7 +148,11 @@ func (c *coordinator) stopVM(ctx context.Context, containerID string) error { } } - return c.orchStopVM(ctx, fi) + if c.isDeduplicatedSnaps { + return c.orchStopVM(ctx, fi) + } else { + return c.orchOffloadVM(ctx, fi) + } } // for testing @@ -178,9 +214,8 @@ func (c *coordinator) orchStartVM(ctx context.Context, image, revision string, m return fi, err } -func (c *coordinator) orchStartVMSnapshot(ctx context.Context, snap *snapshotting.Snapshot, memSizeMib, vCPUCount uint32) (*FuncInstance, error) { +func (c *coordinator) orchStartVMSnapshot(ctx context.Context, snap *snapshotting.Snapshot, memSizeMib, vCPUCount uint32, vmID string) (*FuncInstance, error) { tStartCold := time.Now() - vmID := strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1))) logger := log.WithFields( log.Fields{ "vmID": vmID, @@ -210,7 +245,7 @@ func (c *coordinator) 
orchStartVMSnapshot(ctx context.Context, snap *snapshottin } coldStartTimeMs := metrics.ToMs(time.Since(tStartCold)) - fi := NewFuncInstance(vmID, snap.GetImage(), snap.GetRevisionId(), resp, true, memSizeMib, vCPUCount, coldStartTimeMs) + fi := NewFuncInstance(vmID, snap.GetImage(), snap.GetId(), resp, true, memSizeMib, vCPUCount, coldStartTimeMs) logger.Debug("successfully loaded instance from snapshot") return fi, err @@ -224,7 +259,13 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) }, ) - removeContainerSnaps, snap, err := c.snapshotManager.InitSnapshot(fi.revisionId, fi.image, fi.coldStartTimeMs, fi.memSizeMib, fi.vCPUCount, c.isSparseSnaps) + id := fi.vmID + if c.isDeduplicatedSnaps { + id = fi.revisionId + } + + removeContainerSnaps, snap, err := c.snapshotManager.InitSnapshot(id, fi.image, fi.coldStartTimeMs, fi.memSizeMib, fi.vCPUCount, c.isSparseSnaps) + if err != nil { if fmt.Sprint(err) == "There is not enough free space available" { fi.logger.Info(fmt.Sprintf("There is not enough space available for snapshots of %s", fi.revisionId)) @@ -232,9 +273,9 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) return nil } - if removeContainerSnaps != nil { + if c.isDeduplicatedSnaps && removeContainerSnaps != nil { for _, cleanupSnapId := range *removeContainerSnaps { - if err := c.orch.CleanupRevisionSnapshot(ctx, cleanupSnapId); err != nil { + if err := c.orch.CleanupSnapshot(ctx, cleanupSnapId); err != nil { return errors.Wrap(err, "removing devmapper revision snapshot") } } @@ -257,7 +298,7 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) return nil } - if err := c.snapshotManager.CommitSnapshot(fi.revisionId); err != nil { + if err := c.snapshotManager.CommitSnapshot(id); err != nil { fi.logger.WithError(err).Error("failed to commit snapshot") return err } @@ -277,3 +318,16 @@ func (c *coordinator) orchStopVM(ctx context.Context, fi *FuncInstance) error { 
return nil } + +func (c *coordinator) orchOffloadVM(ctx context.Context, fi *FuncInstance) error { + if c.withoutOrchestrator { + return nil + } + + if err := c.orch.OffloadVM(ctx, fi.vmID); err != nil { + fi.logger.WithError(err).Error("failed to offload VM") + return err + } + + return nil +} diff --git a/cri/firecracker/coordinator_test.go b/cri/firecracker/coordinator_test.go index 3022655ca..0affa7a14 100644 --- a/cri/firecracker/coordinator_test.go +++ b/cri/firecracker/coordinator_test.go @@ -42,7 +42,7 @@ var ( ) func TestMain(m *testing.M) { - coord = newFirecrackerCoordinator(nil, 10240, false, withoutOrchestrator()) + coord = newFirecrackerCoordinator(nil, 10240, false, false, withoutOrchestrator()) ret := m.Run() os.Exit(ret) diff --git a/cri/firecracker/service.go b/cri/firecracker/service.go index 4fb5a1a4b..0dcfc3f06 100644 --- a/cri/firecracker/service.go +++ b/cri/firecracker/service.go @@ -25,11 +25,11 @@ package firecracker import ( "context" "errors" + "github.com/ease-lab/vhive/ctriface" "strconv" "sync" "github.com/ease-lab/vhive/cri" - "github.com/ease-lab/vhive/ctriface" log "github.com/sirupsen/logrus" criapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" ) @@ -61,7 +61,7 @@ type VMConfig struct { guestPort string } -func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool) (*FirecrackerService, error) { +func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps, isDeduplicatedSnaps bool) (*FirecrackerService, error) { fs := new(FirecrackerService) stockRuntimeClient, err := cri.NewStockRuntimeServiceClient() if err != nil { @@ -69,7 +69,7 @@ func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, return nil, err } fs.stockRuntimeClient = stockRuntimeClient - fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps) + fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps, isDeduplicatedSnaps) 
fs.vmConfigs = make(map[string]*VMConfig) return fs, nil } diff --git a/ctriface/bench_test.go b/ctriface/bench_test.go index bd5926432..7f2f7c74f 100644 --- a/ctriface/bench_test.go +++ b/ctriface/bench_test.go @@ -24,6 +24,7 @@ package ctriface import ( "context" + "github.com/ease-lab/vhive/ctriface/regular" "os" "os/exec" "path/filepath" @@ -43,6 +44,22 @@ const ( ) func TestBenchmarkStart(t *testing.T) { + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + )) + + benchCount := 10 + vmID := 0 + benchmarkStart(t, orch, benchCount, vmID) +} + +func benchmarkStart(t *testing.T, orch *Orchestrator, benchCount, vmID int) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -53,14 +70,10 @@ func TestBenchmarkStart(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 2000 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - orch := NewOrchestrator("devmapper", "", "fc-dev-thinpool","",10, WithTestModeOn(true), WithUPF(*isUPFEnabled)) - images := getAllImages() - benchCount := 10 - vmID := 0 createResultsDir() @@ -69,7 +82,7 @@ func TestBenchmarkStart(t *testing.T) { startMetrics := make([]*metrics.Metric, benchCount) // Pull image - _, err := orch.imageManager.GetImage(ctx, imageName) + _, err := orch.GetImage(ctx, imageName) require.NoError(t, err, "Failed to pull image "+imageName) for i := 0; i < benchCount; i++ { diff --git a/ctriface/iface.go b/ctriface/deduplicated/iface.go similarity index 80% rename from ctriface/iface.go rename to ctriface/deduplicated/iface.go index 9a104b654..0e0bd0346 100644 --- a/ctriface/iface.go +++ b/ctriface/deduplicated/iface.go @@ -20,10 +20,11 
@@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package ctriface +package deduplicated import ( "context" + "github.com/ease-lab/vhive/ctriface" "github.com/ease-lab/vhive/snapshotting" "os" "os/exec" @@ -45,7 +46,6 @@ import ( _ "google.golang.org/grpc/codes" //tmp _ "google.golang.org/grpc/status" //tmp - "github.com/ease-lab/vhive/memory/manager" "github.com/ease-lab/vhive/metrics" "github.com/ease-lab/vhive/misc" "github.com/go-multierror/multierror" @@ -53,18 +53,8 @@ import ( _ "github.com/davecgh/go-spew/spew" //tmp ) -// StartVMResponse is the response returned by StartVM -type StartVMResponse struct { - // GuestIP is the IP of the guest MicroVM - GuestIP string -} - -const ( - testImageName = "ghcr.io/ease-lab/helloworld:var_workload" -) - // StartVM Boots a VM if it does not exist -func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32, trackDirtyPages bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { +func (o *DedupOrchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32, trackDirtyPages bool) (_ *ctriface.StartVMResponse, _ *metrics.Metric, retErr error) { var ( startVMMetric *metrics.Metric = metrics.NewMetric() tStart time.Time @@ -101,7 +91,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS // 2. Fetch VM image tStart = time.Now() - if vm.Image, err = o.imageManager.GetImage(ctx, imageName); err != nil { + if vm.Image, err = o.GetImage(ctx, imageName); err != nil { return nil, nil, errors.Wrapf(err, "Failed to get/pull image") } startVMMetric.MetricMap[metrics.GetImage] = metrics.ToUS(time.Since(tStart)) @@ -109,7 +99,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS // 3. 
Create VM tStart = time.Now() conf := o.getVMConfig(vm, trackDirtyPages) - resp, err := o.fcClient.CreateVM(ctx, conf) + _, err = o.fcClient.CreateVM(ctx, conf) startVMMetric.MetricMap[metrics.FcCreateVM] = metrics.ToUS(time.Since(tStart)) if err != nil { return nil, nil, errors.Wrap(err, "failed to create the microVM in firecracker-containerd") @@ -206,39 +196,16 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS } }() - if err := os.MkdirAll(o.getVMBaseDir(vmID), 0777); err != nil { - logger.Error("Failed to create VM base dir") - return nil, nil, err - } - if o.GetUPFEnabled() { - logger.Debug("Registering VM with the memory manager") - - stateCfg := manager.SnapshotStateCfg{ - VMID: vmID, - GuestMemPath: o.getMemoryFile(vmID), - BaseDir: o.getVMBaseDir(vmID), - GuestMemSize: int(conf.MachineCfg.MemSizeMib) * 1024 * 1024, - IsLazyMode: o.isLazyMode, - VMMStatePath: o.getSnapshotFile(vmID), - WorkingSetPath: o.getWorkingSetFile(vmID), - InstanceSockAddr: resp.UPFSockPath, - } - if err := o.memoryManager.RegisterVM(stateCfg); err != nil { - return nil, nil, errors.Wrap(err, "failed to register VM with memory manager") - // NOTE (Plamen): Potentially need a defer(DeregisteVM) here if RegisterVM is not last to execute - } - } - logger.Debug("Successfully started a VM") - return &StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, startVMMetric, nil + return &ctriface.StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, startVMMetric, nil } // StopSingleVM Shuts down a VM // Note: VMs are not quisced before being stopped -func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string) error { +func (o *DedupOrchestrator) StopSingleVM(ctx context.Context, vmID string) error { logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received StopVM") + logger.Debug("DedupOrchestrator received StopVM") ctx = namespaces.WithNamespace(ctx, namespaceName) vm, err := o.vmPool.GetVM(vmID) @@ -296,13 +263,6 @@ 
func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string) error { logger.Error("failed to deactivate container snapshot") return err } - - if o.GetUPFEnabled() { - if err := o.memoryManager.Deactivate(vmID); err != nil { - logger.Error("Failed to deactivate VM in the memory manager") - return err - } - } } logger.Debug("Stopped VM successfully") @@ -328,7 +288,7 @@ func getK8sDNS() []string { return dnsIPs } -func (o *Orchestrator) getVMConfig(vm *misc.VM, trackDirtyPages bool) *proto.CreateVMRequest { +func (o *DedupOrchestrator) getVMConfig(vm *misc.VM, trackDirtyPages bool) *proto.CreateVMRequest { kernelArgs := "ro noapic reboot=k panic=1 pci=off nomodules systemd.log_color=false systemd.unit=firecracker.target init=/sbin/overlay-init tsc=reliable quiet 8250.nr_uarts=0 ipv6.disable=1" return &proto.CreateVMRequest{ @@ -352,11 +312,17 @@ func (o *Orchestrator) getVMConfig(vm *misc.VM, trackDirtyPages bool) *proto.Cre }, }}, NetworkNamespace: vm.NetConfig.GetNamespacePath(), + OffloadEnabled: false, } } +// Offload Shuts down the VM but leaves shim and other resources running. 
+func (o *DedupOrchestrator) OffloadVM(ctx context.Context, vmID string) error { + return errors.New("Deduplicated snapshots do not support offloading") +} + // StopActiveVMs Shuts down all active VMs -func (o *Orchestrator) StopActiveVMs() error { +func (o *DedupOrchestrator) StopActiveVMs() error { var vmGroup sync.WaitGroup for vmID, vm := range o.vmPool.GetVMMap() { vmGroup.Add(1) @@ -383,9 +349,9 @@ func (o *Orchestrator) StopActiveVMs() error { } // PauseVM Pauses a VM -func (o *Orchestrator) PauseVM(ctx context.Context, vmID string) error { +func (o *DedupOrchestrator) PauseVM(ctx context.Context, vmID string) error { logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received PauseVM") + logger.Debug("DedupOrchestrator received PauseVM") ctx = namespaces.WithNamespace(ctx, namespaceName) @@ -398,14 +364,14 @@ func (o *Orchestrator) PauseVM(ctx context.Context, vmID string) error { } // ResumeVM Resumes a VM -func (o *Orchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) { +func (o *DedupOrchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) { var ( resumeVMMetric *metrics.Metric = metrics.NewMetric() tStart time.Time ) logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received ResumeVM") + logger.Debug("DedupOrchestrator received ResumeVM") ctx = namespaces.WithNamespace(ctx, namespaceName) @@ -420,9 +386,9 @@ func (o *Orchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metr } // CreateSnapshot Creates a snapshot of a VM -func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { +func (o *DedupOrchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received CreateSnapshot") + logger.Debug("DedupOrchestrator received CreateSnapshot") ctx = 
namespaces.WithNamespace(ctx, namespaceName) @@ -468,16 +434,15 @@ func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *sn } // LoadSnapshot Loads a snapshot of a VM -func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { +func (o *DedupOrchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *ctriface.StartVMResponse, _ *metrics.Metric, retErr error) { var ( loadSnapshotMetric *metrics.Metric = metrics.NewMetric() tStart time.Time loadErr, activateErr error - loadDone = make(chan int) ) logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received LoadSnapshot") + logger.Debug("DedupOrchestrator received LoadSnapshot") ctx = namespaces.WithNamespace(ctx, namespaceName) @@ -498,7 +463,7 @@ func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snap }() // 2. Fetch image for VM - if vm.Image, err = o.imageManager.GetImage(ctx, snap.GetImage()); err != nil { + if vm.Image, err = o.GetImage(ctx, snap.GetImage()); err != nil { return nil, nil, errors.Wrapf(err, "Failed to get/pull image") } @@ -523,35 +488,18 @@ func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snap VMID: vmID, SnapshotFilePath: snap.GetSnapFilePath(), MemFilePath: snap.GetMemFilePath(), - EnableUserPF: o.GetUPFEnabled(), + EnableUserPF: false, NetworkNamespace: vm.NetConfig.GetNamespacePath(), NewSnapshotPath: containerSnap.GetDevicePath(), - } - - if o.GetUPFEnabled() { - if err := o.memoryManager.FetchState(vmID); err != nil { - return nil, nil, err - } + Offloaded: false, } tStart = time.Now() - go func() { - defer close(loadDone) - - if _, loadErr = o.fcClient.LoadSnapshot(ctx, req); loadErr != nil { - logger.Error("Failed to load snapshot of the VM: ", loadErr) - } - }() - - if o.GetUPFEnabled() { - if activateErr = o.memoryManager.Activate(vmID); activateErr != nil 
{ - logger.Warn("Failed to activate VM in the memory manager", activateErr) - } + if _, loadErr = o.fcClient.LoadSnapshot(ctx, req); loadErr != nil { + logger.Error("Failed to load snapshot of the VM: ", loadErr) } - <-loadDone - loadSnapshotMetric.MetricMap[metrics.LoadVMM] = metrics.ToUS(time.Since(tStart)) if loadErr != nil || activateErr != nil { @@ -561,12 +509,16 @@ func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snap vm.SnapBooted = true - return &StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, nil, nil + return &ctriface.StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, nil, nil } -func (o *Orchestrator) CleanupRevisionSnapshot(ctx context.Context, revisionID string) error { +func (o *DedupOrchestrator) CleanupSnapshot(ctx context.Context, revisionID string) error { if err := o.devMapper.RemoveDeviceSnapshot(ctx, revisionID); err != nil { return errors.Wrapf(err, "removing revision snapshot") } return nil } + +func (o *DedupOrchestrator) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { + return o.imageManager.GetImage(ctx, imageName) +} diff --git a/ctriface/deduplicated/orch.go b/ctriface/deduplicated/orch.go new file mode 100644 index 000000000..1bc671706 --- /dev/null +++ b/ctriface/deduplicated/orch.go @@ -0,0 +1,198 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package deduplicated + +import ( + "github.com/ease-lab/vhive/ctrimages" + "github.com/ease-lab/vhive/devmapper" + "os" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/containerd/containerd" + + fcclient "github.com/firecracker-microvm/firecracker-containerd/firecracker-control/client" + // note: from the original repo + + _ "google.golang.org/grpc/codes" //tmp + _ "google.golang.org/grpc/status" //tmp + + "github.com/ease-lab/vhive/metrics" + "github.com/ease-lab/vhive/misc" + + _ "github.com/davecgh/go-spew/spew" //tmp +) + +const ( + containerdAddress = "/run/firecracker-containerd/containerd.sock" + containerdTTRPCAddress = containerdAddress + ".ttrpc" + namespaceName = "firecracker-containerd" +) + +type WorkloadIoWriter struct { + logger *log.Entry +} + +func NewWorkloadIoWriter(vmID string) WorkloadIoWriter { + return WorkloadIoWriter{log.WithFields(log.Fields{"vmID": vmID})} +} + +func (wio WorkloadIoWriter) Write(p []byte) (n int, err error) { + s := string(p) + lines := strings.Split(s, "\n") + for i := range lines { + wio.logger.Info(string(lines[i])) + } + return len(p), nil +} + +// DedupOrchestrator Drives all VMs +type DedupOrchestrator struct { + vmPool *misc.VMPool + workloadIo sync.Map // vmID string -> WorkloadIoWriter + snapshotter string + client *containerd.Client + fcClient *fcclient.Client + devMapper *devmapper.DeviceMapper + imageManager *ctrimages.ImageManager + // store 
*skv.KVStore + snapshotsEnabled bool + isUPFEnabled bool + isLazyMode bool + snapshotsDir string + isMetricsMode bool + hostIface string +} + +// NewDedupOrchestrator Initializes a new orchestrator +func NewDedupOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPoolSize int, opts ...OrchestratorOption) *DedupOrchestrator { // TODO: args + var err error + + o := new(DedupOrchestrator) + o.vmPool = misc.NewVMPool(hostIface, netPoolSize) + o.snapshotter = snapshotter + o.snapshotsDir = "/fccd/snapshots" + o.hostIface = hostIface + + for _, opt := range opts { + opt(o) + } + + if _, err := os.Stat(o.snapshotsDir); err != nil { + if !os.IsNotExist(err) { + log.Panicf("Snapshot dir %s exists", o.snapshotsDir) + } + } + + if err := os.MkdirAll(o.snapshotsDir, 0777); err != nil { + log.Panicf("Failed to create snapshots dir %s", o.snapshotsDir) + } + + log.Info("Creating containerd client") + o.client, err = containerd.New(containerdAddress) + if err != nil { + log.Fatal("Failed to start containerd client", err) + } + log.Info("Created containerd client") + + log.Info("Creating firecracker client") + o.fcClient, err = fcclient.New(containerdTTRPCAddress) + if err != nil { + log.Fatal("Failed to start firecracker client", err) + } + log.Info("Created firecracker client") + + o.devMapper = devmapper.NewDeviceMapper(o.client, poolName, metadataDev) + + o.imageManager = ctrimages.NewImageManager(o.client, o.snapshotter) + + return o +} + +func (o *DedupOrchestrator) setupCloseHandler() { + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + log.Info("\r- Ctrl+C pressed in Terminal") + _ = o.StopActiveVMs() + o.Cleanup() + os.Exit(0) + }() +} + +// Cleanup Removes the bridges created by the VM pool's tap manager +// Cleans up snapshots directory +func (o *DedupOrchestrator) Cleanup() { + o.vmPool.CleanupNetwork() + if err := os.RemoveAll(o.snapshotsDir); err != nil { + log.Panic("failed to delete snapshots 
dir", err) + } +} + +// GetSnapshotsEnabled Returns the snapshots mode of the orchestrator +func (o *DedupOrchestrator) GetSnapshotsEnabled() bool { + return o.snapshotsEnabled +} + +// GetUPFEnabled Returns the UPF mode of the orchestrator +func (o *DedupOrchestrator) GetUPFEnabled() bool { + return false +} + +// DumpUPFPageStats Dumps the memory manager's stats about the number of +// the unique pages and the number of the pages that are reused across invocations +func (o *DedupOrchestrator) DumpUPFPageStats(vmID, functionName, metricsOutFilePath string) error { + return nil +} + +// DumpUPFLatencyStats Dumps the memory manager's latency stats +func (o *DedupOrchestrator) DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error { + return nil +} + +// GetUPFLatencyStats Returns the memory manager's latency stats +func (o *DedupOrchestrator) GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) { + return make([]*metrics.Metric, 0), nil +} + +func (o *DedupOrchestrator) getVMBaseDir(vmID string) string { + return filepath.Join(o.snapshotsDir, vmID) +} + +func (o *DedupOrchestrator) setupHeartbeat() { + heartbeat := time.NewTicker(60 * time.Second) + + go func() { + for { + <-heartbeat.C + log.Info("HEARTBEAT: number of active VMs: ", len(o.vmPool.GetVMMap())) + } // for + }() // go func +} diff --git a/ctriface/deduplicated/orch_options.go b/ctriface/deduplicated/orch_options.go new file mode 100644 index 000000000..88186b3e8 --- /dev/null +++ b/ctriface/deduplicated/orch_options.go @@ -0,0 +1,74 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to 
whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package deduplicated + +// OrchestratorOption Options to pass to DedupOrchestrator +type OrchestratorOption func(*DedupOrchestrator) + +// WithTestModeOn Sets the test mode +func WithTestModeOn(testModeOn bool) OrchestratorOption { + return func(o *DedupOrchestrator) { + if !testModeOn { + o.setupCloseHandler() + o.setupHeartbeat() + } + } +} + +// WithSnapshots Sets the snapshot mode on or off +func WithSnapshots(snapshotsEnabled bool) OrchestratorOption { + return func(o *DedupOrchestrator) { + o.snapshotsEnabled = snapshotsEnabled + } +} + +// WithUPF Sets the user-page faults mode on or off +func WithUPF(isUPFEnabled bool) OrchestratorOption { + return func(o *DedupOrchestrator) { + o.isUPFEnabled = isUPFEnabled + } +} + +// WithSnapshotsDir Sets the directory where +// snapshots should be stored +func WithSnapshotsDir(snapshotsDir string) OrchestratorOption { + return func(o *DedupOrchestrator) { + o.snapshotsDir = snapshotsDir + } +} + +// WithLazyMode Sets the lazy paging mode on (or off), +// where all guest memory pages are brought on demand. 
+// Only works if snapshots are enabled +func WithLazyMode(isLazyMode bool) OrchestratorOption { + return func(o *DedupOrchestrator) { + o.isLazyMode = isLazyMode + } +} + +// WithMetricsMode Sets the metrics mode +func WithMetricsMode(isMetricsMode bool) OrchestratorOption { + return func(o *DedupOrchestrator) { + o.isMetricsMode = isMetricsMode + } +} diff --git a/ctriface/failing_test.go b/ctriface/failing_test.go index e65fb1a63..1235198d6 100644 --- a/ctriface/failing_test.go +++ b/ctriface/failing_test.go @@ -24,6 +24,7 @@ package ctriface import ( "context" + "github.com/ease-lab/vhive/ctriface/regular" "github.com/ease-lab/vhive/snapshotting" "os" "testing" @@ -36,6 +37,22 @@ import ( ) func TestStartSnapStop(t *testing.T) { + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + regular.WithTestModeOn(true)), + ) + + vmID := "2" + revisionID := "myrev-2" + + startSnapStop(t, orch, vmID, revisionID) +} + +func startSnapStop(t *testing.T, orch *Orchestrator, vmID, revisionID string) { // BROKEN BECAUSE StopVM does not work yet. 
t.Skip("skipping failing test") log.SetFormatter(&log.TextFormatter{ @@ -49,21 +66,16 @@ func TestStartSnapStop(t *testing.T) { log.SetLevel(log.DebugLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - orch := NewOrchestrator("devmapper", "", "fc-dev-thinpool","",10, WithTestModeOn(true)) - - vmID := "2" - revisionID := "myrev-2" - - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") diff --git a/ctriface/iface_test.go b/ctriface/iface_test.go index acb73aa9e..72e464e0e 100644 --- a/ctriface/iface_test.go +++ b/ctriface/iface_test.go @@ -25,6 +25,7 @@ import ( "context" "flag" "fmt" + "github.com/ease-lab/vhive/ctriface/regular" "github.com/ease-lab/vhive/snapshotting" "os" "sync" @@ -46,6 +47,24 @@ var ( ) func TestPauseSnapResume(t *testing.T) { + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + vmID := "4" + revisionID := "myrev-4" + + pauseSnapResume(t, orch, vmID, revisionID) +} + +func pauseSnapResume(t *testing.T, orch *Orchestrator, vmID, revisionID string) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, 
FullTimestamp: true, @@ -57,30 +76,16 @@ func TestPauseSnapResume(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - orch := NewOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - - vmID := "4" - revisionID := "myrev-4" - - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") @@ -94,6 +99,23 @@ func TestPauseSnapResume(t *testing.T) { } func TestStartStopSerial(t *testing.T) { + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "fc-dev-thinpool", + "", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + vmID := "5" + + startStopSerial(t, orch, vmID) +} + +func startStopSerial(t *testing.T, orch *Orchestrator, vmID string) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -105,23 +127,10 @@ func TestStartStopSerial(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := 
context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - orch := NewOrchestrator( - "devmapper", - "fc-dev-thinpool", - "", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - - vmID := "5" - - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.StopSingleVM(ctx, vmID) @@ -131,6 +140,23 @@ func TestStartStopSerial(t *testing.T) { } func TestPauseResumeSerial(t *testing.T) { + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "fc-dev-thinpool", + "", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + vmID := "6" + + pauseResumeSerial(t, orch, vmID) +} + +func pauseResumeSerial(t *testing.T, orch *Orchestrator, vmID string) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -142,23 +168,10 @@ func TestPauseResumeSerial(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - orch := NewOrchestrator( - "devmapper", - "fc-dev-thinpool", - "", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - - vmID := "6" - - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) @@ -174,6 +187,22 @@ func TestPauseResumeSerial(t *testing.T) { } func TestStartStopParallel(t *testing.T) { + vmNum := 10 + orch := 
NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + startStopParallel(t, orch, vmNum) +} + +func startStopParallel(t *testing.T, orch *Orchestrator, vmNum int) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -185,24 +214,12 @@ func TestStartStopParallel(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 360 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - vmNum := 10 - orch := NewOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - // Pull image - _, err := orch.imageManager.GetImage(ctx, testImageName) - require.NoError(t, err, "Failed to pull image "+testImageName) + _, err := orch.GetImage(ctx, regular.TestImageName) + require.NoError(t, err, "Failed to pull image "+regular.TestImageName) { var vmGroup sync.WaitGroup @@ -211,7 +228,7 @@ func TestStartStopParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM "+vmID) }(i) } @@ -236,6 +253,22 @@ func TestStartStopParallel(t *testing.T) { } func TestPauseResumeParallel(t *testing.T) { + vmNum := 10 + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + pauseResumeParallel(t, orch, vmNum) +} + +func 
pauseResumeParallel(t *testing.T, orch *Orchestrator, vmNum int) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -247,24 +280,12 @@ func TestPauseResumeParallel(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - vmNum := 10 - orch := NewOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - // Pull image - _, err := orch.imageManager.GetImage(ctx, testImageName) - require.NoError(t, err, "Failed to pull image "+testImageName) + _, err := orch.GetImage(ctx, regular.TestImageName) + require.NoError(t, err, "Failed to pull image "+regular.TestImageName) { var vmGroup sync.WaitGroup @@ -273,7 +294,7 @@ func TestPauseResumeParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") }(i) } diff --git a/ctriface/manual_cleanup_test.go b/ctriface/manual_cleanup_test.go index 4b0241648..752c9e0ed 100644 --- a/ctriface/manual_cleanup_test.go +++ b/ctriface/manual_cleanup_test.go @@ -25,6 +25,7 @@ package ctriface import ( "context" "fmt" + "github.com/ease-lab/vhive/ctriface/regular" "github.com/ease-lab/vhive/snapshotting" "os" "sync" @@ -38,6 +39,24 @@ import ( ) func TestSnapLoad(t *testing.T) { + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + vmID := "1" + 
revisionID := "myrev-1" + + snapLoad(t, orch, vmID, revisionID) +} + +func snapLoad(t *testing.T, orch *Orchestrator, vmID string, revisionID string) { // Need to clean up manually after this test because StopVM does not // work for stopping machines which are loaded from snapshots yet log.SetFormatter(&log.TextFormatter{ @@ -51,30 +70,16 @@ func TestSnapLoad(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - orch := NewOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - - vmID := "1" - revisionID := "myrev-1" - - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") @@ -91,6 +96,24 @@ func TestSnapLoad(t *testing.T) { } func TestSnapLoadMultiple(t *testing.T) { + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + vmID := "3" + revisionID := "myrev-3" + + snapLoadMultiple(t, orch, vmID, revisionID) +} + +func snapLoadMultiple(t *testing.T, orch *Orchestrator, vmID string, revisionID string) { // Needs to be cleaned up 
manually. log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, @@ -103,30 +126,16 @@ func TestSnapLoadMultiple(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - orch := NewOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - - vmID := "3" - revisionID := "myrev-3" - - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0,false) err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") @@ -146,6 +155,24 @@ func TestSnapLoadMultiple(t *testing.T) { } func TestParallelSnapLoad(t *testing.T) { + vmNum := 5 + vmIDBase := 6 + + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + parallelSnapLoad(t, orch, vmNum, vmIDBase) +} + +func parallelSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBase int) { // Needs to be cleaned up manually. 
log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, @@ -158,26 +185,12 @@ func TestParallelSnapLoad(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - vmNum := 5 - vmIDBase := 6 - - orch := NewOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - // Pull image - _, err := orch.imageManager.GetImage(ctx, testImageName) - require.NoError(t, err, "Failed to pull image "+testImageName) + _, err := orch.GetImage(ctx, regular.TestImageName) + require.NoError(t, err, "Failed to pull image "+regular.TestImageName) var vmGroup sync.WaitGroup for i := 0; i < vmNum; i++ { @@ -187,13 +200,13 @@ func TestParallelSnapLoad(t *testing.T) { vmID := fmt.Sprintf("%d", i+vmIDBase) revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM, "+vmID) err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM, "+vmID) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM, "+vmID) @@ -210,6 +223,24 @@ func TestParallelSnapLoad(t *testing.T) { } func TestParallelPhasedSnapLoad(t *testing.T) { + vmNum := 10 + vmIDBase := 11 + + orch := NewOrchestrator(regular.NewRegOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + 
regular.WithTestModeOn(true), + regular.WithUPF(*isUPFEnabled), + regular.WithLazyMode(*isLazyMode), + )) + + parallelPhasedSnapLoad(t, orch, vmNum, vmIDBase) +} + +func parallelPhasedSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBase int) { // Needs to be cleaned up manually. log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, @@ -222,26 +253,12 @@ func TestParallelPhasedSnapLoad(t *testing.T) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) defer cancel() - vmNum := 10 - vmIDBase := 11 - - orch := NewOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - WithTestModeOn(true), - WithUPF(*isUPFEnabled), - WithLazyMode(*isLazyMode), - ) - // Pull image - _, err := orch.imageManager.GetImage(ctx, testImageName) - require.NoError(t, err, "Failed to pull image "+testImageName) + _, err := orch.GetImage(ctx, regular.TestImageName) + require.NoError(t, err, "Failed to pull image "+regular.TestImageName) { var vmGroup sync.WaitGroup @@ -250,7 +267,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, testImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM, "+vmID) }(i) } @@ -279,7 +296,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) err = 
orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM, "+vmID) }(i) @@ -295,7 +312,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", testImageName, 0, 0, 0, 256, 1, false) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) _, _, err := orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM, "+vmID) }(i) diff --git a/ctriface/orch.go b/ctriface/orch.go index 792f48009..3e42a5742 100644 --- a/ctriface/orch.go +++ b/ctriface/orch.go @@ -1,6 +1,6 @@ // MIT License // -// Copyright (c) 2020 Plamen Petrov and EASE lab +// Copyright (c) 2021 Amory Hoste and EASE lab // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,207 +23,85 @@ package ctriface import ( - "github.com/ease-lab/vhive/ctrimages" - "github.com/ease-lab/vhive/devmapper" - "os" - "os/signal" - "path/filepath" - "strings" - "sync" - "syscall" - "time" - - log "github.com/sirupsen/logrus" - + "context" "github.com/containerd/containerd" - - fcclient "github.com/firecracker-microvm/firecracker-containerd/firecracker-control/client" - // note: from the original repo - - _ "google.golang.org/grpc/codes" //tmp - _ "google.golang.org/grpc/status" //tmp - - "github.com/ease-lab/vhive/memory/manager" "github.com/ease-lab/vhive/metrics" - "github.com/ease-lab/vhive/misc" - - _ "github.com/davecgh/go-spew/spew" //tmp + "github.com/ease-lab/vhive/snapshotting" ) -const ( - containerdAddress = "/run/firecracker-containerd/containerd.sock" - containerdTTRPCAddress = containerdAddress + ".ttrpc" - namespaceName = "firecracker-containerd" -) - -type WorkloadIoWriter struct { - logger *log.Entry -} - -func 
NewWorkloadIoWriter(vmID string) WorkloadIoWriter { - return WorkloadIoWriter {log.WithFields(log.Fields{"vmID": vmID})} -} - -func (wio WorkloadIoWriter) Write(p []byte) (n int, err error) { - s := string(p) - lines := strings.Split(s, "\n") - for i := range lines { - wio.logger.Info(string(lines[i])) - } - return len(p), nil -} - -// Orchestrator Drives all VMs type Orchestrator struct { - vmPool *misc.VMPool - workloadIo sync.Map // vmID string -> WorkloadIoWriter - snapshotter string - client *containerd.Client - fcClient *fcclient.Client - devMapper *devmapper.DeviceMapper - imageManager *ctrimages.ImageManager - // store *skv.KVStore - snapshotsEnabled bool - isUPFEnabled bool - isLazyMode bool - snapshotsDir string - isMetricsMode bool - hostIface string - - memoryManager *manager.MemoryManager -} - -// NewOrchestrator Initializes a new orchestrator -func NewOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPoolSize int, opts ...OrchestratorOption) *Orchestrator { // TODO: args - var err error - - o := new(Orchestrator) - o.vmPool = misc.NewVMPool(hostIface, netPoolSize) - o.snapshotter = snapshotter - o.snapshotsDir = "/fccd/snapshots" - o.hostIface = hostIface - - for _, opt := range opts { - opt(o) - } - - if _, err := os.Stat(o.snapshotsDir); err != nil { - if !os.IsNotExist(err) { - log.Panicf("Snapshot dir %s exists", o.snapshotsDir) - } - } - - if err := os.MkdirAll(o.snapshotsDir, 0777); err != nil { - log.Panicf("Failed to create snapshots dir %s", o.snapshotsDir) - } - - if o.GetUPFEnabled() { - managerCfg := manager.MemoryManagerCfg{ - MetricsModeOn: o.isMetricsMode, - } - o.memoryManager = manager.NewMemoryManager(managerCfg) - } - - log.Info("Creating containerd client") - o.client, err = containerd.New(containerdAddress) - if err != nil { - log.Fatal("Failed to start containerd client", err) - } - log.Info("Created containerd client") + // generic snapshot manager + orch OrchestratorInterface +} - log.Info("Creating 
firecracker client") - o.fcClient, err = fcclient.New(containerdTTRPCAddress) - if err != nil { - log.Fatal("Failed to start firecracker client", err) +func NewOrchestrator(orch OrchestratorInterface) *Orchestrator { + o := &Orchestrator{ + orch: orch, } - log.Info("Created firecracker client") - - o.devMapper = devmapper.NewDeviceMapper(o.client, poolName, metadataDev) - - o.imageManager = ctrimages.NewImageManager(o.client, o.snapshotter) return o } -func (o *Orchestrator) setupCloseHandler() { - c := make(chan os.Signal, 2) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - log.Info("\r- Ctrl+C pressed in Terminal") - _ = o.StopActiveVMs() - o.Cleanup() - os.Exit(0) - }() +func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib, vCPUCount uint32, trackDirtyPages bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { + return o.orch.StartVM(ctx, vmID, imageName, memSizeMib, vCPUCount, trackDirtyPages) } -// Cleanup Removes the bridges created by the VM pool's tap manager -// Cleans up snapshots directory -func (o *Orchestrator) Cleanup() { - o.vmPool.CleanupNetwork() - if err := os.RemoveAll(o.snapshotsDir); err != nil { - log.Panic("failed to delete snapshots dir", err) - } +func (o *Orchestrator) OffloadVM(ctx context.Context, vmID string) error { + return o.orch.OffloadVM(ctx, vmID) } -// GetSnapshotsEnabled Returns the snapshots mode of the orchestrator -func (o *Orchestrator) GetSnapshotsEnabled() bool { - return o.snapshotsEnabled +func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string) error { + return o.orch.StopSingleVM(ctx, vmID) } -// GetUPFEnabled Returns the UPF mode of the orchestrator -func (o *Orchestrator) GetUPFEnabled() bool { - return o.isUPFEnabled +func (o *Orchestrator) StopActiveVMs() error { + return o.orch.StopActiveVMs() } -// DumpUPFPageStats Dumps the memory manager's stats about the number of -// the unique pages and the number of the pages that are reused 
across invocations -func (o *Orchestrator) DumpUPFPageStats(vmID, functionName, metricsOutFilePath string) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received DumpUPFPageStats") - - return o.memoryManager.DumpUPFPageStats(vmID, functionName, metricsOutFilePath) +func (o *Orchestrator) PauseVM(ctx context.Context, vmID string) error { + return o.orch.PauseVM(ctx, vmID) } -// DumpUPFLatencyStats Dumps the memory manager's latency stats -func (o *Orchestrator) DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received DumpUPFPageStats") +func (o *Orchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) { + return o.orch.ResumeVM(ctx, vmID) +} - return o.memoryManager.DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath) +func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { + return o.orch.CreateSnapshot(ctx, vmID, snap) } -// GetUPFLatencyStats Returns the memory manager's latency stats -func (o *Orchestrator) GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("Orchestrator received DumpUPFPageStats") +func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { + return o.orch.LoadSnapshot(ctx, vmID, snap) +} - return o.memoryManager.GetUPFLatencyStats(vmID) +func (o *Orchestrator) CleanupSnapshot(ctx context.Context, id string) error { + return o.orch.CleanupSnapshot(ctx, id) } -func (o *Orchestrator) getSnapshotFile(vmID string) string { // TODO: remove - return filepath.Join(o.getVMBaseDir(vmID), "snap_file") +func (o *Orchestrator) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { + return o.orch.GetImage(ctx, imageName) } -func (o 
*Orchestrator) getMemoryFile(vmID string) string { // TODO: remove - return filepath.Join(o.getVMBaseDir(vmID), "mem_file") +func (o *Orchestrator) Cleanup() { + o.orch.Cleanup() } -func (o *Orchestrator) getWorkingSetFile(vmID string) string { - return filepath.Join(o.getVMBaseDir(vmID), "working_set_pages") +func (o *Orchestrator) GetSnapshotsEnabled() bool { + return o.orch.GetSnapshotsEnabled() } -func (o *Orchestrator) getVMBaseDir(vmID string) string { - return filepath.Join(o.snapshotsDir, vmID) +func (o *Orchestrator) GetUPFEnabled() bool { + return o.orch.GetUPFEnabled() } -func (o *Orchestrator) setupHeartbeat() { - heartbeat := time.NewTicker(60 * time.Second) +func (o *Orchestrator) DumpUPFPageStats(vmID, functionName, metricsOutFilePath string) error { + return o.orch.DumpUPFPageStats(vmID, functionName, metricsOutFilePath) +} - go func() { - for { - <-heartbeat.C - log.Info("HEARTBEAT: number of active VMs: ", len(o.vmPool.GetVMMap())) - } // for - }() // go func +func (o *Orchestrator) DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error { + return o.orch.DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath) } + +func (o *Orchestrator) GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) { + return o.orch.GetUPFLatencyStats(vmID) +} \ No newline at end of file diff --git a/ctriface/regular/iface.go b/ctriface/regular/iface.go new file mode 100644 index 000000000..7a4da7e13 --- /dev/null +++ b/ctriface/regular/iface.go @@ -0,0 +1,539 @@ +// MIT License +// +// Copyright (c) 2020 Dmitrii Ustiugov, Plamen Petrov and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// 
furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package regular + +import ( + "context" + "github.com/ease-lab/vhive/ctriface" + "github.com/ease-lab/vhive/snapshotting" + "os" + "os/exec" + "strings" + "sync" + "syscall" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/oci" + "github.com/firecracker-microvm/firecracker-containerd/proto" // note: from the original repo + "github.com/firecracker-microvm/firecracker-containerd/runtime/firecrackeroci" + "github.com/pkg/errors" + + _ "google.golang.org/grpc/codes" //tmp + _ "google.golang.org/grpc/status" //tmp + + "github.com/ease-lab/vhive/memory/manager" + "github.com/ease-lab/vhive/metrics" + "github.com/ease-lab/vhive/misc" + "github.com/go-multierror/multierror" + + _ "github.com/davecgh/go-spew/spew" //tmp +) + +const ( + TestImageName = "ghcr.io/ease-lab/helloworld:var_workload" +) + +// StartVM Boots a VM if it does not exist +func (o *RegOrchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32, trackDirtyPages bool) (_ *ctriface.StartVMResponse, _ *metrics.Metric, retErr error) { + var ( + startVMMetric *metrics.Metric = metrics.NewMetric() + tStart time.Time + ) + + logger := 
log.WithFields(log.Fields{"vmID": vmID, "image": imageName}) + logger.Debug("StartVM: Received StartVM") + + // 1. Allocate VM metadata & create vm network + vm, err := o.vmPool.Allocate(vmID) + if err != nil { + logger.Error("failed to allocate VM in VM pool") + return nil, nil, err + } + + // Set VM vCPU and Memory + if memSizeMib != 0 { + vm.MemSizeMib = memSizeMib + } + if vCPUCount != 0 { + vm.VCPUCount = vCPUCount + } + + defer func() { + // Free the VM from the pool if function returns error + if retErr != nil { + if err := o.vmPool.Free(vmID); err != nil { + logger.WithError(err).Errorf("failed to free VM from pool after failure") + } + } + }() + + ctx = namespaces.WithNamespace(ctx, NamespaceName) + + // 2. Fetch VM image + tStart = time.Now() + if vm.Image, err = o.GetImage(ctx, imageName); err != nil { + return nil, nil, errors.Wrapf(err, "Failed to get/pull image") + } + startVMMetric.MetricMap[metrics.GetImage] = metrics.ToUS(time.Since(tStart)) + + // 3. Create VM + tStart = time.Now() + conf := o.getVMConfig(vm, trackDirtyPages) + resp, err := o.fcClient.CreateVM(ctx, conf) + startVMMetric.MetricMap[metrics.FcCreateVM] = metrics.ToUS(time.Since(tStart)) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create the microVM in firecracker-containerd") + } + + defer func() { + if retErr != nil { + if _, err := o.fcClient.StopVM(ctx, &proto.StopVMRequest{VMID: vmID}); err != nil { + logger.WithError(err).Errorf("failed to stop firecracker-containerd VM after failure") + } + } + }() + + // 4. 
Create container + logger.Debug("StartVM: Creating a new container") + tStart = time.Now() + container, err := o.client.NewContainer( + ctx, + vmID, + containerd.WithSnapshotter(o.snapshotter), + containerd.WithNewSnapshot(vmID, *vm.Image), + containerd.WithNewSpec( + oci.WithImageConfig(*vm.Image), + firecrackeroci.WithVMID(vmID), + firecrackeroci.WithVMNetwork, + ), + containerd.WithRuntime("aws.firecracker", nil), + ) + startVMMetric.MetricMap[metrics.NewContainer] = metrics.ToUS(time.Since(tStart)) + vm.Container = &container + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create a container") + } + + defer func() { + if retErr != nil { + if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { + logger.WithError(err).Errorf("failed to delete container after failure") + } + } + }() + + // 5. Turn container into runnable process + iologger := NewWorkloadIoWriter(vmID) + o.workloadIo.Store(vmID, &iologger) + logger.Debug("StartVM: Creating a new task") + tStart = time.Now() + task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(os.Stdin, iologger, iologger))) + startVMMetric.MetricMap[metrics.NewTask] = metrics.ToUS(time.Since(tStart)) + vm.Task = &task + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to create a task") + } + + defer func() { + if retErr != nil { + if _, err := task.Delete(ctx); err != nil { + logger.WithError(err).Errorf("failed to delete task after failure") + } + } + }() + + // 6. 
Wait for task to get ready + logger.Debug("StartVM: Waiting for the task to get ready") + tStart = time.Now() + ch, err := task.Wait(ctx) + startVMMetric.MetricMap[metrics.TaskWait] = metrics.ToUS(time.Since(tStart)) + vm.TaskCh = ch + if err != nil { + return nil, nil, errors.Wrap(err, "failed to wait for a task") + } + + defer func() { + if retErr != nil { + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + logger.WithError(err).Errorf("failed to kill task after failure") + } + } + }() + + // 7. Start process inside container + logger.Debug("StartVM: Starting the task") + tStart = time.Now() + if err := task.Start(ctx); err != nil { + return nil, nil, errors.Wrap(err, "failed to start a task") + } + startVMMetric.MetricMap[metrics.TaskStart] = metrics.ToUS(time.Since(tStart)) + + defer func() { + if retErr != nil { + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + logger.WithError(err).Errorf("failed to kill task after failure") + } + } + }() + + if err := os.MkdirAll(o.getVMBaseDir(vmID), 0777); err != nil { + logger.Error("Failed to create VM base dir") + return nil, nil, err + } + if o.GetUPFEnabled() { + logger.Debug("Registering VM with the memory manager") + + stateCfg := manager.SnapshotStateCfg{ + VMID: vmID, + GuestMemPath: o.getMemoryFile(vmID), + BaseDir: o.getVMBaseDir(vmID), + GuestMemSize: int(conf.MachineCfg.MemSizeMib) * 1024 * 1024, + IsLazyMode: o.isLazyMode, + VMMStatePath: o.getSnapshotFile(vmID), + WorkingSetPath: o.getWorkingSetFile(vmID), + InstanceSockAddr: resp.UPFSockPath, + } + if err := o.memoryManager.RegisterVM(stateCfg); err != nil { + return nil, nil, errors.Wrap(err, "failed to register VM with memory manager") + // NOTE (Plamen): Potentially need a defer(DeregisteVM) here if RegisterVM is not last to execute + } + } + + logger.Debug("Successfully started a VM") + + return &ctriface.StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, startVMMetric, nil +} + +// StopSingleVM Shuts down a VM +// Note: VMs are not 
quisced before being stopped +func (o *RegOrchestrator) StopSingleVM(ctx context.Context, vmID string) error { + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("RegOrchestrator received StopVM") + + ctx = namespaces.WithNamespace(ctx, NamespaceName) + vm, err := o.vmPool.GetVM(vmID) + if err != nil { + if _, ok := err.(*misc.NonExistErr); ok { + logger.Panic("StopVM: VM does not exist") + } + logger.Panic("StopVM: GetVM() failed for an unknown reason") + + } + + logger = log.WithFields(log.Fields{"vmID": vmID}) + + task := *vm.Task + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + logger.WithError(err).Error("Failed to kill the task") + return err + } + + <-vm.TaskCh + //FIXME: Seems like some tasks need some extra time to die Issue#15, lr_training + time.Sleep(500 * time.Millisecond) + + if _, err := task.Delete(ctx); err != nil { + logger.WithError(err).Error("failed to delete task") + return err + } + + container := *vm.Container + if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { + logger.WithError(err).Error("failed to delete container") + return err + } + + if _, err := o.fcClient.StopVM(ctx, &proto.StopVMRequest{VMID: vmID}); err != nil { + logger.WithError(err).Error("failed to stop firecracker-containerd VM") + return err + } + + if err := o.vmPool.Free(vmID); err != nil { + logger.Error("failed to free VM from VM pool") + return err + } + + o.workloadIo.Delete(vmID) + + logger.Debug("Stopped VM successfully") + + return nil +} + +func getK8sDNS() []string { + //using googleDNS as a backup + dnsIPs := []string{"8.8.8.8"} + //get k8s DNS clusterIP + cmd := exec.Command( + "kubectl", "get", "service", "-n", "kube-system", "kube-dns", "-o=custom-columns=:.spec.clusterIP", "--no-headers", + ) + stdoutStderr, err := cmd.CombinedOutput() + if err != nil { + log.Warnf("Failed to Fetch k8s dns clusterIP %v\n%s\n", err, stdoutStderr) + log.Warnf("Using google dns %s\n", dnsIPs[0]) + } else { + //adding k8s 
DNS clusterIP to the list + dnsIPs = []string{strings.TrimSpace(string(stdoutStderr)), dnsIPs[0]} + } + return dnsIPs +} + +func (o *RegOrchestrator) getVMConfig(vm *misc.VM, trackDirtyPages bool) *proto.CreateVMRequest { + kernelArgs := "ro noapic reboot=k panic=1 pci=off nomodules systemd.log_color=false systemd.unit=firecracker.target init=/sbin/overlay-init tsc=reliable quiet 8250.nr_uarts=0 ipv6.disable=1" + + return &proto.CreateVMRequest{ + VMID: vm.ID, + TimeoutSeconds: 100, + KernelArgs: kernelArgs, + MachineCfg: &proto.FirecrackerMachineConfiguration{ + VcpuCount: vm.VCPUCount, + MemSizeMib: vm.MemSizeMib, + TrackDirtyPages: trackDirtyPages, + }, + NetworkInterfaces: []*proto.FirecrackerNetworkInterface{{ + StaticConfig: &proto.StaticNetworkConfiguration{ + MacAddress: vm.NetConfig.GetMacAddress(), + HostDevName: vm.NetConfig.GetHostDevName(), + IPConfig: &proto.IPConfiguration{ + PrimaryAddr: vm.NetConfig.GetContainerCIDR(), + GatewayAddr: vm.NetConfig.GetGatewayIP(), + Nameservers: getK8sDNS(), + }, + }, + }}, + NetworkNamespace: vm.NetConfig.GetNamespacePath(), + OffloadEnabled: true, + } +} + +// StopActiveVMs Shuts down all active VMs +func (o *RegOrchestrator) StopActiveVMs() error { + var vmGroup sync.WaitGroup + for vmID, vm := range o.vmPool.GetVMMap() { + vmGroup.Add(1) + logger := log.WithFields(log.Fields{"vmID": vmID}) + go func(vmID string, vm *misc.VM) { + defer vmGroup.Done() + err := o.StopSingleVM(context.Background(), vmID) + if err != nil { + logger.Warn(err) + } + }(vmID, vm) + } + + log.Info("waiting for goroutines") + vmGroup.Wait() + log.Info("waiting done") + + log.Info("Closing fcClient") + o.fcClient.Close() + log.Info("Closing containerd client") + o.client.Close() + + return nil +} + +// PauseVM Pauses a VM +func (o *RegOrchestrator) PauseVM(ctx context.Context, vmID string) error { + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("RegOrchestrator received PauseVM") + + ctx = namespaces.WithNamespace(ctx, 
NamespaceName) + + if _, err := o.fcClient.PauseVM(ctx, &proto.PauseVMRequest{VMID: vmID}); err != nil { + logger.WithError(err).Error("failed to pause the VM") + return err + } + + return nil +} + +// ResumeVM Resumes a VM +func (o *RegOrchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) { + var ( + resumeVMMetric *metrics.Metric = metrics.NewMetric() + tStart time.Time + ) + + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("RegOrchestrator received ResumeVM") + + ctx = namespaces.WithNamespace(ctx, NamespaceName) + + tStart = time.Now() + if _, err := o.fcClient.ResumeVM(ctx, &proto.ResumeVMRequest{VMID: vmID}); err != nil { + logger.WithError(err).Error("failed to resume the VM") + return nil, err + } + resumeVMMetric.MetricMap[metrics.FcResume] = metrics.ToUS(time.Since(tStart)) + + return resumeVMMetric, nil +} + +// CreateSnapshot Creates a snapshot of a VM +func (o *RegOrchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("RegOrchestrator received CreateSnapshot") + + ctx = namespaces.WithNamespace(ctx, NamespaceName) + + req := &proto.CreateSnapshotRequest{ + VMID: vmID, + SnapshotFilePath: o.getSnapshotFile(vmID), + MemFilePath: o.getMemoryFile(vmID), + SnapshotType: snap.GetSnapType(), + } + + if _, err := o.fcClient.CreateSnapshot(ctx, req); err != nil { + logger.WithError(err).Error("failed to create snapshot of the VM") + return err + } + + return nil +} + +// LoadSnapshot Loads a snapshot of a VM +func (o *RegOrchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *ctriface.StartVMResponse, _ *metrics.Metric, retErr error) { + var ( + loadSnapshotMetric *metrics.Metric = metrics.NewMetric() + tStart time.Time + loadErr, activateErr error + loadDone = make(chan int) + ) + + + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("RegOrchestrator 
received LoadSnapshot") + + ctx = namespaces.WithNamespace(ctx, NamespaceName) + + // Allocate VM metadata & create vm network + vm, err := o.vmPool.Allocate(vmID) + if err != nil { + logger.Error("failed to allocate VM in VM pool") + return nil, nil, err + } + + defer func() { + // Free the VM from the pool if function returns error + if retErr != nil { + if err := o.vmPool.Free(vmID); err != nil { + logger.WithError(err).Errorf("failed to free VM from pool after failure") + } + } + }() + + req := &proto.LoadSnapshotRequest{ + VMID: vmID, + SnapshotFilePath: snap.GetSnapFilePath(), + MemFilePath: snap.GetMemFilePath(), + EnableUserPF: o.GetUPFEnabled(), + NetworkNamespace: vm.NetConfig.GetNamespacePath(), + Offloaded: true, + } + + if o.GetUPFEnabled() { + if err := o.memoryManager.FetchState(vmID); err != nil { + return nil, nil, err + } + } + + tStart = time.Now() + + go func() { + defer close(loadDone) + + if _, loadErr = o.fcClient.LoadSnapshot(ctx, req); loadErr != nil { + logger.Error("Failed to load snapshot of the VM: ", loadErr) + } + }() + + if o.GetUPFEnabled() { + if activateErr = o.memoryManager.Activate(vmID); activateErr != nil { + logger.Warn("Failed to activate VM in the memory manager", activateErr) + } + } + + <-loadDone + + loadSnapshotMetric.MetricMap[metrics.LoadVMM] = metrics.ToUS(time.Since(tStart)) + + if loadErr != nil || activateErr != nil { + multierr := multierror.Of(loadErr, activateErr) + return nil, nil, multierr + } + + return &ctriface.StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, nil, nil +} + +// Offload Shuts down the VM but leaves shim and other resources running. 
+func (o *RegOrchestrator) OffloadVM(ctx context.Context, vmID string) error { + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("Orchestrator received Offload") + + ctx = namespaces.WithNamespace(ctx, NamespaceName) + + _, err := o.vmPool.GetVM(vmID) + if err != nil { + if _, ok := err.(*misc.NonExistErr); ok { + logger.Panic("Offload: VM does not exist") + } + logger.Panic("Offload: GetVM() failed for an unknown reason") + + } + + if o.GetUPFEnabled() { + if err := o.memoryManager.Deactivate(vmID); err != nil { + logger.Error("Failed to deactivate VM in the memory manager") + return err + } + } + + if _, err := o.fcClient.Offload(ctx, &proto.OffloadRequest{VMID: vmID}); err != nil { + logger.WithError(err).Error("failed to offload the VM") + return err + } + + if err := o.vmPool.Free(vmID); err != nil { + logger.Error("failed to free VM from VM pool") + return err + } + + return nil +} + +func (o *RegOrchestrator) CleanupSnapshot(ctx context.Context, revisionID string) error { + if err := o.devMapper.RemoveDeviceSnapshot(ctx, revisionID); err != nil { + return errors.Wrapf(err, "removing revision snapshot") + } + return nil +} + +func (o *RegOrchestrator) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { + return o.imageManager.GetImage(ctx, imageName) +} \ No newline at end of file diff --git a/ctriface/regular/orch.go b/ctriface/regular/orch.go new file mode 100644 index 000000000..5d35be8ec --- /dev/null +++ b/ctriface/regular/orch.go @@ -0,0 +1,229 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is 
+// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package regular + +import ( + "github.com/ease-lab/vhive/ctrimages" + "github.com/ease-lab/vhive/devmapper" + "os" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/containerd/containerd" + + fcclient "github.com/firecracker-microvm/firecracker-containerd/firecracker-control/client" + // note: from the original repo + + _ "google.golang.org/grpc/codes" //tmp + _ "google.golang.org/grpc/status" //tmp + + "github.com/ease-lab/vhive/memory/manager" + "github.com/ease-lab/vhive/metrics" + "github.com/ease-lab/vhive/misc" + + _ "github.com/davecgh/go-spew/spew" //tmp +) + +const ( + containerdAddress = "/run/firecracker-containerd/containerd.sock" + containerdTTRPCAddress = containerdAddress + ".ttrpc" + NamespaceName = "firecracker-containerd" +) + +type WorkloadIoWriter struct { + logger *log.Entry +} + +func NewWorkloadIoWriter(vmID string) WorkloadIoWriter { + return WorkloadIoWriter{log.WithFields(log.Fields{"vmID": vmID})} +} + +func (wio WorkloadIoWriter) Write(p []byte) (n int, err error) { + s := string(p) + lines := strings.Split(s, "\n") + for i := range lines { + wio.logger.Info(string(lines[i])) + } + return len(p), nil +} + +// RegOrchestrator Drives all VMs +type RegOrchestrator struct { + vmPool 
*misc.VMPool + workloadIo sync.Map // vmID string -> WorkloadIoWriter + snapshotter string + client *containerd.Client + fcClient *fcclient.Client + devMapper *devmapper.DeviceMapper + imageManager *ctrimages.ImageManager + // store *skv.KVStore + snapshotsEnabled bool + isUPFEnabled bool + isLazyMode bool + snapshotsDir string + isMetricsMode bool + hostIface string + + memoryManager *manager.MemoryManager +} + +// NewDedupOrchestrator Initializes a new orchestrator +func NewRegOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPoolSize int, opts ...OrchestratorOption) *RegOrchestrator { // TODO: args + var err error + + o := new(RegOrchestrator) + o.vmPool = misc.NewVMPool(hostIface, netPoolSize) + o.snapshotter = snapshotter + o.snapshotsDir = "/fccd/snapshots" + o.hostIface = hostIface + + for _, opt := range opts { + opt(o) + } + + if _, err := os.Stat(o.snapshotsDir); err != nil { + if !os.IsNotExist(err) { + log.Panicf("Snapshot dir %s exists", o.snapshotsDir) + } + } + + if err := os.MkdirAll(o.snapshotsDir, 0777); err != nil { + log.Panicf("Failed to create snapshots dir %s", o.snapshotsDir) + } + + if o.GetUPFEnabled() { + managerCfg := manager.MemoryManagerCfg{ + MetricsModeOn: o.isMetricsMode, + } + o.memoryManager = manager.NewMemoryManager(managerCfg) + } + + log.Info("Creating containerd client") + o.client, err = containerd.New(containerdAddress) + if err != nil { + log.Fatal("Failed to start containerd client", err) + } + log.Info("Created containerd client") + + log.Info("Creating firecracker client") + o.fcClient, err = fcclient.New(containerdTTRPCAddress) + if err != nil { + log.Fatal("Failed to start firecracker client", err) + } + log.Info("Created firecracker client") + + o.devMapper = devmapper.NewDeviceMapper(o.client, poolName, metadataDev) + + o.imageManager = ctrimages.NewImageManager(o.client, o.snapshotter) + + return o +} + +func (o *RegOrchestrator) setupCloseHandler() { + c := make(chan os.Signal, 2) + 
signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + log.Info("\r- Ctrl+C pressed in Terminal") + _ = o.StopActiveVMs() + o.Cleanup() + os.Exit(0) + }() +} + +// Cleanup Removes the bridges created by the VM pool's tap manager +// Cleans up snapshots directory +func (o *RegOrchestrator) Cleanup() { + o.vmPool.CleanupNetwork() + if err := os.RemoveAll(o.snapshotsDir); err != nil { + log.Panic("failed to delete snapshots dir", err) + } +} + +// GetSnapshotsEnabled Returns the snapshots mode of the orchestrator +func (o *RegOrchestrator) GetSnapshotsEnabled() bool { + return o.snapshotsEnabled +} + +// GetUPFEnabled Returns the UPF mode of the orchestrator +func (o *RegOrchestrator) GetUPFEnabled() bool { + return o.isUPFEnabled +} + +// DumpUPFPageStats Dumps the memory manager's stats about the number of +// the unique pages and the number of the pages that are reused across invocations +func (o *RegOrchestrator) DumpUPFPageStats(vmID, functionName, metricsOutFilePath string) error { + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("RegOrchestrator received DumpUPFPageStats") + + return o.memoryManager.DumpUPFPageStats(vmID, functionName, metricsOutFilePath) +} + +// DumpUPFLatencyStats Dumps the memory manager's latency stats +func (o *RegOrchestrator) DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error { + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("RegOrchestrator received DumpUPFPageStats") + + return o.memoryManager.DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath) +} + +// GetUPFLatencyStats Returns the memory manager's latency stats +func (o *RegOrchestrator) GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) { + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("RegOrchestrator received DumpUPFPageStats") + + return o.memoryManager.GetUPFLatencyStats(vmID) +} + +func (o *RegOrchestrator) getSnapshotFile(vmID string) string { + return 
filepath.Join(o.getVMBaseDir(vmID), "snap_file") +} + +func (o *RegOrchestrator) getMemoryFile(vmID string) string { + return filepath.Join(o.getVMBaseDir(vmID), "mem_file") +} + +func (o *RegOrchestrator) getWorkingSetFile(vmID string) string { + return filepath.Join(o.getVMBaseDir(vmID), "working_set_pages") +} + +func (o *RegOrchestrator) getVMBaseDir(vmID string) string { + return filepath.Join(o.snapshotsDir, vmID) +} + +func (o *RegOrchestrator) setupHeartbeat() { + heartbeat := time.NewTicker(60 * time.Second) + + go func() { + for { + <-heartbeat.C + log.Info("HEARTBEAT: number of active VMs: ", len(o.vmPool.GetVMMap())) + } // for + }() // go func +} diff --git a/ctriface/orch_options.go b/ctriface/regular/orch_options.go similarity index 89% rename from ctriface/orch_options.go rename to ctriface/regular/orch_options.go index 83fb17be9..fdaaf2c6b 100644 --- a/ctriface/orch_options.go +++ b/ctriface/regular/orch_options.go @@ -20,14 +20,14 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
-package ctriface +package regular // OrchestratorOption Options to pass to Orchestrator -type OrchestratorOption func(*Orchestrator) +type OrchestratorOption func(*RegOrchestrator) // WithTestModeOn Sets the test mode func WithTestModeOn(testModeOn bool) OrchestratorOption { - return func(o *Orchestrator) { + return func(o *RegOrchestrator) { if !testModeOn { o.setupCloseHandler() o.setupHeartbeat() @@ -37,14 +37,14 @@ func WithTestModeOn(testModeOn bool) OrchestratorOption { // WithSnapshots Sets the snapshot mode on or off func WithSnapshots(snapshotsEnabled bool) OrchestratorOption { - return func(o *Orchestrator) { + return func(o *RegOrchestrator) { o.snapshotsEnabled = snapshotsEnabled } } // WithUPF Sets the user-page faults mode on or off func WithUPF(isUPFEnabled bool) OrchestratorOption { - return func(o *Orchestrator) { + return func(o *RegOrchestrator) { o.isUPFEnabled = isUPFEnabled } } @@ -52,7 +52,7 @@ func WithUPF(isUPFEnabled bool) OrchestratorOption { // WithSnapshotsDir Sets the directory where // snapshots should be stored func WithSnapshotsDir(snapshotsDir string) OrchestratorOption { - return func(o *Orchestrator) { + return func(o *RegOrchestrator) { o.snapshotsDir = snapshotsDir } } @@ -61,14 +61,14 @@ func WithSnapshotsDir(snapshotsDir string) OrchestratorOption { // where all guest memory pages are brought on demand. 
// Only works if snapshots are enabled func WithLazyMode(isLazyMode bool) OrchestratorOption { - return func(o *Orchestrator) { + return func(o *RegOrchestrator) { o.isLazyMode = isLazyMode } } // WithMetricsMode Sets the metrics mode func WithMetricsMode(isMetricsMode bool) OrchestratorOption { - return func(o *Orchestrator) { + return func(o *RegOrchestrator) { o.isMetricsMode = isMetricsMode } } diff --git a/ctriface/types.go b/ctriface/types.go new file mode 100644 index 000000000..81c0a2e19 --- /dev/null +++ b/ctriface/types.go @@ -0,0 +1,57 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package ctriface + +import ( + "context" + "github.com/containerd/containerd" + "github.com/ease-lab/vhive/metrics" + "github.com/ease-lab/vhive/snapshotting" +) + +// StartVMResponse is the response returned by StartVM +type StartVMResponse struct { + // GuestIP is the IP of the guest MicroVM + GuestIP string +} + +type OrchestratorInterface interface { + StartVM(ctx context.Context, vmID, imageName string, memSizeMib, vCPUCount uint32, trackDirtyPages bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) + StopSingleVM(ctx context.Context, vmID string) error + OffloadVM(ctx context.Context, vmID string) error + StopActiveVMs() error + PauseVM(ctx context.Context, vmID string) error + ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) + CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error + LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *StartVMResponse, _ *metrics.Metric, retErr error) + CleanupSnapshot(ctx context.Context, id string) error + GetImage(ctx context.Context, imageName string) (*containerd.Image, error) + GetSnapshotsEnabled() bool + GetUPFEnabled() bool + Cleanup() + + // TODO: these should be removed in the future + DumpUPFPageStats(vmID, functionName, metricsOutFilePath string) error + DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error + GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) +} diff --git a/functions.go b/functions.go index 480e277c5..5dc734421 100644 --- a/functions.go +++ b/functions.go @@ -448,8 +448,8 @@ func (f *Function) CreateInstanceSnapshot() { log.Panic(err) } - revisionID := fmt.Sprintf("myrev-%d", f.vmID) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, 0, 256, 1, false) + revisionID := fmt.Sprintf("myrev-%s", f.vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, false) err = orch.CreateSnapshot(ctx, f.vmID, snap) if err != nil { 
log.Panic(err) @@ -471,8 +471,8 @@ func (f *Function) LoadInstance() *metrics.Metric { ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) defer cancel() - revisionID := fmt.Sprintf("myrev-%d", f.vmID) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, 0, 256, 1, false) + revisionID := fmt.Sprintf("myrev-%s", f.vmID) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, false) _, loadMetr, err := orch.LoadSnapshot(ctx, f.vmID, snap) if err != nil { log.Panic(err) diff --git a/go.mod b/go.mod index bf2a107c9..4d2e1d834 100644 --- a/go.mod +++ b/go.mod @@ -44,20 +44,20 @@ replace ( // github.com/firecracker-microvm/firecracker-containerd => github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4 github.com/containerd/containerd => github.com/amohoste/containerd v1.5.5-ids // TODO: change to vhive github.com/ease-lab/vhive/examples/protobuf/helloworld => ./examples/protobuf/helloworld - github.com/firecracker-microvm/firecracker-containerd => github.com/amohoste/firecracker-containerd v1.0.0-sparse // TODO: change to vhive + github.com/firecracker-microvm/firecracker-containerd => github.com/amohoste/firecracker-containerd v1.0.0-enhanced-snap // TODO: change to vhive ) require ( github.com/antchfx/xpath v1.2.0 // indirect github.com/blend/go-sdk v1.20211025.3 // indirect - github.com/containerd/containerd v1.5.2 - github.com/containerd/go-cni v1.1.4 + github.com/containerd/containerd v1.5.8 + github.com/containerd/go-cni v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/ease-lab/vhive/examples/protobuf/helloworld v0.0.0-00010101000000-000000000000 github.com/firecracker-microvm/firecracker-containerd v0.0.0-00010101000000-000000000000 github.com/ftrvxmtrx/fd v0.0.0-20150925145434-c6d800382fff github.com/go-multierror/multierror v1.0.2 - github.com/golang/protobuf v1.5.0 + github.com/golang/protobuf v1.5.2 github.com/google/nftables 
v0.0.0-20210916140115-16a134723a96 github.com/montanaflynn/stats v0.6.5 github.com/opencontainers/image-spec v1.0.1 @@ -70,11 +70,11 @@ require ( github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae github.com/wcharczuk/go-chart v2.0.1+incompatible golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb // indirect - golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 + golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef gonum.org/v1/gonum v0.9.0 gonum.org/v1/plot v0.9.0 - google.golang.org/grpc v1.34.0 + google.golang.org/grpc v1.41.0 k8s.io/cri-api v0.20.6 ) diff --git a/go.sum b/go.sum index f590b9411..29ba21888 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,5 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -47,8 +48,9 @@ github.com/DataDog/datadog-go v4.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= 
+github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.18 h1:cYnKADiM1869gvBpos3YCteeT6sZLB48lB5dmMMs8Tg= @@ -71,8 +73,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/amohoste/containerd v1.5.5-ids h1:ewus7bzwx6j8ZlKqNjoctyQ2EOKAK+9nYBqC+D4XKfg= github.com/amohoste/containerd v1.5.5-ids/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo= -github.com/amohoste/firecracker-containerd v1.0.0-sparse h1:cGDp1kcB5gBxtWdgW1VYw8nroSYq751at07q5L2OBZY= -github.com/amohoste/firecracker-containerd v1.0.0-sparse/go.mod h1:+/08aD580irjp7X1+gyIIgRf4IbsxhgFhDQ15m+SFyY= +github.com/amohoste/firecracker-containerd v1.0.0-enhanced-snap h1:PRbQkUGXhT+4V0WBzEbv9dS2xPD5kmIpi7RH56Zo2Ig= +github.com/amohoste/firecracker-containerd v1.0.0-enhanced-snap/go.mod h1:DlVSzah7WtO75UNGqzBqlEL3acKBd2ZJB6n6K8nbevk= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8= github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= @@ -91,8 +93,9 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0 
h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.2.1 h1:M+/hrU9xlMp7t4TyTDQW97d3tRPVuKFC6zBEK16QnXY= +github.com/bits-and-blooms/bitset v1.2.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blend/go-sdk v1.20211025.3 h1:3f8hYTMb9ufP8IkBtsflNohAqoKo4hEBbeR0s4bfBqI= @@ -119,29 +122,29 @@ github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f2 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.0 h1:j/9Wnn+hrEWjLvHuIxUU1YI5JjEjVlT2AA68cse9rwY= +github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.2 h1:YbJAhpTevL2v6u8JC1NhCYRwf+3Vzxcc5vGnYoJ7VeE= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.4 h1:Mv3XkOjVsjTJHMpSi+dKZQPQGXEMpmXWs8oYZDaCK+s= -github.com/containerd/go-cni 
v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v1.0.1 h1:9OIL/sZmMYDBe+G8svzILAlulUpaDTUjeAbtH/JNLBo= -github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJRZVOhrCQj91YyC92sxg= github.com/containernetworking/plugins v0.9.1/go.mod 
h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= @@ -337,8 +340,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -379,8 +383,9 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= @@ -486,8 +491,9 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/koneu/natend v0.0.0-20150829182554-ec0926ea948d h1:MFX8DxRnKMY/2M3H61iSsVbo/n3h0MWGmWNN1UViOU0= github.com/koneu/natend v0.0.0-20150829182554-ec0926ea948d/go.mod h1:QHb4k4cr1fQikUahfcRVPcEXiUgFsdIstGqlurL0XL4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -535,7 +541,7 @@ github.com/mdlayher/netlink v0.0.0-20191009155606-de872b0d824b/go.mod h1:KxeJAFO github.com/mdlayher/vsock v0.0.0-20190329173812-a92c53d5dcab/go.mod h1:D7ATxm5dbu8KgVaJHLbtcFfkt6/ERTpnCK7kVpGOqsk= github.com/mediocregopher/radix/v4 v4.0.0-beta.1/go.mod h1:Z74pilm773ghbGV4EEoPvi6XWgkAfr0VCNkfa8gI1PU= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= 
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -574,28 +580,29 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0 h1:M76yO2HkZASFjXL0HSoZJ1AYEmQxNJmY41Jx1zNUq1Y= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.1 
h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs= github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210910115017-0d6cc581aeea h1:WmF5mV2OwWlHap/Ol8Z+iLZVlvLJrG7PzO/j8vwSLz8= +github.com/opencontainers/runtime-spec v1.0.3-0.20210910115017-0d6cc581aeea/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.8.5 h1:OkT6bMHOQ1JQQO4ihjQ49sj0+wciDcjziSVTRn8VeTA= +github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -656,7 +663,6 @@ github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfF github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool 
v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -757,8 +763,9 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -771,7 +778,6 @@ go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslx go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -786,6 +792,7 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -836,7 +843,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -858,6 +864,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -870,7 +877,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -881,8 +887,9 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net 
v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5RKzgYBMMxLxt6n5XW50= +golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -899,13 +906,13 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -932,6 +939,8 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -957,7 +966,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -972,8 +980,10 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs= +golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -982,8 +992,10 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1014,6 +1026,7 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1092,6 +1105,7 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/DataDog/dd-trace-go.v1 v1.27.1/go.mod h1:Sp1lku8WJMvNV0kjDI4Ni/T7J/U3BO5ct5kEaoVU8+I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/networking/networkManager.go b/networking/networkManager.go index 0dc4a103d..bdf0dfcf7 100644 --- a/networking/networkManager.go +++ b/networking/networkManager.go @@ -188,12 +188,12 @@ func (mgr *NetworkManager) Cleanup() error { wg.Add(len(mgr.networkPool)) for _, config := range mgr.networkPool { - go func() { + go func(config *NetworkConfig) { if err := config.RemoveNetwork(); err != nil { log.Errorf("failed to remove network %s:", err) } wg.Done() - }() + }(config) } wg.Wait() mgr.networkPool = make([]*NetworkConfig, 0) diff --git a/snapshotting/snapshotmanager.go b/snapshotting/deduplicated/manager.go similarity index 65% rename from snapshotting/snapshotmanager.go rename to snapshotting/deduplicated/manager.go index 8402a05d4..30e5dc7dd 100644 --- a/snapshotting/snapshotmanager.go +++ b/snapshotting/deduplicated/manager.go @@ -20,25 +20,27 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package snapshotting +package deduplicated import ( "container/heap" "fmt" + "github.com/ease-lab/vhive/snapshotting" "github.com/pkg/errors" "math" "os" "sync" ) -// SnapshotManager manages snapshots stored on the node. -type SnapshotManager struct { +// ImprovedSnapshotManager manages snapshots stored on the node. 
+type ImprovedSnapshotManager struct { sync.Mutex - snapshots map[string]*Snapshot + snapshots map[string]*snapshotting.Snapshot + snapStats map[string]*SnapshotStats // Heap of snapshots not in use sorted on score - freeSnaps SnapHeap - baseFolder string + freeSnaps SnapHeap + baseFolder string // Eviction clock int64 // When container last used. Increased to priority terminated container on termination @@ -46,9 +48,10 @@ type SnapshotManager struct { usedMib int64 } -func NewSnapshotManager(baseFolder string, capacityMib int64) *SnapshotManager { - manager := new(SnapshotManager) - manager.snapshots = make(map[string]*Snapshot) +func NewSnapshotManager(baseFolder string, capacityMib int64) *ImprovedSnapshotManager { + manager := new(ImprovedSnapshotManager) + manager.snapshots = make(map[string]*snapshotting.Snapshot) + manager.snapStats = make(map[string]*SnapshotStats) heap.Init(&manager.freeSnaps) manager.baseFolder = baseFolder manager.clock = 0 @@ -64,22 +67,22 @@ func NewSnapshotManager(baseFolder string, capacityMib int64) *SnapshotManager { // AcquireSnapshot returns a snapshot for the specified revision if it is available and increments the internal counter // such that the snapshot can't get removed. Similar to how a RW lock works -func (mgr *SnapshotManager) AcquireSnapshot(revision string) (*Snapshot, error) { +func (mgr *ImprovedSnapshotManager) AcquireSnapshot(revision string) (*snapshotting.Snapshot, error) { mgr.Lock() defer mgr.Unlock() // Check if a snapshot is available for the specified revision - snap, present := mgr.snapshots[revision] + snapStat, present := mgr.snapStats[revision] if !present { return nil, errors.New(fmt.Sprintf("Get: Snapshot for revision %s does not exist", revision)) } // Snapshot registered in manager but creation not finished yet - if ! snap.usable { // Could also wait until snapshot usable (trade-off) + if ! 
snapStat.usable { // Could also wait until snapshot usable (trade-off) return nil, errors.New(fmt.Sprintf("Snapshot is not yet usable")) } - if snap.numUsing == 0 { + if snapStat.numUsing == 0 { // Remove from free snaps so can't be deleted (could be done more efficiently) heapIdx := 0 for i, heapSnap := range mgr.freeSnaps { @@ -91,40 +94,40 @@ func (mgr *SnapshotManager) AcquireSnapshot(revision string) (*Snapshot, error) heap.Remove(&mgr.freeSnaps, heapIdx) } - snap.numUsing += 1 + snapStat.numUsing += 1 // Update stats for keepalive policy - snap.freq += 1 - snap.lastUsedClock = mgr.clock + snapStat.freq += 1 + snapStat.lastUsedClock = mgr.clock - return snap, nil + return mgr.snapshots[revision], nil } // ReleaseSnapshot releases the snapshot with the given revision so that it can possibly get deleted if it is not in use // by any other VMs. -func (mgr *SnapshotManager) ReleaseSnapshot(revision string) error { +func (mgr *ImprovedSnapshotManager) ReleaseSnapshot(revision string) error { mgr.Lock() defer mgr.Unlock() - snap, present := mgr.snapshots[revision] + snapStat, present := mgr.snapStats[revision] if !present { return errors.New(fmt.Sprintf("Get: Snapshot for revision %s does not exist", revision)) } - snap.numUsing -= 1 + snapStat.numUsing -= 1 - if snap.numUsing == 0 { + if snapStat.numUsing == 0 { // Add to free snaps - snap.UpdateScore() - heap.Push(&mgr.freeSnaps, snap) + snapStat.UpdateScore() + heap.Push(&mgr.freeSnaps, snapStat) } return nil } -// InitSnapshot initializes a snapshot by adding its metadata to the SnapshotManager. Once the snapshot has been created, +// InitSnapshot initializes a snapshot by adding its metadata to the ImprovedSnapshotManager. 
Once the snapshot has been created, // CommitSnapshot must be run to finalize the snapshot creation and make the snapshot available fo ruse -func (mgr *SnapshotManager) InitSnapshot(revision, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *Snapshot, error) { +func (mgr *ImprovedSnapshotManager) InitSnapshot(revision, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *snapshotting.Snapshot, error) { mgr.Lock() if _, present := mgr.snapshots[revision]; present { @@ -151,13 +154,16 @@ func (mgr *SnapshotManager) InitSnapshot(revision, image string, coldStartTimeMs } mgr.usedMib += estimatedSnapSizeMib - // Add snapshot metadata to manager - snap := NewSnapshot(revision, mgr.baseFolder, image, estimatedSnapSizeMib, coldStartTimeMs, mgr.clock, memSizeMib, vCPUCount, sparse) + // Add snapshot and snapshot metadata to manager + snap := snapshotting.NewSnapshot(revision, mgr.baseFolder, image, memSizeMib, vCPUCount, sparse) mgr.snapshots[revision] = snap + + snapStat := NewSnapshotStats(revision, estimatedSnapSizeMib, coldStartTimeMs, mgr.clock) + mgr.snapStats[revision] = snapStat mgr.Unlock() // Create directory to store snapshot data - err := os.Mkdir(snap.snapDir, 0755) + err := snap.CreateSnapDir() if err != nil { return removeContainerSnaps, nil, errors.Wrapf(err, "creating snapDir for snapshots %s", revision) } @@ -166,58 +172,65 @@ func (mgr *SnapshotManager) InitSnapshot(revision, image string, coldStartTimeMs } // CommitSnapshot finalizes the snapshot creation and makes it available for use. 
-func (mgr *SnapshotManager) CommitSnapshot(revision string) error { +func (mgr *ImprovedSnapshotManager) CommitSnapshot(revision string) error { mgr.Lock() - snap, present := mgr.snapshots[revision] + snapStat, present := mgr.snapStats[revision] if !present { mgr.Unlock() return errors.New(fmt.Sprintf("Snapshot for revision %s to commit does not exist", revision)) } + snap := mgr.snapshots[revision] mgr.Unlock() // Calculate actual disk size used var sizeIncrement int64 = 0 - oldSize := snap.TotalSizeMiB - snap.UpdateDiskSize() // Should always result in a decrease or equal! - sizeIncrement = snap.TotalSizeMiB - oldSize + oldSize := snapStat.TotalSizeMiB + + snapStat.UpdateSize(snap.CalculateDiskSize()) // Should always result in a decrease or equal! + sizeIncrement = snapStat.TotalSizeMiB - oldSize mgr.Lock() defer mgr.Unlock() mgr.usedMib += sizeIncrement - snap.usable = true - snap.UpdateScore() - heap.Push(&mgr.freeSnaps, snap) + snapStat.usable = true + snapStat.UpdateScore() + heap.Push(&mgr.freeSnaps, snapStat) return nil } // freeSpace makes sure neededMib of disk space is available by removing unused snapshots. Make sure to have a lock // when calling this function. 
-func (mgr *SnapshotManager) freeSpace(neededMib int64) (*[]string, error) { +func (mgr *ImprovedSnapshotManager) freeSpace(neededMib int64) (*[]string, error) { var toDelete []string var freedMib int64 = 0 var removeContainerSnaps []string // Get id of snapshot and name of devmapper snapshot to delete for freedMib < neededMib && len(mgr.freeSnaps) > 0 { - snap := heap.Pop(&mgr.freeSnaps).(*Snapshot) - snap.usable = false - toDelete = append(toDelete, snap.revisionId) - removeContainerSnaps = append(removeContainerSnaps, snap.containerSnapName) - freedMib += snap.TotalSizeMiB + snapStat := heap.Pop(&mgr.freeSnaps).(*SnapshotStats) + snapStat.usable = false + toDelete = append(toDelete, snapStat.revisionId) + + snap := mgr.snapshots[snapStat.revisionId] + removeContainerSnaps = append(removeContainerSnaps, snap.ContainerSnapName) + freedMib += snapStat.TotalSizeMiB } // Delete snapshots resources, update clock & delete snapshot map entry for _, revisionId := range toDelete { snap := mgr.snapshots[revisionId] - if err := os.RemoveAll(snap.snapDir); err != nil { - return &removeContainerSnaps, errors.Wrapf(err, "removing snapshot snapDir %s", snap.snapDir) - } - snap.UpdateScore() // Update score (see Faascache policy) - if snap.score > mgr.clock { - mgr.clock = snap.score + if err := snap.Cleanup(); err != nil { + return &removeContainerSnaps, errors.Wrapf(err, "removing snapshot %s snapDir", snap.GetId()) } delete(mgr.snapshots, revisionId) + + snapStat := mgr.snapStats[revisionId] + snapStat.UpdateScore() // Update score (see Faascache policy) + if snapStat.score > mgr.clock { + mgr.clock = snapStat.score + } + delete(mgr.snapStats, revisionId) } mgr.usedMib -= freedMib diff --git a/snapshotting/snapHeap.go b/snapshotting/deduplicated/snapHeap.go similarity index 94% rename from snapshotting/snapHeap.go rename to snapshotting/deduplicated/snapHeap.go index 9360d49d9..31f364ff4 100644 --- a/snapshotting/snapHeap.go +++ b/snapshotting/deduplicated/snapHeap.go @@ 
-20,9 +20,9 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package snapshotting +package deduplicated -type SnapHeap []*Snapshot +type SnapHeap []*SnapshotStats func (h SnapHeap) Len() int { return len(h) @@ -35,7 +35,7 @@ func (h SnapHeap) Swap(i, j int) { } func (h *SnapHeap) Push(x interface{}) { - *h = append(*h, x.(*Snapshot)) + *h = append(*h, x.(*SnapshotStats)) } func (h *SnapHeap) Pop() interface{} { diff --git a/snapshotting/deduplicated/snapStats.go b/snapshotting/deduplicated/snapStats.go new file mode 100644 index 000000000..fb0b7b8f9 --- /dev/null +++ b/snapshotting/deduplicated/snapStats.go @@ -0,0 +1,38 @@ +package deduplicated + +// Snapshot identified by revision +// Only capitalized fields are serialised / deserialised +type SnapshotStats struct { + revisionId string + + // Eviction + usable bool + numUsing uint32 + TotalSizeMiB int64 + freq int64 + coldStartTimeMs int64 + lastUsedClock int64 + score int64 +} + +func NewSnapshotStats(revisionId string, sizeMiB, coldStartTimeMs, lastUsed int64) *SnapshotStats { + s := &SnapshotStats{ + revisionId: revisionId, + numUsing: 0, + TotalSizeMiB: sizeMiB, + coldStartTimeMs: coldStartTimeMs, + lastUsedClock: lastUsed, // Initialize with used now to avoid immediately removing + usable: false, + } + + return s +} + +// UpdateScore updates the score of the snapshot used by the keepalive policy +func (snp *SnapshotStats) UpdateScore() { + snp.score = snp.lastUsedClock + (snp.freq * snp.coldStartTimeMs) / snp.TotalSizeMiB +} + +func (snp *SnapshotStats) UpdateSize(sizeMib int64) { + snp.TotalSizeMiB = sizeMib +} \ No newline at end of file diff --git a/snapshotting/manager.go b/snapshotting/manager.go new file mode 100644 index 000000000..88f20446a --- /dev/null +++ b/snapshotting/manager.go @@ -0,0 +1,59 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy 
+// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package snapshotting + +import ( + "sync" +) + +type SnapshotManager struct { + sync.Mutex + + // generic snapshot manager + mgr SnapshotManagerInterface +} + +func NewSnapshotManager(mgr SnapshotManagerInterface) *SnapshotManager { + sm := &SnapshotManager{ + mgr: mgr, + } + + return sm +} + +func (mgr *SnapshotManager) AcquireSnapshot(identifier string) (*Snapshot, error) { + return mgr.mgr.AcquireSnapshot(identifier) +} + +func (mgr *SnapshotManager) ReleaseSnapshot(identifier string) error { + return mgr.mgr.ReleaseSnapshot(identifier) +} + +func (mgr *SnapshotManager) InitSnapshot(identifier, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *Snapshot, error) { + return mgr.mgr.InitSnapshot(identifier, image, coldStartTimeMs, memSizeMib, vCPUCount, sparse) +} + +func (mgr *SnapshotManager) CommitSnapshot(identifier string) error { + return mgr.mgr.CommitSnapshot(identifier) +} + diff --git a/snapshotting/regular/manager.go b/snapshotting/regular/manager.go new file mode 100644 index 000000000..ae825671a --- /dev/null +++ b/snapshotting/regular/manager.go @@ -0,0 +1,127 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste, Plamen Petrov and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package regular + +import ( + "fmt" + "github.com/ease-lab/vhive/snapshotting" + "github.com/pkg/errors" + "os" + "sync" +) + + +// ImprovedSnapshotManager manages snapshots stored on the node. +type RegularSnapshotManager struct { + sync.Mutex + activeSnapshots map[string]*snapshotting.Snapshot + creatingSnapshots map[string]*snapshotting.Snapshot + idleSnapshots map[string][]*snapshotting.Snapshot + baseFolder string +} + +func NewRegularSnapshotManager(baseFolder string) *RegularSnapshotManager { + manager := new(RegularSnapshotManager) + manager.activeSnapshots = make(map[string]*snapshotting.Snapshot) + manager.creatingSnapshots = make(map[string]*snapshotting.Snapshot) + manager.idleSnapshots = make(map[string][]*snapshotting.Snapshot) + manager.baseFolder = baseFolder + + // Clean & init basefolder + os.RemoveAll(manager.baseFolder) + os.MkdirAll(manager.baseFolder, os.ModePerm) + + return manager +} + +func (mgr *RegularSnapshotManager) AcquireSnapshot(image string) (*snapshotting.Snapshot, error) { + mgr.Lock() + defer mgr.Unlock() + + idles, ok := mgr.idleSnapshots[image] + if !ok { + mgr.idleSnapshots[image] = []*snapshotting.Snapshot{} + return nil, errors.New(fmt.Sprintf("There is no snapshot available for image %s", image)) + } + + if len(idles) != 0 { + snp := idles[0] + mgr.idleSnapshots[image] = idles[1:] + mgr.activeSnapshots[snp.GetId()] = snp + return snp, nil + } + + return nil, errors.New(fmt.Sprintf("There is no snapshot available fo rimage %s", image)) +} 
+
+func (mgr *RegularSnapshotManager) ReleaseSnapshot(vmID string) error {
+	mgr.Lock()
+	defer mgr.Unlock()
+
+	snap, present := mgr.activeSnapshots[vmID]
+	if !present {
+		return errors.New(fmt.Sprintf("Get: Snapshot for container %s does not exist", vmID))
+	}
+
+	delete(mgr.activeSnapshots, vmID)
+	mgr.idleSnapshots[snap.Image] = append(mgr.idleSnapshots[snap.Image], snap)
+
+	return nil
+}
+
+// InitSnapshot initializes a snapshot by adding its metadata to the RegularSnapshotManager. Once the snapshot has been created,
+// CommitSnapshot must be run to finalize the snapshot creation and make the snapshot available for use
+func (mgr *RegularSnapshotManager) InitSnapshot(vmID, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *snapshotting.Snapshot, error) {
+	mgr.Lock()
+	var removeContainerSnaps *[]string
+
+	// Add snapshot and snapshot metadata to manager
+	snap := snapshotting.NewSnapshot(vmID, mgr.baseFolder, image, memSizeMib, vCPUCount, sparse)
+	mgr.creatingSnapshots[snap.GetId()] = snap
+	mgr.Unlock()
+
+	// Create directory to store snapshot data
+	err := snap.CreateSnapDir()
+	if err != nil {
+		return removeContainerSnaps, nil, errors.Wrapf(err, "creating snapDir for snapshots %s", vmID)
+	}
+
+	return removeContainerSnaps, snap, nil
+}
+
+// CommitSnapshot finalizes the snapshot creation and makes it available for use.
+func (mgr *RegularSnapshotManager) CommitSnapshot(vmID string) error { + mgr.Lock() + defer mgr.Unlock() + snap := mgr.creatingSnapshots[vmID] + delete(mgr.creatingSnapshots, vmID) + + _, ok := mgr.idleSnapshots[snap.Image] + if !ok { + mgr.idleSnapshots[snap.Image] = []*snapshotting.Snapshot{} + } + + mgr.idleSnapshots[snap.Image] = append(mgr.idleSnapshots[snap.Image], snap) + + return nil +} diff --git a/snapshotting/snapshot.go b/snapshotting/snapshot.go index bdac36f4f..c82ee1b30 100644 --- a/snapshotting/snapshot.go +++ b/snapshotting/snapshot.go @@ -36,46 +36,31 @@ import ( // Snapshot identified by revision // Only capitalized fields are serialised / deserialised type Snapshot struct { - revisionId string - containerSnapName string - snapDir string - Image string - MemSizeMib uint32 - VCPUCount uint32 - usable bool - sparse bool - - // Eviction - numUsing uint32 - TotalSizeMiB int64 - freq int64 - coldStartTimeMs int64 - lastUsedClock int64 - score int64 + id string // id for deduplicated + ContainerSnapName string + snapDir string + Image string + MemSizeMib uint32 + VCPUCount uint32 + sparse bool } -func NewSnapshot(revisionId, baseFolder, image string, sizeMiB, coldStartTimeMs, lastUsed int64, memSizeMib, vCPUCount uint32, sparse bool) *Snapshot { +func NewSnapshot(id, baseFolder, image string, memSizeMib, vCPUCount uint32, sparse bool) *Snapshot { s := &Snapshot{ - revisionId: revisionId, - snapDir: filepath.Join(baseFolder, revisionId), - containerSnapName: fmt.Sprintf("%s%s", revisionId, time.Now().Format("20060102150405")), - Image: image, - MemSizeMib: memSizeMib, - VCPUCount: vCPUCount, - usable: false, - numUsing: 0, - TotalSizeMiB: sizeMiB, - coldStartTimeMs: coldStartTimeMs, - lastUsedClock: lastUsed, // Initialize with used now to avoid immediately removing - sparse: sparse, + id: id, + snapDir: filepath.Join(baseFolder, id), + ContainerSnapName: fmt.Sprintf("%s%s", id, time.Now().Format("20060102150405")), + Image: image, + MemSizeMib: 
memSizeMib, + VCPUCount: vCPUCount, + sparse: sparse, } return s } -// UpdateDiskSize Updates the estimated disk size to real disk size in use by snapshot -func (snp *Snapshot) UpdateDiskSize() { - snp.TotalSizeMiB = getRealSizeMib(snp.GetMemFilePath()) + getRealSizeMib(snp.GetSnapFilePath()) + getRealSizeMib(snp.GetInfoFilePath()) + getRealSizeMib(snp.GetPatchFilePath()) +func (snp *Snapshot) CalculateDiskSize() int64 { + return getRealSizeMib(snp.GetMemFilePath()) + getRealSizeMib(snp.GetSnapFilePath()) + getRealSizeMib(snp.GetInfoFilePath()) + getRealSizeMib(snp.GetPatchFilePath()) } // getRealSizeMib returns the disk space used by a certain file @@ -87,21 +72,20 @@ func getRealSizeMib(filePath string) int64 { return int64(math.Ceil((float64(st.Blocks) * 512) / (1024 * 1024))) } -// UpdateScore updates the score of the snapshot used by the keepalive policy -func (snp *Snapshot) UpdateScore() { - snp.score = snp.lastUsedClock + (snp.freq * snp.coldStartTimeMs) / snp.TotalSizeMiB +func (snp *Snapshot) CreateSnapDir() error { + return os.Mkdir(snp.snapDir, 0755) } func (snp *Snapshot) GetImage() string { return snp.Image } -func (snp *Snapshot) GetRevisionId() string { - return snp.revisionId +func (snp *Snapshot) GetId() string { + return snp.id } func (snp *Snapshot) GetContainerSnapName() string { - return snp.containerSnapName + return snp.ContainerSnapName } func (snp *Snapshot) GetSnapFilePath() string { @@ -163,4 +147,8 @@ func (snp *Snapshot) LoadSnapInfo(infoPath string) error { } return nil +} + +func (snp *Snapshot) Cleanup() error { + return os.RemoveAll(snp.snapDir) } \ No newline at end of file diff --git a/snapshotting/types.go b/snapshotting/types.go new file mode 100644 index 000000000..f31a504be --- /dev/null +++ b/snapshotting/types.go @@ -0,0 +1,30 @@ +// MIT License +// +// Copyright (c) 2021 Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated 
documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package snapshotting + +type SnapshotManagerInterface interface { + AcquireSnapshot(identifier string) (*Snapshot, error) + ReleaseSnapshot(identifier string) error + InitSnapshot(identifier, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *Snapshot, error) + CommitSnapshot(identifier string) error +} diff --git a/vhive.go b/vhive.go index b10efa2eb..cb48851e9 100644 --- a/vhive.go +++ b/vhive.go @@ -26,6 +26,9 @@ import ( "context" "flag" "fmt" + "github.com/ease-lab/vhive/ctriface" + "github.com/ease-lab/vhive/ctriface/deduplicated" + "github.com/ease-lab/vhive/ctriface/regular" "math/rand" "net" @@ -36,7 +39,6 @@ import ( "github.com/ease-lab/vhive/cri" fccri "github.com/ease-lab/vhive/cri/firecracker" gvcri "github.com/ease-lab/vhive/cri/gvisor" - ctriface "github.com/ease-lab/vhive/ctriface" hpb "github.com/ease-lab/vhive/examples/protobuf/helloworld" pb "github.com/ease-lab/vhive/proto" log "github.com/sirupsen/logrus" @@ -58,6 +60,7 @@ var ( isSaveMemory 
*bool snapsCapacityMiB *int64 isSparseSnaps *bool + isDeduplicatedSnaps *bool isSnapshotsEnabled *bool isUPFEnabled *bool isLazyMode *bool @@ -87,6 +90,7 @@ func main() { // Snapshotting isSnapshotsEnabled = flag.Bool("snapshots", false, "Use VM snapshots when adding function instances") isSparseSnaps = flag.Bool("sparsesnaps", false, "Makes memory files sparse after storing to reduce disk utilization") + isDeduplicatedSnaps = flag.Bool("deduplicatedsnaps", false, "Use improved deduplicated snapshotting") snapsCapacityMiB = flag.Int64("snapcapacity", 102400, "Capacity set aside for storing snapshots (Mib)") isUPFEnabled = flag.Bool("upf", false, "Enable user-level page faults guest memory management") isLazyMode = flag.Bool("lazy", false, "Enable lazy serving mode when UPFs are enabled") @@ -150,18 +154,36 @@ func main() { testModeOn := false - orch = ctriface.NewOrchestrator( - *snapshotter, - *hostIface, - *poolName, - *metadataDev, - *netPoolSize, - ctriface.WithTestModeOn(testModeOn), - ctriface.WithSnapshots(*isSnapshotsEnabled), - ctriface.WithUPF(*isUPFEnabled), - ctriface.WithMetricsMode(*isMetricsMode), - ctriface.WithLazyMode(*isLazyMode), - ) + if *isDeduplicatedSnaps { + orch = ctriface.NewOrchestrator(deduplicated.NewDedupOrchestrator( + *snapshotter, + *hostIface, + *poolName, + *metadataDev, + *netPoolSize, + deduplicated.WithTestModeOn(testModeOn), + deduplicated.WithSnapshots(*isSnapshotsEnabled), + deduplicated.WithUPF(*isUPFEnabled), + deduplicated.WithMetricsMode(*isMetricsMode), + deduplicated.WithLazyMode(*isLazyMode), + )) + } else { + orch = ctriface.NewOrchestrator(regular.NewRegOrchestrator( + *snapshotter, + *hostIface, + *poolName, + *metadataDev, + *netPoolSize, + regular.WithTestModeOn(testModeOn), + regular.WithSnapshots(*isSnapshotsEnabled), + regular.WithUPF(*isUPFEnabled), + regular.WithMetricsMode(*isMetricsMode), + regular.WithLazyMode(*isLazyMode), + )) + } + + + funcPool = NewFuncPool(*isSaveMemory, *servedThreshold, 
*pinnedFuncNum, testModeOn) @@ -186,7 +208,7 @@ func setupFirecrackerCRI() { s := grpc.NewServer() - fcService, err := fccri.NewFirecrackerService(orch, *snapsCapacityMiB, *isSparseSnaps) + fcService, err := fccri.NewFirecrackerService(orch, *snapsCapacityMiB, *isSparseSnaps, *isDeduplicatedSnaps) if err != nil { log.Fatalf("failed to create firecracker service %v", err) } diff --git a/vhive_test.go b/vhive_test.go index 9777fc379..d5b2e9224 100644 --- a/vhive_test.go +++ b/vhive_test.go @@ -25,13 +25,14 @@ package main import ( "context" "flag" + "github.com/ease-lab/vhive/ctriface" + "github.com/ease-lab/vhive/ctriface/regular" "os" "strconv" "sync" "testing" ctrdlog "github.com/containerd/containerd/log" - ctriface "github.com/ease-lab/vhive/ctriface" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" ) @@ -65,25 +66,25 @@ func TestMain(m *testing.M) { flag.Parse() - log.Infof("Orchestrator snapshots enabled: %t", *isSnapshotsEnabledTest) - log.Infof("Orchestrator UPF enabled: %t", *isUPFEnabledTest) - log.Infof("Orchestrator lazy serving mode enabled: %t", *isLazyModeTest) - log.Infof("Orchestrator UPF metrics enabled: %t", *isMetricsModeTest) + log.Infof("DedupOrchestrator snapshots enabled: %t", *isSnapshotsEnabledTest) + log.Infof("DedupOrchestrator UPF enabled: %t", *isUPFEnabledTest) + log.Infof("DedupOrchestrator lazy serving mode enabled: %t", *isLazyModeTest) + log.Infof("DedupOrchestrator UPF metrics enabled: %t", *isMetricsModeTest) log.Infof("Drop cache: %t", !*isWithCache) log.Infof("Bench dir: %s", *benchDir) - orch = ctriface.NewOrchestrator( + orch = ctriface.NewOrchestrator(regular.NewRegOrchestrator( "devmapper", "", "fc-dev-thinpool", "", 10, - ctriface.WithTestModeOn(true), - ctriface.WithSnapshots(*isSnapshotsEnabledTest), - ctriface.WithUPF(*isUPFEnabledTest), - ctriface.WithMetricsMode(*isMetricsModeTest), - ctriface.WithLazyMode(*isLazyModeTest), - ) + regular.WithTestModeOn(true), + 
regular.WithSnapshots(*isSnapshotsEnabledTest), + regular.WithUPF(*isUPFEnabledTest), + regular.WithMetricsMode(*isMetricsModeTest), + regular.WithLazyMode(*isLazyModeTest), + )) ret := m.Run() From c78f1cbc714b2a05c96ffd8b58053aba7395bc2d Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Tue, 22 Feb 2022 21:59:06 +0000 Subject: [PATCH 09/15] Cleanup and integrate PR comments Signed-off-by: Amory Hoste --- cri/firecracker/coordinator.go | 63 ++- cri/firecracker/service.go | 14 +- ctriface/bench_test.go | 35 +- ctriface/deduplicated/iface.go | 524 ------------------------- ctriface/deduplicated/orch.go | 198 ---------- ctriface/deduplicated/orch_options.go | 74 ---- ctriface/failing_test.go | 41 +- ctriface/{regular => }/iface.go | 245 ++++++++---- ctriface/iface_test.go | 189 ++++----- ctriface/manual_cleanup_test.go | 181 ++++----- ctriface/orch.go | 214 +++++++--- ctriface/{regular => }/orch_options.go | 24 +- ctriface/regular/orch.go | 229 ----------- functions.go | 31 +- metrics/metrics.go | 2 +- misc/vm_pool.go | 2 +- scripts/install_pmutools.sh | 2 +- vhive.go | 57 +-- vhive_test.go | 27 +- 19 files changed, 656 insertions(+), 1496 deletions(-) delete mode 100644 ctriface/deduplicated/iface.go delete mode 100644 ctriface/deduplicated/orch.go delete mode 100644 ctriface/deduplicated/orch_options.go rename ctriface/{regular => }/iface.go (66%) rename ctriface/{regular => }/orch_options.go (82%) delete mode 100644 ctriface/regular/orch.go diff --git a/cri/firecracker/coordinator.go b/cri/firecracker/coordinator.go index 2e4b043f3..efbc568e4 100644 --- a/cri/firecracker/coordinator.go +++ b/cri/firecracker/coordinator.go @@ -41,14 +41,12 @@ import ( const snapshotsDir = "/fccd/snapshots" -// TODO: interface for orchestrator - type coordinator struct { sync.Mutex orch *ctriface.Orchestrator nextID uint64 isSparseSnaps bool - isDeduplicatedSnaps bool + isFullLocal bool activeInstances map[string]*FuncInstance snapshotManager *snapshotting.SnapshotManager @@ -64,15 
+62,15 @@ func withoutOrchestrator() coordinatorOption { } } -func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool, isDeduplicatedSnaps bool, opts ...coordinatorOption) *coordinator { +func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool, isFullLocal bool, opts ...coordinatorOption) *coordinator { c := &coordinator{ activeInstances: make(map[string]*FuncInstance), orch: orch, isSparseSnaps: isSparseSnaps, - isDeduplicatedSnaps: isDeduplicatedSnaps, + isFullLocal: isFullLocal, } - if isDeduplicatedSnaps { + if isFullLocal { c.snapshotManager = snapshotting.NewSnapshotManager(deduplicated.NewSnapshotManager(snapshotsDir, snapsCapacityMiB)) } else { c.snapshotManager = snapshotting.NewSnapshotManager(regular.NewRegularSnapshotManager(snapshotsDir)) @@ -88,26 +86,25 @@ func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int func (c *coordinator) startVM(ctx context.Context, image string, revision string, memSizeMib, vCPUCount uint32) (*FuncInstance, error) { if c.orch != nil && c.orch.GetSnapshotsEnabled() { id := image - if c.isDeduplicatedSnaps { + if c.isFullLocal { id = revision } // Check if snapshot is available if snap, err := c.snapshotManager.AcquireSnapshot(id); err == nil { if snap.MemSizeMib != memSizeMib || snap.VCPUCount != vCPUCount { - return nil, errors.New("Please create a new revision when updating uVM memory size or vCPU count") + return nil, errors.New("uVM memory size or vCPU count in the snapshot do not match the requested ones.") + } + + vmID := "" + if c.isFullLocal { + vmID = strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1))) } else { - vmID := "" - if c.isDeduplicatedSnaps { - vmID = strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1))) - } else { - vmID = snap.GetId() - } - - return c.orchStartVMSnapshot(ctx, snap, memSizeMib, vCPUCount, vmID) + vmID = snap.GetId() } - } else { - return c.orchStartVM(ctx, image, revision, 
memSizeMib, vCPUCount) + + return c.orchStartVMSnapshot(ctx, snap, memSizeMib, vCPUCount, vmID) + } } @@ -134,7 +131,7 @@ func (c *coordinator) stopVM(ctx context.Context, containerID string) error { } id := fi.vmID - if c.isDeduplicatedSnaps { + if c.isFullLocal { id = fi.revisionId } @@ -148,7 +145,7 @@ func (c *coordinator) stopVM(ctx context.Context, containerID string) error { } } - if c.isDeduplicatedSnaps { + if c.isFullLocal { return c.orchStopVM(ctx, fi) } else { return c.orchOffloadVM(ctx, fi) @@ -201,7 +198,7 @@ func (c *coordinator) orchStartVM(ctx context.Context, image, revision string, m if !c.withoutOrchestrator { trackDirtyPages := c.isSparseSnaps - resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount, trackDirtyPages) + resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount, trackDirtyPages, c.isFullLocal) if err != nil { logger.WithError(err).Error("coordinator failed to start VM") } @@ -233,7 +230,7 @@ func (c *coordinator) orchStartVMSnapshot(ctx context.Context, snap *snapshottin ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*30) defer cancel() - resp, _, err = c.orch.LoadSnapshot(ctxTimeout, vmID, snap) + resp, _, err = c.orch.LoadSnapshot(ctxTimeout, vmID, snap, c.isFullLocal) if err != nil { logger.WithError(err).Error("failed to load VM") return nil, err @@ -260,20 +257,18 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) ) id := fi.vmID - if c.isDeduplicatedSnaps { + if c.isFullLocal { id = fi.revisionId } removeContainerSnaps, snap, err := c.snapshotManager.InitSnapshot(id, fi.image, fi.coldStartTimeMs, fi.memSizeMib, fi.vCPUCount, c.isSparseSnaps) if err != nil { - if fmt.Sprint(err) == "There is not enough free space available" { - fi.logger.Info(fmt.Sprintf("There is not enough space available for snapshots of %s", fi.revisionId)) - } + fi.logger.Warn(fmt.Sprint(err)) return nil } - if c.isDeduplicatedSnaps && removeContainerSnaps != nil { + if 
c.isFullLocal && removeContainerSnaps != nil { for _, cleanupSnapId := range *removeContainerSnaps { if err := c.orch.CleanupSnapshot(ctx, cleanupSnapId); err != nil { return errors.Wrap(err, "removing devmapper revision snapshot") @@ -292,7 +287,7 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) return nil } - err = c.orch.CreateSnapshot(ctxTimeout, fi.vmID, snap) + err = c.orch.CreateSnapshot(ctxTimeout, fi.vmID, snap, c.isFullLocal) if err != nil { fi.logger.WithError(err).Error("failed to create snapshot") return nil @@ -306,26 +301,26 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) return nil } -func (c *coordinator) orchStopVM(ctx context.Context, fi *FuncInstance) error { +func (c *coordinator) orchOffloadVM(ctx context.Context, fi *FuncInstance) error { if c.withoutOrchestrator { return nil } - if err := c.orch.StopSingleVM(ctx, fi.vmID); err != nil { - fi.logger.WithError(err).Error("failed to stop VM for instance") + if err := c.orch.OffloadVM(ctx, fi.vmID, c.isFullLocal); err != nil { + fi.logger.WithError(err).Error("failed to offload VM") return err } return nil } -func (c *coordinator) orchOffloadVM(ctx context.Context, fi *FuncInstance) error { +func (c *coordinator) orchStopVM(ctx context.Context, fi *FuncInstance) error { if c.withoutOrchestrator { return nil } - if err := c.orch.OffloadVM(ctx, fi.vmID); err != nil { - fi.logger.WithError(err).Error("failed to offload VM") + if err := c.orch.StopSingleVM(ctx, fi.vmID, c.isFullLocal); err != nil { + fi.logger.WithError(err).Error("failed to stop VM for instance") return err } diff --git a/cri/firecracker/service.go b/cri/firecracker/service.go index 0dcfc3f06..b083f4c70 100644 --- a/cri/firecracker/service.go +++ b/cri/firecracker/service.go @@ -42,7 +42,7 @@ const ( guestPortEnv = "GUEST_PORT" guestImageEnv = "GUEST_IMAGE" guestMemorySizeMibEnv = "MEM_SIZE_MB" - guestvCPUCount = "VCPU_COUNT" + guestvCPUCountEnv = "VCPU_COUNT" ) 
type FirecrackerService struct { @@ -61,7 +61,7 @@ type VMConfig struct { guestPort string } -func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps, isDeduplicatedSnaps bool) (*FirecrackerService, error) { +func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps, isFullLocal bool) (*FirecrackerService, error) { fs := new(FirecrackerService) stockRuntimeClient, err := cri.NewStockRuntimeServiceClient() if err != nil { @@ -69,7 +69,7 @@ func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, return nil, err } fs.stockRuntimeClient = stockRuntimeClient - fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps, isDeduplicatedSnaps) + fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps, isFullLocal) fs.vmConfigs = make(map[string]*VMConfig) return fs, nil } @@ -251,14 +251,13 @@ func getMemorySize(config *criapi.ContainerConfig) (uint32, error) { envs := config.GetEnvs() for _, kv := range envs { if kv.GetKey() == guestMemorySizeMibEnv { - memSize, err := strconv.Atoi(kv.GetValue()) + memSize, err := strconv.ParseUint(kv.GetValue(), 10, 32) if err == nil { return uint32(memSize), nil } else { return 0, err } } - } return uint32(256), nil @@ -267,15 +266,14 @@ func getMemorySize(config *criapi.ContainerConfig) (uint32, error) { func getvCPUCount(config *criapi.ContainerConfig) (uint32, error) { envs := config.GetEnvs() for _, kv := range envs { - if kv.GetKey() == guestvCPUCount { - vCPUCount, err := strconv.Atoi(kv.GetValue()) + if kv.GetKey() == guestvCPUCountEnv { + vCPUCount, err := strconv.ParseUint(kv.GetValue(), 10, 32) if err == nil { return uint32(vCPUCount), nil } else { return 0, err } } - } return uint32(1), nil diff --git a/ctriface/bench_test.go b/ctriface/bench_test.go index 7f2f7c74f..7128866f8 100644 --- a/ctriface/bench_test.go +++ b/ctriface/bench_test.go @@ -24,7 +24,6 @@ package ctriface import ( 
"context" - "github.com/ease-lab/vhive/ctriface/regular" "os" "os/exec" "path/filepath" @@ -44,22 +43,6 @@ const ( ) func TestBenchmarkStart(t *testing.T) { - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - )) - - benchCount := 10 - vmID := 0 - benchmarkStart(t, orch, benchCount, vmID) -} - -func benchmarkStart(t *testing.T, orch *Orchestrator, benchCount, vmID int) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -70,10 +53,22 @@ func benchmarkStart(t *testing.T, orch *Orchestrator, benchCount, vmID int) { log.SetLevel(log.InfoLevel) testTimeout := 2000 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + ) + images := getAllImages() + benchCount := 10 + vmID := 0 createResultsDir() @@ -88,11 +83,11 @@ func benchmarkStart(t *testing.T, orch *Orchestrator, benchCount, vmID int) { for i := 0; i < benchCount; i++ { dropPageCache() - _, metric, err := orch.StartVM(ctx, vmIDString, imageName, 256, 1, false) + _, metric, err := orch.StartVM(ctx, vmIDString, imageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM") startMetrics[i] = metric - err = orch.StopSingleVM(ctx, vmIDString) + err = orch.StopSingleVM(ctx, vmIDString, false) require.NoError(t, err, "Failed to stop VM") } diff --git a/ctriface/deduplicated/iface.go b/ctriface/deduplicated/iface.go deleted file mode 100644 index 0e0bd0346..000000000 --- a/ctriface/deduplicated/iface.go +++ /dev/null @@ -1,524 +0,0 @@ -// MIT License -// -// Copyright (c) 2020 Dmitrii Ustiugov, 
Plamen Petrov and EASE lab -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
- -package deduplicated - -import ( - "context" - "github.com/ease-lab/vhive/ctriface" - "github.com/ease-lab/vhive/snapshotting" - "os" - "os/exec" - "strings" - "sync" - "syscall" - "time" - - log "github.com/sirupsen/logrus" - - "github.com/containerd/containerd" - "github.com/containerd/containerd/cio" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/oci" - "github.com/firecracker-microvm/firecracker-containerd/proto" // note: from the original repo - "github.com/firecracker-microvm/firecracker-containerd/runtime/firecrackeroci" - "github.com/pkg/errors" - - _ "google.golang.org/grpc/codes" //tmp - _ "google.golang.org/grpc/status" //tmp - - "github.com/ease-lab/vhive/metrics" - "github.com/ease-lab/vhive/misc" - "github.com/go-multierror/multierror" - - _ "github.com/davecgh/go-spew/spew" //tmp -) - -// StartVM Boots a VM if it does not exist -func (o *DedupOrchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32, trackDirtyPages bool) (_ *ctriface.StartVMResponse, _ *metrics.Metric, retErr error) { - var ( - startVMMetric *metrics.Metric = metrics.NewMetric() - tStart time.Time - ) - - logger := log.WithFields(log.Fields{"vmID": vmID, "image": imageName}) - logger.Debug("StartVM: Received StartVM") - - // 1. Allocate VM metadata & create vm network - vm, err := o.vmPool.Allocate(vmID) - if err != nil { - logger.Error("failed to allocate VM in VM pool") - return nil, nil, err - } - - // Set VM vCPU and Memory - if memSizeMib != 0 { - vm.MemSizeMib = memSizeMib - } - if vCPUCount != 0 { - vm.VCPUCount = vCPUCount - } - - defer func() { - // Free the VM from the pool if function returns error - if retErr != nil { - if err := o.vmPool.Free(vmID); err != nil { - logger.WithError(err).Errorf("failed to free VM from pool after failure") - } - } - }() - - ctx = namespaces.WithNamespace(ctx, namespaceName) - - // 2. 
Fetch VM image - tStart = time.Now() - if vm.Image, err = o.GetImage(ctx, imageName); err != nil { - return nil, nil, errors.Wrapf(err, "Failed to get/pull image") - } - startVMMetric.MetricMap[metrics.GetImage] = metrics.ToUS(time.Since(tStart)) - - // 3. Create VM - tStart = time.Now() - conf := o.getVMConfig(vm, trackDirtyPages) - _, err = o.fcClient.CreateVM(ctx, conf) - startVMMetric.MetricMap[metrics.FcCreateVM] = metrics.ToUS(time.Since(tStart)) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create the microVM in firecracker-containerd") - } - - defer func() { - if retErr != nil { - if _, err := o.fcClient.StopVM(ctx, &proto.StopVMRequest{VMID: vmID}); err != nil { - logger.WithError(err).Errorf("failed to stop firecracker-containerd VM after failure") - } - } - }() - - // 4. Create container - logger.Debug("StartVM: Creating a new container") - tStart = time.Now() - container, err := o.client.NewContainer( - ctx, - vm.ContainerSnapKey, - containerd.WithSnapshotter(o.snapshotter), - containerd.WithNewSnapshot(vm.ContainerSnapKey, *vm.Image), - containerd.WithNewSpec( - oci.WithImageConfig(*vm.Image), - firecrackeroci.WithVMID(vmID), - firecrackeroci.WithVMNetwork, - ), - containerd.WithRuntime("aws.firecracker", nil), - ) - startVMMetric.MetricMap[metrics.NewContainer] = metrics.ToUS(time.Since(tStart)) - vm.Container = &container - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create a container") - } - - defer func() { - if retErr != nil { - if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { - logger.WithError(err).Errorf("failed to delete container after failure") - } - } - }() - - // 5. 
Turn container into runnable process - iologger := NewWorkloadIoWriter(vmID) - o.workloadIo.Store(vmID, &iologger) - logger.Debug("StartVM: Creating a new task") - tStart = time.Now() - task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(os.Stdin, iologger, iologger))) - startVMMetric.MetricMap[metrics.NewTask] = metrics.ToUS(time.Since(tStart)) - vm.Task = &task - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to create a task") - } - - defer func() { - if retErr != nil { - if _, err := task.Delete(ctx); err != nil { - logger.WithError(err).Errorf("failed to delete task after failure") - } - } - }() - - // 6. Wait for task to get ready - logger.Debug("StartVM: Waiting for the task to get ready") - tStart = time.Now() - ch, err := task.Wait(ctx) - startVMMetric.MetricMap[metrics.TaskWait] = metrics.ToUS(time.Since(tStart)) - vm.TaskCh = ch - if err != nil { - return nil, nil, errors.Wrap(err, "failed to wait for a task") - } - - defer func() { - if retErr != nil { - if err := task.Kill(ctx, syscall.SIGKILL); err != nil { - logger.WithError(err).Errorf("failed to kill task after failure") - } - } - }() - - // 7. 
Start process inside container - logger.Debug("StartVM: Starting the task") - tStart = time.Now() - if err := task.Start(ctx); err != nil { - return nil, nil, errors.Wrap(err, "failed to start a task") - } - startVMMetric.MetricMap[metrics.TaskStart] = metrics.ToUS(time.Since(tStart)) - - defer func() { - if retErr != nil { - if err := task.Kill(ctx, syscall.SIGKILL); err != nil { - logger.WithError(err).Errorf("failed to kill task after failure") - } - } - }() - - logger.Debug("Successfully started a VM") - - return &ctriface.StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, startVMMetric, nil -} - -// StopSingleVM Shuts down a VM -// Note: VMs are not quisced before being stopped -func (o *DedupOrchestrator) StopSingleVM(ctx context.Context, vmID string) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("DedupOrchestrator received StopVM") - - ctx = namespaces.WithNamespace(ctx, namespaceName) - vm, err := o.vmPool.GetVM(vmID) - if err != nil { - if _, ok := err.(*misc.NonExistErr); ok { - logger.Panic("StopVM: VM does not exist") - } - logger.Panic("StopVM: GetVM() failed for an unknown reason") - - } - - logger = log.WithFields(log.Fields{"vmID": vmID}) - - // Cleanup and remove container if VM not booted from snapshot - if ! 
vm.SnapBooted { - task := *vm.Task - if err := task.Kill(ctx, syscall.SIGKILL); err != nil { - logger.WithError(err).Error("Failed to kill the task") - return err - } - - <-vm.TaskCh - //FIXME: Seems like some tasks need some extra time to die Issue#15, lr_training - time.Sleep(500 * time.Millisecond) - - if _, err := task.Delete(ctx); err != nil { - logger.WithError(err).Error("failed to delete task") - return err - } - - container := *vm.Container - if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { - logger.WithError(err).Error("failed to delete container") - return err - } - } - - // Stop VM - if _, err := o.fcClient.StopVM(ctx, &proto.StopVMRequest{VMID: vmID}); err != nil { - logger.WithError(err).Error("failed to stop firecracker-containerd VM") - return err - } - - // Free VM metadata and clean up network - if err := o.vmPool.Free(vmID); err != nil { - logger.Error("failed to free VM from VM pool") - return err - } - - o.workloadIo.Delete(vmID) - - // Cleanup VM devmapper container snapshot if booted from snapshot - if vm.SnapBooted { - if err := o.devMapper.RemoveDeviceSnapshot(ctx, vm.ContainerSnapKey); err != nil { - logger.Error("failed to deactivate container snapshot") - return err - } - } - - logger.Debug("Stopped VM successfully") - - return nil -} - -func getK8sDNS() []string { - //using googleDNS as a backup - dnsIPs := []string{"8.8.8.8"} - //get k8s DNS clusterIP - cmd := exec.Command( - "kubectl", "get", "service", "-n", "kube-system", "kube-dns", "-o=custom-columns=:.spec.clusterIP", "--no-headers", - ) - stdoutStderr, err := cmd.CombinedOutput() - if err != nil { - log.Warnf("Failed to Fetch k8s dns clusterIP %v\n%s\n", err, stdoutStderr) - log.Warnf("Using google dns %s\n", dnsIPs[0]) - } else { - //adding k8s DNS clusterIP to the list - dnsIPs = []string{strings.TrimSpace(string(stdoutStderr)), dnsIPs[0]} - } - return dnsIPs -} - -func (o *DedupOrchestrator) getVMConfig(vm *misc.VM, trackDirtyPages bool) 
*proto.CreateVMRequest { - kernelArgs := "ro noapic reboot=k panic=1 pci=off nomodules systemd.log_color=false systemd.unit=firecracker.target init=/sbin/overlay-init tsc=reliable quiet 8250.nr_uarts=0 ipv6.disable=1" - - return &proto.CreateVMRequest{ - VMID: vm.ID, - TimeoutSeconds: 100, - KernelArgs: kernelArgs, - MachineCfg: &proto.FirecrackerMachineConfiguration{ - VcpuCount: vm.VCPUCount, - MemSizeMib: vm.MemSizeMib, - TrackDirtyPages: trackDirtyPages, - }, - NetworkInterfaces: []*proto.FirecrackerNetworkInterface{{ - StaticConfig: &proto.StaticNetworkConfiguration{ - MacAddress: vm.NetConfig.GetMacAddress(), - HostDevName: vm.NetConfig.GetHostDevName(), - IPConfig: &proto.IPConfiguration{ - PrimaryAddr: vm.NetConfig.GetContainerCIDR(), - GatewayAddr: vm.NetConfig.GetGatewayIP(), - Nameservers: getK8sDNS(), - }, - }, - }}, - NetworkNamespace: vm.NetConfig.GetNamespacePath(), - OffloadEnabled: false, - } -} - -// Offload Shuts down the VM but leaves shim and other resources running. 
-func (o *DedupOrchestrator) OffloadVM(ctx context.Context, vmID string) error { - return errors.New("Deduplicated snapshots do not support offloading") -} - -// StopActiveVMs Shuts down all active VMs -func (o *DedupOrchestrator) StopActiveVMs() error { - var vmGroup sync.WaitGroup - for vmID, vm := range o.vmPool.GetVMMap() { - vmGroup.Add(1) - logger := log.WithFields(log.Fields{"vmID": vmID}) - go func(vmID string, vm *misc.VM) { - defer vmGroup.Done() - err := o.StopSingleVM(context.Background(), vmID) - if err != nil { - logger.Warn(err) - } - }(vmID, vm) - } - - log.Info("waiting for goroutines") - vmGroup.Wait() - log.Info("waiting done") - - log.Info("Closing fcClient") - o.fcClient.Close() - log.Info("Closing containerd client") - o.client.Close() - - return nil -} - -// PauseVM Pauses a VM -func (o *DedupOrchestrator) PauseVM(ctx context.Context, vmID string) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("DedupOrchestrator received PauseVM") - - ctx = namespaces.WithNamespace(ctx, namespaceName) - - if _, err := o.fcClient.PauseVM(ctx, &proto.PauseVMRequest{VMID: vmID}); err != nil { - logger.WithError(err).Error("failed to pause the VM") - return err - } - - return nil -} - -// ResumeVM Resumes a VM -func (o *DedupOrchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) { - var ( - resumeVMMetric *metrics.Metric = metrics.NewMetric() - tStart time.Time - ) - - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("DedupOrchestrator received ResumeVM") - - ctx = namespaces.WithNamespace(ctx, namespaceName) - - tStart = time.Now() - if _, err := o.fcClient.ResumeVM(ctx, &proto.ResumeVMRequest{VMID: vmID}); err != nil { - logger.WithError(err).Error("failed to resume the VM") - return nil, err - } - resumeVMMetric.MetricMap[metrics.FcResume] = metrics.ToUS(time.Since(tStart)) - - return resumeVMMetric, nil -} - -// CreateSnapshot Creates a snapshot of a VM -func (o *DedupOrchestrator) 
CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("DedupOrchestrator received CreateSnapshot") - - ctx = namespaces.WithNamespace(ctx, namespaceName) - - // 1. Get VM metadata - vm, err := o.vmPool.GetVM(vmID) - if err != nil { - return err - } - - // 2. Create VM & VM memory state snapshot - req := &proto.CreateSnapshotRequest{ - VMID: vmID, - SnapshotFilePath: snap.GetSnapFilePath(), - MemFilePath: snap.GetMemFilePath(), - SnapshotType: snap.GetSnapType(), - } - - if _, err := o.fcClient.CreateSnapshot(ctx, req); err != nil { - logger.WithError(err).Error("failed to create snapshot of the VM") - return err - } - - // 3. Backup disk state difference. - // 3.B Alternatively could also do ForkContainerSnap(ctx, vm.ContainerSnapKey, snap.GetContainerSnapName(), *vm.Image, forkMetric) - if err := o.devMapper.CreatePatch(ctx, snap.GetPatchFilePath(), vm.ContainerSnapKey, *vm.Image); err != nil { - logger.WithError(err).Error("failed to create container patch file") - return err - } - - // 4. Serialize snapshot info - if err := snap.SerializeSnapInfo(); err != nil { - logger.WithError(err).Error("failed to serialize snapshot info") - return err - } - - // 5. Resume - if _, err := o.fcClient.ResumeVM(ctx, &proto.ResumeVMRequest{VMID: vmID}); err != nil { - log.Printf("failed to resume the VM") - return err - } - - return nil -} - -// LoadSnapshot Loads a snapshot of a VM -func (o *DedupOrchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *ctriface.StartVMResponse, _ *metrics.Metric, retErr error) { - var ( - loadSnapshotMetric *metrics.Metric = metrics.NewMetric() - tStart time.Time - loadErr, activateErr error - ) - - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("DedupOrchestrator received LoadSnapshot") - - ctx = namespaces.WithNamespace(ctx, namespaceName) - - // 1. 
Allocate VM metadata & create vm network - vm, err := o.vmPool.Allocate(vmID) - if err != nil { - logger.Error("failed to allocate VM in VM pool") - return nil, nil, err - } - - defer func() { - // Free the VM from the pool if function returns error - if retErr != nil { - if err := o.vmPool.Free(vmID); err != nil { - logger.WithError(err).Errorf("failed to free VM from pool after failure") - } - } - }() - - // 2. Fetch image for VM - if vm.Image, err = o.GetImage(ctx, snap.GetImage()); err != nil { - return nil, nil, errors.Wrapf(err, "Failed to get/pull image") - } - - // 3. Create snapshot for container to run - // 3.B Alternatively could also do CreateDeviceSnapshot(ctx, vm.ContainerSnapKey, snap.GetContainerSnapName()) - if err := o.devMapper.CreateDeviceSnapshotFromImage(ctx, vm.ContainerSnapKey, *vm.Image); err != nil { - return nil, nil, errors.Wrapf(err, "creating container snapshot") - } - - containerSnap, err := o.devMapper.GetDeviceSnapshot(ctx, vm.ContainerSnapKey) - if err != nil { - return nil, nil, errors.Wrapf(err, "previously created container device does not exist") - } - - // 4. Unpack patch into container snapshot - if err := o.devMapper.RestorePatch(ctx, vm.ContainerSnapKey, snap.GetPatchFilePath()); err != nil { - return nil, nil, errors.Wrapf(err, "unpacking patch into container snapshot") - } - - // 5. 
Load VM from snapshot - req := &proto.LoadSnapshotRequest{ - VMID: vmID, - SnapshotFilePath: snap.GetSnapFilePath(), - MemFilePath: snap.GetMemFilePath(), - EnableUserPF: false, - NetworkNamespace: vm.NetConfig.GetNamespacePath(), - NewSnapshotPath: containerSnap.GetDevicePath(), - Offloaded: false, - } - - tStart = time.Now() - - if _, loadErr = o.fcClient.LoadSnapshot(ctx, req); loadErr != nil { - logger.Error("Failed to load snapshot of the VM: ", loadErr) - } - - loadSnapshotMetric.MetricMap[metrics.LoadVMM] = metrics.ToUS(time.Since(tStart)) - - if loadErr != nil || activateErr != nil { - multierr := multierror.Of(loadErr, activateErr) - return nil, nil, multierr - } - - vm.SnapBooted = true - - return &ctriface.StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, nil, nil -} - -func (o *DedupOrchestrator) CleanupSnapshot(ctx context.Context, revisionID string) error { - if err := o.devMapper.RemoveDeviceSnapshot(ctx, revisionID); err != nil { - return errors.Wrapf(err, "removing revision snapshot") - } - return nil -} - -func (o *DedupOrchestrator) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { - return o.imageManager.GetImage(ctx, imageName) -} diff --git a/ctriface/deduplicated/orch.go b/ctriface/deduplicated/orch.go deleted file mode 100644 index 1bc671706..000000000 --- a/ctriface/deduplicated/orch.go +++ /dev/null @@ -1,198 +0,0 @@ -// MIT License -// -// Copyright (c) 2020 Plamen Petrov and EASE lab -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be 
included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package deduplicated - -import ( - "github.com/ease-lab/vhive/ctrimages" - "github.com/ease-lab/vhive/devmapper" - "os" - "os/signal" - "path/filepath" - "strings" - "sync" - "syscall" - "time" - - log "github.com/sirupsen/logrus" - - "github.com/containerd/containerd" - - fcclient "github.com/firecracker-microvm/firecracker-containerd/firecracker-control/client" - // note: from the original repo - - _ "google.golang.org/grpc/codes" //tmp - _ "google.golang.org/grpc/status" //tmp - - "github.com/ease-lab/vhive/metrics" - "github.com/ease-lab/vhive/misc" - - _ "github.com/davecgh/go-spew/spew" //tmp -) - -const ( - containerdAddress = "/run/firecracker-containerd/containerd.sock" - containerdTTRPCAddress = containerdAddress + ".ttrpc" - namespaceName = "firecracker-containerd" -) - -type WorkloadIoWriter struct { - logger *log.Entry -} - -func NewWorkloadIoWriter(vmID string) WorkloadIoWriter { - return WorkloadIoWriter{log.WithFields(log.Fields{"vmID": vmID})} -} - -func (wio WorkloadIoWriter) Write(p []byte) (n int, err error) { - s := string(p) - lines := strings.Split(s, "\n") - for i := range lines { - wio.logger.Info(string(lines[i])) - } - return len(p), nil -} - -// DedupOrchestrator Drives all VMs -type DedupOrchestrator struct { - vmPool *misc.VMPool - workloadIo sync.Map // vmID string -> WorkloadIoWriter - snapshotter string - client *containerd.Client - fcClient *fcclient.Client - devMapper 
*devmapper.DeviceMapper - imageManager *ctrimages.ImageManager - // store *skv.KVStore - snapshotsEnabled bool - isUPFEnabled bool - isLazyMode bool - snapshotsDir string - isMetricsMode bool - hostIface string -} - -// NewDedupOrchestrator Initializes a new orchestrator -func NewDedupOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPoolSize int, opts ...OrchestratorOption) *DedupOrchestrator { // TODO: args - var err error - - o := new(DedupOrchestrator) - o.vmPool = misc.NewVMPool(hostIface, netPoolSize) - o.snapshotter = snapshotter - o.snapshotsDir = "/fccd/snapshots" - o.hostIface = hostIface - - for _, opt := range opts { - opt(o) - } - - if _, err := os.Stat(o.snapshotsDir); err != nil { - if !os.IsNotExist(err) { - log.Panicf("Snapshot dir %s exists", o.snapshotsDir) - } - } - - if err := os.MkdirAll(o.snapshotsDir, 0777); err != nil { - log.Panicf("Failed to create snapshots dir %s", o.snapshotsDir) - } - - log.Info("Creating containerd client") - o.client, err = containerd.New(containerdAddress) - if err != nil { - log.Fatal("Failed to start containerd client", err) - } - log.Info("Created containerd client") - - log.Info("Creating firecracker client") - o.fcClient, err = fcclient.New(containerdTTRPCAddress) - if err != nil { - log.Fatal("Failed to start firecracker client", err) - } - log.Info("Created firecracker client") - - o.devMapper = devmapper.NewDeviceMapper(o.client, poolName, metadataDev) - - o.imageManager = ctrimages.NewImageManager(o.client, o.snapshotter) - - return o -} - -func (o *DedupOrchestrator) setupCloseHandler() { - c := make(chan os.Signal, 2) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - log.Info("\r- Ctrl+C pressed in Terminal") - _ = o.StopActiveVMs() - o.Cleanup() - os.Exit(0) - }() -} - -// Cleanup Removes the bridges created by the VM pool's tap manager -// Cleans up snapshots directory -func (o *DedupOrchestrator) Cleanup() { - o.vmPool.CleanupNetwork() - if err := 
os.RemoveAll(o.snapshotsDir); err != nil { - log.Panic("failed to delete snapshots dir", err) - } -} - -// GetSnapshotsEnabled Returns the snapshots mode of the orchestrator -func (o *DedupOrchestrator) GetSnapshotsEnabled() bool { - return o.snapshotsEnabled -} - -// GetUPFEnabled Returns the UPF mode of the orchestrator -func (o *DedupOrchestrator) GetUPFEnabled() bool { - return false -} - -// DumpUPFPageStats Dumps the memory manager's stats about the number of -// the unique pages and the number of the pages that are reused across invocations -func (o *DedupOrchestrator) DumpUPFPageStats(vmID, functionName, metricsOutFilePath string) error { - return nil -} - -// DumpUPFLatencyStats Dumps the memory manager's latency stats -func (o *DedupOrchestrator) DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error { - return nil -} - -// GetUPFLatencyStats Returns the memory manager's latency stats -func (o *DedupOrchestrator) GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) { - return make([]*metrics.Metric, 0), nil -} - -func (o *DedupOrchestrator) getVMBaseDir(vmID string) string { - return filepath.Join(o.snapshotsDir, vmID) -} - -func (o *DedupOrchestrator) setupHeartbeat() { - heartbeat := time.NewTicker(60 * time.Second) - - go func() { - for { - <-heartbeat.C - log.Info("HEARTBEAT: number of active VMs: ", len(o.vmPool.GetVMMap())) - } // for - }() // go func -} diff --git a/ctriface/deduplicated/orch_options.go b/ctriface/deduplicated/orch_options.go deleted file mode 100644 index 88186b3e8..000000000 --- a/ctriface/deduplicated/orch_options.go +++ /dev/null @@ -1,74 +0,0 @@ -// MIT License -// -// Copyright (c) 2020 Plamen Petrov and EASE lab -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package deduplicated - -// OrchestratorOption Options to pass to DedupOrchestrator -type OrchestratorOption func(*DedupOrchestrator) - -// WithTestModeOn Sets the test mode -func WithTestModeOn(testModeOn bool) OrchestratorOption { - return func(o *DedupOrchestrator) { - if !testModeOn { - o.setupCloseHandler() - o.setupHeartbeat() - } - } -} - -// WithSnapshots Sets the snapshot mode on or off -func WithSnapshots(snapshotsEnabled bool) OrchestratorOption { - return func(o *DedupOrchestrator) { - o.snapshotsEnabled = snapshotsEnabled - } -} - -// WithUPF Sets the user-page faults mode on or off -func WithUPF(isUPFEnabled bool) OrchestratorOption { - return func(o *DedupOrchestrator) { - o.isUPFEnabled = isUPFEnabled - } -} - -// WithSnapshotsDir Sets the directory where -// snapshots should be stored -func WithSnapshotsDir(snapshotsDir string) OrchestratorOption { - return func(o *DedupOrchestrator) { - o.snapshotsDir = snapshotsDir - } -} - -// WithLazyMode Sets the lazy paging mode on (or off), -// where all guest memory pages are brought on demand. 
-// Only works if snapshots are enabled -func WithLazyMode(isLazyMode bool) OrchestratorOption { - return func(o *DedupOrchestrator) { - o.isLazyMode = isLazyMode - } -} - -// WithMetricsMode Sets the metrics mode -func WithMetricsMode(isMetricsMode bool) OrchestratorOption { - return func(o *DedupOrchestrator) { - o.isMetricsMode = isMetricsMode - } -} diff --git a/ctriface/failing_test.go b/ctriface/failing_test.go index 1235198d6..e93bf5821 100644 --- a/ctriface/failing_test.go +++ b/ctriface/failing_test.go @@ -24,7 +24,6 @@ package ctriface import ( "context" - "github.com/ease-lab/vhive/ctriface/regular" "github.com/ease-lab/vhive/snapshotting" "os" "testing" @@ -37,22 +36,6 @@ import ( ) func TestStartSnapStop(t *testing.T) { - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true)), - ) - - vmID := "2" - revisionID := "myrev-2" - - startSnapStop(t, orch, vmID, revisionID) -} - -func startSnapStop(t *testing.T, orch *Orchestrator, vmID, revisionID string) { // BROKEN BECAUSE StopVM does not work yet. 
t.Skip("skipping failing test") log.SetFormatter(&log.TextFormatter{ @@ -66,26 +49,38 @@ func startSnapStop(t *testing.T, orch *Orchestrator, vmID, revisionID string) { log.SetLevel(log.DebugLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() - _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + ) + + vmID := "2" + revisionID := "myrev-2" + + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) + err = orch.CreateSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to create snapshot of VM") - _, _, err = orch.LoadSnapshot(ctx, vmID, snap) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - err = orch.StopSingleVM(ctx, vmID) + err = orch.StopSingleVM(ctx, vmID, false) require.NoError(t, err, "Failed to stop VM") orch.Cleanup() diff --git a/ctriface/regular/iface.go b/ctriface/iface.go similarity index 66% rename from ctriface/regular/iface.go rename to ctriface/iface.go index 7a4da7e13..a06748b61 100644 --- a/ctriface/regular/iface.go +++ b/ctriface/iface.go @@ -20,11 +20,11 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
-package regular +package ctriface import ( "context" - "github.com/ease-lab/vhive/ctriface" + "github.com/ease-lab/vhive/devmapper" "github.com/ease-lab/vhive/snapshotting" "os" "os/exec" @@ -58,8 +58,10 @@ const ( TestImageName = "ghcr.io/ease-lab/helloworld:var_workload" ) +// TODO: isFullLocal param for all functions + // StartVM Boots a VM if it does not exist -func (o *RegOrchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32, trackDirtyPages bool) (_ *ctriface.StartVMResponse, _ *metrics.Metric, retErr error) { +func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib, vCPUCount uint32, trackDirtyPages, isFullLocal bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { var ( startVMMetric *metrics.Metric = metrics.NewMetric() tStart time.Time @@ -103,7 +105,7 @@ func (o *RegOrchestrator) StartVM(ctx context.Context, vmID, imageName string, m // 3. Create VM tStart = time.Now() - conf := o.getVMConfig(vm, trackDirtyPages) + conf := o.getVMConfig(vm, trackDirtyPages, isFullLocal) resp, err := o.fcClient.CreateVM(ctx, conf) startVMMetric.MetricMap[metrics.FcCreateVM] = metrics.ToUS(time.Since(tStart)) if err != nil { @@ -121,11 +123,17 @@ func (o *RegOrchestrator) StartVM(ctx context.Context, vmID, imageName string, m // 4. 
Create container logger.Debug("StartVM: Creating a new container") tStart = time.Now() + + containerId := vmID + if isFullLocal { + containerId = vm.ContainerSnapKey + } + container, err := o.client.NewContainer( ctx, - vmID, + containerId, containerd.WithSnapshotter(o.snapshotter), - containerd.WithNewSnapshot(vmID, *vm.Image), + containerd.WithNewSnapshot(containerId, *vm.Image), containerd.WithNewSpec( oci.WithImageConfig(*vm.Image), firecrackeroci.WithVMID(vmID), @@ -201,39 +209,42 @@ func (o *RegOrchestrator) StartVM(ctx context.Context, vmID, imageName string, m } }() - if err := os.MkdirAll(o.getVMBaseDir(vmID), 0777); err != nil { - logger.Error("Failed to create VM base dir") - return nil, nil, err - } - if o.GetUPFEnabled() { - logger.Debug("Registering VM with the memory manager") - - stateCfg := manager.SnapshotStateCfg{ - VMID: vmID, - GuestMemPath: o.getMemoryFile(vmID), - BaseDir: o.getVMBaseDir(vmID), - GuestMemSize: int(conf.MachineCfg.MemSizeMib) * 1024 * 1024, - IsLazyMode: o.isLazyMode, - VMMStatePath: o.getSnapshotFile(vmID), - WorkingSetPath: o.getWorkingSetFile(vmID), - InstanceSockAddr: resp.UPFSockPath, + if ! 
isFullLocal { + if err := os.MkdirAll(o.getVMBaseDir(vmID), 0777); err != nil { + logger.Error("Failed to create VM base dir") + return nil, nil, err } - if err := o.memoryManager.RegisterVM(stateCfg); err != nil { - return nil, nil, errors.Wrap(err, "failed to register VM with memory manager") - // NOTE (Plamen): Potentially need a defer(DeregisteVM) here if RegisterVM is not last to execute + if o.GetUPFEnabled() { + logger.Debug("Registering VM with the memory manager") + + stateCfg := manager.SnapshotStateCfg{ + VMID: vmID, + GuestMemPath: o.getMemoryFile(vmID), + BaseDir: o.getVMBaseDir(vmID), + GuestMemSize: int(conf.MachineCfg.MemSizeMib) * 1024 * 1024, + IsLazyMode: o.isLazyMode, + VMMStatePath: o.getSnapshotFile(vmID), + WorkingSetPath: o.getWorkingSetFile(vmID), + InstanceSockAddr: resp.UPFSockPath, + } + if err := o.memoryManager.RegisterVM(stateCfg); err != nil { + return nil, nil, errors.Wrap(err, "failed to register VM with memory manager") + // NOTE (Plamen): Potentially need a defer(DeregisteVM) here if RegisterVM is not last to execute + } } } logger.Debug("Successfully started a VM") - return &ctriface.StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, startVMMetric, nil + return &StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, startVMMetric, nil } // StopSingleVM Shuts down a VM // Note: VMs are not quisced before being stopped -func (o *RegOrchestrator) StopSingleVM(ctx context.Context, vmID string) error { +func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string, isFullLocal bool) error { + logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("RegOrchestrator received StopVM") + logger.Debug("Orchestrator received StopVM") ctx = namespaces.WithNamespace(ctx, NamespaceName) vm, err := o.vmPool.GetVM(vmID) @@ -247,32 +258,37 @@ func (o *RegOrchestrator) StopSingleVM(ctx context.Context, vmID string) error { logger = log.WithFields(log.Fields{"vmID": vmID}) - task := *vm.Task - if err := task.Kill(ctx, 
syscall.SIGKILL); err != nil { - logger.WithError(err).Error("Failed to kill the task") - return err - } + // Cleanup and remove container if VM not booted from snapshot + if ! isFullLocal || ! vm.SnapBooted { + task := *vm.Task + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + logger.WithError(err).Error("Failed to kill the task") + return err + } - <-vm.TaskCh - //FIXME: Seems like some tasks need some extra time to die Issue#15, lr_training - time.Sleep(500 * time.Millisecond) + <-vm.TaskCh + //FIXME: Seems like some tasks need some extra time to die Issue#15, lr_training + time.Sleep(500 * time.Millisecond) - if _, err := task.Delete(ctx); err != nil { - logger.WithError(err).Error("failed to delete task") - return err - } + if _, err := task.Delete(ctx); err != nil { + logger.WithError(err).Error("failed to delete task") + return err + } - container := *vm.Container - if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { - logger.WithError(err).Error("failed to delete container") - return err + container := *vm.Container + if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { + logger.WithError(err).Error("failed to delete container") + return err + } } + // Stop VM if _, err := o.fcClient.StopVM(ctx, &proto.StopVMRequest{VMID: vmID}); err != nil { logger.WithError(err).Error("failed to stop firecracker-containerd VM") return err } + // Free VM metadata and clean up network if err := o.vmPool.Free(vmID); err != nil { logger.Error("failed to free VM from VM pool") return err @@ -280,11 +296,23 @@ func (o *RegOrchestrator) StopSingleVM(ctx context.Context, vmID string) error { o.workloadIo.Delete(vmID) + // Cleanup VM devmapper container snapshot if booted from snapshot + if isFullLocal && vm.SnapBooted { + if err := o.devMapper.RemoveDeviceSnapshot(ctx, vm.ContainerSnapKey); err != nil { + logger.Error("failed to deactivate container snapshot") + return err + } + } + logger.Debug("Stopped VM 
successfully") return nil } +func (o *Orchestrator) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { + return o.imageManager.GetImage(ctx, imageName) +} + func getK8sDNS() []string { //using googleDNS as a backup dnsIPs := []string{"8.8.8.8"} @@ -303,7 +331,7 @@ func getK8sDNS() []string { return dnsIPs } -func (o *RegOrchestrator) getVMConfig(vm *misc.VM, trackDirtyPages bool) *proto.CreateVMRequest { +func (o *Orchestrator) getVMConfig(vm *misc.VM, trackDirtyPages, isFullLocal bool) *proto.CreateVMRequest { kernelArgs := "ro noapic reboot=k panic=1 pci=off nomodules systemd.log_color=false systemd.unit=firecracker.target init=/sbin/overlay-init tsc=reliable quiet 8250.nr_uarts=0 ipv6.disable=1" return &proto.CreateVMRequest{ @@ -327,19 +355,19 @@ func (o *RegOrchestrator) getVMConfig(vm *misc.VM, trackDirtyPages bool) *proto. }, }}, NetworkNamespace: vm.NetConfig.GetNamespacePath(), - OffloadEnabled: true, + OffloadEnabled: ! isFullLocal, } } // StopActiveVMs Shuts down all active VMs -func (o *RegOrchestrator) StopActiveVMs() error { +func (o *Orchestrator) StopActiveVMs(isFullLocal bool) error { var vmGroup sync.WaitGroup for vmID, vm := range o.vmPool.GetVMMap() { vmGroup.Add(1) logger := log.WithFields(log.Fields{"vmID": vmID}) go func(vmID string, vm *misc.VM) { defer vmGroup.Done() - err := o.StopSingleVM(context.Background(), vmID) + err := o.StopSingleVM(context.Background(), vmID, isFullLocal) if err != nil { logger.Warn(err) } @@ -359,9 +387,9 @@ func (o *RegOrchestrator) StopActiveVMs() error { } // PauseVM Pauses a VM -func (o *RegOrchestrator) PauseVM(ctx context.Context, vmID string) error { +func (o *Orchestrator) PauseVM(ctx context.Context, vmID string) error { logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("RegOrchestrator received PauseVM") + logger.Debug("Orchestrator received PauseVM") ctx = namespaces.WithNamespace(ctx, NamespaceName) @@ -374,14 +402,14 @@ func (o *RegOrchestrator) PauseVM(ctx 
context.Context, vmID string) error { } // ResumeVM Resumes a VM -func (o *RegOrchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) { +func (o *Orchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) { var ( resumeVMMetric *metrics.Metric = metrics.NewMetric() tStart time.Time ) logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("RegOrchestrator received ResumeVM") + logger.Debug("Orchestrator received ResumeVM") ctx = namespaces.WithNamespace(ctx, NamespaceName) @@ -396,16 +424,25 @@ func (o *RegOrchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.M } // CreateSnapshot Creates a snapshot of a VM -func (o *RegOrchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { +func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot, isFullLocal bool) error { logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("RegOrchestrator received CreateSnapshot") + logger.Debug("Orchestrator received CreateSnapshot") ctx = namespaces.WithNamespace(ctx, NamespaceName) + // 1. Create VM & VM memory state snapshot + snapFilePath := o.getSnapshotFile(vmID) + memFilePath := o.getMemoryFile(vmID) + + if isFullLocal { + snapFilePath = snap.GetSnapFilePath() + memFilePath = snap.GetMemFilePath() + } + req := &proto.CreateSnapshotRequest{ VMID: vmID, - SnapshotFilePath: o.getSnapshotFile(vmID), - MemFilePath: o.getMemoryFile(vmID), + SnapshotFilePath: snapFilePath, + MemFilePath: memFilePath, SnapshotType: snap.GetSnapType(), } @@ -414,11 +451,41 @@ func (o *RegOrchestrator) CreateSnapshot(ctx context.Context, vmID string, snap return err } + // For the non full-local snapshots, no additional steps are necessary + if ! isFullLocal { + return nil + } + + // 2. Get VM metadata + vm, err := o.vmPool.GetVM(vmID) + if err != nil { + return err + } + + // 3. Backup disk state difference. 
+ // 3.B Alternatively could also do ForkContainerSnap(ctx, vm.ContainerSnapKey, snap.GetContainerSnapName(), *vm.Image, forkMetric) + if err := o.devMapper.CreatePatch(ctx, snap.GetPatchFilePath(), vm.ContainerSnapKey, *vm.Image); err != nil { + logger.WithError(err).Error("failed to create container patch file") + return err + } + + // 4. Serialize snapshot info + if err := snap.SerializeSnapInfo(); err != nil { + logger.WithError(err).Error("failed to serialize snapshot info") + return err + } + + // 5. Resume + if _, err := o.fcClient.ResumeVM(ctx, &proto.ResumeVMRequest{VMID: vmID}); err != nil { + log.Printf("failed to resume the VM") + return err + } + return nil } // LoadSnapshot Loads a snapshot of a VM -func (o *RegOrchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *ctriface.StartVMResponse, _ *metrics.Metric, retErr error) { +func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot, isFullLocal bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { var ( loadSnapshotMetric *metrics.Metric = metrics.NewMetric() tStart time.Time @@ -426,13 +493,12 @@ func (o *RegOrchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *s loadDone = make(chan int) ) - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("RegOrchestrator received LoadSnapshot") + logger.Debug("Orchestrator received LoadSnapshot") ctx = namespaces.WithNamespace(ctx, NamespaceName) - // Allocate VM metadata & create vm network + // 1. Allocate VM metadata & create vm network vm, err := o.vmPool.Allocate(vmID) if err != nil { logger.Error("failed to allocate VM in VM pool") @@ -448,13 +514,50 @@ func (o *RegOrchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *s } }() + var containerSnap *devmapper.DeviceSnapshot + if isFullLocal { + // 2. 
Fetch image for VM + if vm.Image, err = o.GetImage(ctx, snap.GetImage()); err != nil { + return nil, nil, errors.Wrapf(err, "Failed to get/pull image") + } + + // 3. Create snapshot for container to run + // 3.B Alternatively could also do CreateDeviceSnapshot(ctx, vm.ContainerSnapKey, snap.GetContainerSnapName()) + if err := o.devMapper.CreateDeviceSnapshotFromImage(ctx, vm.ContainerSnapKey, *vm.Image); err != nil { + return nil, nil, errors.Wrapf(err, "creating container snapshot") + } + + containerSnap, err = o.devMapper.GetDeviceSnapshot(ctx, vm.ContainerSnapKey) + if err != nil { + return nil, nil, errors.Wrapf(err, "previously created container device does not exist") + } + + // 4. Unpack patch into container snapshot + if err := o.devMapper.RestorePatch(ctx, vm.ContainerSnapKey, snap.GetPatchFilePath()); err != nil { + return nil, nil, errors.Wrapf(err, "unpacking patch into container snapshot") + } + } + + // 5. Load VM from snapshot + snapFilePath := o.getSnapshotFile(vmID) + memFilePath := o.getMemoryFile(vmID) + + if isFullLocal { + snapFilePath = snap.GetSnapFilePath() + memFilePath = snap.GetMemFilePath() + } + req := &proto.LoadSnapshotRequest{ VMID: vmID, - SnapshotFilePath: snap.GetSnapFilePath(), - MemFilePath: snap.GetMemFilePath(), + SnapshotFilePath: snapFilePath, + MemFilePath: memFilePath, EnableUserPF: o.GetUPFEnabled(), NetworkNamespace: vm.NetConfig.GetNamespacePath(), - Offloaded: true, + Offloaded: ! isFullLocal, + } + + if isFullLocal { + req.NewSnapshotPath = containerSnap.GetDevicePath() } if o.GetUPFEnabled() { @@ -488,11 +591,17 @@ func (o *RegOrchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *s return nil, nil, multierr } - return &ctriface.StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, nil, nil + vm.SnapBooted = true + + return &StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, nil, nil } // Offload Shuts down the VM but leaves shim and other resources running. 
-func (o *RegOrchestrator) OffloadVM(ctx context.Context, vmID string) error { +func (o *Orchestrator) OffloadVM(ctx context.Context, vmID string, isFullLocal bool) error { + if isFullLocal { + return errors.New("Fully local snapshots do not support offloading") + } + logger := log.WithFields(log.Fields{"vmID": vmID}) logger.Debug("Orchestrator received Offload") @@ -527,13 +636,9 @@ func (o *RegOrchestrator) OffloadVM(ctx context.Context, vmID string) error { return nil } -func (o *RegOrchestrator) CleanupSnapshot(ctx context.Context, revisionID string) error { +func (o *Orchestrator) CleanupSnapshot(ctx context.Context, revisionID string) error { if err := o.devMapper.RemoveDeviceSnapshot(ctx, revisionID); err != nil { return errors.Wrapf(err, "removing revision snapshot") } return nil } - -func (o *RegOrchestrator) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { - return o.imageManager.GetImage(ctx, imageName) -} \ No newline at end of file diff --git a/ctriface/iface_test.go b/ctriface/iface_test.go index 72e464e0e..30fc8098b 100644 --- a/ctriface/iface_test.go +++ b/ctriface/iface_test.go @@ -25,7 +25,6 @@ import ( "context" "flag" "fmt" - "github.com/ease-lab/vhive/ctriface/regular" "github.com/ease-lab/vhive/snapshotting" "os" "sync" @@ -47,24 +46,6 @@ var ( ) func TestPauseSnapResume(t *testing.T) { - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) - - vmID := "4" - revisionID := "myrev-4" - - pauseSnapResume(t, orch, vmID, revisionID) -} - -func pauseSnapResume(t *testing.T, orch *Orchestrator, vmID, revisionID string) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -76,46 +57,43 @@ func pauseSnapResume(t *testing.T, orch *Orchestrator, vmID, revisionID string) log.SetLevel(log.InfoLevel) testTimeout := 120 
* time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() - _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) + + vmID := "4" + revisionID := "myrev-4" + + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) + err = orch.CreateSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to create snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - err = orch.StopSingleVM(ctx, vmID) + err = orch.StopSingleVM(ctx, vmID, false) require.NoError(t, err, "Failed to stop VM") orch.Cleanup() } func TestStartStopSerial(t *testing.T) { - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "fc-dev-thinpool", - "", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) - - vmID := "5" - - startStopSerial(t, orch, vmID) -} - -func startStopSerial(t *testing.T, orch *Orchestrator, vmID string) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -127,36 +105,32 @@ func startStopSerial(t *testing.T, orch *Orchestrator, vmID string) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := 
context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() - _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) - require.NoError(t, err, "Failed to start VM") - - err = orch.StopSingleVM(ctx, vmID) - require.NoError(t, err, "Failed to stop VM") - - orch.Cleanup() -} - -func TestPauseResumeSerial(t *testing.T) { - orch := NewOrchestrator(regular.NewRegOrchestrator( + orch := NewOrchestrator( "devmapper", "fc-dev-thinpool", "", "", 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) - vmID := "6" + vmID := "5" + + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + require.NoError(t, err, "Failed to start VM") + + err = orch.StopSingleVM(ctx, vmID, false) + require.NoError(t, err, "Failed to stop VM") - pauseResumeSerial(t, orch, vmID) + orch.Cleanup() } -func pauseResumeSerial(t *testing.T, orch *Orchestrator, vmID string) { +func TestPauseResumeSerial(t *testing.T) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -168,10 +142,23 @@ func pauseResumeSerial(t *testing.T, orch *Orchestrator, vmID string) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() - _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + orch := NewOrchestrator( + "devmapper", + "fc-dev-thinpool", + "", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) + + vmID 
:= "6" + + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) @@ -180,29 +167,13 @@ func pauseResumeSerial(t *testing.T, orch *Orchestrator, vmID string) { _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - err = orch.StopSingleVM(ctx, vmID) + err = orch.StopSingleVM(ctx, vmID, false) require.NoError(t, err, "Failed to stop VM") orch.Cleanup() } func TestStartStopParallel(t *testing.T) { - vmNum := 10 - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) - - startStopParallel(t, orch, vmNum) -} - -func startStopParallel(t *testing.T, orch *Orchestrator, vmNum int) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -214,12 +185,24 @@ func startStopParallel(t *testing.T, orch *Orchestrator, vmNum int) { log.SetLevel(log.InfoLevel) testTimeout := 360 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() + vmNum := 10 + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) + // Pull image - _, err := orch.GetImage(ctx, regular.TestImageName) - require.NoError(t, err, "Failed to pull image "+regular.TestImageName) + _, err := orch.GetImage(ctx, TestImageName) + require.NoError(t, err, "Failed to pull image "+TestImageName) { var vmGroup sync.WaitGroup @@ -228,7 +211,7 @@ func startStopParallel(t *testing.T, orch *Orchestrator, vmNum int) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, _, err 
:= orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM "+vmID) }(i) } @@ -242,7 +225,7 @@ func startStopParallel(t *testing.T, orch *Orchestrator, vmNum int) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - err := orch.StopSingleVM(ctx, vmID) + err := orch.StopSingleVM(ctx, vmID, false) require.NoError(t, err, "Failed to stop VM "+vmID) }(i) } @@ -253,22 +236,6 @@ func startStopParallel(t *testing.T, orch *Orchestrator, vmNum int) { } func TestPauseResumeParallel(t *testing.T) { - vmNum := 10 - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) - - pauseResumeParallel(t, orch, vmNum) -} - -func pauseResumeParallel(t *testing.T, orch *Orchestrator, vmNum int) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, FullTimestamp: true, @@ -280,12 +247,24 @@ func pauseResumeParallel(t *testing.T, orch *Orchestrator, vmNum int) { log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() + vmNum := 10 + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) + // Pull image - _, err := orch.GetImage(ctx, regular.TestImageName) - require.NoError(t, err, "Failed to pull image "+regular.TestImageName) + _, err := orch.GetImage(ctx, TestImageName) + require.NoError(t, err, "Failed to pull image "+TestImageName) { var vmGroup sync.WaitGroup @@ -294,7 +273,7 @@ func pauseResumeParallel(t 
*testing.T, orch *Orchestrator, vmNum int) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM") }(i) } @@ -336,7 +315,7 @@ func pauseResumeParallel(t *testing.T, orch *Orchestrator, vmNum int) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - err := orch.StopSingleVM(ctx, vmID) + err := orch.StopSingleVM(ctx, vmID, false) require.NoError(t, err, "Failed to stop VM") }(i) } diff --git a/ctriface/manual_cleanup_test.go b/ctriface/manual_cleanup_test.go index 752c9e0ed..943c4b81b 100644 --- a/ctriface/manual_cleanup_test.go +++ b/ctriface/manual_cleanup_test.go @@ -25,7 +25,6 @@ package ctriface import ( "context" "fmt" - "github.com/ease-lab/vhive/ctriface/regular" "github.com/ease-lab/vhive/snapshotting" "os" "sync" @@ -39,24 +38,6 @@ import ( ) func TestSnapLoad(t *testing.T) { - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) - - vmID := "1" - revisionID := "myrev-1" - - snapLoad(t, orch, vmID, revisionID) -} - -func snapLoad(t *testing.T, orch *Orchestrator, vmID string, revisionID string) { // Need to clean up manually after this test because StopVM does not // work for stopping machines which are loaded from snapshots yet log.SetFormatter(&log.TextFormatter{ @@ -70,23 +51,37 @@ func snapLoad(t *testing.T, orch *Orchestrator, vmID string, revisionID string) log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() - _, 
_, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) + + vmID := "1" + revisionID := "myrev-1" + + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) + err = orch.CreateSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to create snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - _, _, err = orch.LoadSnapshot(ctx, vmID, snap) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) @@ -96,24 +91,6 @@ func snapLoad(t *testing.T, orch *Orchestrator, vmID string, revisionID string) } func TestSnapLoadMultiple(t *testing.T) { - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) - - vmID := "3" - revisionID := "myrev-3" - - snapLoadMultiple(t, orch, vmID, revisionID) -} - -func snapLoadMultiple(t *testing.T, orch *Orchestrator, vmID string, revisionID string) { // Needs to be cleaned up manually. 
log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, @@ -126,26 +103,40 @@ func snapLoadMultiple(t *testing.T, orch *Orchestrator, vmID string, revisionID log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() - _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) + + vmID := "3" + revisionID := "myrev-3" + + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0,false) - err = orch.CreateSnapshot(ctx, vmID, snap) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0,false) + err = orch.CreateSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to create snapshot of VM") - _, _, err = orch.LoadSnapshot(ctx, vmID, snap) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - _, _, err = orch.LoadSnapshot(ctx, vmID, snap) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) @@ -155,24 +146,6 @@ func snapLoadMultiple(t *testing.T, orch *Orchestrator, vmID string, revisionID } func TestParallelSnapLoad(t *testing.T) { - vmNum := 5 - vmIDBase := 6 - - orch := NewOrchestrator(regular.NewRegOrchestrator( - 
"devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) - - parallelSnapLoad(t, orch, vmNum, vmIDBase) -} - -func parallelSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBase int) { // Needs to be cleaned up manually. log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, @@ -185,12 +158,26 @@ func parallelSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBase int) log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() + vmNum := 5 + vmIDBase := 6 + + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) + // Pull image - _, err := orch.GetImage(ctx, regular.TestImageName) - require.NoError(t, err, "Failed to pull image "+regular.TestImageName) + _, err := orch.GetImage(ctx, TestImageName) + require.NoError(t, err, "Failed to pull image "+TestImageName) var vmGroup sync.WaitGroup for i := 0; i < vmNum; i++ { @@ -200,17 +187,17 @@ func parallelSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBase int) vmID := fmt.Sprintf("%d", i+vmIDBase) revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM, "+vmID) err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM, "+vmID) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap) + snap := 
snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) + err = orch.CreateSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to create snapshot of VM, "+vmID) - _, _, err = orch.LoadSnapshot(ctx, vmID, snap) + _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to load snapshot of VM, "+vmID) _, err = orch.ResumeVM(ctx, vmID) @@ -223,24 +210,6 @@ func parallelSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBase int) } func TestParallelPhasedSnapLoad(t *testing.T) { - vmNum := 10 - vmIDBase := 11 - - orch := NewOrchestrator(regular.NewRegOrchestrator( - "devmapper", - "", - "fc-dev-thinpool", - "", - 10, - regular.WithTestModeOn(true), - regular.WithUPF(*isUPFEnabled), - regular.WithLazyMode(*isLazyMode), - )) - - parallelPhasedSnapLoad(t, orch, vmNum, vmIDBase) -} - -func parallelPhasedSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBase int) { // Needs to be cleaned up manually. log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, @@ -253,12 +222,26 @@ func parallelPhasedSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBas log.SetLevel(log.InfoLevel) testTimeout := 120 * time.Second - ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout) + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() + vmNum := 10 + vmIDBase := 11 + + orch := NewOrchestrator( + "devmapper", + "", + "fc-dev-thinpool", + "", + 10, + WithTestModeOn(true), + WithUPF(*isUPFEnabled), + WithLazyMode(*isLazyMode), + ) + // Pull image - _, err := orch.GetImage(ctx, regular.TestImageName) - require.NoError(t, err, "Failed to pull image "+regular.TestImageName) + _, err := orch.GetImage(ctx, TestImageName) + require.NoError(t, err, "Failed to pull image "+TestImageName) { var vmGroup sync.WaitGroup @@ -267,7 +250,7 @@ func 
parallelPhasedSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBas go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, regular.TestImageName, 256, 1, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) require.NoError(t, err, "Failed to start VM, "+vmID) }(i) } @@ -296,8 +279,8 @@ func parallelPhasedSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBas defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) + err = orch.CreateSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to create snapshot of VM, "+vmID) }(i) } @@ -312,8 +295,8 @@ func parallelPhasedSnapLoad(t *testing.T, orch *Orchestrator, vmNum int, vmIDBas defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", regular.TestImageName, 0, 0, false) - _, _, err := orch.LoadSnapshot(ctx, vmID, snap) + snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) + _, _, err := orch.LoadSnapshot(ctx, vmID, snap, false) require.NoError(t, err, "Failed to load snapshot of VM, "+vmID) }(i) } diff --git a/ctriface/orch.go b/ctriface/orch.go index 3e42a5742..28cce84ed 100644 --- a/ctriface/orch.go +++ b/ctriface/orch.go @@ -1,6 +1,6 @@ // MIT License // -// Copyright (c) 2021 Amory Hoste and EASE lab +// Copyright (c) 2020 Plamen Petrov and EASE lab // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,85 +23,207 @@ package ctriface import ( - 
"context" + "github.com/ease-lab/vhive/ctrimages" + "github.com/ease-lab/vhive/devmapper" + "github.com/ease-lab/vhive/memory/manager" + "os" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + log "github.com/sirupsen/logrus" + "github.com/containerd/containerd" + + fcclient "github.com/firecracker-microvm/firecracker-containerd/firecracker-control/client" + // note: from the original repo + + _ "google.golang.org/grpc/codes" //tmp + _ "google.golang.org/grpc/status" //tmp + "github.com/ease-lab/vhive/metrics" - "github.com/ease-lab/vhive/snapshotting" -) + "github.com/ease-lab/vhive/misc" -type Orchestrator struct { - // generic snapshot manager - orch OrchestratorInterface -} + _ "github.com/davecgh/go-spew/spew" //tmp +) -func NewOrchestrator(orch OrchestratorInterface) *Orchestrator { - o := &Orchestrator{ - orch: orch, - } +const ( + containerdAddress = "/run/firecracker-containerd/containerd.sock" + containerdTTRPCAddress = containerdAddress + ".ttrpc" + NamespaceName = "firecracker-containerd" +) - return o +type WorkloadIoWriter struct { + logger *log.Entry } -func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib, vCPUCount uint32, trackDirtyPages bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { - return o.orch.StartVM(ctx, vmID, imageName, memSizeMib, vCPUCount, trackDirtyPages) +func NewWorkloadIoWriter(vmID string) WorkloadIoWriter { + return WorkloadIoWriter {log.WithFields(log.Fields{"vmID": vmID})} } -func (o *Orchestrator) OffloadVM(ctx context.Context, vmID string) error { - return o.orch.OffloadVM(ctx, vmID) +func (wio WorkloadIoWriter) Write(p []byte) (n int, err error) { + s := string(p) + lines := strings.Split(s, "\n") + for i := range lines { + wio.logger.Info(string(lines[i])) + } + return len(p), nil } -func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string) error { - return o.orch.StopSingleVM(ctx, vmID) -} +// Orchestrator Drives all VMs +type 
Orchestrator struct { + vmPool *misc.VMPool + workloadIo sync.Map // vmID string -> WorkloadIoWriter + snapshotter string + client *containerd.Client + fcClient *fcclient.Client + devMapper *devmapper.DeviceMapper + imageManager *ctrimages.ImageManager + // store *skv.KVStore + snapshotsEnabled bool + isUPFEnabled bool + isLazyMode bool + snapshotsDir string + isMetricsMode bool + hostIface string + + memoryManager *manager.MemoryManager +} + +// NewOrchestrator Initializes a new orchestrator +func NewOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPoolSize int, opts ...OrchestratorOption) *Orchestrator { + var err error + + o := new(Orchestrator) + o.vmPool = misc.NewVMPool(hostIface, netPoolSize) + o.snapshotter = snapshotter + o.snapshotsDir = "/fccd/snapshots" + o.hostIface = hostIface + + for _, opt := range opts { + opt(o) + } -func (o *Orchestrator) StopActiveVMs() error { - return o.orch.StopActiveVMs() -} + if _, err := os.Stat(o.snapshotsDir); err != nil { + if !os.IsNotExist(err) { + log.Panicf("Snapshot dir %s exists", o.snapshotsDir) + } + } -func (o *Orchestrator) PauseVM(ctx context.Context, vmID string) error { - return o.orch.PauseVM(ctx, vmID) -} + if err := os.MkdirAll(o.snapshotsDir, 0777); err != nil { + log.Panicf("Failed to create snapshots dir %s", o.snapshotsDir) + } -func (o *Orchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) { - return o.orch.ResumeVM(ctx, vmID) -} + if o.GetUPFEnabled() { + managerCfg := manager.MemoryManagerCfg{ + MetricsModeOn: o.isMetricsMode, + } + o.memoryManager = manager.NewMemoryManager(managerCfg) + } -func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { - return o.orch.CreateSnapshot(ctx, vmID, snap) -} + log.Info("Creating containerd client") + o.client, err = containerd.New(containerdAddress) + if err != nil { + log.Fatal("Failed to start containerd client", err) + } + log.Info("Created containerd 
client") -func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { - return o.orch.LoadSnapshot(ctx, vmID, snap) -} + log.Info("Creating firecracker client") + o.fcClient, err = fcclient.New(containerdTTRPCAddress) + if err != nil { + log.Fatal("Failed to start firecracker client", err) + } + log.Info("Created firecracker client") -func (o *Orchestrator) CleanupSnapshot(ctx context.Context, id string) error { - return o.orch.CleanupSnapshot(ctx, id) + o.devMapper = devmapper.NewDeviceMapper(o.client, poolName, metadataDev) + + o.imageManager = ctrimages.NewImageManager(o.client, o.snapshotter) + + return o } -func (o *Orchestrator) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { - return o.orch.GetImage(ctx, imageName) +func (o *Orchestrator) setupCloseHandler() { + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + log.Info("\r- Ctrl+C pressed in Terminal") + _ = o.StopActiveVMs(false) + o.Cleanup() + os.Exit(0) + }() } +// Cleanup Removes the bridges created by the VM pool's tap manager +// Cleans up snapshots directory func (o *Orchestrator) Cleanup() { - o.orch.Cleanup() + o.vmPool.CleanupNetwork() + if err := os.RemoveAll(o.snapshotsDir); err != nil { + log.Panic("failed to delete snapshots dir", err) + } } +// GetSnapshotsEnabled Returns the snapshots mode of the orchestrator func (o *Orchestrator) GetSnapshotsEnabled() bool { - return o.orch.GetSnapshotsEnabled() + return o.snapshotsEnabled } +// GetUPFEnabled Returns the UPF mode of the orchestrator func (o *Orchestrator) GetUPFEnabled() bool { - return o.orch.GetUPFEnabled() + return o.isUPFEnabled } +// DumpUPFPageStats Dumps the memory manager's stats about the number of +// the unique pages and the number of the pages that are reused across invocations func (o *Orchestrator) DumpUPFPageStats(vmID, functionName, metricsOutFilePath 
string) error { - return o.orch.DumpUPFPageStats(vmID, functionName, metricsOutFilePath) + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("Orchestrator received DumpUPFPageStats") + + return o.memoryManager.DumpUPFPageStats(vmID, functionName, metricsOutFilePath) } +// DumpUPFLatencyStats Dumps the memory manager's latency stats func (o *Orchestrator) DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error { - return o.orch.DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath) + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("Orchestrator received DumpUPFPageStats") + + return o.memoryManager.DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath) } +// GetUPFLatencyStats Returns the memory manager's latency stats func (o *Orchestrator) GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) { - return o.orch.GetUPFLatencyStats(vmID) -} \ No newline at end of file + logger := log.WithFields(log.Fields{"vmID": vmID}) + logger.Debug("Orchestrator received DumpUPFPageStats") + + return o.memoryManager.GetUPFLatencyStats(vmID) +} + +func (o *Orchestrator) getSnapshotFile(vmID string) string { + return filepath.Join(o.getVMBaseDir(vmID), "snap_file") +} + +func (o *Orchestrator) getMemoryFile(vmID string) string { + return filepath.Join(o.getVMBaseDir(vmID), "mem_file") +} + +func (o *Orchestrator) getWorkingSetFile(vmID string) string { + return filepath.Join(o.getVMBaseDir(vmID), "working_set_pages") +} + +func (o *Orchestrator) getVMBaseDir(vmID string) string { + return filepath.Join(o.snapshotsDir, vmID) +} + +func (o *Orchestrator) setupHeartbeat() { + heartbeat := time.NewTicker(60 * time.Second) + + go func() { + for { + <-heartbeat.C + log.Info("HEARTBEAT: number of active VMs: ", len(o.vmPool.GetVMMap())) + } // for + }() // go func +} diff --git a/ctriface/regular/orch_options.go b/ctriface/orch_options.go similarity index 82% rename from ctriface/regular/orch_options.go rename to 
ctriface/orch_options.go index fdaaf2c6b..6ac641a0c 100644 --- a/ctriface/regular/orch_options.go +++ b/ctriface/orch_options.go @@ -20,14 +20,14 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package regular +package ctriface // OrchestratorOption Options to pass to Orchestrator -type OrchestratorOption func(*RegOrchestrator) +type OrchestratorOption func(*Orchestrator) // WithTestModeOn Sets the test mode func WithTestModeOn(testModeOn bool) OrchestratorOption { - return func(o *RegOrchestrator) { + return func(o *Orchestrator) { if !testModeOn { o.setupCloseHandler() o.setupHeartbeat() @@ -37,14 +37,14 @@ func WithTestModeOn(testModeOn bool) OrchestratorOption { // WithSnapshots Sets the snapshot mode on or off func WithSnapshots(snapshotsEnabled bool) OrchestratorOption { - return func(o *RegOrchestrator) { + return func(o *Orchestrator) { o.snapshotsEnabled = snapshotsEnabled } } // WithUPF Sets the user-page faults mode on or off func WithUPF(isUPFEnabled bool) OrchestratorOption { - return func(o *RegOrchestrator) { + return func(o *Orchestrator) { o.isUPFEnabled = isUPFEnabled } } @@ -52,7 +52,7 @@ func WithUPF(isUPFEnabled bool) OrchestratorOption { // WithSnapshotsDir Sets the directory where // snapshots should be stored func WithSnapshotsDir(snapshotsDir string) OrchestratorOption { - return func(o *RegOrchestrator) { + return func(o *Orchestrator) { o.snapshotsDir = snapshotsDir } } @@ -61,14 +61,22 @@ func WithSnapshotsDir(snapshotsDir string) OrchestratorOption { // where all guest memory pages are brought on demand. 
// Only works if snapshots are enabled func WithLazyMode(isLazyMode bool) OrchestratorOption { - return func(o *RegOrchestrator) { + return func(o *Orchestrator) { o.isLazyMode = isLazyMode } } // WithMetricsMode Sets the metrics mode func WithMetricsMode(isMetricsMode bool) OrchestratorOption { - return func(o *RegOrchestrator) { + return func(o *Orchestrator) { o.isMetricsMode = isMetricsMode } } + +// WithCustomHostIface Sets the custom host net interface +// for the VMs to link to +func WithCustomHostIface(hostIface string) OrchestratorOption { + return func(o *Orchestrator) { + o.hostIface = hostIface + } +} \ No newline at end of file diff --git a/ctriface/regular/orch.go b/ctriface/regular/orch.go deleted file mode 100644 index 5d35be8ec..000000000 --- a/ctriface/regular/orch.go +++ /dev/null @@ -1,229 +0,0 @@ -// MIT License -// -// Copyright (c) 2020 Plamen Petrov and EASE lab -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
- -package regular - -import ( - "github.com/ease-lab/vhive/ctrimages" - "github.com/ease-lab/vhive/devmapper" - "os" - "os/signal" - "path/filepath" - "strings" - "sync" - "syscall" - "time" - - log "github.com/sirupsen/logrus" - - "github.com/containerd/containerd" - - fcclient "github.com/firecracker-microvm/firecracker-containerd/firecracker-control/client" - // note: from the original repo - - _ "google.golang.org/grpc/codes" //tmp - _ "google.golang.org/grpc/status" //tmp - - "github.com/ease-lab/vhive/memory/manager" - "github.com/ease-lab/vhive/metrics" - "github.com/ease-lab/vhive/misc" - - _ "github.com/davecgh/go-spew/spew" //tmp -) - -const ( - containerdAddress = "/run/firecracker-containerd/containerd.sock" - containerdTTRPCAddress = containerdAddress + ".ttrpc" - NamespaceName = "firecracker-containerd" -) - -type WorkloadIoWriter struct { - logger *log.Entry -} - -func NewWorkloadIoWriter(vmID string) WorkloadIoWriter { - return WorkloadIoWriter{log.WithFields(log.Fields{"vmID": vmID})} -} - -func (wio WorkloadIoWriter) Write(p []byte) (n int, err error) { - s := string(p) - lines := strings.Split(s, "\n") - for i := range lines { - wio.logger.Info(string(lines[i])) - } - return len(p), nil -} - -// RegOrchestrator Drives all VMs -type RegOrchestrator struct { - vmPool *misc.VMPool - workloadIo sync.Map // vmID string -> WorkloadIoWriter - snapshotter string - client *containerd.Client - fcClient *fcclient.Client - devMapper *devmapper.DeviceMapper - imageManager *ctrimages.ImageManager - // store *skv.KVStore - snapshotsEnabled bool - isUPFEnabled bool - isLazyMode bool - snapshotsDir string - isMetricsMode bool - hostIface string - - memoryManager *manager.MemoryManager -} - -// NewDedupOrchestrator Initializes a new orchestrator -func NewRegOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPoolSize int, opts ...OrchestratorOption) *RegOrchestrator { // TODO: args - var err error - - o := new(RegOrchestrator) - o.vmPool = 
misc.NewVMPool(hostIface, netPoolSize) - o.snapshotter = snapshotter - o.snapshotsDir = "/fccd/snapshots" - o.hostIface = hostIface - - for _, opt := range opts { - opt(o) - } - - if _, err := os.Stat(o.snapshotsDir); err != nil { - if !os.IsNotExist(err) { - log.Panicf("Snapshot dir %s exists", o.snapshotsDir) - } - } - - if err := os.MkdirAll(o.snapshotsDir, 0777); err != nil { - log.Panicf("Failed to create snapshots dir %s", o.snapshotsDir) - } - - if o.GetUPFEnabled() { - managerCfg := manager.MemoryManagerCfg{ - MetricsModeOn: o.isMetricsMode, - } - o.memoryManager = manager.NewMemoryManager(managerCfg) - } - - log.Info("Creating containerd client") - o.client, err = containerd.New(containerdAddress) - if err != nil { - log.Fatal("Failed to start containerd client", err) - } - log.Info("Created containerd client") - - log.Info("Creating firecracker client") - o.fcClient, err = fcclient.New(containerdTTRPCAddress) - if err != nil { - log.Fatal("Failed to start firecracker client", err) - } - log.Info("Created firecracker client") - - o.devMapper = devmapper.NewDeviceMapper(o.client, poolName, metadataDev) - - o.imageManager = ctrimages.NewImageManager(o.client, o.snapshotter) - - return o -} - -func (o *RegOrchestrator) setupCloseHandler() { - c := make(chan os.Signal, 2) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - log.Info("\r- Ctrl+C pressed in Terminal") - _ = o.StopActiveVMs() - o.Cleanup() - os.Exit(0) - }() -} - -// Cleanup Removes the bridges created by the VM pool's tap manager -// Cleans up snapshots directory -func (o *RegOrchestrator) Cleanup() { - o.vmPool.CleanupNetwork() - if err := os.RemoveAll(o.snapshotsDir); err != nil { - log.Panic("failed to delete snapshots dir", err) - } -} - -// GetSnapshotsEnabled Returns the snapshots mode of the orchestrator -func (o *RegOrchestrator) GetSnapshotsEnabled() bool { - return o.snapshotsEnabled -} - -// GetUPFEnabled Returns the UPF mode of the orchestrator -func (o 
*RegOrchestrator) GetUPFEnabled() bool { - return o.isUPFEnabled -} - -// DumpUPFPageStats Dumps the memory manager's stats about the number of -// the unique pages and the number of the pages that are reused across invocations -func (o *RegOrchestrator) DumpUPFPageStats(vmID, functionName, metricsOutFilePath string) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("RegOrchestrator received DumpUPFPageStats") - - return o.memoryManager.DumpUPFPageStats(vmID, functionName, metricsOutFilePath) -} - -// DumpUPFLatencyStats Dumps the memory manager's latency stats -func (o *RegOrchestrator) DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("RegOrchestrator received DumpUPFPageStats") - - return o.memoryManager.DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath) -} - -// GetUPFLatencyStats Returns the memory manager's latency stats -func (o *RegOrchestrator) GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) { - logger := log.WithFields(log.Fields{"vmID": vmID}) - logger.Debug("RegOrchestrator received DumpUPFPageStats") - - return o.memoryManager.GetUPFLatencyStats(vmID) -} - -func (o *RegOrchestrator) getSnapshotFile(vmID string) string { - return filepath.Join(o.getVMBaseDir(vmID), "snap_file") -} - -func (o *RegOrchestrator) getMemoryFile(vmID string) string { - return filepath.Join(o.getVMBaseDir(vmID), "mem_file") -} - -func (o *RegOrchestrator) getWorkingSetFile(vmID string) string { - return filepath.Join(o.getVMBaseDir(vmID), "working_set_pages") -} - -func (o *RegOrchestrator) getVMBaseDir(vmID string) string { - return filepath.Join(o.snapshotsDir, vmID) -} - -func (o *RegOrchestrator) setupHeartbeat() { - heartbeat := time.NewTicker(60 * time.Second) - - go func() { - for { - <-heartbeat.C - log.Info("HEARTBEAT: number of active VMs: ", len(o.vmPool.GetVMMap())) - } // for - }() // go func -} diff --git a/functions.go 
b/functions.go index 5dc734421..951990e71 100644 --- a/functions.go +++ b/functions.go @@ -357,7 +357,7 @@ func (f *Function) AddInstance() *metrics.Metric { if f.isSnapshotReady { metr = f.LoadInstance() } else { - resp, _, err := orch.StartVM(ctx, f.getVMID(), f.imageName, 256, 1, false) + resp, _, err := orch.StartVM(ctx, f.getVMID(), f.imageName, 256, 1, false, false) if err != nil { log.Panic(err) } @@ -388,7 +388,7 @@ func (f *Function) RemoveInstanceAsync() { logger.Debug("Removing instance (async)") go func() { - err := orch.StopSingleVM(context.Background(), f.vmID) + err := orch.StopSingleVM(context.Background(), f.vmID, false) if err != nil { log.Warn(err) } @@ -411,9 +411,12 @@ func (f *Function) RemoveInstance(isSync bool) (string, error) { f.OnceAddInstance = new(sync.Once) - if ! orch.GetSnapshotsEnabled() { + if orch.GetSnapshotsEnabled() { + f.OffloadInstance() + r = "Successfully offloaded instance " + f.vmID + } else { if isSync { - err = orch.StopSingleVM(context.Background(), f.vmID) + err = orch.StopSingleVM(context.Background(), f.vmID, false) } else { f.RemoveInstanceAsync() r = "Successfully removed (async) instance " + f.vmID @@ -450,7 +453,7 @@ func (f *Function) CreateInstanceSnapshot() { revisionID := fmt.Sprintf("myrev-%s", f.vmID) snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, f.vmID, snap) + err = orch.CreateSnapshot(ctx, f.vmID, snap, false) if err != nil { log.Panic(err) } @@ -461,6 +464,22 @@ func (f *Function) CreateInstanceSnapshot() { } } +// OffloadInstance Offloads the instance +func (f *Function) OffloadInstance() { + logger := log.WithFields(log.Fields{"fID": f.fID}) + + logger.Debug("Offloading instance") + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer cancel() + + err := orch.OffloadVM(ctx, f.vmID, false) + if err != nil { + log.Panic(err) + } + f.conn.Close() +} + // LoadInstance Loads a new instance of the 
function from its snapshot and resumes it // The tap, the shim and the vmID remain the same func (f *Function) LoadInstance() *metrics.Metric { @@ -473,7 +492,7 @@ func (f *Function) LoadInstance() *metrics.Metric { revisionID := fmt.Sprintf("myrev-%s", f.vmID) snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, false) - _, loadMetr, err := orch.LoadSnapshot(ctx, f.vmID, snap) + _, loadMetr, err := orch.LoadSnapshot(ctx, f.vmID, snap, false) if err != nil { log.Panic(err) } diff --git a/metrics/metrics.go b/metrics/metrics.go index c31e8d7d6..f4fd4f848 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -190,7 +190,7 @@ func ToUS(dur time.Duration) float64 { return float64(dur.Microseconds()) } -// ToUS Converts Duration to milliseconds +// ToMs Converts Duration to milliseconds func ToMs(dur time.Duration) int64 { return int64(dur.Milliseconds()) } diff --git a/misc/vm_pool.go b/misc/vm_pool.go index b83696601..8cccfa122 100644 --- a/misc/vm_pool.go +++ b/misc/vm_pool.go @@ -110,5 +110,5 @@ func (p *VMPool) GetVM(vmID string) (*VM, error) { // CleanupNetwork removes and deallocates all network configurations func (p *VMPool) CleanupNetwork() { - _ = p.networkManager.Cleanup() + p.networkManager.Cleanup() } diff --git a/scripts/install_pmutools.sh b/scripts/install_pmutools.sh index 8fc03537d..62e2061af 100755 --- a/scripts/install_pmutools.sh +++ b/scripts/install_pmutools.sh @@ -32,4 +32,4 @@ sudo git clone https://github.com/ease-lab/pmu-tools -b master /usr/local/pmu-to sudo sysctl -w kernel.perf_event_paranoid=-1 # first run, download essential files -#/usr/local/pmu-tools/toplev --print > /dev/null +/usr/local/pmu-tools/toplev --print > /dev/null diff --git a/vhive.go b/vhive.go index cb48851e9..3bc129139 100644 --- a/vhive.go +++ b/vhive.go @@ -27,8 +27,6 @@ import ( "flag" "fmt" "github.com/ease-lab/vhive/ctriface" - "github.com/ease-lab/vhive/ctriface/deduplicated" - "github.com/ease-lab/vhive/ctriface/regular" 
"math/rand" "net" @@ -60,7 +58,7 @@ var ( isSaveMemory *bool snapsCapacityMiB *int64 isSparseSnaps *bool - isDeduplicatedSnaps *bool + isFullLocal *bool isSnapshotsEnabled *bool isUPFEnabled *bool isLazyMode *bool @@ -90,7 +88,7 @@ func main() { // Snapshotting isSnapshotsEnabled = flag.Bool("snapshots", false, "Use VM snapshots when adding function instances") isSparseSnaps = flag.Bool("sparsesnaps", false, "Makes memory files sparse after storing to reduce disk utilization") - isDeduplicatedSnaps = flag.Bool("deduplicatedsnaps", false, "Use improved deduplicated snapshotting") + isFullLocal = flag.Bool("fulllocal", false, "Use improved full local snapshotting") snapsCapacityMiB = flag.Int64("snapcapacity", 102400, "Capacity set aside for storing snapshots (Mib)") isUPFEnabled = flag.Bool("upf", false, "Enable user-level page faults guest memory management") isLazyMode = flag.Bool("lazy", false, "Enable lazy serving mode when UPFs are enabled") @@ -128,6 +126,11 @@ func main() { return } + if *isUPFEnabled && *isFullLocal { + log.Error("UPF is not supported for full local snapshots") + return + } + if flog, err = os.Create("/tmp/fccd.log"); err != nil { panic(err) } @@ -154,36 +157,18 @@ func main() { testModeOn := false - if *isDeduplicatedSnaps { - orch = ctriface.NewOrchestrator(deduplicated.NewDedupOrchestrator( - *snapshotter, - *hostIface, - *poolName, - *metadataDev, - *netPoolSize, - deduplicated.WithTestModeOn(testModeOn), - deduplicated.WithSnapshots(*isSnapshotsEnabled), - deduplicated.WithUPF(*isUPFEnabled), - deduplicated.WithMetricsMode(*isMetricsMode), - deduplicated.WithLazyMode(*isLazyMode), - )) - } else { - orch = ctriface.NewOrchestrator(regular.NewRegOrchestrator( - *snapshotter, - *hostIface, - *poolName, - *metadataDev, - *netPoolSize, - regular.WithTestModeOn(testModeOn), - regular.WithSnapshots(*isSnapshotsEnabled), - regular.WithUPF(*isUPFEnabled), - regular.WithMetricsMode(*isMetricsMode), - regular.WithLazyMode(*isLazyMode), - )) - } - 
- - + orch = ctriface.NewOrchestrator( + *snapshotter, + *hostIface, + *poolName, + *metadataDev, + *netPoolSize, + ctriface.WithTestModeOn(testModeOn), + ctriface.WithSnapshots(*isSnapshotsEnabled), + ctriface.WithUPF(*isUPFEnabled), + ctriface.WithMetricsMode(*isMetricsMode), + ctriface.WithLazyMode(*isLazyMode), + ) funcPool = NewFuncPool(*isSaveMemory, *servedThreshold, *pinnedFuncNum, testModeOn) @@ -208,7 +193,7 @@ func setupFirecrackerCRI() { s := grpc.NewServer() - fcService, err := fccri.NewFirecrackerService(orch, *snapsCapacityMiB, *isSparseSnaps, *isDeduplicatedSnaps) + fcService, err := fccri.NewFirecrackerService(orch, *snapsCapacityMiB, *isSparseSnaps, *isFullLocal) if err != nil { log.Fatalf("failed to create firecracker service %v", err) } @@ -285,7 +270,7 @@ func (s *server) StopSingleVM(ctx context.Context, in *pb.StopSingleVMReq) (*pb. // Note: this function is to be used only before tearing down the whole orchestrator func (s *server) StopVMs(ctx context.Context, in *pb.StopVMsReq) (*pb.Status, error) { log.Info("Received StopVMs") - err := orch.StopActiveVMs() + err := orch.StopActiveVMs(*isFullLocal) if err != nil { log.Printf("Failed to stop VMs, err: %v\n", err) return &pb.Status{Message: "Failed to stop VMs"}, err diff --git a/vhive_test.go b/vhive_test.go index d5b2e9224..d1c74c9c5 100644 --- a/vhive_test.go +++ b/vhive_test.go @@ -26,7 +26,6 @@ import ( "context" "flag" "github.com/ease-lab/vhive/ctriface" - "github.com/ease-lab/vhive/ctriface/regular" "os" "strconv" "sync" @@ -47,6 +46,7 @@ var ( isSnapshotsEnabledTest = flag.Bool("snapshotsTest", false, "Use VM snapshots when adding function instances") isMetricsModeTest = flag.Bool("metricsTest", false, "Calculate UPF metrics") isLazyModeTest = flag.Bool("lazyTest", false, "Enable lazy serving mode when UPFs are enabled") + isFullLocalTest = flag.Bool("fullLocalTest", false, "Enable full local snapshots") isWithCache = flag.Bool("withCache", false, "Do not drop the cache before 
measurements") benchDir = flag.String("benchDirTest", "bench_results", "Directory where stats should be saved") ) @@ -66,29 +66,30 @@ func TestMain(m *testing.M) { flag.Parse() - log.Infof("DedupOrchestrator snapshots enabled: %t", *isSnapshotsEnabledTest) - log.Infof("DedupOrchestrator UPF enabled: %t", *isUPFEnabledTest) - log.Infof("DedupOrchestrator lazy serving mode enabled: %t", *isLazyModeTest) - log.Infof("DedupOrchestrator UPF metrics enabled: %t", *isMetricsModeTest) + log.Infof("Orchestrator snapshots enabled: %t", *isSnapshotsEnabledTest) + log.Infof("Orchestrator UPF enabled: %t", *isUPFEnabledTest) + log.Infof("Orchestrator lazy serving mode enabled: %t", *isLazyModeTest) + log.Infof("Orchestrator UPF metrics enabled: %t", *isMetricsModeTest) log.Infof("Drop cache: %t", !*isWithCache) log.Infof("Bench dir: %s", *benchDir) - orch = ctriface.NewOrchestrator(regular.NewRegOrchestrator( + // TODO: set correct params if full local test + orch = ctriface.NewOrchestrator( "devmapper", "", "fc-dev-thinpool", "", 10, - regular.WithTestModeOn(true), - regular.WithSnapshots(*isSnapshotsEnabledTest), - regular.WithUPF(*isUPFEnabledTest), - regular.WithMetricsMode(*isMetricsModeTest), - regular.WithLazyMode(*isLazyModeTest), - )) + ctriface.WithTestModeOn(true), + ctriface.WithSnapshots(*isSnapshotsEnabledTest), + ctriface.WithUPF(*isUPFEnabledTest), + ctriface.WithMetricsMode(*isMetricsModeTest), + ctriface.WithLazyMode(*isLazyModeTest), + ) ret := m.Run() - err := orch.StopActiveVMs() + err := orch.StopActiveVMs(*isFullLocalTest) if err != nil { log.Printf("Failed to stop VMs, err: %v\n", err) } From d6827fb0bb69a1e6d60745d70db6478df594a294 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Sat, 12 Mar 2022 14:20:42 +0000 Subject: [PATCH 10/15] Address PR remarks Signed-off-by: Amory Hoste --- cri/firecracker/coordinator.go | 86 +++++++++++++++++----------- cri/firecracker/service.go | 23 ++++---- ctriface/iface.go | 7 ++- snapshotting/deduplicated/manager.go | 2 
+- vhive.go | 7 ++- 5 files changed, 76 insertions(+), 49 deletions(-) diff --git a/cri/firecracker/coordinator.go b/cri/firecracker/coordinator.go index efbc568e4..5228cc88f 100644 --- a/cri/firecracker/coordinator.go +++ b/cri/firecracker/coordinator.go @@ -62,7 +62,13 @@ func withoutOrchestrator() coordinatorOption { } } -func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool, isFullLocal bool, opts ...coordinatorOption) *coordinator { +func newFirecrackerCoordinator( + orch *ctriface.Orchestrator, + snapsCapacityMiB int64, + isSparseSnaps bool, + isFullLocal bool, + opts ...coordinatorOption) *coordinator { + c := &coordinator{ activeInstances: make(map[string]*FuncInstance), orch: orch, @@ -114,7 +120,7 @@ func (c *coordinator) startVM(ctx context.Context, image string, revision string func (c *coordinator) stopVM(ctx context.Context, containerID string) error { c.Lock() - fi, present := c.activeInstances[containerID] + funcInst, present := c.activeInstances[containerID] if present { delete(c.activeInstances, containerID) } @@ -127,28 +133,28 @@ func (c *coordinator) stopVM(ctx context.Context, containerID string) error { } if c.orch == nil || ! 
c.orch.GetSnapshotsEnabled() { - return c.orchStopVM(ctx, fi) + return c.orchStopVM(ctx, funcInst) } - id := fi.vmID + id := funcInst.vmID if c.isFullLocal { - id = fi.revisionId + id = funcInst.revisionId } - if fi.snapBooted { + if funcInst.snapBooted { defer c.snapshotManager.ReleaseSnapshot(id) } else { // Create snapshot - err := c.orchCreateSnapshot(ctx, fi) + err := c.orchCreateSnapshot(ctx, funcInst) if err != nil { log.Printf("Err creating snapshot %s\n", err) } } if c.isFullLocal { - return c.orchStopVM(ctx, fi) + return c.orchStopVM(ctx, funcInst) } else { - return c.orchOffloadVM(ctx, fi) + return c.orchOffloadVM(ctx, funcInst) } } @@ -161,18 +167,18 @@ func (c *coordinator) isActive(containerID string) bool { return ok } -func (c *coordinator) insertActive(containerID string, fi *FuncInstance) error { +func (c *coordinator) insertActive(containerID string, funcInst *FuncInstance) error { c.Lock() defer c.Unlock() - logger := log.WithFields(log.Fields{"containerID": containerID, "vmID": fi.vmID}) + logger := log.WithFields(log.Fields{"containerID": containerID, "vmID": funcInst.vmID}) if fi, present := c.activeInstances[containerID]; present { logger.Errorf("entry for container already exists with vmID %s" + fi.vmID) return errors.New("entry for container already exists") } - c.activeInstances[containerID] = fi + c.activeInstances[containerID] = funcInst return nil } @@ -206,12 +212,18 @@ func (c *coordinator) orchStartVM(ctx context.Context, image, revision string, m coldStartTimeMs := metrics.ToMs(time.Since(tStartCold)) - fi := NewFuncInstance(vmID, image, revision, resp, false, memSizeMib, vCPUCount, coldStartTimeMs) + funcInst := NewFuncInstance(vmID, image, revision, resp, false, memSizeMib, vCPUCount, coldStartTimeMs) logger.Debug("successfully created fresh instance") - return fi, err + return funcInst, err } -func (c *coordinator) orchStartVMSnapshot(ctx context.Context, snap *snapshotting.Snapshot, memSizeMib, vCPUCount uint32, vmID string) 
(*FuncInstance, error) { +func (c *coordinator) orchStartVMSnapshot( + ctx context.Context, + snap *snapshotting.Snapshot, + memSizeMib, + vCPUCount uint32, + vmID string) (*FuncInstance, error) { + tStartCold := time.Now() logger := log.WithFields( log.Fields{ @@ -242,29 +254,35 @@ func (c *coordinator) orchStartVMSnapshot(ctx context.Context, snap *snapshottin } coldStartTimeMs := metrics.ToMs(time.Since(tStartCold)) - fi := NewFuncInstance(vmID, snap.GetImage(), snap.GetId(), resp, true, memSizeMib, vCPUCount, coldStartTimeMs) + funcInst := NewFuncInstance(vmID, snap.GetImage(), snap.GetId(), resp, true, memSizeMib, vCPUCount, coldStartTimeMs) logger.Debug("successfully loaded instance from snapshot") - return fi, err + return funcInst, err } -func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) error { +func (c *coordinator) orchCreateSnapshot(ctx context.Context, funcInst *FuncInstance) error { logger := log.WithFields( log.Fields{ - "vmID": fi.vmID, - "image": fi.image, + "vmID": funcInst.vmID, + "image": funcInst.image, }, ) - id := fi.vmID + id := funcInst.vmID if c.isFullLocal { - id = fi.revisionId + id = funcInst.revisionId } - removeContainerSnaps, snap, err := c.snapshotManager.InitSnapshot(id, fi.image, fi.coldStartTimeMs, fi.memSizeMib, fi.vCPUCount, c.isSparseSnaps) + removeContainerSnaps, snap, err := c.snapshotManager.InitSnapshot( + id, + funcInst.image, + funcInst.coldStartTimeMs, + funcInst.memSizeMib, + funcInst.vCPUCount, + c.isSparseSnaps) if err != nil { - fi.logger.Warn(fmt.Sprint(err)) + funcInst.logger.Warn(fmt.Sprint(err)) return nil } @@ -281,46 +299,46 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance) logger.Debug("creating instance snapshot before stopping") - err = c.orch.PauseVM(ctxTimeout, fi.vmID) + err = c.orch.PauseVM(ctxTimeout, funcInst.vmID) if err != nil { logger.WithError(err).Error("failed to pause VM") return nil } - err = c.orch.CreateSnapshot(ctxTimeout, 
fi.vmID, snap, c.isFullLocal) + err = c.orch.CreateSnapshot(ctxTimeout, funcInst.vmID, snap, c.isFullLocal) if err != nil { - fi.logger.WithError(err).Error("failed to create snapshot") + funcInst.logger.WithError(err).Error("failed to create snapshot") return nil } if err := c.snapshotManager.CommitSnapshot(id); err != nil { - fi.logger.WithError(err).Error("failed to commit snapshot") + funcInst.logger.WithError(err).Error("failed to commit snapshot") return err } return nil } -func (c *coordinator) orchOffloadVM(ctx context.Context, fi *FuncInstance) error { +func (c *coordinator) orchOffloadVM(ctx context.Context, funcInst *FuncInstance) error { if c.withoutOrchestrator { return nil } - if err := c.orch.OffloadVM(ctx, fi.vmID, c.isFullLocal); err != nil { - fi.logger.WithError(err).Error("failed to offload VM") + if err := c.orch.OffloadVM(ctx, funcInst.vmID, c.isFullLocal); err != nil { + funcInst.logger.WithError(err).Error("failed to offload VM") return err } return nil } -func (c *coordinator) orchStopVM(ctx context.Context, fi *FuncInstance) error { +func (c *coordinator) orchStopVM(ctx context.Context, funcInst *FuncInstance) error { if c.withoutOrchestrator { return nil } - if err := c.orch.StopSingleVM(ctx, fi.vmID, c.isFullLocal); err != nil { - fi.logger.WithError(err).Error("failed to stop VM for instance") + if err := c.orch.StopSingleVM(ctx, funcInst.vmID, c.isFullLocal); err != nil { + funcInst.logger.WithError(err).Error("failed to stop VM for instance") return err } diff --git a/cri/firecracker/service.go b/cri/firecracker/service.go index b083f4c70..347a61970 100644 --- a/cri/firecracker/service.go +++ b/cri/firecracker/service.go @@ -35,14 +35,16 @@ import ( ) const ( - userContainerName = "user-container" - queueProxyName = "queue-proxy" - revisionEnv = "K_REVISION" - guestIPEnv = "GUEST_ADDR" - guestPortEnv = "GUEST_PORT" - guestImageEnv = "GUEST_IMAGE" - guestMemorySizeMibEnv = "MEM_SIZE_MB" - guestvCPUCountEnv = "VCPU_COUNT" + 
userContainerName = "user-container" + queueProxyName = "queue-proxy" + revisionEnv = "K_REVISION" + guestIPEnv = "GUEST_ADDR" + guestPortEnv = "GUEST_PORT" + guestImageEnv = "GUEST_IMAGE" + guestMemorySizeMibEnv = "MEM_SIZE_MB" + guestvCPUCountEnv = "VCPU_COUNT" + defaultMemSize uint32 = 256 + defaultvCPUCount uint32 = 1 ) type FirecrackerService struct { @@ -69,6 +71,7 @@ func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, return nil, err } fs.stockRuntimeClient = stockRuntimeClient + fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps, isFullLocal) fs.vmConfigs = make(map[string]*VMConfig) return fs, nil @@ -260,7 +263,7 @@ func getMemorySize(config *criapi.ContainerConfig) (uint32, error) { } } - return uint32(256), nil + return defaultMemSize, nil } func getvCPUCount(config *criapi.ContainerConfig) (uint32, error) { @@ -276,5 +279,5 @@ func getvCPUCount(config *criapi.ContainerConfig) (uint32, error) { } } - return uint32(1), nil + return defaultvCPUCount, nil } \ No newline at end of file diff --git a/ctriface/iface.go b/ctriface/iface.go index a06748b61..24d68cea2 100644 --- a/ctriface/iface.go +++ b/ctriface/iface.go @@ -485,7 +485,12 @@ func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *sn } // LoadSnapshot Loads a snapshot of a VM -func (o *Orchestrator) LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot, isFullLocal bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { +func (o *Orchestrator) LoadSnapshot( + ctx context.Context, + vmID string, + snap *snapshotting.Snapshot, + isFullLocal bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { + var ( loadSnapshotMetric *metrics.Metric = metrics.NewMetric() tStart time.Time diff --git a/snapshotting/deduplicated/manager.go b/snapshotting/deduplicated/manager.go index 30e5dc7dd..1e5cfdd6b 100644 --- a/snapshotting/deduplicated/manager.go +++ b/snapshotting/deduplicated/manager.go @@ 
-79,7 +79,7 @@ func (mgr *ImprovedSnapshotManager) AcquireSnapshot(revision string) (*snapshott // Snapshot registered in manager but creation not finished yet if ! snapStat.usable { // Could also wait until snapshot usable (trade-off) - return nil, errors.New(fmt.Sprintf("Snapshot is not yet usable")) + return nil, errors.New("Snapshot is not yet usable") } if snapStat.numUsing == 0 { diff --git a/vhive.go b/vhive.go index 3bc129139..3fda98a5a 100644 --- a/vhive.go +++ b/vhive.go @@ -56,7 +56,7 @@ var ( funcPool *FuncPool isSaveMemory *bool - snapsCapacityMiB *int64 + snapsStorageSize *int64 isSparseSnaps *bool isFullLocal *bool isSnapshotsEnabled *bool @@ -89,7 +89,7 @@ func main() { isSnapshotsEnabled = flag.Bool("snapshots", false, "Use VM snapshots when adding function instances") isSparseSnaps = flag.Bool("sparsesnaps", false, "Makes memory files sparse after storing to reduce disk utilization") isFullLocal = flag.Bool("fulllocal", false, "Use improved full local snapshotting") - snapsCapacityMiB = flag.Int64("snapcapacity", 102400, "Capacity set aside for storing snapshots (Mib)") + snapsStorageSize = flag.Int64("snapcapacity", 100, "Total storage reserved for storing snapshots (GiB)") isUPFEnabled = flag.Bool("upf", false, "Enable user-level page faults guest memory management") isLazyMode = flag.Bool("lazy", false, "Enable lazy serving mode when UPFs are enabled") @@ -193,7 +193,8 @@ func setupFirecrackerCRI() { s := grpc.NewServer() - fcService, err := fccri.NewFirecrackerService(orch, *snapsCapacityMiB, *isSparseSnaps, *isFullLocal) + snapsCapacityMiB := *snapsStorageSize * 1024 + fcService, err := fccri.NewFirecrackerService(orch, snapsCapacityMiB, *isSparseSnaps, *isFullLocal) if err != nil { log.Fatalf("failed to create firecracker service %v", err) } From b62820a934e6da4746df0f4d3743b5773da926b9 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Sat, 12 Mar 2022 17:51:01 +0000 Subject: [PATCH 11/15] Add tests and compatibility with offloaded snapshots 
Signed-off-by: Amory Hoste --- bin/containerd-shim-aws-firecracker | 4 +- bin/firecracker-containerd | 2 +- bin/firecracker-ctr | 2 +- cri/firecracker/coordinator.go | 32 +- cri/firecracker/coordinator_test.go | 10 +- cri/firecracker/service.go | 1 - ctriface/Makefile | 27 +- ctriface/bench_test.go | 7 +- ctriface/failing_test.go | 18 +- ctriface/iface.go | 107 ++--- ctriface/iface_test.go | 57 ++- ctriface/image/Makefile | 33 ++ .../image/manager.go | 24 +- ctriface/image/manager_test.go | 134 ++++++ ctriface/manual_cleanup_test.go | 186 ++++++-- ctriface/orch.go | 14 +- ctriface/orch_options.go | 7 + ctriface/types.go | 28 -- devmapper/Makefile | 33 ++ devmapper/deviceSnapshot.go | 9 +- devmapper/devicemapper.go | 16 +- devmapper/devicemapper_test.go | 137 ++++++ devmapper/thindelta/blockDelta.go | 4 +- devmapper/thindelta/thinDelta.go | 2 +- functions.go | 18 +- go.mod | 2 +- go.sum | 4 +- misc/Makefile | 3 +- misc/misc_test.go | 39 +- misc/types.go | 60 ++- misc/vm_pool.go | 70 ++- networking/Makefile | 33 ++ networking/networkManager.go | 25 +- networking/networkconfig.go | 8 +- networking/networking_test.go | 110 +++++ scripts/clean_fcctr.sh | 12 +- scripts/github_runner/clean_cri_runner.sh | 3 + snapshotting/Makefile | 33 ++ .../{deduplicated => fulllocal}/manager.go | 47 +- .../{deduplicated => fulllocal}/snapHeap.go | 2 +- .../{deduplicated => fulllocal}/snapStats.go | 5 +- snapshotting/manager_test.go | 167 ++++++++ snapshotting/regular/manager.go | 48 ++- snapshotting/snapshot.go | 9 +- taps/Makefile | 33 ++ taps/tapManager.go | 403 ++++++++++++++++++ taps/taps_test.go | 105 +++++ taps/types.go | 55 +++ vhive.go | 8 +- vhive_test.go | 3 +- 50 files changed, 1912 insertions(+), 287 deletions(-) create mode 100644 ctriface/image/Makefile rename ctrimages/imageManager.go => ctriface/image/manager.go (87%) create mode 100644 ctriface/image/manager_test.go create mode 100644 devmapper/Makefile create mode 100644 devmapper/devicemapper_test.go create mode 
100644 networking/Makefile create mode 100644 networking/networking_test.go create mode 100644 snapshotting/Makefile rename snapshotting/{deduplicated => fulllocal}/manager.go (83%) rename snapshotting/{deduplicated => fulllocal}/snapHeap.go (98%) rename snapshotting/{deduplicated => fulllocal}/snapStats.go (89%) create mode 100644 snapshotting/manager_test.go create mode 100644 taps/Makefile create mode 100644 taps/tapManager.go create mode 100644 taps/taps_test.go create mode 100644 taps/types.go diff --git a/bin/containerd-shim-aws-firecracker b/bin/containerd-shim-aws-firecracker index 67ff4cdcc..1db7544cb 100755 --- a/bin/containerd-shim-aws-firecracker +++ b/bin/containerd-shim-aws-firecracker @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b48fcdff74c342e8b4f65659139056dea1c27fdb99a0c2f267070b6b3b97b0b -size 26530283 +oid sha256:b3d8525300d6ce747c63847f0f688d56d61be927648f19a86abee2e8f1e9e0e4 +size 26534379 diff --git a/bin/firecracker-containerd b/bin/firecracker-containerd index 9887c4b33..138deccc3 100755 --- a/bin/firecracker-containerd +++ b/bin/firecracker-containerd @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc908873170a25ca713ca2e80323cf1496d5d9b7a3449778d0018a84825dd0f7 +oid sha256:17da34088e3c544328545e39037110bb1ab2c09543e9f25614d5425ea90793ad size 47224352 diff --git a/bin/firecracker-ctr b/bin/firecracker-ctr index 1acca2487..73417e8d8 100755 --- a/bin/firecracker-ctr +++ b/bin/firecracker-ctr @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51a994f7cb2cd48087a4b5a27476577c60d9fd6ce34a470435de5f33c2fb3508 +oid sha256:d61c35b77178fbabc4c996f4e2411f565b6974ef40773b68d465db17e725bb99 size 34510472 diff --git a/cri/firecracker/coordinator.go b/cri/firecracker/coordinator.go index 5228cc88f..c6f3104b4 100644 --- a/cri/firecracker/coordinator.go +++ b/cri/firecracker/coordinator.go @@ -28,7 +28,7 @@ import ( "github.com/ease-lab/vhive/ctriface" 
"github.com/ease-lab/vhive/metrics" "github.com/ease-lab/vhive/snapshotting" - "github.com/ease-lab/vhive/snapshotting/deduplicated" + "github.com/ease-lab/vhive/snapshotting/fulllocal" "github.com/ease-lab/vhive/snapshotting/regular" "github.com/pkg/errors" "strconv" @@ -77,9 +77,9 @@ func newFirecrackerCoordinator( } if isFullLocal { - c.snapshotManager = snapshotting.NewSnapshotManager(deduplicated.NewSnapshotManager(snapshotsDir, snapsCapacityMiB)) + c.snapshotManager = snapshotting.NewSnapshotManager(fulllocal.NewSnapshotManager(snapshotsDir, snapsCapacityMiB)) } else { - c.snapshotManager = snapshotting.NewSnapshotManager(regular.NewRegularSnapshotManager(snapshotsDir)) + c.snapshotManager = snapshotting.NewSnapshotManager(regular.NewSnapshotManager(snapshotsDir)) } for _, opt := range opts { @@ -142,7 +142,8 @@ func (c *coordinator) stopVM(ctx context.Context, containerID string) error { } if funcInst.snapBooted { - defer c.snapshotManager.ReleaseSnapshot(id) + // Release snapshot after the VM has been stopped / offloaded + defer func() { _ = c.snapshotManager.ReleaseSnapshot(id) }() } else { // Create snapshot err := c.orchCreateSnapshot(ctx, funcInst) @@ -204,7 +205,7 @@ func (c *coordinator) orchStartVM(ctx context.Context, image, revision string, m if !c.withoutOrchestrator { trackDirtyPages := c.isSparseSnaps - resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount, trackDirtyPages, c.isFullLocal) + resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount, trackDirtyPages) if err != nil { logger.WithError(err).Error("coordinator failed to start VM") } @@ -242,7 +243,7 @@ func (c *coordinator) orchStartVMSnapshot( ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*30) defer cancel() - resp, _, err = c.orch.LoadSnapshot(ctxTimeout, vmID, snap, c.isFullLocal) + resp, _, err = c.orch.LoadSnapshot(ctxTimeout, vmID, snap) if err != nil { logger.WithError(err).Error("failed to load VM") return nil, err @@ 
-273,7 +274,7 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, funcInst *FuncInst id = funcInst.revisionId } - removeContainerSnaps, snap, err := c.snapshotManager.InitSnapshot( + _, snap, err := c.snapshotManager.InitSnapshot( id, funcInst.image, funcInst.coldStartTimeMs, @@ -286,13 +287,14 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, funcInst *FuncInst return nil } - if c.isFullLocal && removeContainerSnaps != nil { + // This call is only necessary if the alternative approach in devicemapper with thin-delta is used. + /*if c.isFullLocal && removeContainerSnaps != nil { for _, cleanupSnapId := range *removeContainerSnaps { if err := c.orch.CleanupSnapshot(ctx, cleanupSnapId); err != nil { return errors.Wrap(err, "removing devmapper revision snapshot") } } - } + }*/ ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*60) defer cancel() @@ -305,12 +307,18 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, funcInst *FuncInst return nil } - err = c.orch.CreateSnapshot(ctxTimeout, funcInst.vmID, snap, c.isFullLocal) + err = c.orch.CreateSnapshot(ctxTimeout, funcInst.vmID, snap) if err != nil { funcInst.logger.WithError(err).Error("failed to create snapshot") return nil } + // TODO: StopVM does not work for fullLocal snapshots without resuming. 
Might be the same for offloaded since + // those are never stopped + if c.isFullLocal { + _, err = c.orch.ResumeVM(ctx, funcInst.vmID) + } + if err := c.snapshotManager.CommitSnapshot(id); err != nil { funcInst.logger.WithError(err).Error("failed to commit snapshot") return err @@ -324,7 +332,7 @@ func (c *coordinator) orchOffloadVM(ctx context.Context, funcInst *FuncInstance) return nil } - if err := c.orch.OffloadVM(ctx, funcInst.vmID, c.isFullLocal); err != nil { + if err := c.orch.OffloadVM(ctx, funcInst.vmID); err != nil { funcInst.logger.WithError(err).Error("failed to offload VM") return err } @@ -337,7 +345,7 @@ func (c *coordinator) orchStopVM(ctx context.Context, funcInst *FuncInstance) er return nil } - if err := c.orch.StopSingleVM(ctx, funcInst.vmID, c.isFullLocal); err != nil { + if err := c.orch.StopSingleVM(ctx, funcInst.vmID); err != nil { funcInst.logger.WithError(err).Error("failed to stop VM for instance") return err } diff --git a/cri/firecracker/coordinator_test.go b/cri/firecracker/coordinator_test.go index 0affa7a14..7660ae2cc 100644 --- a/cri/firecracker/coordinator_test.go +++ b/cri/firecracker/coordinator_test.go @@ -24,6 +24,7 @@ package firecracker import ( "context" + "flag" "fmt" "os" "strconv" @@ -37,13 +38,18 @@ const ( testImageName = "ghcr.io/ease-lab/helloworld:var_workload" ) +var ( + isFullLocal = flag.Bool("fulllocal", false, "Set full local snapshots") + isSparseSnaps = flag.Bool("sparsesnaps", false, "Use sparse snapshots") +) + var ( coord *coordinator ) func TestMain(m *testing.M) { - coord = newFirecrackerCoordinator(nil, 10240, false, false, withoutOrchestrator()) - + coord = newFirecrackerCoordinator(nil, 10240, *isSparseSnaps, *isFullLocal, withoutOrchestrator()) + flag.Parse() ret := m.Run() os.Exit(ret) } diff --git a/cri/firecracker/service.go b/cri/firecracker/service.go index 347a61970..bdf9602f9 100644 --- a/cri/firecracker/service.go +++ b/cri/firecracker/service.go @@ -71,7 +71,6 @@ func 
NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, return nil, err } fs.stockRuntimeClient = stockRuntimeClient - fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps, isFullLocal) fs.vmConfigs = make(map[string]*VMConfig) return fs, nil diff --git a/ctriface/Makefile b/ctriface/Makefile index d88495d57..84f78957f 100644 --- a/ctriface/Makefile +++ b/ctriface/Makefile @@ -21,10 +21,12 @@ # SOFTWARE. EXTRAGOARGS:=-v -race -cover -EXTRATESTFILES:=iface_test.go iface.go orch_options.go orch.go +EXTRATESTFILES:=iface_test.go iface.go orch_options.go orch.go types.go BENCHFILES:=bench_test.go iface.go orch_options.go orch.go WITHUPF:=-upf +WITHFULLLOCAL:=-fulllocal WITHLAZY:=-lazy +WITHSPARSESNAPS:=-sparsesnaps GOBENCH:=-v -timeout 1500s CTRDLOGDIR:=/tmp/ctrd-logs @@ -34,6 +36,12 @@ test: sudo env "PATH=$(PATH)" go test $(EXTRATESTFILES) $(EXTRAGOARGS) sudo env "PATH=$(PATH)" go test $(EXTRATESTFILES) $(EXTRAGOARGS) -args $(WITHUPF) ./../scripts/clean_fcctr.sh + sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log.out 2>$(CTRDLOGDIR)/ctriface_log.err & + sudo env "PATH=$(PATH)" go test $(EXTRATESTFILES) $(EXTRAGOARGS) -args $(WITHFULLLOCAL) + ./../scripts/clean_fcctr.sh + sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log.out 2>$(CTRDLOGDIR)/ctriface_log.err & + sudo env "PATH=$(PATH)" go test $(EXTRATESTFILES) $(EXTRAGOARGS) -args $(WITHFULLLOCAL) $(WITHSPARSESNAPS) + ./../scripts/clean_fcctr.sh test-man: sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log_noupf_man_travis.out 2>$(CTRDLOGDIR)/ctriface_log_noupf_man_travis.err & @@ -45,7 +53,12 @@ 
test-man: sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log_lazy_man_travis.out 2>$(CTRDLOGDIR)/ctriface_log_lazy_man_travis.err & sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestSnapLoad -args $(WITHUPF) $(WITHLAZY) ./../scripts/clean_fcctr.sh - + sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log_lazy_man_travis.out 2>$(CTRDLOGDIR)/ctriface_log_lazy_man_travis.err & + sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestSnapLoad -args $(WITHFULLLOCAL) + ./../scripts/clean_fcctr.sh + sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log_lazy_man_travis.out 2>$(CTRDLOGDIR)/ctriface_log_lazy_man_travis.err & + sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestSnapLoad -args $(WITHFULLLOCAL) $(WITHSPARSESNAPS) + ./../scripts/clean_fcctr.sh test-skip: sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log_noupf_man_skip.out 2>$(CTRDLOGDIR)/ctriface_log_noupf_man_skip.err & sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestParallelSnapLoad @@ -59,8 +72,14 @@ test-skip: sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestParallelSnapLoad -args $(WITHUPF) $(WITHLAZY) sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestParallelPhasedSnapLoad -args $(WITHUPF) $(WITHLAZY) ./../scripts/clean_fcctr.sh - - + sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log_lazy_man_skip.out 2>$(CTRDLOGDIR)/ctriface_log_lazy_man_skip.err & + sudo env 
"PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestParallelSnapLoad -args $(WITHFULLLOCAL) + sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestParallelPhasedSnapLoad -args $(WITHFULLLOCAL) + ./../scripts/clean_fcctr.sh + sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log_lazy_man_skip.out 2>$(CTRDLOGDIR)/ctriface_log_lazy_man_skip.err & + sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestParallelSnapLoad -args $(WITHFULLLOCAL) $(WITHSPARSESNAPS) + sudo env "PATH=$(PATH)" go test $(EXTRAGOARGS) -run TestParallelPhasedSnapLoad -args $(WITHFULLLOCAL) $(WITHSPARSESNAPS) + ./../scripts/clean_fcctr.sh bench: sudo env "PATH=$(PATH)" go test $(BENCHFILES) $(GOBENCH) ./../scripts/clean_fcctr.sh diff --git a/ctriface/bench_test.go b/ctriface/bench_test.go index 7128866f8..131dc66b2 100644 --- a/ctriface/bench_test.go +++ b/ctriface/bench_test.go @@ -64,7 +64,9 @@ func TestBenchmarkStart(t *testing.T) { 10, WithTestModeOn(true), WithUPF(*isUPFEnabled), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() images := getAllImages() benchCount := 10 @@ -83,11 +85,11 @@ func TestBenchmarkStart(t *testing.T) { for i := 0; i < benchCount; i++ { dropPageCache() - _, metric, err := orch.StartVM(ctx, vmIDString, imageName, 256, 1, false, false) + _, metric, err := orch.StartVM(ctx, vmIDString, imageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") startMetrics[i] = metric - err = orch.StopSingleVM(ctx, vmIDString, false) + err = orch.StopSingleVM(ctx, vmIDString) require.NoError(t, err, "Failed to stop VM") } @@ -99,7 +101,6 @@ func TestBenchmarkStart(t *testing.T) { } - orch.Cleanup() } func dropPageCache() { diff --git a/ctriface/failing_test.go b/ctriface/failing_test.go index e93bf5821..5e4904d64 100644 --- a/ctriface/failing_test.go +++ b/ctriface/failing_test.go @@ -59,29 +59,31 @@ func TestStartSnapStop(t *testing.T) { "", 10, 
WithTestModeOn(true), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() vmID := "2" - revisionID := "myrev-2" - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap, false) + snap := snapshotting.NewSnapshot(vmID, "/fccd/snapshots", TestImageName, 256, 1, false) + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") - _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) + err = orch.OffloadVM(ctx, vmID) + require.NoError(t, err, "Failed to offload VM") + + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - err = orch.StopSingleVM(ctx, vmID, false) + err = orch.StopSingleVM(ctx, vmID) require.NoError(t, err, "Failed to stop VM") - - orch.Cleanup() } diff --git a/ctriface/iface.go b/ctriface/iface.go index 24d68cea2..752bfdd48 100644 --- a/ctriface/iface.go +++ b/ctriface/iface.go @@ -58,10 +58,8 @@ const ( TestImageName = "ghcr.io/ease-lab/helloworld:var_workload" ) -// TODO: isFullLocql param for all functions - // StartVM Boots a VM if it does not exist -func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32, trackDirtyPages, isFullLocal bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { +func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memSizeMib ,vCPUCount uint32, trackDirtyPages bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { var ( startVMMetric *metrics.Metric = metrics.NewMetric() tStart time.Time @@ -105,7 +103,7 @@ func (o 
*Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS // 3. Create VM tStart = time.Now() - conf := o.getVMConfig(vm, trackDirtyPages, isFullLocal) + conf := o.getVMConfig(vm, trackDirtyPages, o.isFullLocal) resp, err := o.fcClient.CreateVM(ctx, conf) startVMMetric.MetricMap[metrics.FcCreateVM] = metrics.ToUS(time.Since(tStart)) if err != nil { @@ -125,7 +123,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS tStart = time.Now() containerId := vmID - if isFullLocal { + if o.isFullLocal { containerId = vm.ContainerSnapKey } @@ -209,7 +207,7 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS } }() - if ! isFullLocal { + if ! o.isFullLocal { if err := os.MkdirAll(o.getVMBaseDir(vmID), 0777); err != nil { logger.Error("Failed to create VM base dir") return nil, nil, err @@ -236,12 +234,12 @@ func (o *Orchestrator) StartVM(ctx context.Context, vmID, imageName string, memS logger.Debug("Successfully started a VM") - return &StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, startVMMetric, nil + return &StartVMResponse{GuestIP: vm.GetIP()}, startVMMetric, nil } // StopSingleVM Shuts down a VM // Note: VMs are not quisced before being stopped -func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string, isFullLocal bool) error { +func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string) error { logger := log.WithFields(log.Fields{"vmID": vmID}) logger.Debug("Orchestrator received StopVM") @@ -259,7 +257,7 @@ func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string, isFullLoca logger = log.WithFields(log.Fields{"vmID": vmID}) // Cleanup and remove container if VM not booted from snapshot - if ! isFullLocal || ! vm.SnapBooted { + if ! o.isFullLocal || ! 
vm.SnapBooted { task := *vm.Task if err := task.Kill(ctx, syscall.SIGKILL); err != nil { logger.WithError(err).Error("Failed to kill the task") @@ -297,7 +295,7 @@ func (o *Orchestrator) StopSingleVM(ctx context.Context, vmID string, isFullLoca o.workloadIo.Delete(vmID) // Cleanup VM devmapper container snapshot if booted from snapshot - if isFullLocal && vm.SnapBooted { + if o.isFullLocal && vm.SnapBooted { if err := o.devMapper.RemoveDeviceSnapshot(ctx, vm.ContainerSnapKey); err != nil { logger.Error("failed to deactivate container snapshot") return err @@ -345,29 +343,29 @@ func (o *Orchestrator) getVMConfig(vm *misc.VM, trackDirtyPages, isFullLocal boo }, NetworkInterfaces: []*proto.FirecrackerNetworkInterface{{ StaticConfig: &proto.StaticNetworkConfiguration{ - MacAddress: vm.NetConfig.GetMacAddress(), - HostDevName: vm.NetConfig.GetHostDevName(), + MacAddress: vm.GetMacAddress(), + HostDevName: vm.GetHostDevName(), IPConfig: &proto.IPConfiguration{ - PrimaryAddr: vm.NetConfig.GetContainerCIDR(), - GatewayAddr: vm.NetConfig.GetGatewayIP(), + PrimaryAddr: vm.GetPrimaryAddr(), + GatewayAddr: vm.GetGatewayAddr(), Nameservers: getK8sDNS(), }, }, }}, - NetworkNamespace: vm.NetConfig.GetNamespacePath(), + NetworkNamespace: vm.GetNetworkNamespace(), OffloadEnabled: ! 
isFullLocal, } } // StopActiveVMs Shuts down all active VMs -func (o *Orchestrator) StopActiveVMs(isFullLocal bool) error { +func (o *Orchestrator) StopActiveVMs() error { var vmGroup sync.WaitGroup for vmID, vm := range o.vmPool.GetVMMap() { vmGroup.Add(1) logger := log.WithFields(log.Fields{"vmID": vmID}) go func(vmID string, vm *misc.VM) { defer vmGroup.Done() - err := o.StopSingleVM(context.Background(), vmID, isFullLocal) + err := o.StopSingleVM(context.Background(), vmID) if err != nil { logger.Warn(err) } @@ -424,7 +422,7 @@ func (o *Orchestrator) ResumeVM(ctx context.Context, vmID string) (*metrics.Metr } // CreateSnapshot Creates a snapshot of a VM -func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot, isFullLocal bool) error { +func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error { logger := log.WithFields(log.Fields{"vmID": vmID}) logger.Debug("Orchestrator received CreateSnapshot") @@ -434,7 +432,7 @@ func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *sn snapFilePath := o.getSnapshotFile(vmID) memFilePath := o.getMemoryFile(vmID) - if isFullLocal { + if o.isFullLocal { snapFilePath = snap.GetSnapFilePath() memFilePath = snap.GetMemFilePath() } @@ -452,7 +450,7 @@ func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *sn } // For the non full-local snapshots, no additional steps are necessary - if ! isFullLocal { + if ! o.isFullLocal { return nil } @@ -475,12 +473,6 @@ func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *sn return err } - // 5. 
Resume - if _, err := o.fcClient.ResumeVM(ctx, &proto.ResumeVMRequest{VMID: vmID}); err != nil { - log.Printf("failed to resume the VM") - return err - } - return nil } @@ -488,8 +480,7 @@ func (o *Orchestrator) CreateSnapshot(ctx context.Context, vmID string, snap *sn func (o *Orchestrator) LoadSnapshot( ctx context.Context, vmID string, - snap *snapshotting.Snapshot, - isFullLocal bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { + snap *snapshotting.Snapshot) (_ *StartVMResponse, _ *metrics.Metric, retErr error) { var ( loadSnapshotMetric *metrics.Metric = metrics.NewMetric() @@ -503,24 +494,27 @@ func (o *Orchestrator) LoadSnapshot( ctx = namespaces.WithNamespace(ctx, NamespaceName) - // 1. Allocate VM metadata & create vm network - vm, err := o.vmPool.Allocate(vmID) - if err != nil { - logger.Error("failed to allocate VM in VM pool") - return nil, nil, err - } + var containerSnap *devmapper.DeviceSnapshot + var vm *misc.VM + if o.isFullLocal { + var err error - defer func() { - // Free the VM from the pool if function returns error - if retErr != nil { - if err := o.vmPool.Free(vmID); err != nil { - logger.WithError(err).Errorf("failed to free VM from pool after failure") - } + // 1. Allocate VM metadata & create vm network + vm, err = o.vmPool.Allocate(vmID) + if err != nil { + logger.Error("failed to allocate VM in VM pool") + return nil, nil, err } - }() - var containerSnap *devmapper.DeviceSnapshot - if isFullLocal { + defer func() { + // Free the VM from the pool if function returns error + if retErr != nil { + if err := o.vmPool.Free(vmID); err != nil { + logger.WithError(err).Errorf("failed to free VM from pool after failure") + } + } + }() + // 2. 
Fetch image for VM if vm.Image, err = o.GetImage(ctx, snap.GetImage()); err != nil { return nil, nil, errors.Wrapf(err, "Failed to get/pull image") @@ -541,13 +535,19 @@ func (o *Orchestrator) LoadSnapshot( if err := o.devMapper.RestorePatch(ctx, vm.ContainerSnapKey, snap.GetPatchFilePath()); err != nil { return nil, nil, errors.Wrapf(err, "unpacking patch into container snapshot") } + } else { + var err error + vm, err = o.vmPool.GetVM(vmID) + if err != nil { + return nil, nil, err + } } // 5. Load VM from snapshot snapFilePath := o.getSnapshotFile(vmID) memFilePath := o.getMemoryFile(vmID) - if isFullLocal { + if o.isFullLocal { snapFilePath = snap.GetSnapFilePath() memFilePath = snap.GetMemFilePath() } @@ -557,12 +557,13 @@ func (o *Orchestrator) LoadSnapshot( SnapshotFilePath: snapFilePath, MemFilePath: memFilePath, EnableUserPF: o.GetUPFEnabled(), - NetworkNamespace: vm.NetConfig.GetNamespacePath(), - Offloaded: ! isFullLocal, + NetworkNamespace: "", + Offloaded: ! o.isFullLocal, } - if isFullLocal { + if o.isFullLocal { req.NewSnapshotPath = containerSnap.GetDevicePath() + req.NetworkNamespace = vm.GetNetworkNamespace() } if o.GetUPFEnabled() { @@ -598,12 +599,12 @@ func (o *Orchestrator) LoadSnapshot( vm.SnapBooted = true - return &StartVMResponse{GuestIP: vm.NetConfig.GetCloneIP()}, nil, nil + return &StartVMResponse{GuestIP: vm.GetIP()}, loadSnapshotMetric, nil } // Offload Shuts down the VM but leaves shim and other resources running. 
-func (o *Orchestrator) OffloadVM(ctx context.Context, vmID string, isFullLocal bool) error { - if isFullLocal { +func (o *Orchestrator) OffloadVM(ctx context.Context, vmID string) error { + if o.isFullLocal { return errors.New("Fully local snapshots do not support offloading") } @@ -633,14 +634,16 @@ func (o *Orchestrator) OffloadVM(ctx context.Context, vmID string, isFullLocal b return err } - if err := o.vmPool.Free(vmID); err != nil { - logger.Error("failed to free VM from VM pool") + if err := o.vmPool.RecreateTap(vmID); err != nil { + logger.Error("Failed to recreate tap upon offloading") return err } return nil } +// CleanupSnapshot removes a devicemapper snapshot. This function is only necessary if the alternative approach with +// thin-delta is used. Otherwise, snapshots created from within vHive get already cleaned up during stopVM. func (o *Orchestrator) CleanupSnapshot(ctx context.Context, revisionID string) error { if err := o.devMapper.RemoveDeviceSnapshot(ctx, revisionID); err != nil { return errors.Wrapf(err, "removing revision snapshot") diff --git a/ctriface/iface_test.go b/ctriface/iface_test.go index 30fc8098b..139d48d8c 100644 --- a/ctriface/iface_test.go +++ b/ctriface/iface_test.go @@ -40,6 +40,8 @@ import ( // TODO: Make it impossible to use lazy mode without UPF var ( isUPFEnabled = flag.Bool("upf", false, "Set UPF enabled") + isFullLocal = flag.Bool("fulllocal", false, "Set full local snapshots") + isSparseSnaps = flag.Bool("sparsesnaps", false, "Use sparse snapshots") isLazyMode = flag.Bool("lazy", false, "Set lazy serving on or off") //nolint:deadcode,unused,varcheck isWithCache = flag.Bool("withCache", false, "Do not drop the cache before measurements") @@ -56,6 +58,8 @@ func TestPauseSnapResume(t *testing.T) { log.SetLevel(log.InfoLevel) + flag.Parse() + testTimeout := 120 * time.Second ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) defer cancel() @@ -69,28 +73,35 @@ func 
TestPauseSnapResume(t *testing.T) { WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() vmID := "4" - revisionID := "myrev-4" + snapId := vmID + if *isFullLocal { + snapId = "myrev-4" + } - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap, false) + snap := snapshotting.NewSnapshot(snapId, "/fccd/snapshots", TestImageName, 256, 1, *isSparseSnaps) + if *isFullLocal { + err = snap.CreateSnapDir() + } + require.NoError(t, err, "Failed to create directory for snapshot") + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - err = orch.StopSingleVM(ctx, vmID, false) + err = orch.StopSingleVM(ctx, vmID) require.NoError(t, err, "Failed to stop VM") - - orch.Cleanup() } func TestStartStopSerial(t *testing.T) { @@ -117,17 +128,17 @@ func TestStartStopSerial(t *testing.T) { WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() vmID := "5" - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM") - err = orch.StopSingleVM(ctx, vmID, false) + err = orch.StopSingleVM(ctx, vmID) require.NoError(t, err, "Failed to stop VM") - - orch.Cleanup() } func TestPauseResumeSerial(t *testing.T) { @@ -154,11 +165,13 @@ func TestPauseResumeSerial(t *testing.T) { WithTestModeOn(true), WithUPF(*isUPFEnabled), 
WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() vmID := "6" - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) @@ -167,10 +180,8 @@ func TestPauseResumeSerial(t *testing.T) { _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - err = orch.StopSingleVM(ctx, vmID, false) + err = orch.StopSingleVM(ctx, vmID) require.NoError(t, err, "Failed to stop VM") - - orch.Cleanup() } func TestStartStopParallel(t *testing.T) { @@ -198,7 +209,9 @@ func TestStartStopParallel(t *testing.T) { WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() // Pull image _, err := orch.GetImage(ctx, TestImageName) @@ -211,7 +224,7 @@ func TestStartStopParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM "+vmID) }(i) } @@ -225,14 +238,12 @@ func TestStartStopParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - err := orch.StopSingleVM(ctx, vmID, false) + err := orch.StopSingleVM(ctx, vmID) require.NoError(t, err, "Failed to stop VM "+vmID) }(i) } vmGroup.Wait() } - - orch.Cleanup() } func TestPauseResumeParallel(t *testing.T) { @@ -260,7 +271,9 @@ func TestPauseResumeParallel(t *testing.T) { WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() // Pull image _, err := orch.GetImage(ctx, TestImageName) @@ -273,7 +286,7 @@ func TestPauseResumeParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - _, 
_, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM") }(i) } @@ -315,12 +328,10 @@ func TestPauseResumeParallel(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i) - err := orch.StopSingleVM(ctx, vmID, false) + err := orch.StopSingleVM(ctx, vmID) require.NoError(t, err, "Failed to stop VM") }(i) } vmGroup.Wait() } - - orch.Cleanup() } diff --git a/ctriface/image/Makefile b/ctriface/image/Makefile new file mode 100644 index 000000000..5f39411fd --- /dev/null +++ b/ctriface/image/Makefile @@ -0,0 +1,33 @@ +# MIT License +# +# Copyright (c) 2020 Dmitrii Ustiugov, Plamen Petrov and EASE lab +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +EXTRAGOARGS:=-v -race -cover + +test: + # Need to pass GOROOT because GitHub-hosted runners may have several + # go versions installed so that calling go from root may fail + sudo env "PATH=$(PATH)" "GOROOT=$(GOROOT)" go test ./ $(EXTRAGOARGS) + +test-man: + echo "Nothing to test manually" + +.PHONY: test test-man \ No newline at end of file diff --git a/ctrimages/imageManager.go b/ctriface/image/manager.go similarity index 87% rename from ctrimages/imageManager.go rename to ctriface/image/manager.go index 04bda27f1..a3c39a15c 100644 --- a/ctrimages/imageManager.go +++ b/ctriface/image/manager.go @@ -21,7 +21,7 @@ // SOFTWARE. // Package ctrimages provides an image manager that manages and caches container images. -package ctrimages +package image import ( "context" @@ -35,29 +35,29 @@ import ( "sync" ) -// ImageState is used to synchronize image pulling to avoid pulling the same image multiple times concurrently. +// ImageState is used for synchronization to avoid pulling the same image multiple times concurrently. type ImageState struct { sync.Mutex - pulled bool + isCached bool } -// NewImageState creates a new ImageState object that can be used to synchronize image pulling. +// NewImageState creates a new ImageState object that can be used to synchronize pulling a single image func NewImageState() *ImageState { state := new(ImageState) - state.pulled = false + state.isCached = false return state } // ImageManager manages the images that have been pulled to the node. type ImageManager struct { sync.Mutex - snapshotter string // image snapshotter + snapshotter string // image snapshotter cachedImages map[string]containerd.Image // Cached container images imageStates map[string]*ImageState client *containerd.Client } -// NewImageManager creates a new imagemanager that can be used to fetch container images. +// NewImageManager creates a new image manager that can be used to fetch container images. 
func NewImageManager(client *containerd.Client, snapshotter string) *ImageManager { log.Info("Creating image manager") manager := new(ImageManager) @@ -104,8 +104,10 @@ func (mgr *ImageManager) pullImage(ctx context.Context, imageName string) error return nil } -// GetImage fetches an image that can be used to create a container using containerd +// GetImage fetches an image that can be used to create a container using containerd. Synchronization is implemented +// on a per image level to keep waiting to a minimum. func (mgr *ImageManager) GetImage(ctx context.Context, imageName string) (*containerd.Image, error) { + // Get reference to synchronization object for image mgr.Lock() imgState, found := mgr.imageStates[imageName] if !found { @@ -114,14 +116,14 @@ func (mgr *ImageManager) GetImage(ctx context.Context, imageName string) (*conta } mgr.Unlock() - // Pull image if necessary + // Pull image if necessary. The image will only be pulled by the first thread to take the lock. imgState.Lock() - if !imgState.pulled { + if !imgState.isCached { if err := mgr.pullImage(ctx, imageName); err != nil { imgState.Unlock() return nil, err } - imgState.pulled = true + imgState.isCached = true } imgState.Unlock() diff --git a/ctriface/image/manager_test.go b/ctriface/image/manager_test.go new file mode 100644 index 000000000..13ea71ac7 --- /dev/null +++ b/ctriface/image/manager_test.go @@ -0,0 +1,134 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov, Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and 
this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package image + +import ( + "context" + "fmt" + "github.com/containerd/containerd" + "github.com/containerd/containerd/namespaces" + "os" + "sync" + "testing" + "time" + + ctrdlog "github.com/containerd/containerd/log" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +const ( + TestImageName = "ghcr.io/ease-lab/helloworld:var_workload" + containerdAddress = "/run/firecracker-containerd/containerd.sock" + NamespaceName = "firecracker-containerd" +) + +func getAllImages() map[string]string { + return map[string]string{ + "helloworld": "ghcr.io/ease-lab/helloworld:var_workload", + "chameleon": "ghcr.io/ease-lab/chameleon:var_workload", + "pyaes": "ghcr.io/ease-lab/pyaes:var_workload", + "image_rotate": "ghcr.io/ease-lab/image_rotate:var_workload", + "lr_training": "ghcr.io/ease-lab/lr_training:var_workload", + } +} + +func TestMain(m *testing.M) { + // call flag.Parse() here if TestMain uses flags + + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: ctrdlog.RFC3339NanoFixed, + FullTimestamp: true, + }) + + log.SetOutput(os.Stdout) + + log.SetLevel(log.InfoLevel) + + os.Exit(m.Run()) +} + +func TestSingleConcurrent(t *testing.T) { + // Create client + client, err := containerd.New(containerdAddress) + defer func() { _ = client.Close() }() + require.NoError(t, err, "Containerd client creation returned error") + + // Create image manager + mgr := 
NewImageManager(client, "devmapper") + + testTimeout := 120 * time.Second + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) + defer cancel() + + // Pull image + var wg sync.WaitGroup + concurrentPulls := 100 + wg.Add(concurrentPulls) + + for i := 0; i < concurrentPulls; i++ { + go func(i int) { + defer wg.Done() + _, err := mgr.GetImage(ctx, TestImageName) + require.NoError(t, err, fmt.Sprintf("Failed to pull image %s", TestImageName)) + }(i) + } + wg.Wait() +} + +func TestMultipleConcurrent(t *testing.T) { + // Create client + client, err := containerd.New(containerdAddress) + defer func() { _ = client.Close() }() + require.NoError(t, err, "Containerd client creation returned error") + + // Create image manager + mgr := NewImageManager(client, "devmapper") + + testTimeout := 300 * time.Second + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) + defer cancel() + + // Pull image + var wg sync.WaitGroup + concurrentPulls := 100 + wg.Add(len(getAllImages())) + + for _, imgName := range getAllImages() { + go func(imgName string) { + var imgWg sync.WaitGroup + imgWg.Add(concurrentPulls) + for i := 0; i < concurrentPulls; i++ { + go func(i int) { + defer imgWg.Done() + _, err := mgr.GetImage(ctx, imgName) + require.NoError(t, err, fmt.Sprintf("Failed to pull image %s", imgName)) + }(i) + } + imgWg.Wait() + wg.Done() + }(imgName) + } + + wg.Wait() +} \ No newline at end of file diff --git a/ctriface/manual_cleanup_test.go b/ctriface/manual_cleanup_test.go index 943c4b81b..3038f108d 100644 --- a/ctriface/manual_cleanup_test.go +++ b/ctriface/manual_cleanup_test.go @@ -59,35 +59,55 @@ func TestSnapLoad(t *testing.T) { "", "fc-dev-thinpool", "", - 10, + 1, WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() vmID := "1" - revisionID := "myrev-1" + snapId := vmID + if 
*isFullLocal { + snapId = "myrev-1" + } - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap, false) + snap := snapshotting.NewSnapshot(snapId, "/fccd/snapshots", TestImageName, 256, 1, *isSparseSnaps) + if *isFullLocal { + err = snap.CreateSnapDir() + } + + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) + if *isFullLocal { + err = orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM") + } else { + err = orch.OffloadVM(ctx, vmID) + require.NoError(t, err, "Failed to offload VM") + } + + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - orch.Cleanup() + if *isFullLocal { + err = orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM") + } } func TestSnapLoadMultiple(t *testing.T) { @@ -115,34 +135,67 @@ func TestSnapLoadMultiple(t *testing.T) { WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() vmID := "3" - revisionID := "myrev-3" + snapId := vmID + if *isFullLocal { + snapId = "myrev-3" + } - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM") err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM") - snap := 
snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0,false) - err = orch.CreateSnapshot(ctx, vmID, snap, false) + snap := snapshotting.NewSnapshot(snapId, "/fccd/snapshots", TestImageName, 256, 1, *isSparseSnaps) + if *isFullLocal { + err = snap.CreateSnapDir() + } + require.NoError(t, err, "Failed to create directory for snapshot") + + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM") - _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) + if *isFullLocal { + // TODO: stopVM does not work without resuming + _, err = orch.ResumeVM(ctx, vmID) + require.NoError(t, err, "Failed to resume VM") + + err = orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM") + } else { + err = orch.OffloadVM(ctx, vmID) + require.NoError(t, err, "Failed to offload VM") + } + + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM") - _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) + if *isFullLocal { + err = orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM") + } else { + err = orch.OffloadVM(ctx, vmID) + require.NoError(t, err, "Failed to offload VM") + } + + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM") _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM, ") - orch.Cleanup() + if *isFullLocal { + err = orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM") + } } func TestParallelSnapLoad(t *testing.T) { @@ -173,7 +226,9 @@ func TestParallelSnapLoad(t *testing.T) { WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() // Pull image _, err := orch.GetImage(ctx, TestImageName) @@ -185,28 +240,51 @@ func TestParallelSnapLoad(t *testing.T) { go func(i int) { 
defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) + snapId := vmID + if *isFullLocal { + snapId = fmt.Sprintf("myrev-%d", i+vmIDBase) + } - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM, "+vmID) err = orch.PauseVM(ctx, vmID) require.NoError(t, err, "Failed to pause VM, "+vmID) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap, false) + snap := snapshotting.NewSnapshot(snapId, "/fccd/snapshots", TestImageName, 256, 1, *isSparseSnaps) + if *isFullLocal { + err = snap.CreateSnapDir() + } + require.NoError(t, err, "Failed to create directory for snapshot") + + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM, "+vmID) - _, _, err = orch.LoadSnapshot(ctx, vmID, snap, false) + if *isFullLocal { + // TODO: stopVM does not work without resuming + _, err = orch.ResumeVM(ctx, vmID) + require.NoError(t, err, "Failed to resume VM") + + err = orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM, "+vmID) + } else { + err = orch.OffloadVM(ctx, vmID) + require.NoError(t, err, "Failed to offload VM, "+vmID) + } + + _, _, err = orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM, "+vmID) _, err = orch.ResumeVM(ctx, vmID) require.NoError(t, err, "Failed to resume VM, "+vmID) + + if *isFullLocal { + err = orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM, "+vmID) + } }(i) } vmGroup.Wait() - - orch.Cleanup() } func TestParallelPhasedSnapLoad(t *testing.T) { @@ -237,7 +315,9 @@ func TestParallelPhasedSnapLoad(t *testing.T) { WithTestModeOn(true), WithUPF(*isUPFEnabled), WithLazyMode(*isLazyMode), + WithFullLocal(*isFullLocal), ) + defer orch.Cleanup() // Pull 
image _, err := orch.GetImage(ctx, TestImageName) @@ -250,7 +330,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, false, false) + _, _, err := orch.StartVM(ctx, vmID, TestImageName, 256, 1, *isSparseSnaps) require.NoError(t, err, "Failed to start VM, "+vmID) }(i) } @@ -278,9 +358,17 @@ func TestParallelPhasedSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, vmID, snap, false) + snapId := vmID + if *isFullLocal { + snapId = fmt.Sprintf("myrev-%d", i+vmIDBase) + } + snap := snapshotting.NewSnapshot(snapId, "/fccd/snapshots", TestImageName, 256, 1, *isSparseSnaps) + if *isFullLocal { + err = snap.CreateSnapDir() + } + require.NoError(t, err, "Failed to create directory for snapshot") + + err = orch.CreateSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to create snapshot of VM, "+vmID) }(i) } @@ -294,9 +382,35 @@ func TestParallelPhasedSnapLoad(t *testing.T) { go func(i int) { defer vmGroup.Done() vmID := fmt.Sprintf("%d", i+vmIDBase) - revisionID := fmt.Sprintf("myrev-%d", i+vmIDBase) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", TestImageName, 0, 0, false) - _, _, err := orch.LoadSnapshot(ctx, vmID, snap, false) + if *isFullLocal { + // TODO: stopVM does not work without resuming + _, err = orch.ResumeVM(ctx, vmID) + require.NoError(t, err, "Failed to resume VM") + + err = orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM, "+vmID) + } else { + err = orch.OffloadVM(ctx, vmID) + require.NoError(t, err, "Failed to offload VM, "+vmID) + } + }(i) + } + vmGroup.Wait() + } + + { + var vmGroup sync.WaitGroup + for i := 0; i < vmNum; i++ { + vmGroup.Add(1) + 
go func(i int) { + defer vmGroup.Done() + vmID := fmt.Sprintf("%d", i+vmIDBase) + snapId := vmID + if *isFullLocal { + snapId = fmt.Sprintf("myrev-%d", i+vmIDBase) + } + snap := snapshotting.NewSnapshot(snapId, "/fccd/snapshots", TestImageName, 256, 1, *isSparseSnaps) + _, _, err := orch.LoadSnapshot(ctx, vmID, snap) require.NoError(t, err, "Failed to load snapshot of VM, "+vmID) }(i) } @@ -317,5 +431,19 @@ func TestParallelPhasedSnapLoad(t *testing.T) { vmGroup.Wait() } - orch.Cleanup() + if *isFullLocal { + { + var vmGroup sync.WaitGroup + for i := 0; i < vmNum; i++ { + vmGroup.Add(1) + go func(i int) { + defer vmGroup.Done() + vmID := fmt.Sprintf("%d", i+vmIDBase) + err := orch.StopSingleVM(ctx, vmID) + require.NoError(t, err, "Failed to stop VM, "+vmID) + }(i) + } + vmGroup.Wait() + } + } } diff --git a/ctriface/orch.go b/ctriface/orch.go index 28cce84ed..e83f56476 100644 --- a/ctriface/orch.go +++ b/ctriface/orch.go @@ -23,7 +23,7 @@ package ctriface import ( - "github.com/ease-lab/vhive/ctrimages" + "github.com/ease-lab/vhive/ctriface/image" "github.com/ease-lab/vhive/devmapper" "github.com/ease-lab/vhive/memory/manager" "os" @@ -81,7 +81,7 @@ type Orchestrator struct { client *containerd.Client fcClient *fcclient.Client devMapper *devmapper.DeviceMapper - imageManager *ctrimages.ImageManager + imageManager *image.ImageManager // store *skv.KVStore snapshotsEnabled bool isUPFEnabled bool @@ -89,6 +89,7 @@ type Orchestrator struct { snapshotsDir string isMetricsMode bool hostIface string + isFullLocal bool memoryManager *manager.MemoryManager } @@ -98,7 +99,6 @@ func NewOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPo var err error o := new(Orchestrator) - o.vmPool = misc.NewVMPool(hostIface, netPoolSize) o.snapshotter = snapshotter o.snapshotsDir = "/fccd/snapshots" o.hostIface = hostIface @@ -140,7 +140,9 @@ func NewOrchestrator(snapshotter, hostIface, poolName, metadataDev string, netPo o.devMapper = 
devmapper.NewDeviceMapper(o.client, poolName, metadataDev) - o.imageManager = ctrimages.NewImageManager(o.client, o.snapshotter) + o.imageManager = image.NewImageManager(o.client, o.snapshotter) + + o.vmPool = misc.NewVMPool(hostIface, netPoolSize, o.isFullLocal) return o } @@ -151,7 +153,7 @@ func (o *Orchestrator) setupCloseHandler() { go func() { <-c log.Info("\r- Ctrl+C pressed in Terminal") - _ = o.StopActiveVMs(false) + _ = o.StopActiveVMs() o.Cleanup() os.Exit(0) }() @@ -214,7 +216,7 @@ func (o *Orchestrator) getWorkingSetFile(vmID string) string { } func (o *Orchestrator) getVMBaseDir(vmID string) string { - return filepath.Join(o.snapshotsDir, vmID) + return filepath.Join(o.snapshotsDir, vmID) } func (o *Orchestrator) setupHeartbeat() { diff --git a/ctriface/orch_options.go b/ctriface/orch_options.go index 6ac641a0c..08456e9b3 100644 --- a/ctriface/orch_options.go +++ b/ctriface/orch_options.go @@ -42,6 +42,13 @@ func WithSnapshots(snapshotsEnabled bool) OrchestratorOption { } } +// WithSnapshots Sets the snapshot mode on or off +func WithFullLocal(fullLocal bool) OrchestratorOption { + return func(o *Orchestrator) { + o.isFullLocal = fullLocal + } +} + // WithUPF Sets the user-page faults mode on or off func WithUPF(isUPFEnabled bool) OrchestratorOption { return func(o *Orchestrator) { diff --git a/ctriface/types.go b/ctriface/types.go index 81c0a2e19..98d5fc9fb 100644 --- a/ctriface/types.go +++ b/ctriface/types.go @@ -22,36 +22,8 @@ package ctriface -import ( - "context" - "github.com/containerd/containerd" - "github.com/ease-lab/vhive/metrics" - "github.com/ease-lab/vhive/snapshotting" -) - // StartVMResponse is the response returned by StartVM type StartVMResponse struct { // GuestIP is the IP of the guest MicroVM GuestIP string } - -type OrchestratorInterface interface { - StartVM(ctx context.Context, vmID, imageName string, memSizeMib, vCPUCount uint32, trackDirtyPages bool) (_ *StartVMResponse, _ *metrics.Metric, retErr error) - StopSingleVM(ctx 
context.Context, vmID string) error - OffloadVM(ctx context.Context, vmID string) error - StopActiveVMs() error - PauseVM(ctx context.Context, vmID string) error - ResumeVM(ctx context.Context, vmID string) (*metrics.Metric, error) - CreateSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) error - LoadSnapshot(ctx context.Context, vmID string, snap *snapshotting.Snapshot) (_ *StartVMResponse, _ *metrics.Metric, retErr error) - CleanupSnapshot(ctx context.Context, id string) error - GetImage(ctx context.Context, imageName string) (*containerd.Image, error) - GetSnapshotsEnabled() bool - GetUPFEnabled() bool - Cleanup() - - // TODO: these should be removed in the future - DumpUPFPageStats(vmID, functionName, metricsOutFilePath string) error - DumpUPFLatencyStats(vmID, functionName, latencyOutFilePath string) error - GetUPFLatencyStats(vmID string) ([]*metrics.Metric, error) -} diff --git a/devmapper/Makefile b/devmapper/Makefile new file mode 100644 index 000000000..389c0bd76 --- /dev/null +++ b/devmapper/Makefile @@ -0,0 +1,33 @@ +# MIT License +# +# Copyright (c) 2020 Dmitrii Ustiugov, Plamen Petrov and EASE lab +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +EXTRAGOARGS:=-v -race -cover + +test: + # Need to pass GOROOT because GitHub-hosted runners may have several + # go versions installed so that calling go from root may fail + sudo env "PATH=$(PATH)" "GOROOT=$(GOROOT)" go test ./ $(EXTRAGOARGS) + ./../scripts/clean_fcctr.sh +test-man: + echo "Nothing to test manually" + +.PHONY: test test-man \ No newline at end of file diff --git a/devmapper/deviceSnapshot.go b/devmapper/deviceSnapshot.go index b3cc5af80..738d8706c 100644 --- a/devmapper/deviceSnapshot.go +++ b/devmapper/deviceSnapshot.go @@ -23,6 +23,7 @@ package devmapper import ( + "bytes" "fmt" "github.com/pkg/errors" "io/ioutil" @@ -76,12 +77,14 @@ func (dsnp *DeviceSnapshot) Activate() error { if dsnp.numActivated == 0 { tableEntry := fmt.Sprintf("0 20971520 thin %s %s", dsnp.getPoolPath(), dsnp.deviceId) - cmd := exec.Command("sudo", "dmsetup", "create", dsnp.deviceName, "--table", fmt.Sprintf("%s", tableEntry)) + var errb bytes.Buffer + cmd := exec.Command("sudo", "dmsetup", "create", dsnp.deviceName, "--table", tableEntry) + cmd.Stderr = &errb err := cmd.Run() + if err != nil { - return errors.Wrapf(err, "activating snapshot %s", dsnp.deviceName) + return errors.Wrapf(err, "activating snapshot %s: %s", dsnp.deviceName, errb.String()) } - } dsnp.numActivated += 1 diff --git a/devmapper/devicemapper.go b/devmapper/devicemapper.go index 7142b1350..cd5e7980d 100644 --- a/devmapper/devicemapper.go +++ b/devmapper/devicemapper.go @@ -38,6 +38,12 @@ import ( "sync" ) +// Own managed snapshots in snapmanager and in containerd identified by snapkey. Has name deviceName. +// snapshotId used internally by containerd, needed for thin_delta. 
Once committed, snapshots identified +// by snapName within containerd? + +// Only remove snapshots created through createSnapshot. Use key used there to delete them. + // DeviceMapper creates and manages device snapshots used to store container images. type DeviceMapper struct { sync.Mutex @@ -178,6 +184,7 @@ func (dmpr *DeviceMapper) GetDeviceSnapshot(ctx context.Context, snapKey string) _, present := dmpr.snapDevices[snapKey] if !present { + // Get snapshot from containerd if not yet stored by vHive devicemapper info, err := dmpr.snapshotService.Stat(ctx, snapKey) if err != nil { return nil, err @@ -210,7 +217,6 @@ func getDeviceName(poolName, snapshotId string) string { return fmt.Sprintf("%s-snap-%s", poolName, snapshotId) } -// CreatePatch creates a patch file storing the difference between an image and the container filesystem // CreatePatch creates a patch file storing the file differences between and image and the changes applied // by the container using rsync. Note that this is a different approach than using thin_delta which is able to // extract blocks directly by leveraging the metadata stored by the device mapper. @@ -231,20 +237,20 @@ func (dmpr *DeviceMapper) CreatePatch(ctx context.Context, patchPath, containerS if err != nil { return errors.Wrapf(err, "failed to activate image snapshot") } - defer imageSnap.Deactivate() + defer func() { _ = imageSnap.Deactivate() }() // 2. Mount original and snapshot image imageMountPath, err := imageSnap.Mount(true) if err != nil { return err } - defer imageSnap.UnMount() + defer func() { _ = imageSnap.UnMount() }() containerMountPath, err := containerSnap.Mount(true) if err != nil { return err } - defer containerSnap.UnMount() + defer func() { _ = containerSnap.UnMount() }() // 3. 
Save changes to file result := extractPatch(imageMountPath, containerMountPath, patchPath) @@ -281,7 +287,7 @@ func (dmpr *DeviceMapper) RestorePatch(ctx context.Context, containerSnapKey, pa if err != nil { return err } - defer containerSnap.UnMount() + defer func() { _ = containerSnap.UnMount() }() // 2. Apply changes to container mounted file system return applyPatch(containerMountPath, patchPath) diff --git a/devmapper/devicemapper_test.go b/devmapper/devicemapper_test.go new file mode 100644 index 000000000..96b684131 --- /dev/null +++ b/devmapper/devicemapper_test.go @@ -0,0 +1,137 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov, Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package devmapper_test + +import ( + "context" + "fmt" + "github.com/containerd/containerd" + ctrdlog "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/ease-lab/vhive/ctriface/image" + "github.com/ease-lab/vhive/devmapper" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "os" + "sync" + "testing" + "time" +) + +const ( + containerdAddress = "/run/firecracker-containerd/containerd.sock" + poolName = "fc-dev-thinpool" + NamespaceName = "firecracker-containerd" + TestImageName = "ghcr.io/ease-lab/helloworld:var_workload" +) + +func getAllImages() map[string]string { + return map[string]string{ + "helloworld": "ghcr.io/ease-lab/helloworld:var_workload", + "chameleon": "ghcr.io/ease-lab/chameleon:var_workload", + "pyaes": "ghcr.io/ease-lab/pyaes:var_workload", + "image_rotate": "ghcr.io/ease-lab/image_rotate:var_workload", + "lr_training": "ghcr.io/ease-lab/lr_training:var_workload", + } +} + +func TestMain(m *testing.M) { + // call flag.Parse() here if TestMain uses flags + + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: ctrdlog.RFC3339NanoFixed, + FullTimestamp: true, + }) + + log.SetOutput(os.Stdout) + + log.SetLevel(log.InfoLevel) + + os.Exit(m.Run()) +} + +func testDevmapper(t *testing.T, mgr *image.ImageManager, dmpr *devmapper.DeviceMapper, snapKey, imageName string) { + // Pull image + testTimeout := 120 * time.Second + ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout) + defer cancel() + + img, err := mgr.GetImage(ctx, imageName) + require.NoError(t, err, fmt.Sprintf("Failed to pull image %s", imageName)) + + // Test devmapper + err = dmpr.CreateDeviceSnapshotFromImage(ctx, snapKey, *img) + require.NoError(t, err, fmt.Sprintf("Failed to create snapshot from image %s", imageName)) + + _, err = dmpr.GetDeviceSnapshot(ctx, snapKey) + if err != nil { + _ = dmpr.RemoveDeviceSnapshot(ctx, snapKey) + } + 
require.NoError(t, err, fmt.Sprintf("Failed to fetch previously created snapshot %s", snapKey)) + + err = dmpr.RemoveDeviceSnapshot(ctx, snapKey) + require.NoError(t, err, fmt.Sprintf("Failed to remove snapshot %s", snapKey)) +} + +func TestDevmapper(t *testing.T) { + snapKey := "testsnap-1" + + // Create containerd client + client, err := containerd.New(containerdAddress) + defer client.Close() + require.NoError(t, err, "Containerd client creation returned error") + + // Create image manager + mgr := image.NewImageManager(client, "devmapper") + + // Create devmapper + dmpr := devmapper.NewDeviceMapper(client, poolName, "") + + testDevmapper(t, mgr, dmpr, snapKey, TestImageName) +} + +func TestDevmapperConcurrent(t *testing.T) { + // Create containerd client + client, err := containerd.New(containerdAddress) + defer client.Close() + require.NoError(t, err, "Containerd client creation returned error") + + // Create image manager + mgr := image.NewImageManager(client, "devmapper") + + // Create devmapper + dmpr := devmapper.NewDeviceMapper(client, poolName, "") + + // Test concurrent devmapper + var wg sync.WaitGroup + wg.Add(len(getAllImages())) + + for _, imgName := range getAllImages() { + go func(imgName string) { + snapKey := fmt.Sprintf("testsnap-%s", imgName) + testDevmapper(t, mgr, dmpr, snapKey, imgName) + wg.Done() + }(imgName) + } + wg.Wait() +} diff --git a/devmapper/thindelta/blockDelta.go b/devmapper/thindelta/blockDelta.go index 3470fd781..622861298 100644 --- a/devmapper/thindelta/blockDelta.go +++ b/devmapper/thindelta/blockDelta.go @@ -91,7 +91,7 @@ func (bld *BlockDelta) DeserializeDiffBlocks(storePath string) error { // ReadBlocks directly reads the computed differing blocks from the specified data device. 
func (bld *BlockDelta) ReadBlocks(dataDevPath string) error { file, err := os.Open(dataDevPath) - defer file.Close() + defer func() { _ = file.Close() }() if err != nil { return errors.Wrapf(err, "opening data device for reading") @@ -121,7 +121,7 @@ func (bld *BlockDelta) ReadBlocks(dataDevPath string) error { // WriteBlocks directly writes the differing blocks to the specified destination data device. func (bld *BlockDelta) WriteBlocks(dataDevPath string) error { file, err := os.OpenFile(dataDevPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) - defer file.Close() + defer func() { file.Close() }() if err != nil { return errors.Wrapf(err, "opening data device for writing") diff --git a/devmapper/thindelta/thinDelta.go b/devmapper/thindelta/thinDelta.go index ef25ebe14..581f735b7 100644 --- a/devmapper/thindelta/thinDelta.go +++ b/devmapper/thindelta/thinDelta.go @@ -113,7 +113,7 @@ func (thd *ThinDelta) getBlocksRawDelta(snap1DeviceId, snap2DeviceId string) (*b return nil, errors.Wrapf(err, "failed to reserve metadata snapshot") } defer func() { - thd.releaseMetadataSnap() + _ = thd.releaseMetadataSnap() }() cmd := exec.Command("sudo", "thin_delta", "-m", thd.metaDataDev, "--snap1", snap1DeviceId, "--snap2", snap2DeviceId) diff --git a/functions.go b/functions.go index 951990e71..14f490485 100644 --- a/functions.go +++ b/functions.go @@ -357,7 +357,7 @@ func (f *Function) AddInstance() *metrics.Metric { if f.isSnapshotReady { metr = f.LoadInstance() } else { - resp, _, err := orch.StartVM(ctx, f.getVMID(), f.imageName, 256, 1, false, false) + resp, _, err := orch.StartVM(ctx, f.getVMID(), f.imageName, 256, 1, false) if err != nil { log.Panic(err) } @@ -388,7 +388,7 @@ func (f *Function) RemoveInstanceAsync() { logger.Debug("Removing instance (async)") go func() { - err := orch.StopSingleVM(context.Background(), f.vmID, false) + err := orch.StopSingleVM(context.Background(), f.vmID) if err != nil { log.Warn(err) } @@ -416,7 +416,7 @@ func (f *Function) 
RemoveInstance(isSync bool) (string, error) { r = "Successfully offloaded instance " + f.vmID } else { if isSync { - err = orch.StopSingleVM(context.Background(), f.vmID, false) + err = orch.StopSingleVM(context.Background(), f.vmID) } else { f.RemoveInstanceAsync() r = "Successfully removed (async) instance " + f.vmID @@ -451,9 +451,8 @@ func (f *Function) CreateInstanceSnapshot() { log.Panic(err) } - revisionID := fmt.Sprintf("myrev-%s", f.vmID) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, false) - err = orch.CreateSnapshot(ctx, f.vmID, snap, false) + snap := snapshotting.NewSnapshot(f.vmID, "/fccd/snapshots", f.imageName, 256, 1, false) + err = orch.CreateSnapshot(ctx, f.vmID, snap) if err != nil { log.Panic(err) } @@ -473,7 +472,7 @@ func (f *Function) OffloadInstance() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) defer cancel() - err := orch.OffloadVM(ctx, f.vmID, false) + err := orch.OffloadVM(ctx, f.vmID) if err != nil { log.Panic(err) } @@ -490,9 +489,8 @@ func (f *Function) LoadInstance() *metrics.Metric { ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) defer cancel() - revisionID := fmt.Sprintf("myrev-%s", f.vmID) - snap := snapshotting.NewSnapshot(revisionID, "/fccd/snapshots", f.imageName, 0, 0, false) - _, loadMetr, err := orch.LoadSnapshot(ctx, f.vmID, snap, false) + snap := snapshotting.NewSnapshot(f.vmID, "/fccd/snapshots", f.imageName, 256, 1, false) + _, loadMetr, err := orch.LoadSnapshot(ctx, f.vmID, snap) if err != nil { log.Panic(err) } diff --git a/go.mod b/go.mod index 4d2e1d834..10c9d77e3 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ replace ( replace ( // github.com/firecracker-microvm/firecracker-containerd => github.com/ease-lab/firecracker-containerd v0.0.0-20210618165033-6af02db30bc4 - github.com/containerd/containerd => github.com/amohoste/containerd v1.5.5-ids // TODO: change to vhive + github.com/containerd/containerd => 
github.com/ease-lab/containerd v1.5.5-ids // TODO: change to vhive github.com/ease-lab/vhive/examples/protobuf/helloworld => ./examples/protobuf/helloworld github.com/firecracker-microvm/firecracker-containerd => github.com/amohoste/firecracker-containerd v1.0.0-enhanced-snap // TODO: change to vhive ) diff --git a/go.sum b/go.sum index 29ba21888..76d9b6c39 100644 --- a/go.sum +++ b/go.sum @@ -71,8 +71,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/amohoste/containerd v1.5.5-ids h1:ewus7bzwx6j8ZlKqNjoctyQ2EOKAK+9nYBqC+D4XKfg= -github.com/amohoste/containerd v1.5.5-ids/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo= github.com/amohoste/firecracker-containerd v1.0.0-enhanced-snap h1:PRbQkUGXhT+4V0WBzEbv9dS2xPD5kmIpi7RH56Zo2Ig= github.com/amohoste/firecracker-containerd v1.0.0-enhanced-snap/go.mod h1:DlVSzah7WtO75UNGqzBqlEL3acKBd2ZJB6n6K8nbevk= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= @@ -187,6 +185,8 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/ease-lab/containerd v1.5.5-ids h1:lxkfLF1hzXEC1q+BNHNi4W9aja0xHp3rg+EOe+04YgU= +github.com/ease-lab/containerd v1.5.5-ids/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo= 
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= diff --git a/misc/Makefile b/misc/Makefile index ad6822934..010fc822d 100644 --- a/misc/Makefile +++ b/misc/Makefile @@ -21,12 +21,13 @@ # SOFTWARE. EXTRAGOARGS:=-v -race -cover +WITHFULLLOCAL:=-fulllocal test: # Need to pass GOROOT because GitHub-hosted runners may have several # go versions installed so that calling go from root may fail sudo env "PATH=$(PATH)" "GOROOT=$(GOROOT)" go test ./ $(EXTRAGOARGS) - + sudo env "PATH=$(PATH)" "GOROOT=$(GOROOT)" go test ./ $(EXTRAGOARGS) -args $(WITHFULLLOCAL) test-man: echo "Nothing to test manually" diff --git a/misc/misc_test.go b/misc/misc_test.go index bf3e918ee..8caeadbfb 100644 --- a/misc/misc_test.go +++ b/misc/misc_test.go @@ -23,6 +23,7 @@ package misc import ( + "flag" "fmt" "os" "sync" @@ -34,6 +35,10 @@ import ( "github.com/stretchr/testify/require" ) +var ( + isFullLocal = flag.Bool("fulllocal", false, "Set full local snapshots") +) + func TestMain(m *testing.M) { log.SetFormatter(&log.TextFormatter{ TimestampFormat: ctrdlog.RFC3339NanoFixed, @@ -44,11 +49,13 @@ func TestMain(m *testing.M) { log.SetLevel(log.InfoLevel) + flag.Parse() + os.Exit(m.Run()) } func TestAllocateFreeVMs(t *testing.T) { - vmPool := NewVMPool("", 10) + vmPool := NewVMPool("", 10, *isFullLocal) vmIDs := [2]string{"test1", "test2"} @@ -68,7 +75,7 @@ func TestAllocateFreeVMs(t *testing.T) { func TestAllocateFreeVMsParallel(t *testing.T) { vmNum := 100 - vmPool := NewVMPool("", 10) + vmPool := NewVMPool("", 10, *isFullLocal) var vmGroup sync.WaitGroup for i := 0; i < vmNum; i++ { @@ -98,9 +105,13 @@ func TestAllocateFreeVMsParallel(t *testing.T) { } func TestReuseTaps(t *testing.T) { + if *isFullLocal { 
+ return + } + vmNum := 100 - vmPool := NewVMPool("", 10) + vmPool := NewVMPool("", 10, false) var vmGroup sync.WaitGroup for i := 0; i < vmNum; i++ { @@ -114,18 +125,6 @@ func TestReuseTaps(t *testing.T) { } vmGroup.Wait() - var vmGroupFree sync.WaitGroup - for i := 0; i < vmNum; i++ { - vmGroupFree.Add(1) - go func(i int) { - defer vmGroupFree.Done() - vmID := fmt.Sprintf("test_%d", i) - err := vmPool.Free(vmID) - require.NoError(t, err, "Failed to free a VM") - }(i) - } - vmGroupFree.Wait() - var vmGroupRecreate sync.WaitGroup tStart := time.Now() @@ -135,7 +134,7 @@ func TestReuseTaps(t *testing.T) { go func(i int) { defer vmGroupRecreate.Done() vmID := fmt.Sprintf("test_%d", i) - err := vmPool.Free(vmID) + err := vmPool.RecreateTap(vmID) require.NoError(t, err, "Failed to recreate tap") }(i) } @@ -144,17 +143,17 @@ func TestReuseTaps(t *testing.T) { tElapsed := time.Since(tStart) log.Infof("Recreated %d taps in %d ms", vmNum, tElapsed.Milliseconds()) - var vmGroupCleanup sync.WaitGroup + var vmGroupFree sync.WaitGroup for i := 0; i < vmNum; i++ { - vmGroupCleanup.Add(1) + vmGroupFree.Add(1) go func(i int) { - defer vmGroupCleanup.Done() + defer vmGroupFree.Done() vmID := fmt.Sprintf("test_%d", i) err := vmPool.Free(vmID) require.NoError(t, err, "Failed to free a VM") }(i) } - vmGroupCleanup.Wait() + vmGroupFree.Wait() vmPool.CleanupNetwork() } diff --git a/misc/types.go b/misc/types.go index bfdb2f6c3..226482231 100644 --- a/misc/types.go +++ b/misc/types.go @@ -25,6 +25,7 @@ package misc import ( "fmt" "github.com/ease-lab/vhive/networking" + "github.com/ease-lab/vhive/taps" "sync" "github.com/containerd/containerd" @@ -44,15 +45,72 @@ type VM struct { Container *containerd.Container Task *containerd.Task TaskCh <-chan containerd.ExitStatus - NetConfig *networking.NetworkConfig VCPUCount uint32 MemSizeMib uint32 + netConfig *networking.NetworkConfig + ni *taps.NetworkInterface +} + +// GetIP returns the IP at which the VM is reachable +func (vm *VM) GetIP() 
string { + if vm.netConfig != nil { + return vm.netConfig.GetCloneIP() + } else { + return vm.ni.PrimaryAddress + } +} + +// GetMacAddress returns the name of the VM MAC address +func (vm *VM) GetMacAddress() string { + if vm.netConfig != nil { + return vm.netConfig.GetMacAddress() + } else { + return vm.ni.MacAddress + } +} + +// GetHostDevName returns the name of the VM host device +func (vm *VM) GetHostDevName() string { + if vm.netConfig != nil { + return vm.netConfig.GetHostDevName() + } else { + return vm.ni.HostDevName + } +} + +// GetPrimaryAddr returns the primary IP address of the VM +func (vm *VM) GetPrimaryAddr() string { + if vm.netConfig != nil { + return vm.netConfig.GetContainerCIDR() + } else { + return vm.ni.PrimaryAddress + vm.ni.Subnet + } +} + +func (vm *VM) GetGatewayAddr() string { + if vm.netConfig != nil { + return vm.netConfig.GetGatewayIP() + } else { + return vm.ni.GatewayAddress + } +} + +func (vm *VM) GetNetworkNamespace() string { + if vm.netConfig != nil { + return vm.netConfig.GetNamespacePath() + } else { + return "" + } } // VMPool Pool of active VMs (can be in several states though) type VMPool struct { vmMap sync.Map + isFullLocal bool + // Used to create network for fullLocal snapshot VMs networkManager *networking.NetworkManager + // Used to create snapshots for regular VMs + tapManager *taps.TapManager } // NewVM Initialize a VM diff --git a/misc/vm_pool.go b/misc/vm_pool.go index 8cccfa122..0fd0c495a 100644 --- a/misc/vm_pool.go +++ b/misc/vm_pool.go @@ -24,17 +24,25 @@ package misc import ( "github.com/ease-lab/vhive/networking" + "github.com/ease-lab/vhive/taps" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) // NewVMPool Initializes a pool of VMs -func NewVMPool(hostIface string, netPoolSize int) *VMPool { +func NewVMPool(hostIface string, netPoolSize int, isFullLocal bool) *VMPool { p := new(VMPool) - mgr, err := networking.NewNetworkManager(hostIface, netPoolSize) + p.isFullLocal = isFullLocal + + var err 
error + if p.isFullLocal { + p.networkManager, err = networking.NewNetworkManager(hostIface, netPoolSize) + } else { + p.tapManager, err = taps.NewTapManager(hostIface) + } if err != nil { log.Println(err) } - p.networkManager = mgr return p } @@ -53,7 +61,12 @@ func (p *VMPool) Allocate(vmID string) (*VM, error) { vm := NewVM(vmID) var err error - vm.NetConfig, err = p.networkManager.CreateNetwork(vmID) + if p.isFullLocal { + vm.netConfig, err = p.networkManager.CreateNetwork(vmID) + } else { + vm.ni, err = p.tapManager.AddTap(vmID+"_tap") + } + if err != nil { logger.Warn("VM network creation failed") return nil, err @@ -76,9 +89,16 @@ func (p *VMPool) Free(vmID string) error { return nil } - if err := p.networkManager.RemoveNetwork(vmID); err != nil { - logger.Error("Could not remove network config") - return err + if p.isFullLocal { + if err := p.networkManager.RemoveNetwork(vmID); err != nil { + logger.Error("Could not remove network config") + return err + } + } else { + if err := p.tapManager.RemoveTap(vmID + "_tap"); err != nil { + logger.Error("Could not delete tap") + return err + } } p.vmMap.Delete(vmID) @@ -86,6 +106,36 @@ func (p *VMPool) Free(vmID string) error { return nil } +// RecreateTap Deletes and creates the tap for a VM +func (p *VMPool) RecreateTap(vmID string) error { + if p.isFullLocal { + return errors.New("RecreateTap is not supported for full local snapshots") + } + + logger := log.WithFields(log.Fields{"vmID": vmID}) + + logger.Debug("Recreating tap") + + _, isPresent := p.vmMap.Load(vmID) + if !isPresent { + log.WithFields(log.Fields{"vmID": vmID}).Panic("RecreateTap: VM does not exist in the map") + return NonExistErr("RecreateTap: VM does not exist when recreating its tap") + } + + if err := p.tapManager.RemoveTap(vmID + "_tap"); err != nil { + logger.Error("Failed to delete tap") + return err + } + + _, err := p.tapManager.AddTap(vmID+"_tap") + if err != nil { + logger.Error("Failed to add tap") + return err + } + + return nil +} + 
// GetVMMap Returns a copy of vmMap as a regular concurrency-unsafe map func (p *VMPool) GetVMMap() map[string]*VM { m := make(map[string]*VM) @@ -110,5 +160,9 @@ func (p *VMPool) GetVM(vmID string) (*VM, error) { // CleanupNetwork removes and deallocates all network configurations func (p *VMPool) CleanupNetwork() { - p.networkManager.Cleanup() + if p.isFullLocal { + _ = p.networkManager.Cleanup() + } else { + p.tapManager.RemoveBridges() + } } diff --git a/networking/Makefile b/networking/Makefile new file mode 100644 index 000000000..5f39411fd --- /dev/null +++ b/networking/Makefile @@ -0,0 +1,33 @@ +# MIT License +# +# Copyright (c) 2020 Dmitrii Ustiugov, Plamen Petrov and EASE lab +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +EXTRAGOARGS:=-v -race -cover + +test: + # Need to pass GOROOT because GitHub-hosted runners may have several + # go versions installed so that calling go from root may fail + sudo env "PATH=$(PATH)" "GOROOT=$(GOROOT)" go test ./ $(EXTRAGOARGS) + +test-man: + echo "Nothing to test manually" + +.PHONY: test test-man \ No newline at end of file diff --git a/networking/networkManager.go b/networking/networkManager.go index bdf0dfcf7..8f45a1295 100644 --- a/networking/networkManager.go +++ b/networking/networkManager.go @@ -42,6 +42,9 @@ type NetworkManager struct { // Mapping of function instance IDs to their network config netConfigs map[string]*NetworkConfig + + // Network configs that are being created + inCreation sync.WaitGroup } // NewNetworkManager creates and returns a new network manager that connects function instances to the network @@ -98,6 +101,7 @@ func (mgr *NetworkManager) addNetConfig() { mgr.Lock() id := mgr.nextID mgr.nextID += 1 + mgr.inCreation.Add(1) mgr.Unlock() netCfg := NewNetworkConfig(id, mgr.hostIfaceName) @@ -110,6 +114,7 @@ func (mgr *NetworkManager) addNetConfig() { // Signal in case someone is waiting for a new config to become available in the pool mgr.poolCond.Signal() mgr.poolCond.L.Unlock() + mgr.inCreation.Done() } // allocNetConfig allocates a new network config from the pool to a function instance identified by funcID @@ -119,7 +124,7 @@ func (mgr *NetworkManager) allocNetConfig(funcID string) *NetworkConfig { // Pop a network config from the pool and allocate it to the function instance mgr.poolCond.L.Lock() - if len(mgr.networkPool) == 0 { + for len(mgr.networkPool) == 0 { // Wait until a new network config has been created mgr.poolCond.Wait() } @@ -171,16 +176,30 @@ func (mgr *NetworkManager) RemoveNetwork(funcID string) error { return nil } -// Cleanup removes and deallocates all network configurations that are in use or in the network pool. 
+// Cleanup removes and deallocates all network configurations that are in use or in the network pool. Make sure to first +// clean up all running functions before removing their network configs. func (mgr *NetworkManager) Cleanup() error { log.Info("Cleaning up network manager") mgr.Lock() defer mgr.Unlock() + // Wait till all network configs still in creation are added + mgr.inCreation.Wait() + // Release network configs still in use + var wgu sync.WaitGroup + wgu.Add(len(mgr.netConfigs)) for funcID := range mgr.netConfigs { - mgr.releaseNetConfig(funcID) + config := mgr.netConfigs[funcID] + go func(config *NetworkConfig) { + if err := config.RemoveNetwork(); err != nil { + log.Errorf("failed to remove network %s:", err) + } + wgu.Done() + }(config) } + wgu.Wait() + mgr.netConfigs = make(map[string]*NetworkConfig) // Cleanup network pool mgr.poolCond.L.Lock() diff --git a/networking/networkconfig.go b/networking/networkconfig.go index 256489cf1..c0305d6d0 100644 --- a/networking/networkconfig.go +++ b/networking/networkconfig.go @@ -39,12 +39,12 @@ const ( ) // NetworkConfig represents the network devices, IPs, namespaces, routes and filter rules to connect a uVM -// to the network. Note that due to the current allocation of IPs at most 2^14 VMs can be simultaneously be -// available on a single host. +// to the network. The network config ID is deterministically mapped to IP addresses to be used for the uVM. +// Note that due to the current allocation of IPs at most 2^14 VMs can be simultaneously be available on a single host. type NetworkConfig struct { id int containerCIDR string // Container IP address (CIDR notation) - gatewayCIDR string // Container gateway IP address + gatewayCIDR string // Container gateway IP address (CIDR notation) containerTap string // Container tap name containerMac string // Container Mac address hostIfaceName string // Host network interface name @@ -203,7 +203,7 @@ func (cfg *NetworkConfig) CreateNetwork() error { // 3. 
Setup networking in instance namespace if err := cfg.createVmNetwork(hostNsHandle); err != nil { - netns.Set(hostNsHandle) + _ = netns.Set(hostNsHandle) runtime.UnlockOSThread() return err } diff --git a/networking/networking_test.go b/networking/networking_test.go new file mode 100644 index 000000000..9b7775ac6 --- /dev/null +++ b/networking/networking_test.go @@ -0,0 +1,110 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov, Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package networking + +import ( + "fmt" + "os" + "sync" + "testing" + + ctrdlog "github.com/containerd/containerd/log" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { + // call flag.Parse() here if TestMain uses flags + + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: ctrdlog.RFC3339NanoFixed, + FullTimestamp: true, + }) + //log.SetReportCaller(true) // FIXME: make sure it's false unless debugging + + log.SetOutput(os.Stdout) + + log.SetLevel(log.InfoLevel) + + os.Exit(m.Run()) +} + +func TestCreateCleanManager(t *testing.T) { + poolSize := []int{1, 5, 20} + + for _, n := range poolSize { + mgr, createErr := NewNetworkManager("", n) + require.NoError(t, createErr, "Network manager creation returned error") + + cleanErr := mgr.Cleanup() + require.NoError(t, cleanErr, "Network manager cleanup returned error") + } +} + +func TestCreateRemoveNetworkParallel(t *testing.T) { + netNum := []int{50, 200} + + mgr, err := NewNetworkManager("", 10) + require.NoError(t, err, "Network manager creation returned error") + defer func() { _ = mgr.Cleanup() }() + + for _, n := range netNum { + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + _, err := mgr.CreateNetwork(fmt.Sprintf("func_%d", i)) + require.NoError(t, err, fmt.Sprintf("Failed to create network for func_%d", i)) + }(i) + } + wg.Wait() + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + err := mgr.RemoveNetwork(fmt.Sprintf("func_%d", i)) + require.NoError(t, err, fmt.Sprintf("Failed to remove network for func_%d", i)) + }(i) + } + wg.Wait() + } +} + +func TestCreateRemoveNetworkSerial(t *testing.T) { + netNum := 50 + + mgr, err := NewNetworkManager("", 50) + require.NoError(t, err, "Network manager creation returned error") + defer func() { _ = mgr.Cleanup() }() + + for i := 0; i < netNum; i++ { + _, err = mgr.CreateNetwork(fmt.Sprintf("func_%d", i)) + require.NoError(t, 
err, "Failed to create network") + } + + for i := 0; i < netNum; i++ { + err = mgr.RemoveNetwork(fmt.Sprintf("func_%d", i)) + require.NoError(t, err, "Failed to remove network") + } +} \ No newline at end of file diff --git a/scripts/clean_fcctr.sh b/scripts/clean_fcctr.sh index c21958ffd..22a5b37ff 100755 --- a/scripts/clean_fcctr.sh +++ b/scripts/clean_fcctr.sh @@ -29,9 +29,9 @@ sudo pkill -9 firec sudo pkill -9 containerd echo Resetting nftables -nft flush table ip filter -nft "add chain ip filter FORWARD { type filter hook forward priority 0; policy accept; }" -nft "add rule ip filter FORWARD ct state related,established counter accept" +sudo nft flush table ip filter +sudo nft "add chain ip filter FORWARD { type filter hook forward priority 0; policy accept; }" +sudo nft "add rule ip filter FORWARD ct state related,established counter accept" echo Deleting veth* devices created by CNI cat /proc/net/dev | grep veth | cut -d" " -f1| cut -d":" -f1 | while read in; do sudo ip link delete "$in"; done @@ -48,6 +48,7 @@ for d in `find /var/lib/cni/ -mindepth 1 -maxdepth 1 -type d | grep -v networks sudo rm -rf $d done + # When executed inside a docker container, this command returns the container ID of the container. # on a non container environment, this returns "/". 
CONTAINERID=$(basename $(cat /proc/1/cpuset)) @@ -71,11 +72,14 @@ echo Cleaning /run/firecracker-containerd/* sudo rm -rf /run/firecracker-containerd/containerd.sock.ttrpc \ /run/firecracker-containerd/io.containerd.runtime.v1.linux \ /run/firecracker-containerd/io.containerd.runtime.v2.task \ - /run/containerd/s + /run/containerd/* echo Cleaning CNI state, e.g., allocated addresses sudo rm /var/lib/cni/networks/fcnet*/last_reserved_ip.0 || echo clean already sudo rm /var/lib/cni/networks/fcnet*/19* || echo clean already +echo Cleaning snapshots +sudo rm -rf /fccd/snapshots/* + echo Creating a fresh devmapper source $DIR/create_devmapper.sh diff --git a/scripts/github_runner/clean_cri_runner.sh b/scripts/github_runner/clean_cri_runner.sh index 4d75a1d61..51547bef3 100755 --- a/scripts/github_runner/clean_cri_runner.sh +++ b/scripts/github_runner/clean_cri_runner.sh @@ -99,6 +99,9 @@ if [ "$SANDBOX" == "gvisor" ]; then fi if [ "$SANDBOX" == "firecracker" ]; then + echo Cleaning snapshots + sudo rm -rf /fccd/snapshots/* + echo Creating a fresh devmapper $PWD/../create_devmapper.sh fi \ No newline at end of file diff --git a/snapshotting/Makefile b/snapshotting/Makefile new file mode 100644 index 000000000..5f39411fd --- /dev/null +++ b/snapshotting/Makefile @@ -0,0 +1,33 @@ +# MIT License +# +# Copyright (c) 2020 Dmitrii Ustiugov, Plamen Petrov and EASE lab +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +EXTRAGOARGS:=-v -race -cover + +test: + # Need to pass GOROOT because GitHub-hosted runners may have several + # go versions installed so that calling go from root may fail + sudo env "PATH=$(PATH)" "GOROOT=$(GOROOT)" go test ./ $(EXTRAGOARGS) + +test-man: + echo "Nothing to test manually" + +.PHONY: test test-man \ No newline at end of file diff --git a/snapshotting/deduplicated/manager.go b/snapshotting/fulllocal/manager.go similarity index 83% rename from snapshotting/deduplicated/manager.go rename to snapshotting/fulllocal/manager.go index 1e5cfdd6b..c301710a4 100644 --- a/snapshotting/deduplicated/manager.go +++ b/snapshotting/fulllocal/manager.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package deduplicated +package fulllocal import ( "container/heap" @@ -32,15 +32,18 @@ import ( "sync" ) -// ImprovedSnapshotManager manages snapshots stored on the node. -type ImprovedSnapshotManager struct { +// FullLocalSnapshotManager manages snapshots stored on the node. +type FullLocalSnapshotManager struct { sync.Mutex + baseFolder string + + // Stored snapshots snapshots map[string]*snapshotting.Snapshot + // Eviction metadata for stored snapshots snapStats map[string]*SnapshotStats - // Heap of snapshots not in use sorted on score + // Heap of snapshots not in use that can be freed to save space. Sorted by score freeSnaps SnapHeap - baseFolder string // Eviction clock int64 // When container last used. 
Increased to priority terminated container on termination @@ -48,8 +51,8 @@ type ImprovedSnapshotManager struct { usedMib int64 } -func NewSnapshotManager(baseFolder string, capacityMib int64) *ImprovedSnapshotManager { - manager := new(ImprovedSnapshotManager) +func NewSnapshotManager(baseFolder string, capacityMib int64) *FullLocalSnapshotManager { + manager := new(FullLocalSnapshotManager) manager.snapshots = make(map[string]*snapshotting.Snapshot) manager.snapStats = make(map[string]*SnapshotStats) heap.Init(&manager.freeSnaps) @@ -59,15 +62,15 @@ func NewSnapshotManager(baseFolder string, capacityMib int64) *ImprovedSnapshotM manager.usedMib = 0 // Clean & init basefolder - os.RemoveAll(manager.baseFolder) - os.MkdirAll(manager.baseFolder, os.ModePerm) + _ = os.RemoveAll(manager.baseFolder) + _ = os.MkdirAll(manager.baseFolder, os.ModePerm) return manager } // AcquireSnapshot returns a snapshot for the specified revision if it is available and increments the internal counter // such that the snapshot can't get removed. Similar to how a RW lock works -func (mgr *ImprovedSnapshotManager) AcquireSnapshot(revision string) (*snapshotting.Snapshot, error) { +func (mgr *FullLocalSnapshotManager) AcquireSnapshot(revision string) (*snapshotting.Snapshot, error) { mgr.Lock() defer mgr.Unlock() @@ -105,7 +108,7 @@ func (mgr *ImprovedSnapshotManager) AcquireSnapshot(revision string) (*snapshott // ReleaseSnapshot releases the snapshot with the given revision so that it can possibly get deleted if it is not in use // by any other VMs. 
-func (mgr *ImprovedSnapshotManager) ReleaseSnapshot(revision string) error { +func (mgr *FullLocalSnapshotManager) ReleaseSnapshot(revision string) error { mgr.Lock() defer mgr.Unlock() @@ -114,6 +117,10 @@ func (mgr *ImprovedSnapshotManager) ReleaseSnapshot(revision string) error { return errors.New(fmt.Sprintf("Get: Snapshot for revision %s does not exist", revision)) } + if snapStat.numUsing == 0 { + return errors.New("Can't release a snapshot that is not in use") + } + snapStat.numUsing -= 1 if snapStat.numUsing == 0 { @@ -125,9 +132,9 @@ func (mgr *ImprovedSnapshotManager) ReleaseSnapshot(revision string) error { return nil } -// InitSnapshot initializes a snapshot by adding its metadata to the ImprovedSnapshotManager. Once the snapshot has been created, +// InitSnapshot initializes a snapshot by adding its metadata to the FullLocalSnapshotManager. Once the snapshot has been created, // CommitSnapshot must be run to finalize the snapshot creation and make the snapshot available fo ruse -func (mgr *ImprovedSnapshotManager) InitSnapshot(revision, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *snapshotting.Snapshot, error) { +func (mgr *FullLocalSnapshotManager) InitSnapshot(revision, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *snapshotting.Snapshot, error) { mgr.Lock() if _, present := mgr.snapshots[revision]; present { @@ -172,18 +179,24 @@ func (mgr *ImprovedSnapshotManager) InitSnapshot(revision, image string, coldSta } // CommitSnapshot finalizes the snapshot creation and makes it available for use. 
-func (mgr *ImprovedSnapshotManager) CommitSnapshot(revision string) error { +func (mgr *FullLocalSnapshotManager) CommitSnapshot(revision string) error { mgr.Lock() snapStat, present := mgr.snapStats[revision] if !present { mgr.Unlock() return errors.New(fmt.Sprintf("Snapshot for revision %s to commit does not exist", revision)) } + + if snapStat.usable { + mgr.Unlock() + return errors.New(fmt.Sprintf("Snapshot for revision %s has already been committed", revision)) + } + snap := mgr.snapshots[revision] mgr.Unlock() // Calculate actual disk size used - var sizeIncrement int64 = 0 + var sizeIncrement int64 oldSize := snapStat.TotalSizeMiB snapStat.UpdateSize(snap.CalculateDiskSize()) // Should always result in a decrease or equal! @@ -201,7 +214,7 @@ func (mgr *ImprovedSnapshotManager) CommitSnapshot(revision string) error { // freeSpace makes sure neededMib of disk space is available by removing unused snapshots. Make sure to have a lock // when calling this function. -func (mgr *ImprovedSnapshotManager) freeSpace(neededMib int64) (*[]string, error) { +func (mgr *FullLocalSnapshotManager) freeSpace(neededMib int64) (*[]string, error) { var toDelete []string var freedMib int64 = 0 var removeContainerSnaps []string @@ -213,7 +226,7 @@ func (mgr *ImprovedSnapshotManager) freeSpace(neededMib int64) (*[]string, error toDelete = append(toDelete, snapStat.revisionId) snap := mgr.snapshots[snapStat.revisionId] - removeContainerSnaps = append(removeContainerSnaps, snap.ContainerSnapName) + removeContainerSnaps = append(removeContainerSnaps, snap.GetContainerSnapName()) freedMib += snapStat.TotalSizeMiB } diff --git a/snapshotting/deduplicated/snapHeap.go b/snapshotting/fulllocal/snapHeap.go similarity index 98% rename from snapshotting/deduplicated/snapHeap.go rename to snapshotting/fulllocal/snapHeap.go index 31f364ff4..f20cf68a1 100644 --- a/snapshotting/deduplicated/snapHeap.go +++ b/snapshotting/fulllocal/snapHeap.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH 
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package deduplicated +package fulllocal type SnapHeap []*SnapshotStats diff --git a/snapshotting/deduplicated/snapStats.go b/snapshotting/fulllocal/snapStats.go similarity index 89% rename from snapshotting/deduplicated/snapStats.go rename to snapshotting/fulllocal/snapStats.go index fb0b7b8f9..8cf29ffd7 100644 --- a/snapshotting/deduplicated/snapStats.go +++ b/snapshotting/fulllocal/snapStats.go @@ -1,7 +1,6 @@ -package deduplicated +package fulllocal -// Snapshot identified by revision -// Only capitalized fields are serialised / deserialised +// SnapshotStats contains snapshot data used by the snapshot manager for its keepalive policy. type SnapshotStats struct { revisionId string diff --git a/snapshotting/manager_test.go b/snapshotting/manager_test.go new file mode 100644 index 000000000..f11b95fc1 --- /dev/null +++ b/snapshotting/manager_test.go @@ -0,0 +1,167 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov, Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package snapshotting_test + +import ( + "fmt" + ctrdlog "github.com/containerd/containerd/log" + "github.com/ease-lab/vhive/snapshotting" + "github.com/ease-lab/vhive/snapshotting/fulllocal" + "github.com/ease-lab/vhive/snapshotting/regular" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "os" + "sync" + "testing" +) + +const snapshotsDir = "/fccd/test/snapshots" + +func TestMain(m *testing.M) { + // call flag.Parse() here if TestMain uses flags + + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: ctrdlog.RFC3339NanoFixed, + FullTimestamp: true, + }) + + log.SetOutput(os.Stdout) + + log.SetLevel(log.InfoLevel) + + os.Exit(m.Run()) +} + +func testRegular(t *testing.T, mgr *snapshotting.SnapshotManager, vmId, imageName string) { + // Create snapshot + _, snap, err := mgr.InitSnapshot(vmId, imageName, 10, 256, 1, false) + require.NoError(t, err, fmt.Sprintf("Failed to create snapshot for %s", vmId)) + _, _, err = mgr.InitSnapshot(vmId, imageName, 10, 256, 1, false) + require.Error(t, err, fmt.Sprintf("Init should fail when a snapshot has already been created for %s", vmId)) + + err = mgr.CommitSnapshot(snap.GetId()) + require.NoError(t, err, fmt.Sprintf("Failed to commit snapshot for %s", vmId)) + err = mgr.CommitSnapshot(snap.GetId()) + require.Error(t, err, fmt.Sprintf("Commit should fail when no snapshots are created for %s", vmId)) + + // Use snapshot + snp, err := mgr.AcquireSnapshot(imageName) + require.NoError(t, err, fmt.Sprintf("Failed to acquire snapshot for %s", imageName)) + _, err = mgr.AcquireSnapshot(imageName) + require.Error(t, err, fmt.Sprintf("Acquire should fail when no snapshots are available for %s", imageName)) + + // Release snapshot + 
err = mgr.ReleaseSnapshot(snp.GetId()) + require.NoError(t, err, fmt.Sprintf("Failed to release snapshot for %s", imageName)) + err = mgr.ReleaseSnapshot(snp.GetId()) + require.Error(t, err, fmt.Sprintf("Release should fail when there are no active snapshots for %s", vmId)) +} + +func testFullLocal(t *testing.T, mgr *snapshotting.SnapshotManager, revisionId, imageName string) { + // Create snapshot + _, snap, err := mgr.InitSnapshot(revisionId, imageName, 10, 256, 1, false) + require.NoError(t, err, fmt.Sprintf("Failed to create snapshot for %s", revisionId)) + _, _, err = mgr.InitSnapshot(revisionId, imageName, 10, 256, 1, false) + require.Error(t, err, fmt.Sprintf("Init should fail when a snapshot has already been created for %s", revisionId)) + + err = mgr.CommitSnapshot(snap.GetId()) + require.NoError(t, err, fmt.Sprintf("Failed to commit snapshot for %s", revisionId)) + err = mgr.CommitSnapshot(snap.GetId()) + require.Error(t, err, fmt.Sprintf("Commit should fail when no snapshots are created for %s", revisionId)) + + // Use snapshot + snp, err := mgr.AcquireSnapshot(revisionId) + require.NoError(t, err, fmt.Sprintf("Failed to acquire snapshot for %s", revisionId)) + snp2, err := mgr.AcquireSnapshot(revisionId) + require.NoError(t, err, fmt.Sprintf("Failed to acquire snapshot for %s", revisionId)) + if snp.GetId() != snp2.GetId() { + t.Errorf("Snapshots with same revision ID should be equal") + } + + // Release snapshot + err = mgr.ReleaseSnapshot(snp.GetId()) + require.NoError(t, err, fmt.Sprintf("Failed to release snapshot for %s", revisionId)) + err = mgr.ReleaseSnapshot(snp2.GetId()) + require.NoError(t, err, fmt.Sprintf("Failed to release snapshot for %s", revisionId)) + err = mgr.ReleaseSnapshot(snp.GetId()) + require.Error(t, err, "Release should fail when the snapshot is not in use") +} + +func TestRegular(t *testing.T) { + // Create snapshot manager + mgr := snapshotting.NewSnapshotManager(regular.NewSnapshotManager(snapshotsDir)) + + vmId := "uvm1" 
// Snap id = vmId + imageName := "testImage" + + testRegular(t, mgr, vmId, imageName) +} + +func TestFullLocal(t *testing.T) { + // Create snapshot manager + mgr := snapshotting.NewSnapshotManager(fulllocal.NewSnapshotManager(snapshotsDir, 102400)) + + revisionId := "rev1" // Snap id = vmId + imageName := "testImage" + + testFullLocal(t, mgr, revisionId, imageName) +} + +func TestRegularConcurrent(t *testing.T) { + // Create snapshot manager + mgr := snapshotting.NewSnapshotManager(regular.NewSnapshotManager(snapshotsDir)) + + var wg sync.WaitGroup + concurrency := 20 + wg.Add(concurrency) + + for i := 0; i < concurrency; i++ { + vmId := fmt.Sprintf("uvm%d", i) + imageName := fmt.Sprintf("testImage-%d", i) + go func(vmId, imageName string) { + defer wg.Done() + testRegular(t, mgr, vmId, imageName) + }(vmId, imageName) + } + wg.Wait() +} + +func TestFullLocalConcurrent(t *testing.T) { + // Create snapshot manager + mgr := snapshotting.NewSnapshotManager(fulllocal.NewSnapshotManager(snapshotsDir, 102400)) + + var wg sync.WaitGroup + concurrency := 20 + wg.Add(concurrency) + + for i := 0; i < concurrency; i++ { + revId := fmt.Sprintf("rev%d", i) + imageName := fmt.Sprintf("testImage-%d", i) + go func(revId, imageName string) { + defer wg.Done() + testFullLocal(t, mgr, revId, imageName) + }(revId, imageName) + } + wg.Wait() +} + diff --git a/snapshotting/regular/manager.go b/snapshotting/regular/manager.go index ae825671a..04f60a382 100644 --- a/snapshotting/regular/manager.go +++ b/snapshotting/regular/manager.go @@ -31,16 +31,22 @@ import ( ) -// ImprovedSnapshotManager manages snapshots stored on the node. +// RegularSnapshotManager manages snapshots stored on the node. Each snapshot can only be used by a single VM at +// a time and thus is always in one of three states: creating, active or idle. 
type RegularSnapshotManager struct { sync.Mutex + // Snapshots currently in use by a function (identified by the id of the VM using the snapshot) activeSnapshots map[string]*snapshotting.Snapshot + // Snapshots currently being created (identified by the id of the VM the snapshot is being created for) creatingSnapshots map[string]*snapshotting.Snapshot + // Offloaded snapshots available for reuse by new VMs (identified by the image name of the snapshot) idleSnapshots map[string][]*snapshotting.Snapshot baseFolder string } -func NewRegularSnapshotManager(baseFolder string) *RegularSnapshotManager { +// Snapshot identified by VM id + +func NewSnapshotManager(baseFolder string) *RegularSnapshotManager { manager := new(RegularSnapshotManager) manager.activeSnapshots = make(map[string]*snapshotting.Snapshot) manager.creatingSnapshots = make(map[string]*snapshotting.Snapshot) @@ -48,32 +54,36 @@ func NewRegularSnapshotManager(baseFolder string) *RegularSnapshotManager { manager.baseFolder = baseFolder // Clean & init basefolder - os.RemoveAll(manager.baseFolder) - os.MkdirAll(manager.baseFolder, os.ModePerm) + _ = os.RemoveAll(manager.baseFolder) + _ = os.MkdirAll(manager.baseFolder, os.ModePerm) return manager } +// AcquireSnapshot returns an idle snapshot if one is available for the given image func (mgr *RegularSnapshotManager) AcquireSnapshot(image string) (*snapshotting.Snapshot, error) { mgr.Lock() defer mgr.Unlock() + // Check if idle snapshot is available for the given image idles, ok := mgr.idleSnapshots[image] if !ok { mgr.idleSnapshots[image] = []*snapshotting.Snapshot{} return nil, errors.New(fmt.Sprintf("There is no snapshot available for image %s", image)) } + // Return snapshot for supplied image if len(idles) != 0 { snp := idles[0] mgr.idleSnapshots[image] = idles[1:] mgr.activeSnapshots[snp.GetId()] = snp return snp, nil } - return nil, errors.New(fmt.Sprintf("There is no snapshot available fo rimage %s", image)) } +// ReleaseSnapshot releases the 
snapshot in use by the given VM for offloading so that it can get used to handle a new +// VM creation. func (mgr *RegularSnapshotManager) ReleaseSnapshot(vmID string) error { mgr.Lock() defer mgr.Unlock() @@ -83,40 +93,52 @@ func (mgr *RegularSnapshotManager) ReleaseSnapshot(vmID string) error { return errors.New(fmt.Sprintf("Get: Snapshot for container %s does not exist", vmID)) } + // Move snapshot from active to idle state delete(mgr.activeSnapshots, vmID) mgr.idleSnapshots[snap.Image] = append(mgr.idleSnapshots[snap.Image], snap) return nil } -// InitSnapshot initializes a snapshot by adding its metadata to the ImprovedSnapshotManager. Once the snapshot has been created, -// CommitSnapshot must be run to finalize the snapshot creation and make the snapshot available fo ruse +// InitSnapshot initializes a snapshot by initializing a new snapshot and moving it to the creating state. CommitSnapshot +// must be run to finalize the snapshot creation and make the snapshot available for use func (mgr *RegularSnapshotManager) InitSnapshot(vmID, image string, coldStartTimeMs int64, memSizeMib, vCPUCount uint32, sparse bool) (*[]string, *snapshotting.Snapshot, error) { mgr.Lock() + + if _, present := mgr.creatingSnapshots[vmID]; present { + mgr.Unlock() + return nil, nil, errors.New(fmt.Sprintf("Add: Snapshot for vm %s already exists", vmID)) + } + var removeContainerSnaps *[]string - // Add snapshot and snapshot metadata to manager + // Create snapshot object and move into creating state snap := snapshotting.NewSnapshot(vmID, mgr.baseFolder, image, memSizeMib, vCPUCount, sparse) mgr.creatingSnapshots[snap.GetId()] = snap mgr.Unlock() // Create directory to store snapshot data - err := snap.CreateSnapDir() + /*err := snap.CreateSnapDir() if err != nil { return removeContainerSnaps, nil, errors.Wrapf(err, "creating snapDir for snapshots %s", vmID) - } + }*/ return removeContainerSnaps, snap, nil } -// CommitSnapshot finalizes the snapshot creation and makes it available 
for use. +// CommitSnapshot finalizes the snapshot creation and makes it available for use by moving it into the idle state. func (mgr *RegularSnapshotManager) CommitSnapshot(vmID string) error { mgr.Lock() defer mgr.Unlock() - snap := mgr.creatingSnapshots[vmID] + + // Move snapshot from creating to idle state + snap, ok := mgr.creatingSnapshots[vmID] + if !ok { + return errors.New(fmt.Sprintf("No snapshot has been created with vmID %s", vmID)) + } delete(mgr.creatingSnapshots, vmID) - _, ok := mgr.idleSnapshots[snap.Image] + _, ok = mgr.idleSnapshots[snap.Image] if !ok { mgr.idleSnapshots[snap.Image] = []*snapshotting.Snapshot{} } diff --git a/snapshotting/snapshot.go b/snapshotting/snapshot.go index c82ee1b30..53eca2c80 100644 --- a/snapshotting/snapshot.go +++ b/snapshotting/snapshot.go @@ -67,9 +67,14 @@ func (snp *Snapshot) CalculateDiskSize() int64 { func getRealSizeMib(filePath string) int64 { var st unix.Stat_t if err := unix.Stat(filePath, &st); err != nil { - return 0 + return 1 } - return int64(math.Ceil((float64(st.Blocks) * 512) / (1024 * 1024))) + realSize := int64(math.Ceil((float64(st.Blocks) * 512) / (1024 * 1024))) + // Mainly for unit tests where real disk size = 0 + if realSize == 0 { + return 1 + } + return realSize } func (snp *Snapshot) CreateSnapDir() error { diff --git a/taps/Makefile b/taps/Makefile new file mode 100644 index 000000000..b3fbde9c2 --- /dev/null +++ b/taps/Makefile @@ -0,0 +1,33 @@ +# MIT License +# +# Copyright (c) 2020 Dmitrii Ustiugov, Plamen Petrov and EASE lab +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# 
The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +EXTRAGOARGS:=-v -race -cover + +test: + # Need to pass GOROOT because GitHub-hosted runners may have several + # go versions installed so that calling go from root may fail + sudo env "PATH=$(PATH)" "GOROOT=$(GOROOT)" go test ./ $(EXTRAGOARGS) + +test-man: + echo "Nothing to test manually" + +.PHONY: test test-man diff --git a/taps/tapManager.go b/taps/tapManager.go new file mode 100644 index 000000000..6ad563b88 --- /dev/null +++ b/taps/tapManager.go @@ -0,0 +1,403 @@ +// MIT License +// +// Copyright (c) 2021 Plamen Petrov, Amory Hoste and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package taps + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "github.com/google/nftables" + "github.com/google/nftables/expr" + "os/exec" + "strings" + "sync/atomic" + + log "github.com/sirupsen/logrus" + + "net" + + "github.com/vishvananda/netlink" +) + +// getGatewayAddr Creates the gateway address (first address in pool) +func getGatewayAddr(bridgeID int) string { + return fmt.Sprintf("19%d.128.0.1", bridgeID) +} + +// getBridgeName Create bridge name +func getBridgeName(id int) string { + return fmt.Sprintf("br%d", id) +} + +// getPrimaryAddress Creates the primary address for a tap +func getPrimaryAddress(curTaps, bridgeID int) string { + return fmt.Sprintf("19%d.128.%d.%d", bridgeID, (curTaps+2)/256, (curTaps+2)%256) +} + +// getHostIfaceName returns the default host network interface name. 
+func getHostIfaceName() (string, error) { + out, err := exec.Command( + "route", + ).Output() + if err != nil { + log.Warnf("Failed to fetch host net interfaces %v\n%s\n", err, out) + return "", err + } + + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "default") { + return line[strings.LastIndex(line, " ")+1:], nil + } + } + return "", errors.New("failed to fetch host net interface") +} + +// NewTapManager Creates a new tap manager +func NewTapManager(hostIfaceName string) (*TapManager, error) { + tm := new(TapManager) + + tm.numBridges = NumBridges + tm.TapCountsPerBridge = make([]int64, NumBridges) + tm.createdTaps = make(map[string]*NetworkInterface) + + tm.hostIfaceName = hostIfaceName + if tm.hostIfaceName == "" { + hostIface, err := getHostIfaceName() + if err != nil { + return nil, err + } else { + tm.hostIfaceName = hostIface + } + } + + log.Info("Registering bridges for tap manager") + + for i := 0; i < NumBridges; i++ { + brName := getBridgeName(i) + gatewayAddr := getGatewayAddr(i) + + createBridge(brName, gatewayAddr) + } + + return tm, nil +} + +// Creates the bridge, add a gateway to it, and enables it +func createBridge(bridgeName, gatewayAddr string) { + logger := log.WithFields(log.Fields{"bridge": bridgeName}) + + logger.Debug("Creating bridge") + + la := netlink.NewLinkAttrs() + la.Name = bridgeName + + br := &netlink.Bridge{LinkAttrs: la} + + if err := netlink.LinkAdd(br); err != nil { + logger.Panic("Bridge could not be created") + } + + if err := netlink.LinkSetUp(br); err != nil { + logger.Panic("Bridge could not be enabled") + } + + bridgeAddress := gatewayAddr + Subnet + + addr, err := netlink.ParseAddr(bridgeAddress) + if err != nil { + log.Panic(fmt.Sprintf("could not parse bridge address %s", bridgeAddress)) + } + + if err := netlink.AddrAdd(br, addr); err != nil { + logger.Panic(fmt.Sprintf("could not add %s to bridge", bridgeAddress)) + } +} + +// 
setupForwardRules sets up forwarding rules to enable internet access inside the vm +func setupForwardRules(tapName, hostIface string) error { + conn := nftables.Conn{} + + // 1. nft add table ip filter + filterTable := &nftables.Table{ + Name: "filter", + Family: nftables.TableFamilyIPv4, + } + + // 2. nft add chain ip filter FORWARD { type filter hook forward priority 0; policy accept; } + polAccept := nftables.ChainPolicyAccept + fwdCh := &nftables.Chain{ + Name: fmt.Sprintf("FORWARD%s", tapName), + Table: filterTable, + Type: nftables.ChainTypeFilter, + Priority: 0, + Hooknum: nftables.ChainHookForward, + Policy: &polAccept, + } + + // 3. iptables -A FORWARD -i tapName -o hostIface -j ACCEPT + // 3.1 nft add rule ip filter FORWARD iifname tapName oifname hostIface counter accept + outRule := &nftables.Rule{ + Table: filterTable, + Chain: fwdCh, + Exprs: []expr.Any{ + // Load iffname in register 1 + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + // Check iifname == tapName + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", tapName)), + }, + // Load oifname in register 1 + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + // Check oifname == hostIface + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", hostIface)), + }, + &expr.Verdict{ + Kind: expr.VerdictAccept, + }, + }, + } + + // 4. 
iptables -A FORWARD -o tapName -i hostIface -j ACCEPT + // 4.1 nft add rule ip filter FORWARD iifname hostIface oifname tapName counter accept + inRule := &nftables.Rule{ + Table: filterTable, + Chain: fwdCh, + Exprs: []expr.Any{ + // Load oifname in register 1 + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + // Check oifname == tapName + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", tapName)), + }, + // Load iifname in register 1 + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + // Check iifname == hostIface + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte(fmt.Sprintf("%s\x00", hostIface)), + }, + &expr.Verdict{ + Kind: expr.VerdictAccept, + }, + }, + } + conn.AddTable(filterTable) + conn.AddChain(fwdCh) + conn.AddRule(outRule) + conn.AddRule(inRule) + + if err := conn.Flush(); err != nil { + log.Warnf("Failed to setup forwarding out from tap %v\n%s\n", tapName, err) + return err + } + return nil +} + +// AddTap Creates a new tap and returns the corresponding network interface +func (tm *TapManager) AddTap(tapName string) (*NetworkInterface, error) { + tm.Lock() + + if ni, ok := tm.createdTaps[tapName]; ok { + tm.Unlock() + return ni, tm.reconnectTap(tapName, ni) + } + + tm.Unlock() + + for i := 0; i < tm.numBridges; i++ { + tapsInBridge := atomic.AddInt64(&tm.TapCountsPerBridge[i], 1) + if tapsInBridge-1 < TapsPerBridge { + // Create a tap with this bridge + ni, err := tm.addTap(tapName, i, int(tapsInBridge-1)) + if err == nil { + tm.Lock() + tm.createdTaps[tapName] = ni + tm.Unlock() + err := setupForwardRules(tapName, tm.hostIfaceName) + if err != nil { + return nil, err + } + } + + return ni, err + } + } + log.Error("No space for creating taps") + return nil, errors.New("no space for creating taps") +} + +// Reconnects a single tap with the same network interface that it was +// create with previously +func (tm *TapManager) reconnectTap(tapName string, ni *NetworkInterface) error { + logger := 
log.WithFields(log.Fields{"tap": tapName, "bridge": ni.BridgeName}) + + la := netlink.NewLinkAttrs() + la.Name = tapName + + logger.Debug("Reconnecting tap") + + tap := &netlink.Tuntap{LinkAttrs: la, Mode: netlink.TUNTAP_MODE_TAP} + + if err := netlink.LinkAdd(tap); err != nil { + logger.Error("Tap could not be reconnected") + return err + } + + br, err := netlink.LinkByName(ni.BridgeName) + if err != nil { + logger.Error("Could not reconnect tap, because corresponding bridge does not exist") + return err + } + + hwAddr, err := net.ParseMAC(ni.MacAddress) + if err != nil { + logger.Error("Could not parse MAC") + return err + } + + if err := netlink.LinkSetHardwareAddr(tap, hwAddr); err != nil { + logger.Error("Could not set MAC address") + return err + } + + if err := netlink.LinkSetMaster(tap, br); err != nil { + logger.Error("Master could not be set") + return err + } + + if err := netlink.LinkSetUp(tap); err != nil { + logger.Error("Tap could not be enabled") + return err + } + + return nil +} + +// Creates a single tap and connects it to the corresponding bridge +func (tm *TapManager) addTap(tapName string, bridgeID, currentNumTaps int) (*NetworkInterface, error) { + bridgeName := getBridgeName(bridgeID) + + logger := log.WithFields(log.Fields{"tap": tapName, "bridge": bridgeName}) + + la := netlink.NewLinkAttrs() + la.Name = tapName + + logger.Debug("Creating tap") + + tap := &netlink.Tuntap{LinkAttrs: la, Mode: netlink.TUNTAP_MODE_TAP} + + if err := netlink.LinkAdd(tap); err != nil { + logger.Error("Tap could not be created") + return nil, err + } + + br, err := netlink.LinkByName(bridgeName) + if err != nil { + logger.Error("Could not create tap, because corresponding bridge does not exist") + return nil, err + } + + if err := netlink.LinkSetMaster(tap, br); err != nil { + logger.Error("Master could not be set") + return nil, err + } + + macIndex := bridgeID*TapsPerBridge + currentNumTaps + macAddress := fmt.Sprintf("02:FC:00:00:%02X:%02X", macIndex/256, 
macIndex%256) + + hwAddr, err := net.ParseMAC(macAddress) + if err != nil { + logger.Error("Could not parse MAC") + return nil, err + } + + if err := netlink.LinkSetHardwareAddr(tap, hwAddr); err != nil { + logger.Error("Could not set MAC address") + return nil, err + } + + if err := netlink.LinkSetUp(tap); err != nil { + logger.Error("Tap could not be enabled") + return nil, err + } + + return &NetworkInterface{ + BridgeName: bridgeName, + MacAddress: macAddress, + PrimaryAddress: getPrimaryAddress(currentNumTaps, bridgeID), + HostDevName: tapName, + Subnet: Subnet, + GatewayAddress: getGatewayAddr(bridgeID), + }, nil +} + +// RemoveTap Removes the tap +func (tm *TapManager) RemoveTap(tapName string) error { + logger := log.WithFields(log.Fields{"tap": tapName}) + + logger.Debug("Removing tap") + + tap, err := netlink.LinkByName(tapName) + if err != nil { + logger.Warn("Could not find tap") + return nil + } + + if err := netlink.LinkDel(tap); err != nil { + logger.Error("Tap could not be removed") + return err + } + + return nil +} + +// RemoveBridges Removes the bridges created by the tap manager +func (tm *TapManager) RemoveBridges() { + log.Info("Removing bridges") + for i := 0; i < tm.numBridges; i++ { + bridgeName := getBridgeName(i) + + logger := log.WithFields(log.Fields{"bridge": bridgeName}) + + br, err := netlink.LinkByName(bridgeName) + if err != nil { + logger.Warn("Could not find bridge") + continue + } + + if err := netlink.LinkDel(br); err != nil { + logger.WithFields(log.Fields{"bridge": bridgeName}).Panic("Bridge could not be deleted") + } + } +} diff --git a/taps/taps_test.go b/taps/taps_test.go new file mode 100644 index 000000000..692c0f057 --- /dev/null +++ b/taps/taps_test.go @@ -0,0 +1,105 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the 
Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package taps + +import ( + "fmt" + "os" + "sync" + "testing" + + ctrdlog "github.com/containerd/containerd/log" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { + // call flag.Parse() here if TestMain uses flags + + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: ctrdlog.RFC3339NanoFixed, + FullTimestamp: true, + }) + //log.SetReportCaller(true) // FIXME: make sure it's false unless debugging + + log.SetOutput(os.Stdout) + + log.SetLevel(log.InfoLevel) + + os.Exit(m.Run()) +} + +func TestCreateCleanBridges(t *testing.T) { + tm, _ := NewTapManager("") + tm.RemoveBridges() +} + +func TestCreateRemoveTaps(t *testing.T) { + tapsNum := []int{100, 1100} + + tm, _ := NewTapManager("") + defer tm.RemoveBridges() + + for _, n := range tapsNum { + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + _, _ = tm.AddTap(fmt.Sprintf("tap_%d", i)) + }(i) + } + wg.Wait() + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + _ = 
tm.RemoveTap(fmt.Sprintf("tap_%d", i)) + }(i) + } + wg.Wait() + } +} + +func TestCreateRemoveExtra(t *testing.T) { + + t.Skip("Test disabled due to execution failure in GitHub Actions and it doesn't seem essential for the test coverage") + + tapsNum := 2001 + + tm, _ := NewTapManager("") + defer tm.RemoveBridges() + + for i := 0; i < tapsNum; i++ { + _, err := tm.AddTap(fmt.Sprintf("tap_%d", i)) + if i < tm.numBridges*TapsPerBridge { + require.NoError(t, err, "Failed to create tap") + } else { + require.Error(t, err, "Did not fail to create extra taps") + } + } + + for i := 0; i < tapsNum; i++ { + _ = tm.RemoveTap(fmt.Sprintf("tap_%d", i)) + } +} diff --git a/taps/types.go b/taps/types.go new file mode 100644 index 000000000..503f88027 --- /dev/null +++ b/taps/types.go @@ -0,0 +1,55 @@ +// MIT License +// +// Copyright (c) 2020 Plamen Petrov and EASE lab +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package taps + +import ( + "sync" +) + +const ( + // Subnet Number of bits in the subnet mask + Subnet = "/10" + // TapsPerBridge Number of taps per bridge + TapsPerBridge = 1000 + // NumBridges is the number of bridges for the TapManager + NumBridges = 2 +) + +// TapManager A Tap Manager +type TapManager struct { + sync.Mutex + hostIfaceName string + numBridges int + TapCountsPerBridge []int64 + createdTaps map[string]*NetworkInterface +} + +// NetworkInterface Network interface type, NI names are generated based on expected tap names +type NetworkInterface struct { + BridgeName string + MacAddress string + HostDevName string + PrimaryAddress string + Subnet string + GatewayAddress string +} diff --git a/vhive.go b/vhive.go index 3fda98a5a..88068e6d5 100644 --- a/vhive.go +++ b/vhive.go @@ -131,6 +131,11 @@ func main() { return } + if !*isFullLocal && *isSparseSnaps { + log.Error("Sparse snaps are only supported for full local snapshots") + return + } + if flog, err = os.Create("/tmp/fccd.log"); err != nil { panic(err) } @@ -168,6 +173,7 @@ func main() { ctriface.WithUPF(*isUPFEnabled), ctriface.WithMetricsMode(*isMetricsMode), ctriface.WithLazyMode(*isLazyMode), + ctriface.WithFullLocal(*isFullLocal), ) funcPool = NewFuncPool(*isSaveMemory, *servedThreshold, *pinnedFuncNum, testModeOn) @@ -271,7 +277,7 @@ func (s *server) StopSingleVM(ctx context.Context, in *pb.StopSingleVMReq) (*pb. 
// Note: this function is to be used only before tearing down the whole orchestrator func (s *server) StopVMs(ctx context.Context, in *pb.StopVMsReq) (*pb.Status, error) { log.Info("Received StopVMs") - err := orch.StopActiveVMs(*isFullLocal) + err := orch.StopActiveVMs() if err != nil { log.Printf("Failed to stop VMs, err: %v\n", err) return &pb.Status{Message: "Failed to stop VMs"}, err diff --git a/vhive_test.go b/vhive_test.go index d1c74c9c5..83169dd3c 100644 --- a/vhive_test.go +++ b/vhive_test.go @@ -46,7 +46,6 @@ var ( isSnapshotsEnabledTest = flag.Bool("snapshotsTest", false, "Use VM snapshots when adding function instances") isMetricsModeTest = flag.Bool("metricsTest", false, "Calculate UPF metrics") isLazyModeTest = flag.Bool("lazyTest", false, "Enable lazy serving mode when UPFs are enabled") - isFullLocalTest = flag.Bool("fullLocalTest", false, "Enable full local snapshots") isWithCache = flag.Bool("withCache", false, "Do not drop the cache before measurements") benchDir = flag.String("benchDirTest", "bench_results", "Directory where stats should be saved") ) @@ -89,7 +88,7 @@ func TestMain(m *testing.M) { ret := m.Run() - err := orch.StopActiveVMs(*isFullLocalTest) + err := orch.StopActiveVMs() if err != nil { log.Printf("Failed to stop VMs, err: %v\n", err) } From 862a3a4665ee06c069a5dd1f60b1ff379819a7ba Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Mon, 28 Mar 2022 22:00:48 +0100 Subject: [PATCH 12/15] Add thinpool detection if not specified Signed-off-by: Amory Hoste --- .github/workflows/cri_test.yml | 5 +++++ .github/workflows/nightly_tests.yml | 12 +++++++++- .github/workflows/unit_tests.yml | 12 +++++++++- bin/containerd-shim-aws-firecracker | 4 ++-- bin/firecracker | 2 +- bin/firecracker-containerd | 4 ++-- bin/firecracker-ctr | 4 ++-- bin/jailer | 2 +- cri/firecracker/coordinator.go | 5 ++++- ctriface/bench_test.go | 2 +- ctriface/failing_test.go | 2 +- ctriface/iface.go | 29 ++++++++++++++++++++++-- ctriface/iface_test.go | 10 ++++----- 
ctriface/manual_cleanup_test.go | 10 ++++----- devmapper/devicemapper.go | 34 ++++++++++++++++++++++++----- devmapper/devicemapper_test.go | 9 ++++---- networking/networkconfig.go | 8 +++---- networking/networking.go | 2 +- vhive_test.go | 2 +- 19 files changed, 117 insertions(+), 41 deletions(-) diff --git a/.github/workflows/cri_test.yml b/.github/workflows/cri_test.yml index 3ca6da86d..43030d2cd 100644 --- a/.github/workflows/cri_test.yml +++ b/.github/workflows/cri_test.yml @@ -54,6 +54,11 @@ jobs: - name: Setup firecracker-containerd run: ./scripts/setup_firecracker_containerd.sh + - name: Add rsync # TODO: add dependencies to vHive + run: | + sudo apt update + sudo apt install rsync -y + - name: Build run: go build diff --git a/.github/workflows/nightly_tests.yml b/.github/workflows/nightly_tests.yml index f51d33788..a5b841257 100644 --- a/.github/workflows/nightly_tests.yml +++ b/.github/workflows/nightly_tests.yml @@ -31,6 +31,11 @@ jobs: sudo apt update sudo apt install git -y + - name: Add rsync # TODO: add dependencies to vHive + run: | + sudo apt update + sudo apt install rsync -y + - name: Check out code into the Go module directory uses: actions/checkout@v3 with: @@ -55,7 +60,7 @@ jobs: strategy: fail-fast: false matrix: - vhive_args: ["-dbg", "-dbg -snapshots", "-dbg -snapshots -upf"] + vhive_args: ["-dbg", "-dbg -snapshots", "-dbg -snapshots -upf", "-dbg -snapshots -fulllocal"] env: GITHUB_RUN_ID: ${{ github.run_id }} GITHUB_JOB: ${{ github.job }} @@ -83,6 +88,11 @@ jobs: - name: Setup firecracker-containerd run: ./scripts/setup_firecracker_containerd.sh + - name: Add rsync # TODO: add dependencies to vHive + run: | + sudo apt update + sudo apt install rsync -y + - name: Build run: go build diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index c20f61fda..37ca72c43 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -26,7 +26,7 @@ jobs: strategy: fail-fast: false matrix: - module: 
[taps, misc, profile] + module: [taps, misc, profile, devmapper, networking, snapshotting, ctriface/image] steps: - name: Set up Go 1.18 @@ -48,6 +48,11 @@ jobs: - name: Install PMU tools run: ./scripts/install_pmutools.sh + - name: Add rsync # TODO: add dependencies to vHive + run: | + sudo apt update + sudo apt install rsync -y + - name: Build run: go build -race -v -a ./... @@ -84,6 +89,11 @@ jobs: - name: Pull binaries run: ./scripts/setup_firecracker_containerd.sh + - name: Add rsync # TODO: add dependencies to vHive + run: | + sudo apt update + sudo apt install rsync -y + - name: Build run: go build -race -v -a ./... diff --git a/bin/containerd-shim-aws-firecracker b/bin/containerd-shim-aws-firecracker index 1db7544cb..5571e7241 100755 --- a/bin/containerd-shim-aws-firecracker +++ b/bin/containerd-shim-aws-firecracker @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3d8525300d6ce747c63847f0f688d56d61be927648f19a86abee2e8f1e9e0e4 -size 26534379 +oid sha256:2621670b622850e209a25b189c7f7f6f9b4cc3b2e161f28a1f98072e70452cc8 +size 23910923 diff --git a/bin/firecracker b/bin/firecracker index 6fbf61872..03dd1fbde 100755 --- a/bin/firecracker +++ b/bin/firecracker @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d42ddb2c3d970d6a234e0d3f92980e085fc04a9ae17e29e05bb4ca73debfe0b8 +oid sha256:59c71f6e860d67c87f3d76cedebd0b72cb0dda3918dbae8a1f9a6cc7d57672dd size 4016240 diff --git a/bin/firecracker-containerd b/bin/firecracker-containerd index 138deccc3..7313242b4 100755 --- a/bin/firecracker-containerd +++ b/bin/firecracker-containerd @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:17da34088e3c544328545e39037110bb1ab2c09543e9f25614d5425ea90793ad -size 47224352 +oid sha256:b46f59950e1c760c243a1aa1ce52c017a06fc847cb6fa4b9ac81ebe535118856 +size 43720504 diff --git a/bin/firecracker-ctr b/bin/firecracker-ctr index 73417e8d8..70429dd03 100755 --- a/bin/firecracker-ctr +++ b/bin/firecracker-ctr @@ -1,3 +1,3 @@ 
version https://git-lfs.github.com/spec/v1 -oid sha256:d61c35b77178fbabc4c996f4e2411f565b6974ef40773b68d465db17e725bb99 -size 34510472 +oid sha256:78efef1c7049ca2735e29f9cba3782ea4fdfd6433fa9805076818f03150f01eb +size 32104096 diff --git a/bin/jailer b/bin/jailer index 2422b132c..9b460f21d 100755 --- a/bin/jailer +++ b/bin/jailer @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80284269eb8b44483b84a527bf7f4012932d94410e32bd20e4d25d09303336ea +oid sha256:28614cb54e9b0a959822da0018c81d7de000c4cd2253de026e1c4b4689f3d236 size 3072784 diff --git a/cri/firecracker/coordinator.go b/cri/firecracker/coordinator.go index c6f3104b4..3b320bece 100644 --- a/cri/firecracker/coordinator.go +++ b/cri/firecracker/coordinator.go @@ -316,7 +316,10 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, funcInst *FuncInst // TODO: StopVM does not work for fullLocal snapshots without resuming. Might be the same for offloaded since // those are never stopped if c.isFullLocal { - _, err = c.orch.ResumeVM(ctx, funcInst.vmID) + if _, err := c.orch.ResumeVM(ctx, funcInst.vmID); err != nil { + funcInst.logger.WithError(err).Error("failed to resume VM") + return err + } } if err := c.snapshotManager.CommitSnapshot(id); err != nil { diff --git a/ctriface/bench_test.go b/ctriface/bench_test.go index 131dc66b2..e7b513eb4 100644 --- a/ctriface/bench_test.go +++ b/ctriface/bench_test.go @@ -59,7 +59,7 @@ func TestBenchmarkStart(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), diff --git a/ctriface/failing_test.go b/ctriface/failing_test.go index 5e4904d64..c9ec42a69 100644 --- a/ctriface/failing_test.go +++ b/ctriface/failing_test.go @@ -55,7 +55,7 @@ func TestStartSnapStop(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), diff --git a/ctriface/iface.go b/ctriface/iface.go index 752bfdd48..00780112c 100644 --- a/ctriface/iface.go +++ 
b/ctriface/iface.go @@ -26,8 +26,10 @@ import ( "context" "github.com/ease-lab/vhive/devmapper" "github.com/ease-lab/vhive/snapshotting" + "io/ioutil" "os" "os/exec" + "path/filepath" "strings" "sync" "syscall" @@ -377,9 +379,9 @@ func (o *Orchestrator) StopActiveVMs() error { log.Info("waiting done") log.Info("Closing fcClient") - o.fcClient.Close() + defer func() { _ = o.fcClient.Close() }() log.Info("Closing containerd client") - o.client.Close() + defer func() { _ = o.client.Close() }() return nil } @@ -579,6 +581,29 @@ func (o *Orchestrator) LoadSnapshot( if _, loadErr = o.fcClient.LoadSnapshot(ctx, req); loadErr != nil { logger.Error("Failed to load snapshot of the VM: ", loadErr) + logger.Errorf("snapFilePath: %s, memFilePath: %s, newSnapshotPath: %s", snapFilePath, memFilePath, containerSnap.GetDevicePath()) + files, err := ioutil.ReadDir(filepath.Dir(snapFilePath)) + if err != nil { + logger.Error(err) + } + + snapFiles := "" + for _, f := range files { + snapFiles += f.Name() + ", " + } + + logger.Error(snapFiles) + + files, _ = ioutil.ReadDir(filepath.Dir(containerSnap.GetDevicePath())) + if err != nil { + logger.Error(err) + } + + snapFiles = "" + for _, f := range files { + snapFiles += f.Name() + ", " + } + logger.Error(snapFiles) } }() diff --git a/ctriface/iface_test.go b/ctriface/iface_test.go index 139d48d8c..7df7f4a28 100644 --- a/ctriface/iface_test.go +++ b/ctriface/iface_test.go @@ -67,7 +67,7 @@ func TestPauseSnapResume(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), @@ -121,7 +121,7 @@ func TestStartStopSerial(t *testing.T) { orch := NewOrchestrator( "devmapper", - "fc-dev-thinpool", + "", "", "", 10, @@ -158,7 +158,7 @@ func TestPauseResumeSerial(t *testing.T) { orch := NewOrchestrator( "devmapper", - "fc-dev-thinpool", + "", "", "", 10, @@ -203,7 +203,7 @@ func TestStartStopParallel(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, 
WithTestModeOn(true), @@ -265,7 +265,7 @@ func TestPauseResumeParallel(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), diff --git a/ctriface/manual_cleanup_test.go b/ctriface/manual_cleanup_test.go index 3038f108d..8a8ebc784 100644 --- a/ctriface/manual_cleanup_test.go +++ b/ctriface/manual_cleanup_test.go @@ -57,7 +57,7 @@ func TestSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 1, WithTestModeOn(true), @@ -81,7 +81,7 @@ func TestSnapLoad(t *testing.T) { snap := snapshotting.NewSnapshot(snapId, "/fccd/snapshots", TestImageName, 256, 1, *isSparseSnaps) if *isFullLocal { - err = snap.CreateSnapDir() + _ = snap.CreateSnapDir() } err = orch.CreateSnapshot(ctx, vmID, snap) @@ -129,7 +129,7 @@ func TestSnapLoadMultiple(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), @@ -220,7 +220,7 @@ func TestParallelSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), @@ -309,7 +309,7 @@ func TestParallelPhasedSnapLoad(t *testing.T) { orch := NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, WithTestModeOn(true), diff --git a/devmapper/devicemapper.go b/devmapper/devicemapper.go index cd5e7980d..30fbdc1e7 100644 --- a/devmapper/devicemapper.go +++ b/devmapper/devicemapper.go @@ -23,6 +23,7 @@ package devmapper import ( + "bytes" "context" "fmt" "github.com/containerd/containerd" @@ -31,6 +32,8 @@ import ( "github.com/ease-lab/vhive/devmapper/thindelta" "github.com/opencontainers/image-spec/identity" "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "io/ioutil" "os" "os/exec" "path/filepath" @@ -38,11 +41,25 @@ import ( "sync" ) -// Own managed snapshots in snapmanager and in containerd identified by snapkey. Has name deviceName. -// snapshotId used internally by containerd, needed for thin_delta. 
Once committed, snapshots identified -// by snapName within containerd? +const defaultPoolName = "fc-dev-thinpool" -// Only remove snapshots created through createSnapshot. Use key used there to delete them. +// getHostIfaceName returns the default host network interface name. +func getThinPoolName() string { + b, err := ioutil.ReadFile("/proc/1/cpuset") + if err != nil { + log.Warnf("Failed to fetch thin-pool name. Falling back to default %v\n", err) + return defaultPoolName + } + + containerId := filepath.Base(strings.TrimSuffix(string(b), "\n")) + + // Check if running inside container (containerID should be 64 characters) + if len(containerId) != 64 { + return defaultPoolName + } + + return containerId + "_thinpool" +} // DeviceMapper creates and manages device snapshots used to store container images. type DeviceMapper struct { @@ -61,6 +78,9 @@ type DeviceMapper struct { func NewDeviceMapper(client *containerd.Client, poolName, metadataDev string) *DeviceMapper { devMapper := new(DeviceMapper) devMapper.poolName = poolName + if devMapper.poolName == "" { + devMapper.poolName = getThinPoolName() + } devMapper.thinDelta = thindelta.NewThinDelta(poolName, metadataDev) devMapper.snapDevices = make(map[string]*DeviceSnapshot) devMapper.snapshotService = client.SnapshotService("devmapper") @@ -262,10 +282,14 @@ func (dmpr *DeviceMapper) CreatePatch(ctx context.Context, patchPath, containerS // writes the differences to the supplied patchPath. 
func extractPatch(imageMountPath, containerMountPath, patchPath string) error { patchArg := fmt.Sprintf("--only-write-batch=%s", patchPath) + + var errb bytes.Buffer cmd := exec.Command("sudo", "rsync", "-ar", patchArg, addTrailingSlash(imageMountPath), addTrailingSlash(containerMountPath)) + cmd.Stderr = &errb err := cmd.Run() + if err != nil { - return errors.Wrapf(err, "creating patch between %s and %s at %s", imageMountPath, containerMountPath, patchPath) + return errors.Wrapf(err, "creating patch between %s and %s at %s: %s", imageMountPath, containerMountPath, patchPath, errb.String()) } err = os.Remove(patchPath + ".sh") // Remove unnecessary script output diff --git a/devmapper/devicemapper_test.go b/devmapper/devicemapper_test.go index 96b684131..d8f693a03 100644 --- a/devmapper/devicemapper_test.go +++ b/devmapper/devicemapper_test.go @@ -40,7 +40,6 @@ import ( const ( containerdAddress = "/run/firecracker-containerd/containerd.sock" - poolName = "fc-dev-thinpool" NamespaceName = "firecracker-containerd" TestImageName = "ghcr.io/ease-lab/helloworld:var_workload" ) @@ -98,14 +97,14 @@ func TestDevmapper(t *testing.T) { // Create containerd client client, err := containerd.New(containerdAddress) - defer client.Close() + defer func() { _ = client.Close() }() require.NoError(t, err, "Containerd client creation returned error") // Create image manager mgr := image.NewImageManager(client, "devmapper") // Create devmapper - dmpr := devmapper.NewDeviceMapper(client, poolName, "") + dmpr := devmapper.NewDeviceMapper(client, "", "") testDevmapper(t, mgr, dmpr, snapKey, TestImageName) } @@ -113,14 +112,14 @@ func TestDevmapper(t *testing.T) { func TestDevmapperConcurrent(t *testing.T) { // Create containerd client client, err := containerd.New(containerdAddress) - defer client.Close() + defer func() { _ = client.Close() }() require.NoError(t, err, "Containerd client creation returned error") // Create image manager mgr := image.NewImageManager(client, "devmapper") 
// Create devmapper - dmpr := devmapper.NewDeviceMapper(client, poolName, "") + dmpr := devmapper.NewDeviceMapper(client, "", "") // Test concurrent devmapper var wg sync.WaitGroup diff --git a/networking/networkconfig.go b/networking/networkconfig.go index c0305d6d0..1e093d89a 100644 --- a/networking/networkconfig.go +++ b/networking/networkconfig.go @@ -134,7 +134,7 @@ func (cfg *NetworkConfig) createVmNetwork(hostNsHandle netns.NsHandle) error { log.Println(err) return err } - defer vmNsHandle.Close() + defer func() { _ = vmNsHandle.Close() }() // A.2. Create tap device for uVM if err := createTap(cfg.containerTap, cfg.gatewayCIDR, cfg.getNamespaceName()); err != nil { @@ -195,7 +195,7 @@ func (cfg *NetworkConfig) CreateNetwork() error { // 2. Get host network namespace hostNsHandle, err := netns.Get() - defer hostNsHandle.Close() + defer func() { _ = hostNsHandle.Close() }() if err != nil { log.Printf("Failed to get host ns, %s\n", err) return err @@ -240,7 +240,7 @@ func (cfg *NetworkConfig) RemoveNetwork() error { runtime.LockOSThread() hostNsHandle, err := netns.Get() - defer hostNsHandle.Close() + defer func() { _ = hostNsHandle.Close() }() if err != nil { log.Printf("Failed to get host ns, %s\n", err) return err @@ -248,7 +248,7 @@ func (cfg *NetworkConfig) RemoveNetwork() error { // Get uVM namespace handle vmNsHandle, err := netns.GetFromName(cfg.getNamespaceName()) - defer vmNsHandle.Close() + defer func() { _ = vmNsHandle.Close() }() if err != nil { return err } diff --git a/networking/networking.go b/networking/networking.go index 23c72ca5d..f5dd69194 100644 --- a/networking/networking.go +++ b/networking/networking.go @@ -98,7 +98,7 @@ func deleteTap(tapName string) error { // createVethPair creates a virtual ethernet pair connecting the supplied namespaces func createVethPair(veth0Name, veth1Name string, veth0NsHandle, veth1NsHandle netns.NsHandle) error { - veth := &netlink.Veth{netlink.LinkAttrs{Name: veth0Name, Namespace: 
netlink.NsFd(veth0NsHandle), TxQLen: 1000}, veth1Name, nil, netlink.NsFd(veth1NsHandle)} + veth := &netlink.Veth{LinkAttrs: netlink.LinkAttrs{Name: veth0Name, Namespace: netlink.NsFd(veth0NsHandle), TxQLen: 1000}, PeerName: veth1Name, PeerNamespace: netlink.NsFd(veth1NsHandle)} if err := netlink.LinkAdd(veth); err != nil { return errors.Wrapf(err, "creating veth pair") } diff --git a/vhive_test.go b/vhive_test.go index 83169dd3c..cb4880a50 100644 --- a/vhive_test.go +++ b/vhive_test.go @@ -76,7 +76,7 @@ func TestMain(m *testing.M) { orch = ctriface.NewOrchestrator( "devmapper", "", - "fc-dev-thinpool", + "", "", 10, ctriface.WithTestModeOn(true), From 6c6f752d31443923f4df08bf6824a4c61591f6f2 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Mon, 25 Apr 2022 20:34:01 +0100 Subject: [PATCH 13/15] run image manager and devmapper tests on containerd runner Signed-off-by: Amory Hoste --- .github/workflows/unit_tests.yml | 78 ++++++++++++++++++++++++++++---- ctriface/image/manager_test.go | 2 +- devmapper/devicemapper_test.go | 2 +- 3 files changed, 72 insertions(+), 10 deletions(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 37ca72c43..cfbe05bd6 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -26,14 +26,14 @@ jobs: strategy: fail-fast: false matrix: - module: [taps, misc, profile, devmapper, networking, snapshotting, ctriface/image] + module: [taps, misc, profile, networking, snapshotting ] steps: - name: Set up Go 1.18 uses: actions/setup-go@v3 with: go-version: 1.18 - + - name: Set up Python 3.x uses: actions/setup-python@v3 with: @@ -48,7 +48,7 @@ jobs: - name: Install PMU tools run: ./scripts/install_pmutools.sh - - name: Add rsync # TODO: add dependencies to vHive + - name: Add rsync run: | sudo apt update sudo apt install rsync -y @@ -64,12 +64,12 @@ jobs: run: | make -C $MODULE test make -C $MODULE test-man - + firecracker-containerd-interface-test: name: "Unit tests: 
Firecracker-containerd interface" runs-on: [self-hosted, integ] steps: - + - name: Set up Go 1.18 uses: actions/setup-go@v3 with: @@ -80,16 +80,16 @@ jobs: sudo add-apt-repository ppa:git-core/ppa -y sudo apt update sudo apt install git -y - + - name: Check out code into the Go module directory uses: actions/checkout@v3 with: lfs: true - + - name: Pull binaries run: ./scripts/setup_firecracker_containerd.sh - - name: Add rsync # TODO: add dependencies to vHive + - name: Add rsync run: | sudo apt update sudo apt install rsync -y @@ -105,3 +105,65 @@ jobs: - name: Cleaning if: ${{ always() }} run: ./scripts/clean_fcctr.sh + + containerd-unit-test: + name: Containerd dependent unit test + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + module: [ devmapper, ctriface/image ] + steps: + + - name: Set up Go 1.18 + uses: actions/setup-go@v3 + with: + go-version: 1.18 + + - name: Set up Python 3.x + uses: actions/setup-python@v3 + with: + python-version: '3.x' + + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + + - name: Setup System + run: ./scripts/setup_system.sh + + - name: Install PMU tools + run: ./scripts/install_pmutools.sh + + - name: Add rsync # TODO: add dependencies to vHive + run: | + sudo apt update + sudo apt install rsync -y + + - name: Setup firecracker containerd + run: ./scripts/setup_firecracker_containerd.sh + + - name: Setup containerd + run: | + wget --continue --quiet https://github.com/containerd/containerd/releases/download/v1.5.7/containerd-1.5.7-linux-amd64.tar.gz + sudo tar -C /usr/local -xzf containerd-1.5.7-linux-amd64.tar.gz + + - name: Create devmapper + run: ./scripts/create_devmapper.sh + + - name: Run containerd + run: sudo containerd & + + - name: Run firecracker-containerd + run: sudo /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml & + + - name: Build + run: go build -race -v -a ./... 
+ + - name: Run tests in submodules + env: + MODULE: ${{ matrix.module }} + AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }} + AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }} + run: | + make -C $MODULE test + make -C $MODULE test-man \ No newline at end of file diff --git a/ctriface/image/manager_test.go b/ctriface/image/manager_test.go index 13ea71ac7..4b8bbe288 100644 --- a/ctriface/image/manager_test.go +++ b/ctriface/image/manager_test.go @@ -40,7 +40,7 @@ import ( const ( TestImageName = "ghcr.io/ease-lab/helloworld:var_workload" containerdAddress = "/run/firecracker-containerd/containerd.sock" - NamespaceName = "firecracker-containerd" + NamespaceName = "containerd" ) func getAllImages() map[string]string { diff --git a/devmapper/devicemapper_test.go b/devmapper/devicemapper_test.go index d8f693a03..97b6d11c5 100644 --- a/devmapper/devicemapper_test.go +++ b/devmapper/devicemapper_test.go @@ -40,7 +40,7 @@ import ( const ( containerdAddress = "/run/firecracker-containerd/containerd.sock" - NamespaceName = "firecracker-containerd" + NamespaceName = "containerd" TestImageName = "ghcr.io/ease-lab/helloworld:var_workload" ) From 7bde4094ed32d99debfc3080a34077faf4134eea Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Mon, 16 May 2022 22:17:19 +0100 Subject: [PATCH 14/15] Add docs on fulllocal snapshots Signed-off-by: Amory Hoste --- docs/fulllocal_snapshots.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 docs/fulllocal_snapshots.md diff --git a/docs/fulllocal_snapshots.md b/docs/fulllocal_snapshots.md new file mode 100644 index 000000000..cc98435e8 --- /dev/null +++ b/docs/fulllocal_snapshots.md @@ -0,0 +1,7 @@ +# vHive fulllocal snapshots guide + +The default snapshots in vHive use an offloading based technique that leaves the shim and other resources running upon shutting down a VM such that it can be re-used in the future. 
This technique has the advantage that a shim does not have to be recreated and the block and network devices of the previously stopped VM can be reused. This approach does however limit the amount of VMs that can be booted from a snapshot to the amount of VMs that have been offloaded. An alternative approach is to allow loading an arbitrary amount of VMs from a single snapshot by creating a new shim, block and network devices upon loading a snapshot. This functionality can be enabled by running vHive using the `-snapshots -fulllocal` flags. Additionally, the following flags can be used to further configure the fullLocal snapshots + +* `-isSparseSnaps`: store the memory file as a sparse file to make the storage size closer to the actual memory utilized by the VM, rather than the memory allocated to the VM +* `-snapsStorageSize [capacityGiB]`: specify the amount of capacity that can be used to store snapshots +* `-netPoolSize [capacity]`: keep around a pool of [capacity] network devices that can be used by VMs to keep network creation off the cold start path From c990d065e951a0d0ebea55d7d5fc7eb9d588d1b8 Mon Sep 17 00:00:00 2001 From: Amory Hoste Date: Mon, 6 Jun 2022 21:08:53 +0100 Subject: [PATCH 15/15] Add docs on full local snapshots Signed-off-by: Amory Hoste --- CHANGELOG.md | 1 + configs/.wordlist.txt | 7 +++++ docs/developers_guide.md | 4 ++- docs/fulllocal_snapshots.md | 61 ++++++++++++++++++++++++++++++++++--- docs/quickstart_guide.md | 2 ++ 5 files changed, 69 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d6f56ab2e..af264f15c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## [Unreleased] ### Added +- Add support for [fullLocal snapshots](docs/fulllocal_snapshots.md) mode ### Changed diff --git a/configs/.wordlist.txt b/configs/.wordlist.txt index 85bd30453..acc04c0ee 100644 --- a/configs/.wordlist.txt +++ b/configs/.wordlist.txt @@ -107,7 +107,9 @@ DCNs De debian deployer +deterministically dev +devicemapper 
devmapper df DialGRPCWithUnaryInterceptor @@ -285,6 +287,7 @@ microarchitectural Microarchitecture microbenchmark microbenchmarks +microVM microVMs minio MinIO @@ -395,11 +398,13 @@ rebasing repo Repos roadmap +rootfs RPC rperf RPerf RPERF rsquo +rsync runc runtime runtimes @@ -432,6 +437,7 @@ SinkBinding SinkBindings sms SMT +snapshotted snapshotting SoC SOCACHE @@ -461,6 +467,7 @@ TestProfileIncrementConfiguration TestProfileSingleConfiguration TextFormatter th +thinpool Timeseries timeseriesdb TimeseriesDB diff --git a/docs/developers_guide.md b/docs/developers_guide.md index fa5b07222..76672b1bb 100644 --- a/docs/developers_guide.md +++ b/docs/developers_guide.md @@ -108,7 +108,7 @@ We also offer self-hosted stock-Knative environments powered by KinD. To be able * vHive supports both the baseline Firecracker snapshots and our advanced Record-and-Prefetch (REAP) snapshots. - + * vHive integrates with Kubernetes and Knative via its built-in CRI support. Currently, only Knative Serving is supported. @@ -116,6 +116,8 @@ Currently, only Knative Serving is supported. * vHive supports arbitrary functions deployed with OCI (Docker images). +* Remote snapshot restore functionality can be integrated through the [full local snapshot functionality](./fulllocal_snapshots.md). + * vHive has robust Continuous-Integration and our team is committed to deliver high-quality code. diff --git a/docs/fulllocal_snapshots.md b/docs/fulllocal_snapshots.md index cc98435e8..b483f2284 100644 --- a/docs/fulllocal_snapshots.md +++ b/docs/fulllocal_snapshots.md @@ -1,7 +1,58 @@ -# vHive fulllocal snapshots guide +# vHive full local snapshots -The default snapshots in vHive use an offloading based technique that leaves the shim and other resources running upon shutting down a VM such that it can be re-used in the future. This technique has the advantage that a shim does not have to be recreated and the block and network devices of the previously stopped VM can be reused. 
This approach does however limit the amount of VMs that can be booted from a snapshot to the amount of VMs that have been offloaded. An alternative approach is to allow loading an arbitrary amount of VMs from a single snapshot by creating a new shim, block and network devices upon loading a snapshot. This functionality can be enabled by running vHive using the `-snapshots -fulllocal` flags. Additionally, the following flags can be used to further configure the fullLocal snapshots +When using Firecracker as the sandbox technology in vHive, two snapshotting modes are supported: a default mode and a +full local mode. The default snapshot mode use an offloading based technique which leaves the shim and other resources +running upon shutting down a microVM such that it can be re-used in the future. This technique has the advantage that +the shim does not have to be recreated and the block and network devices of the previously stopped microVM can be +reused, but limits the amount of microVMs that can be booted from a snapshot to the amount of microVMs that have been +offloaded. The full local snapshot mode instead allows loading an arbitrary amount of microVMs from a single snapshot. +This is done by creating a new shim and the required block and network devices upon loading a snapshot and creating an +extra patch file containing the filesystem differences written by the microVM upon snapshot creation. To enable the +full local snapshot functionality, vHive must be run with the `-snapshots` and `-fulllocal` flags. 
In addition, the +full local snapshot mode can be further configured using the following flags: -* `-isSparseSnaps`: store the memory file as a sparse file to make the storage size closer to the actual memory utilized by the VM, rather than the memory allocated to the VM -* `-snapsStorageSize [capacityGiB]`: specify the amount of capacity that can be used to store snapshots -* `-netPoolSize [capacity]`: keep around a pool of [capacity] network devices that can be used by VMs to keep network creation off the cold start path +- `isSparseSnaps`: store the memory file as a sparse file to make its storage size closer to the actual size of the memory utilized by the microVM, rather than the memory allocated to the microVM +- `snapsStorageSize [capacityGiB]`: specify the amount of capacity that can be used to store snapshots +- `netPoolSize [capacity]`: the amount of network devices in the network pool, which can be used by microVMs to keep the network initialization off the cold start path + +## Remote snapshots + +Rather than only using the snapshots available locally on a node, snapshots can also be transferred between nodes to +potentially accelerate cold start times and reduce memory utilization, given that proper mechanisms are in place to +minimize the snapshot network transfer latency. This could be done by storing snapshots in a global storage solution +such as S3, or directly distributing snapshots between compute nodes. The full local snapshot functionality in vHive +can be used to implement such functionality. To implement this, the container image used by the snapshotted microVM +must be available on the local node where the remote snapshot will be restored. This container image can be used in +combination with the filesystem changes stored in the snapshot patch file to create a device mapper snapshot that +contains the root filesystem needed by the restored microVM. 
After recreating the root filesystem block device, the +microVM can be created from the fetched memory file and microVM state similarly to how this is done for the full local +snapshots. + +## Incompatibilities and limitations + +### Snapshot filesystem changes capture and restoration + +Currently, the filesystem changes are captured in a “patch file”, which is created by mounting both the original +container image and the microVM block device and extracting the changes between both using rsync. Even though rsync +uses some optimisations such as using timestamps and file sizes to limit the amount of reads, this procedure is quite +inefficient and could be sped up by directly extracting the changed block offsets from the thinpool metadata device +and directly reading these blocks from the microVM rootfs block device. These extracted blocks could then be written +back at the correct offsets on top of the base image block device to create a root filesystem for the to be restored +microVM. Support for this alternative approach is provided through the `ForkContainerSnap` and `CreateDeviceSnapshot` +functions. However, for this approach to work across nodes for remote snapshots, support to [deterministically flatten a container image into a filesystem](https://www.youtube.com/watch?v=A-7j0QlGwFk) +would be required to ensure the block devices of identical images pulled to different nodes are bit identical. +In addition, further optimizations would be necessary to more efficiently extract filesystem changes from the thinpool +metadata device rather than current method, which relies on the devicemapper `reserve_metadata_snap` method to create +a snapshot of the current metadata state in combination with `thin_delta` to extract changed blocks. + +### Performance limitations + +The full local snapshot mode requires a new block device and network device with the exact state of the snapshotted +microVM to be created before restoring the snapshot. 
The network namespace and devicemapper block device creation turn +out to be a bottleneck when concurrently restoring many snapshots. Approaches that reduce the impact of these operations +could further speedup the microVM snapshot restore latency at high load. + +### UPF snapshot compatibility + +The full local snapshot functionality is currently not integrated with the [Record-and-Prefetch (REAP)](papers/REAP_ASPLOS21.pdf) +accelerated snapshots and thus cannot be used in combination with the `-upf` flag. \ No newline at end of file diff --git a/docs/quickstart_guide.md b/docs/quickstart_guide.md index 028f8ca29..88a42dd65 100644 --- a/docs/quickstart_guide.md +++ b/docs/quickstart_guide.md @@ -130,6 +130,8 @@ SSD-equipped nodes are highly recommended. Full list of CloudLab nodes can be fo > By default, the microVMs are booted, `-snapshots` enables snapshots after the 2nd invocation of each function. > > If `-snapshots` and `-upf` are specified, the snapshots are accelerated with the Record-and-Prefetch (REAP) technique that we described in our ASPLOS'21 paper ([extended abstract][ext-abstract], [full paper](papers/REAP_ASPLOS21.pdf)). + > + > If `-snapshots` and `-fulllocal` are specified, a single snapshot can be used to restore many microVMs ([full local snapshots](./fulllocal_snapshots.md)). Note that this mode is currently not compatible with the REAP technique. ### 3. Configure Master Node **On the master node**, execute the following instructions below **as a non-root user with sudo rights** using **bash**: